configuration for detection

Tianxu Jia

Nov 21, 2012, 7:26:37 AM
to ebl...@googlegroups.com
Hi,

Below is my configuration for detection, but I can't run it successfully. Could you please help me?

Thanks in advance

Sunny

#########################################

# paths ########################################################################
name            = mnist
ebl             = ${HOME}/eblearn/          # eblearn root
root            = D:\work\EBLearn\eblearn-1.1\demos\mnist            # mnist data root #TJ
tblroot         = ${ebl}/tools/data/tables/ # location of table files

##########################################
run_type = detect #train # detect fprop  # TJ
##########################################

# network high level switches ##################################################
classifier_type = cf    # type of classifier, cf: 2-layer, c: 1-layer
classifier_hidden = 16  # number of hidden units in 2-layer classifier
features_type   = cscs  # type of features
nonlin          = tanh  # type of non-linearity
manual_load     = 0     # manually load weights for individual modules
norm            = 1     # use normalization layers
pool            = subs  # pooling type (l2pool is another option)

# features and classifiers #####################################################
arch            = ${pp},${arch_${run_type}} # run_type is set by the tool being run
arch_train      = ${features},${classifier}
arch_detect     = ${arch_train}
arch_fprop      = ${features}
arch_name       = ${arch_name_${run_type}}
arch_name_fprop = ${features_name}
arch_name_train = ${features},${classifier}
features        = ${features_${features_type}}
features_name   = ${features_type}
classifier      = ${classifier_${classifier_type}}
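# As a sanity check, with run_type = detect (and pool = subs, nonlin = tanh,
# norm = 1 as set above), the nested variables above should expand to:
#   arch = zpad0,conv0,addc0,tanh,abs0,wstd0,subs1,addc1,tanh,
#          conv2,addc2,tanh,abs2,wstd2,subs3,addc3,tanh,
#          conv5,addc5,tanh,linear7,addc7,tanh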

# energies & answers ###########################################################
trainer         = trainable_module1  # the trainer module
trainable_module1_energy = l2_energy # type of energy
answer          = class_answer # how to infer answers from network raw outputs

# normalization ################################################################
norm0_0         =       # no normalization
norm2_0         =       # no normalization
norm0_1         = wstd0 # contrast normalization
norm2_1         = wstd2 # contrast normalization

# architecture #################################################################

# features
features_cscs   = ${c0},${s1},${c2},${s3}

# classifiers
classifier_c    = ${c5}
classifier_cf   = ${c5},${f7}

# main branch layers
c0              = conv0,addc0,${nonlin},abs0,${norm0_${norm}}
s1              = ${pool}1,addc1,${nonlin}
c2              = conv2,addc2,${nonlin},abs2,${norm2_${norm}}
s3              = ${pool}3,addc3,${nonlin}
c5              = conv5,addc5,${nonlin}
f6              = linear6,addc6,${nonlin}
f7              = linear7,addc7,${nonlin}

# main branch parameters
inputh = 28 # input's height  #TJ
inputw = 28 # input's width #TJ
zpad0_dims      = 5x5 # padding for this kernel size
conv0_kernel    = 5x5 # convolution kernel sizes (hxw)
conv0_stride    = 1x1 # convolution strides  (hxw)
conv0_table     =     # convolution table (optional)
conv0_table_in  = 1   # conv input max, used if table file not defined
conv0_table_out = 6   # features max, used if table file not defined
conv0_weights   =     # manual loading of weights (optional)
addc0_weights   =     # manual loading of weights (optional)
wstd0_kernel    = ${conv0_kernel} # normalization kernel sizes (hxw)
subs1_kernel    = 2x2 # subsampling kernel sizes (hxw)
subs1_stride    = ${subs1_kernel} # subsampling strides (hxw)
l2pool1_kernel  = 2x2 # l2pool kernel sizes (hxw)
l2pool1_stride  = ${l2pool1_kernel} # l2pool strides (hxw)
addc1_weights   = # manual loading of weights (optional)
conv2_kernel    = 5x5 # convolution kernel sizes (hxw)
conv2_stride    = 1x1 # convolution strides (hxw)
#conv2_table     = ${tblroot}/table_6_16_connect_60.mat # conv table (optional)
conv2_table_in  = thickness # use current thickness as max table input
conv2_table_out = 16  # features max, used if table file not defined (was ${table1_max}) # TJ
conv2_weights   =     # manual loading of weights (optional)
addc2_weights   =     # manual loading of weights (optional)
wstd2_kernel    = ${conv2_kernel} # normalization kernel sizes (hxw)
subs3_kernel    = 2x2 # subsampling kernel sizes (hxw)
subs3_stride    = ${subs3_kernel} # subsampling strides (hxw)
l2pool3_kernel  = 2x2 # l2pool kernel sizes (hxw)
l2pool3_stride  = ${l2pool3_kernel} # l2pool strides (hxw)
addc3_weights   =     # manual loading of weights (optional)
linear5_in      = ${linear5_in_${net}} # linear module input features size
linear5_out     = noutputs # use number of classes as max table output
linear6_in      = thickness # linear module input features size
linear6_out     = ${classifier_hidden}
linear7_in      = thickness # use current thickness
linear7_out     = noutputs # use number of classes as max table output
conv5_kernel    = 5x5 # convolution kernel sizes (hxw)
conv5_stride    = 1x1 # convolution strides (hxw)
conv5_table_in  = thickness # use current thickness as max table input
conv5_table_out = 120 # features max, used if table file not defined
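# Quick spatial-size check at the 28x28 training size, assuming zpad0 pads
# 2 pixels on each side for the 5x5 kernels:
#   zpad0: 28x28 -> 32x32,  conv0 5x5 -> 28x28,  subs1 2x2 -> 14x14
#   conv2 5x5 -> 10x10,     subs3 2x2 -> 5x5,    conv5 5x5 -> 1x1
# so conv5's 120 feature maps reach linear7 as a single 1x1 location.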

# preprocessing ################################################################
pp              = ${pp_${run_type}}
pp_train        = zpad0
pp_fprop        = zpad0
pp_detect       = zpad0

# possible preprocessings
pp_y            = rgb_to_y0
pp_yuv          = rgb_to_yuv0
pp_yp           = rgb_to_yp0
pp_ypuv         = rgb_to_ypuv0
rgb_to_ypuv0_kernel = 7x7
resizepp0_pp    = rgb_to_ypuv0
resizepp0_zpad  = 2x2x2x2

# training #####################################################################
classification  = 1 # load datasets in classification mode, regression otherwise
train           = ${root}/train-images-idx3-ubyte # training data
train_labels    = ${root}/train-labels-idx1-ubyte # training labels
train_size      = 2000                            # limit number of samples
val             = ${root}/t10k-images-idx3-ubyte  # validation data
val_labels      = ${root}/t10k-labels-idx1-ubyte  # validation labels
val_size        = 1000                            # limit number of samples
#data_bias       = -128                            # bias applied before coeff
data_coeff      = .01                             # coeff applied after bias
add_features_dimension = 1 # samples are 28x28, make them 1x28x28
test_display_modulo = 0  # modulo at which to display test results

# hyper-parameters
eta             = .0001  # learning rate
reg             = 0      # regularization
reg_l1          = ${reg} # L1 regularization
reg_l2          = ${reg} # L2 regularization
reg_time        = 0      # time (in samples) after which to start regularizing
inertia         = 0.0    # gradient inertia
anneal_value    = 0.0    # learning rate decay value
anneal_period   = 0      # period (in samples) at which to decay learning rate
gradient_threshold = 0.0
iterations      = 5     # number of training iterations
ndiaghessian    = 100    # number of samples for 2nd derivatives estimation
epoch_mode      = 1      # 0: fixed number 1: show all at least once
#epoch_size = 4000    # number of training samples per epoch. comment to ignore.
epoch_show_modulo = 400  # print message every n training samples
sample_probabilities = 0 # use probabilities to pick samples
hardest_focus   = 1      # 0: focus on easiest samples 1: focus on hardest ones
ignore_correct  = 0      # If 1, do not train on correctly classified samples
min_sample_weight = 0    # minimum probability of each sample
per_class_norm  = 1      # normalize probability by class (1) or globally (0)
shuffle_passes  = 1      # shuffle samples between passes
balanced_training = 1    # show each class the same amount of samples or not
random_class_order = 0   # class order is randomized or not when balanced
no_training_test = 0     # do not test on training set if 1
no_testing_test = 0      # do not test on testing set if 1
max_testing     = 0      # limit testing to this number of samples
save_pickings   = 0      # save sample picking statistics
binary_target   = 0      # use only 1 output, -1 is negative, +1 positive
test_only       = 0      # if 1, just test the data and return
#save_weights    = 0      # if 0, do not save weights after each iteration  # TJ
keep_outputs    = 1      # keep all outputs in memory
training_precision = double #float
save_weights    = 1      # if 0, do not save weights when training

# training display #############################################################
display               = 1  # display results
show_conf             = 0  # show configuration variables or not
show_train            = 1  # enable/disable all training display
show_train_ninternals = 1  # number of internal examples to display
show_train_errors     = 0  # show worst errors on training set
show_train_correct    = 0  # show worst corrects on training set
show_val_errors       = 1  # show worst errors on validation set
show_val_correct      = 1  # show worst corrects on validation set
show_hsample          = 5  # number of samples to show on height axis
show_wsample          = 18 # number of samples to show on width axis
show_wait_user        = 0  # if 1, wait for user to close windows

# detection ####################################################################
#classes      = ${ebl}/demos/mnist/mnist_classes.mat # names and # of classes
classes      = D:\work\EBLearn\trunk\demos\mnist\mnist_classes.mat # names and # of classes  # TJ
scaling_type = 4           # 4: no multi-scaling, just original 28x28 image
input_gain   = ${data_coeff} # use same gain as in preprocessed training data
camera       = directory   # input images come from a directory
##############################################################
#input_dir    = ${ebl}/tools/data/mnist/ # directory for input images # TJ
input_dir    = D:\work\EBLearn\data\mnist\test28x28
#############################################################
next_on_key  = n   # with focus on display window, press "n" to process next
######################################
#weights      = mnist_net00010.mat # TJ
weights      = D:\work\EBLearn\eblearn-1.1\bin\_net00005.mat
######################################
################################################################################

Pierre Sermanet

Nov 21, 2012, 7:36:41 AM
to ebl...@googlegroups.com
What is your output?
And what is the output using detect_debug?
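For example (this is just how I'd invoke it; substitute your actual conf file name, and on Windows the binaries carry a .exe suffix):

  detect mnist.conf
  detect_debug mnist.conf

The debug build prints much more detail about each module, which should help locate where it fails.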



Tianxu Jia

Nov 21, 2012, 11:16:32 AM
to ebl...@googlegroups.com
Thanks Pierre, I added some lines as shown below and now it runs.

Regards

Sunny 

##########################
# paths #######################################################################

name=mnist
root = ${HOME}/mnist/
#root = /data/mnist/
tblroot = ${current_dir}/../../tools/data/tables/ # location of table files

# network high level switches ##################################################

run_type = train # detect fprop
classifier_type = cf_16
features_type = cscs

mirror = 0 # mirror instead of zero-padding (default)
nonlin = tanh # stdsig # non-linearity module
manual_load = 0 # manually load weights for individual modules
color = 0
norm = 1

features = ${features_${features_type}}
classifier = ${classifier_${net_type}}
arch = ${pp_${run_type}}${arch_${run_type}}
arch_name = ${arch_name_${run_type}}
arch_name_fprop = ${features_name}
arch_fprop = ${features}
arch_name_train = ${features},${classifier}
arch_train = ${features},${classifier} # global architecture
features_name = ${features_type}
net_type=`echo ${classifier_type} | cut -d_ -f1  | tr -d '\n'`
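# For example, with classifier_type = cf_16 the shell snippet above should
# produce net_type = cf; the same pattern extracts the hidden-units count
# further down (nhidden = 16):
#   echo cf_16 | cut -d_ -f1 | tr -d '\n'   # -> cf
#   echo cf_16 | cut -d_ -f2 | tr -d '\n'   # -> 16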

# energies & answers ###########################################################
trainer = trainable_module1
trainable_module1_energy = l2_energy
answer = class_answer

# normalization ################################################################
norm0_0 =
norm2_0 =
norm0_1 = wstd0
norm2_1 = wstd2

# architecture #################################################################

# features
features_cscs = zpad0,${c0},${s1},${c2},${s3}

# classifiers
classifier_c = ${c5}
classifier_cf = ${c5},${f7}

# main branch layers
c0 = conv0,addc0,${nonlin},abs0,${norm0_${norm}}
s1 = subs1,addc1,${nonlin}
c2 = conv2,addc2,${nonlin},abs2,${norm2_${norm}}
s3 = subs3,addc3,${nonlin}
c5 = conv5,addc5,${nonlin}
f6 = linear6,addc6,${nonlin}
f7 = linear7,addc7,${nonlin}

# main branch parameters
inputh = 28 # input's height
inputw = 28 # input's width
zpad0_dims = 5x5 # padding for this kernel size
conv0_kernel = 5x5 # convolution kernel sizes (hxw)
conv0_stride = 1x1 # convolution strides  (hxw)
conv0_table = # convolution table (optional)
conv0_table_in = 1 # conv input max, used if table file not defined
conv0_table_out = 6 # features max, used if table file not defined
conv0_weights = # manual loading of weights (optional)
addc0_weights = # manual loading of weights (optional) 
wstd0_kernel = ${conv0_kernel} # normalization kernel sizes (hxw)
subs1_kernel = 2x2 # subsampling kernel sizes (hxw)
subs1_stride = ${subs1_kernel} # subsampling strides (hxw)
addc1_weights = # manual loading of weights (optional) 
conv2_kernel = 5x5 # convolution kernel sizes (hxw)
conv2_stride = 1x1 # convolution strides (hxw)
conv2_table = ${tblroot}table_6_16_connect_60.mat # convolution table (optional)
conv2_table_in = thickness # use current thickness as max table input
conv2_table_out = ${table1_max} # features max, used if table file not defined
conv2_weights = # manual loading of weights (optional)
addc2_weights = # manual loading of weights (optional)
wstd2_kernel = ${conv2_kernel} # normalization kernel sizes (hxw)
subs3_kernel = 2x2 # subsampling kernel sizes (hxw)
subs3_stride = ${subs3_kernel} # subsampling strides (hxw)
addc3_weights = # manual loading of weights (optional)
linear5_in = ${linear5_in_${net}} #thickness # linear module input features size
linear5_out = noutputs # use number of classes as max table output
linear6_in = thickness # linear module input features size
nhidden = `echo ${classifier_type} | cut -d_ -f2  | tr -d '\n'`
linear6_out = ${nhidden}
linear7_in = thickness
linear7_out = noutputs # use number of classes as max table output
conv5_kernel = 5x5 # convolution kernel sizes (hxw)
conv5_stride = 1x1 # convolution strides (hxw)
conv5_table_in = thickness # use current thickness as max table input
conv5_table_out = 120 # features max, used if table file not defined

# preprocessing ################################################################
preprocessing = 1 # 0: none 1: contrast normalization (optional)
resize = mean # bilinear
normalization_size = 7x7 # 9

# preprocessing modules
pp_y = rgb_to_y0
pp_yuv = rgb_to_yuv0
pp_yp = rgb_to_yp0
pp_ypuv = rgb_to_ypuv0
rgb_to_ypuv0_kernel = 7x7
resizepp0_pp = rgb_to_ypuv0
#resizepp0_fovea = ${fovea}
resizepp0_zpad = 
pp_train = #rgb_to_ypuv0 #mschan0
pp_fprop = #rgb_to_ypuv0 #mschan0
pp_detect = resizepp0,

# training #####################################################################
classification = 1 # load datasets in classification mode, regression otherwise
train = ${root}/train-images-idx3-ubyte
train_labels = ${root}/train-labels-idx1-ubyte
train_size = 2000 # limit number of samples
val = ${root}/t10k-images-idx3-ubyte
val_labels = ${root}/t10k-labels-idx1-ubyte
val_size = 1000 # limit number of samples
#data_bias = -128 # bias applied before coeff
data_coeff = .01 # coeff applied after bias

eta = .0001 # double learning rate
#eta = .00001 # float learning rate
reg = 0 #.0001 .00005
reg_l1 = ${reg} #.0001 # L1 regularization
reg_l2 = ${reg} #.0001 # L2 regularization
reg_time = 0 # time (in samples) after which to start regularizing
inertia = 0.0 # gradient inertia
anneal_value = 0.0 # learning rate decay value
anneal_period = 0 # period (in samples) at which to decay learning rate
gradient_threshold = 0.0
iterations = 20 # number of training iterations
ndiaghessian = 100 #800 1200 # number of samples for 2nd derivatives estimation
epoch_mode = 1 # 0: fixed number 1: show all at least once
#epoch_size = 4000 # number of training samples per epoch. comment to ignore.
epoch_show_modulo = 400 # print message every n training samples
sample_probabilities = 0 # use probabilities to pick samples
hardest_focus = 1 # 0: focus on easiest samples 1: focus on hardest ones
ignore_correct = 0 # If 1, do not train on correctly classified samples
min_sample_weight = 0 #.1 .5 1 # minimum probability of each sample
per_class_norm = 1 # normalize probability by class (1) or globally (0)
shuffle_passes = 1 # shuffle samples between passes
balanced_training = 1 # show each class the same amount of samples or not
random_class_order = 0 #1 # class order is randomized or not when balanced
no_training_test = 0 # do not test on training set if 1
no_testing_test = 0 # do not test on testing set if 1
max_testing = 0 # limit testing to this number of samples
save_pickings = 0 # save sample picking statistics
binary_target = 0 # use only 1 output, -1 for negative examples, +1 for positive
test_only = 0 # if 1, just test the data and return
save_weights = 0 # if 0, do not save weights after each iteration
keep_outputs = 1
training_precision = double #float

# training display #############################################################
show_conf = 0
show_train = 1 # enable/disable all training display
show_train_ninternals = 1 # number of internal examples to display
show_train_errors = 0 # show worst errors on training set
show_train_correct = 0 # show worst corrects on training set
show_val_errors = 1 # show worst errors on validation set
show_val_correct = 1 # show worst corrects on validation set
show_hsample = 5 # number of samples to show on height axis
show_wsample = 18 # number of samples to show on width axis
show_wait_user = 0 # if 1, wait for user to close windows, otherwise close them.

# retraining ###################################################################
retrain = 0
retrain_weights = # ${root1}/${job_name_retraining}_net040.mat

# detection ####################################################################
net_min_height = ${inputh}
net_min_width = ${inputw}
nthreads = 1 # number of detection threads
threads_loading = 0 # threads load the data themselves
ipp_cores = 1 # number of cores used by IPP
weights = ${root2}${weights_file}
#classes = ${root2}${job_name}_classes.mat
#classes = ${root2}${train}_classes.mat
threshold = .1 # confidence detection threshold
gain = 1
input_height = -1 # use -1 to use original size
input_width = -1 # use -1 to use original size
input_min = 0 # minimum height or width for minimum scale
input_max = 1200 # maximum height or width for maximum scale
# multi-scaling type. 0: manually set each scale sizes, 1: manually set each
# scale step, 2: number of scales between min and max, 3: step factor between
# min and max, 4: 1 scale, the original image size.
scaling_type = 6
min_scale_pred = 1 # lower cap on scale prediction
max_scale_pred = 2 # higher cap on scale prediction
scaling = 1.09 # scaling ratio between scales
min_scale = 1 #.75 # min scale as factor of minimal network size
max_scale = 1.3 # max scale as factor of original resolution
input_random = 1 # randomize input list (only works for 'directory' camera).
input_npasses = 1 # passes on the input list (only works for 'directory' cam).
hzpad = .3 # vertical zero padding on each side as ratio of network's min input
wzpad = .3 # horizontal zero padding on each side as ratio of network's min input
#mem_optimization = 1
bbox_saving = 2 # 0: none 1: all styles 2: eblearn style 3: caltech style
max_object_hratio = 0 #13.5 # image's height / object's height, 0 to ignore
smoothing = 0 # smooth network outputs
background_name = bg # name of background class (optional)

# nms (non-maximum suppression) ################################################
cluster_nms = 0 # cluster similar boxes into 1
scaler_mode = 0 # use scale prediction for boxes
nms = 1 # 0: no pruning 1: ignore overlapping bb 2: pedestrian custom
bbox_file = #bbox.txt # ignore processing, feed pre-computed bboxes to nms
bbox_hfactor = .75 # height factor to apply to bounding boxes
bbox_wfactor = .5 # width factor to apply to bounding boxes
bbox_max_overlap = .6 # minimum ratio with smallest bbox to declare overlap
bbox_max_overlap2 = .6
bbox_woverh = .43 # bbox's width is adjusted to this ratio of its height
bbox_max_center_dist = 10 # max centers distance of 2 boxes to be merged
min_hcenter_dist = .3 # centers closer than this ratio over height cancel out
min_wcenter_dist = .1 # centers closer than this ratio over width cancel out

# detection display ############################################################
skip_frames = 0 # skip this number of frames before each processed frame
save_detections = 0 # output saving and display
save_max = 25000 # Exit when this number of objects have been saved
save_max_per_frame = 10 # Only save the first n objects per frame
save_video = 0 # save each classified frame and make a video out of it
save_video_fps = 5
use_original_fps = 0
display = 1
display_zoom = 1 # zooming
display_min = -1.7 # minimum data range to display (optional)
display_max = 1.7 # maximum data range to display (optional)
display_in_min = 0 # input image min display range (optional)
display_in_max = 255 # input image max display range (optional)
display_bb_transparency = .5 # bbox transp factor (modulated by confidence)
display_threads = 0 # each thread displays on its own
display_states = 0 # display internal states of 1 resolution
show_parts = 0 # show parts composing an object or not
silent = 0 # minimize outputs to be printed
sync_outputs = 1 # synchronize output between threads
minimal_display = 1 # only show classified input
display_sleep = 0 # sleep in milliseconds after displaying
ninternals = 1
# demo display variables
queue1 = 0
qstep1 = 1
qheight1 = 5
qwidth1 = 2
queue2 = 0
qstep2 = 50
qheight2 = 5
qwidth2 = 5
precamera = 0 # pre-camera (used before regular camera)
precamdir = ${root2}/

camera = directory # camera options: opencv shmem video directory
# specify a custom image search pattern (optional)
file_pattern = #".*[.](png|jpg|jpeg|PNG|JPG|JPEG|bmp|BMP|ppm|PPM|pnm|PNM|pgm|PGM|gif|GIF)"
# limit of input video duration in seconds, 0 means no limit
input_video_max_duration = 0 
# step between input frames in seconds, 0 means no step
input_video_sstep = 0

# evaluation ###################################################################
set = Train # Test
evaluate = 0
evaluate_cmd = "${visiongrader} ${visiongrader_params}"
visiongrader = ${HOME}/visiongrader/src/main.py
visiongrader_params = "${input_params} ${groundtruth_params} ${compare_params} ${curve_params} ${ignore} "
input_params = "--input bbox.txt --input_parser eblearn --sampling 50 "
#annotations = ${root}/../INRIAPerson/${set}/annotations/
annotations = /data/sermanet/ped/inria/small/${set}/annotations/
groundtruth_params = "--groundtruth ${annotations} --groundtruth_parser inria --gt_whratio .43 "
compare_params = "--comparator overlap50percent --comparator_param .5 "
curve_params = "--det --saving-file curve.pickle --show-no-curve "
#input_dir = /home/sermanet/${machine}data/ped/inria/INRIAPerson/${set}/pos
input_dir = /data/sermanet/ped/inria/small/${set}/pos/
input_list = crop001102.png,crop001614.png,crop001536.png,crop001616.png
ignore = "--ignore ${HOME}/visiongrader/datasets/pedestrians/inria/ignore/${set}/ "

detection_test = 0 # test detection during training
detection_test_nthreads = 1
detection_params = run_type=detect\nscaling=1.2\nthreshold=.1\nclasses=${root}/${train}_classes.mat
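# If I understand the tool correctly, the \n-separated string above is turned
# into extra conf lines for the test-detection threads, roughly equivalent to:
#   run_type = detect
#   scaling = 1.2
#   threshold = .1
#   classes = ${root}/${train}_classes.mat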

################################################################################
# metarun configuration
# Note: variables starting with "meta_" are reserved for meta configuration

meta_command = train # command to run
# optional meta variables ######################################################
meta_name = ${name} # name of this meta job
meta_max_jobs = 12 # maximum number of jobs to run at the same time
#meta_output_dir = ${root}/../out/ # directory where to write outputs
meta_output_dir = `pwd | tr -d '\n'` # directory where to write outputs
meta_gnuplot_params="set grid ytics;set ytics;set mytics;set grid mytics;set logscale y; set mxtics; set grid xtics; set pointsize 0.5; set key spacing .5;" # extra gnuplot parameters
meta_gnuplot_terminal="postscript"
#meta_gnuplot_font="Times=6" # font options
#meta_gnuplot_line="lw .1" # line options
# analyze each process' output or not. If 0, the meta_trainer will have no
# notion of iteration and will only send 1 report at the very end.
# If 1, meta_trainer will try to find iteration and other variable values
# in each process' output.
meta_analyze = 0
meta_send_email = 1 # emailing results or not
meta_email=${myemail} # email to use (use environment variable "myemail")
# iterations at which to send an email
meta_email_iters = 0,1,2,3,4,5,7,10,15,20,30,50,75,100,200
meta_email_period = 1 # send email with this freq (if email_iters not defined)
meta_watch_interval = 30 # interval sec to analyze outputs and check who's alive
# variables to minimize, process and iteration with lowest value will
# be used to report best weights, or start consequent training
meta_minimize = test_errors,errors,test_energy,energy,1FPPI,.01FPPI
meta_ignore_iter0 = 1 # do not take results for i = 0 into account
meta_sticky_vars = meta_conf_shortname,classifier_type,job,config,classes,eta,reg # vars to keep around at each iteration
meta_watch_vars = #job,1FPPI,.01FPPI # restrict variable watching to those
meta_nbest = 5 # number of best answers to show/send
meta_send_best = 0 # if 1, send the best answer files minimizing meta_minimize's value
meta_send_logs = 0 # send logs of all jobs or not
meta_no_conf_id = 0 # do not use conf ids for naming
meta_best_keycomb = classifier_type #eta,reg,arch,classifier # print best of each possible comb
meta_plot_keys = classifier_type,tenergy,i # plot all combs vs last one as x-axis
meta_timeout = 7200 # seconds after which the scheduler considers a job no longer running
meta_display_all = 0 # display all jobs results while metaparsing
meta_hierarchy = meta_conf_shortname,i # the hierarchy of parsed elements
meta_job_var = meta_conf_shortname # defines the job variable name
#################