def encapsulate_bash_call(cmd, log):
    """Run *cmd* through ``bash -c`` and stream its output to *log*.

    Args:
        cmd: Shell command line to execute (e.g. a ``caffe train`` invocation).
        log: Path of the file that receives the command's combined
            stdout/stderr.

    Returns:
        int: The command's exit status.
    """
    # Redirect stderr into stdout: caffe reports training progress on
    # stderr, and the downstream log parser expects a single stream.
    with open(log, 'w') as lw:
        return subprocess.call(['bash', '-c', cmd],
                               stdout=lw, stderr=subprocess.STDOUT)
def run_k_fold_parallel(solver_config_paths, log_path=path_log, num_gpus=(1, 2)):
    """Train one caffe model per solver config, batching across GPUs.

    Solvers are dispatched in batches of ``len(num_gpus)``, one thread per
    GPU; each batch is joined before the next one starts so a GPU is never
    shared by two concurrent trainings. Afterwards every training log is
    parsed and the final-iteration losses are averaged over all folds.

    Args:
        solver_config_paths: List of solver ``.prototxt`` paths. The list is
            NOT modified (the original implementation emptied it).
        log_path: Directory/prefix where per-solver ``.log`` files are
            written. Defaults to the module-level ``path_log``.
        num_gpus: GPU ids to use, one concurrent training per id.
            A tuple default avoids the mutable-default-argument pitfall.

    Returns:
        tuple: ``(mean_train_loss, mean_val_loss, mean_psnr)`` over folds,
        where PSNR is computed as ``10*log10(255**2 / val_loss)`` —
        assumes the loss is an MSE on 8-bit image data (TODO confirm).

    Raises:
        ValueError: If ``solver_config_paths`` is empty (previously this
            surfaced as an opaque ZeroDivisionError).
    """
    num_solv = len(solver_config_paths)
    if num_solv == 0:
        raise ValueError('solver_config_paths must not be empty')

    # Work on a copy so the caller's list is not emptied by pop().
    pending = list(solver_config_paths)
    logs = []
    results = []
    while pending:
        batch = min(len(num_gpus), len(pending))
        pool = ThreadPool(processes=batch)
        for i in range(batch):
            s_path = pending.pop()
            log = log_path + os.path.basename(
                os.path.normpath(s_path)).replace('.prototxt', '.log')
            bashCommand = 'caffe train --solver=' + s_path + ' --gpu=' + str(num_gpus[i])
            results.append(pool.apply_async(encapsulate_bash_call, [bashCommand, log]))
            logs.append(log)
        pool.close()
        # Wait for the whole batch before reusing the GPUs.
        pool.join()

    # Re-raise any exception that occurred inside a worker thread; the
    # original code dropped these silently.
    for r in results:
        r.get()

    train = 0.0
    val = 0.0
    psnr = 0.0
    for log in logs:
        tr, va = parse_log.parse_log(log)
        psnr += 10 * np.log10((255 ** 2) / (va[-1]['loss']))
        train += tr[-1]['loss']
        val += va[-1]['loss']
    return train / num_solv, val / num_solv, psnr / num_solv