caffe / U-Net: Could not compute map between tops; are they connected by spatial layers?


Tuelle

Dec 23, 2016, 5:21:55 AM
to Caffe Users

I want to generate a U-Net-like deep learning architecture with the following Python code (for pycaffe):


from caffe import layers as L
from caffe import params as P
import caffe
from caffe.coord_map import crop
from PythonDataLayer import PythonDataLayer


# Some macro functions
def max_pool(bottom):
    return L.Pooling(bottom, kernel_size=2, stride=2, pool=P.Pooling.MAX)

def macro_deconv(bottom, _num_output , _kernel_size, _stride):
    deconv = L.Deconvolution(bottom,
                         convolution_param=dict(num_output=_num_output,kernel_size=_kernel_size, stride=_stride),
                         param=[dict(lr_mult=1,decay_mult=1)]
                         )
    return deconv, L.ReLU(deconv, in_place=True)

def conv(bottom, _num_output, _kernel_size):
    c = L.Convolution(bottom,
                          num_output=_num_output,
                          kernel_size=_kernel_size,
                          pad=0, weight_filler=dict(type='xavier'),
                          param=[{'lr_mult':1},{'lr_mult':0.1}],
                          bias_filler=dict(type='constant', value=0))
    return c, L.ReLU(c, in_place=True)

def crop_merge(bottom_down, bottom_up):
    c = crop(bottom_down, bottom_up)
    m = L.Concat(bottom_up, c)
    return c, m


def unet():
    net = caffe.NetSpec()

    pydata_params = dict()
    pydata_params['image_file_list'] = '/home/xx/workspace/TRAIN_DATA_RAW.txt'
    pydata_params['label_file_list'] = '/home/xx/workspace/TRAIN_LABEL_RAW.txt'
    net.data, net.label = L.Python(module='PythonDataLayer', layer='PythonDataLayer',
                                   ntop=2, param_str=str(pydata_params))

    # Level 1 down
    net.down_conv1a, net.down_relu1a = conv(net.data, 24, 3)
    net.pool1 = max_pool(net.down_relu1a)

    # Level 2 down
    net.down_conv2a, net.down_relu2a = conv(net.pool1, 32, 3)
    net.down_conv2b, net.down_relu2b = conv(net.down_relu2a, 32, 3)
    net.pool2 = max_pool(net.down_relu2b)

    # Level 3 down
    net.down_conv3a, net.down_relu3a = conv(net.pool2, 32, 3)
    net.down_conv3b, net.down_relu3b = conv(net.down_relu3a, 32, 3)
    net.pool3 = max_pool(net.down_relu3b)

    # Bottom level
    net.conv4a, net.relu4a = conv(net.pool3, 32, 3)
    net.conv4b, net.relu4b = conv(net.relu4a, 32, 3)

    # Level 3 up
    net.up_deconv4, net.up_relu3a = macro_deconv(net.relu4b, 32, 2, 2)
    net.up_crop3, net.up_merge3 = crop_merge(net.down_relu3b, net.up_relu3a)
    net.up_conv3a, net.up_relu3b = conv(net.up_merge3, 32, 3)
    net.up_conv3b, net.up_relu3c = conv(net.up_relu3b, 32, 3)

    # Level 2 up
    net.up_deconv3, net.up_relu2a = macro_deconv(net.up_relu3c, 32, 2, 2)
    net.up_crop2, net.up_merge2 = crop_merge(net.down_relu2b, net.up_relu2a)
    net.up_conv2a, net.up_relu2b = conv(net.up_merge2, 32, 3)
    net.up_conv2b, net.up_relu2c = conv(net.up_relu2b, 32, 3)

    # Level 1 up
    net.up_deconv2, net.up_relu1a = macro_deconv(net.up_relu2c, 32, 2, 2)
    net.up_crop1, net.up_merge1 = crop_merge(net.down_relu1a, net.up_relu1a)
    net.up_conv1a, net.up_relu1b = conv(net.up_merge1, 32, 3)
    net.up_conv1b, net.up_relu1c = conv(net.up_relu1b, 32, 3)

    # Final layer
    net.last = L.Convolution(net.up_relu1c,
                          num_output=2,
                          kernel_size=1,
                          pad=0,
                          weight_filler=dict(type='xavier'),
                          param=[{'lr_mult':1},{'lr_mult':0.1}],
                          bias_filler=dict(type='constant', value=0))

    net.loss = L.EuclideanLoss(net.last, net.label)

    return net.to_proto()


print unet()

When executed the following error message is returned:


xx@pc-01:~/workspace$ python UNET.py
Traceback (most recent call last):
  File "UNET.py", line 109, in <module>
    print unet()
  File "UNET.py", line 79, in unet
    net.up_crop2, net.up_merge2 = crop_merge(net.down_relu2b, net.up_relu2a)
  File "UNET.py", line 34, in crop_merge
    c = crop(bottom_down, bottom_up)
  File "/usr/local/caffe/python/caffe/coord_map.py", line 178, in crop
    ax, a, b = coord_map_from_to(top_from, top_to)
  File "/usr/local/caffe/python/caffe/coord_map.py", line 168, in coord_map_from                           _to
    raise RuntimeError('Could not compute map between tops; are they '
RuntimeError: Could not compute map between tops; are they connected by spatial layers?


I cannot figure out what the problem is. If I reduce the network so that only a single deconvolution + crop + merge step is left, the network description string is generated without any problem; for reference, that reduced version looks roughly like the sketch below (it reuses the helper functions above, with arbitrary channel counts). Can anyone help?
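
def unet_reduced():
    net = caffe.NetSpec()
    # file lists omitted in this sketch
    net.data, net.label = L.Python(module='PythonDataLayer', layer='PythonDataLayer',
                                   ntop=2, param_str=str(dict()))
    # single down level
    net.down_conv1a, net.down_relu1a = conv(net.data, 24, 3)
    net.pool1 = max_pool(net.down_relu1a)
    # bottom level
    net.conv2a, net.relu2a = conv(net.pool1, 32, 3)
    # single up level: deconvolution + crop + merge
    net.up_deconv2, net.up_relu1a = macro_deconv(net.relu2a, 24, 2, 2)
    net.up_crop1, net.up_merge1 = crop_merge(net.down_relu1a, net.up_relu1a)
    net.up_conv1a, net.up_relu1b = conv(net.up_merge1, 24, 3)
    return net.to_proto()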

sk06

Feb 14, 2017, 5:33:11 AM
to Caffe Users
Hey,

I am also stuck with exactly the same issue. Have you resolved it? If so, please let me know how you resolved this error.

Thanks
SK

Philipp H

Mar 16, 2017, 3:31:23 AM
to Caffe Users
Have you found a solution?

jason....@gmail.com

Apr 5, 2017, 10:02:00 AM
to Caffe Users
Hi,

I think U-Net does not need a crop operation if you use pad=1 for every 3x3 convolution (this makes sure the output feature map has the same size as the input image). The following is my code, which I based on yours (it is used for retinal blood vessel segmentation), but I'm not sure about the loss layer.
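
(For a 3x3 kernel with stride 1, the standard convolution arithmetic gives an output width of (W + 2*pad - ks)/stride + 1; with pad=1 this is (W + 2 - 3)/1 + 1 = W, so every convolution preserves the spatial size and the skip connections can be concatenated directly, with no crop layer and therefore no coord_map call.)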



from __future__ import print_function  # print(..., file=f) needs this on Python 2

import caffe
from caffe import layers as L, params as P, to_proto

def conv(bottom, ks, nout, stride=1, pad=1, group=1):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group,
                         weight_filler=dict(type='xavier'),
                         bias_filler=dict(type='constant', value=0))
    return conv, L.ReLU(conv, in_place=True)

def max_pool(bottom, ks, stride=2):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def macro_deconv(bottom, _num_output , _kernel_size, _stride=2):
    deconv = L.Deconvolution(bottom,
                         convolution_param=dict(num_output=_num_output,kernel_size=_kernel_size, stride=_stride),
                         param=[dict(lr_mult=1,decay_mult=1)]
                         )
    return deconv, L.ReLU(deconv, in_place=True)

def make_unet(batch_size, patch_size):
    with open('train.prototxt', 'w') as f:
        print(get_unet('../caffedb/train_HDF5/HDF5_train_list.txt', batch_size=batch_size, patch_size=patch_size), file=f)
    with open('test.prototxt', 'w') as f:
        print(get_unet('../caffedb/test_HDF5/HDF5_test_list.txt', batch_size=batch_size, patch_size=patch_size), file=f)

def get_unet(hdf5, batch_size, patch_size):
    
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(source=hdf5, batch_size=batch_size, ntop=2)

    # Level 1 down
    net.down_conv1a, net.down_relu1a = conv(net.data, ks=3, nout=32)
    net.down_relu1a = L.Dropout(net.down_relu1a, dropout_ratio=0.2)
    net.down_conv1b, net.down_relu1b = conv(net.down_relu1a, ks=3, nout=32)
    net.pool1 = max_pool(net.down_relu1b, ks=2)

    # Level 2 down
    net.down_conv2a, net.down_relu2a = conv(net.pool1, ks=3, nout=64)
    net.down_relu2a = L.Dropout(net.down_relu2a, dropout_ratio=0.2)
    net.down_conv2b, net.down_relu2b = conv(net.down_relu2a, ks=3, nout=64)
    net.pool2 = max_pool(net.down_relu2b, ks=2)

    # Bottom level
    net.conv3a, net.relu3a = conv(net.pool2, ks=3, nout=128)
    net.relu3a = L.Dropout(net.relu3a, dropout_ratio=0.2)
    net.conv3b, net.relu3b = conv(net.relu3a, ks=3, nout=128)

    # Level 2 up
    net.up_deconv3, net.up_relu2a = macro_deconv(net.relu3b, _kernel_size=2, _num_output=64)
    net.up_merge2 = L.Concat(net.down_relu2b, net.up_relu2a)
    net.up_conv2a, net.up_relu2b = conv(net.up_merge2, ks=3, nout=64)
    net.up_relu2b = L.Dropout(net.up_relu2b, dropout_ratio=0.2)
    net.up_conv2b, net.up_relu2c = conv(net.up_relu2b, ks=3, nout=64)

    # Level 1 up
    net.up_deconv2, net.up_relu1a = macro_deconv(net.up_relu2c, _kernel_size=2, _num_output=32)
    net.up_merge1 = L.Concat(net.down_relu1b, net.up_relu1a)
    net.up_conv1a, net.up_relu1b = conv(net.up_merge1, ks=3, nout=32)
    net.up_relu1b = L.Dropout(net.up_relu1b, dropout_ratio=0.2)
    net.up_conv1b, net.up_relu1c = conv(net.up_relu1b, ks=3, nout=32)

    # Final Layer
    net.last_conv, net.last_relu = conv(net.up_relu1c, pad=0, ks=1, nout=2)
    net.last = L.Reshape(net.last_relu,
                         reshape_param={'shape': {'dim': [batch_size, 2, patch_size**2]}})

    net.last = L.Softmax(net.last)
    net.loss = L.SigmoidCrossEntropyLoss(net.last, net.label)

    return net.to_proto()

# Example hyperparameters (assumed values; pick them to match your HDF5 data):
batch_size = 32
patch_size = 48
make_unet(batch_size, patch_size)
exit()
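
Regarding the loss: SigmoidCrossEntropyLoss applies a sigmoid internally, so stacking it on top of an explicit Softmax is probably not what you want. An untested alternative sketch, assuming net.label holds per-pixel integer class indices, is to feed the raw scores straight into SoftmaxWithLoss:

    # Untested sketch: SoftmaxWithLoss normalizes internally, so the explicit
    # Softmax layer is dropped. Assumes net.label holds integer class indices.
    net.score = L.Reshape(net.last_relu,
                          reshape_param={'shape': {'dim': [batch_size, 2, patch_size**2]}})
    net.loss = L.SoftmaxWithLoss(net.score, net.label)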


