Re: Regression: image as input and image as output

154 views
Skip to first unread message
Message has been deleted
Message has been deleted

Jan

unread,
Apr 26, 2016, 4:38:28 AM4/26/16
to Caffe Users
Mhm, the loss drops and then stays more or less constant. Doesn't look really wrong to me, but maybe your network is simply not adequate to express the function you'd like to learn? And/Or training settings are not optimal. And/Or the number of (unique) training examples is not enough. As for mean subtraction/preprocessing: You can always try it and see if it improves your network. Shouldn't be much additional work. Although in theory it should also be able to learn from raw byte values (I read somewhere), in my experience learning without preprocessing is very hard to impossible.

Have you tried to feed an image manually and look at how it differs from the target (visually, and maybe numerically)? That might get you an idea how well-trained your network really is, and whether it actually learns or not. Just looking at the loss value does not really give you that kind of information.

Jan


Am Montag, 25. April 2016 14:33:08 UTC+2 schrieb Thorsten Laude:
Additional information: I am not applying any preprocessing to my input data since my goal is to get gray sample values (0 to 255) as output. Thus, I assume that I will not benefit from methods like mean subtraction, etc. Is this assumption correct?

Message has been deleted

Jan

unread,
May 10, 2016, 8:36:04 AM5/10/16
to Caffe Users
Either use a reshape layer or do the reshaping yourself, which e.g. is easy in pycaffe with the numpy interface to the blobs. Just reshape it to something like [batch_size, num_channels, img_height, img_width].

Jan


Am Dienstag, 10. Mai 2016 14:31:32 UTC+2 schrieb Thorsten Laude:
Thanks for the reply. I was busy with other things until now, hence the delayed response.

I want to try your suggestion to feed an image manually and evaluate the output visually. My final layer is fully connected with the number of neurons being equal to the number of samples in my target picture. How can I "reorder" these neurons to get a viewable image?

Thanks!

Thorsten

15535...@qq.com

unread,
Sep 14, 2016, 6:03:56 AM9/14/16
to Caffe Users
my net is also image as input and image as output. But I have no label.
Can you help me to see my net?

name: "upconv_7"
# Training input: images from the train LMDB, mean-subtracted and scaled
# by 1/256 (0.00390625) into [0, 1).
layer {
  name: "data"
  type: "Data"
  top: "input_data"
  include {
    phase: TRAIN
  }
  transform_param {
    # 1/256 -- maps raw byte values into [0, 1).
    scale: 0.00390625
    mirror: false
    mean_file: "/home/yuyadan/src/sourceCode/lltcggie-caffe/data/waifu2x_trainning_cpp/waifu2x_mean.binaryproto"
  }
  data_param {
    source: "/home/yuyadan/src/sourceCode/lltcggie-caffe/examples/waifu2x_trainning_cpp/waifu2x_train0_lmdb"
    batch_size: 16
    backend: LMDB
  }
}
# Validation input: same preprocessing as the training input layer, read
# from the validation LMDB during the TEST phase.
layer {
  name: "data"
  type: "Data"
  top: "input_data"
  include {
    phase: TEST
  }
  transform_param {
    # 1/256 -- keep identical to the TRAIN input transform.
    scale: 0.00390625
    mirror: false
    mean_file: "/home/yuyadan/src/sourceCode/lltcggie-caffe/data/waifu2x_trainning_cpp/waifu2x_mean.binaryproto"
  }
  data_param {
    source: "/home/yuyadan/src/sourceCode/lltcggie-caffe/examples/waifu2x_trainning_cpp/waifu2x_val0_lmdb"
    batch_size: 16
    backend: LMDB
  }
}
# conv1: 3x3 convolution, 16 output channels, stride 1, followed by
# leaky ReLU (negative_slope 0.1) applied in place.
# NOTE(review): no "pad" is set here (or in any conv below), so each 3x3
# conv shrinks every spatial dimension by 2 pixels -- confirm the target
# patches account for the accumulated shrink.
layer {
  name: "conv1_layer"
  type: "Convolution"
  bottom: "input_data"
  top: "conv1"
  convolution_param {
    num_output: 16
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv1_relu_layer"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
  relu_param {
    negative_slope: 0.1
  }
}
# conv2: 3x3 conv, 16 -> 32 channels, unpadded, with in-place leaky ReLU.
layer {
  name: "conv2_layer"
  type: "Convolution"
  bottom: "conv1"
  top: "conv2"
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv2_relu_layer"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
  relu_param {
    negative_slope: 0.1
  }
}
# conv3: 3x3 conv, 32 -> 64 channels, unpadded, with in-place leaky ReLU.
layer {
  name: "conv3_layer"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  convolution_param {
    num_output: 64
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv3_relu_layer"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
  relu_param {
    negative_slope: 0.1
  }
}
# conv4: 3x3 conv, 64 -> 128 channels, unpadded, with in-place leaky ReLU.
layer {
  name: "conv4_layer"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv4_relu_layer"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
  relu_param {
    negative_slope: 0.1
  }
}
# conv5: 3x3 conv, 128 -> 128 channels, unpadded, with in-place leaky ReLU.
layer {
  name: "conv5_layer"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv5_relu_layer"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
  relu_param {
    negative_slope: 0.1
  }
}
# conv6: 3x3 conv, 128 -> 256 channels, unpadded, with in-place leaky ReLU.
layer {
  name: "conv6_layer"
  type: "Convolution"
  bottom: "conv5"
  top: "conv6"
  convolution_param {
    num_output: 256
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
layer {
  name: "conv6_relu_layer"
  type: "ReLU"
  bottom: "conv6"
  top: "conv6"
  relu_param {
    negative_slope: 0.1
  }
}
# conv7: Deconvolution back to a 3-channel image with a learned 2x
# upsampling (kernel 4, stride 2, pad 3: output = (in-1)*2 + 4 - 6 = 2*in - 4
# per spatial dim). Combined with the six unpadded 3x3 convs above, the
# final output is 2*in - 28 relative to the input crop.
# NOTE(review): EuclideanLoss needs "conv7" and "target" to have identical
# dimensions -- verify the target LMDB patches are sized 2*in - 28.
layer {
  name: "conv7_layer"
  type: "Deconvolution"
  bottom: "conv6"
  top: "conv7"
  convolution_param {
    num_output: 3
    kernel_size: 4
    stride: 2
    pad: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
  }
}
# Training target: ground-truth images scaled into [0, 1) like the input.
# NOTE(review): unlike the input layer this applies no mean_file -- the
# network must then learn the mean offset itself; confirm this is intended.
layer {
  name: "target"
  type: "Data"
  top: "target"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "/home/yuyadan/src/sourceCode/lltcggie-caffe/examples/waifu2x_trainning_cpp/waifu2x_train1_lmdb"
    batch_size: 16
    backend: LMDB
  }
}
# Validation target: same transform as the training target, read from the
# validation ground-truth LMDB during the TEST phase.
layer {
  name: "target"
  type: "Data"
  top: "target"
  include {
    phase: TEST
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "/home/yuyadan/src/sourceCode/lltcggie-caffe/examples/waifu2x_trainning_cpp/waifu2x_val1_lmdb"
    batch_size: 16
    backend: LMDB
  }
}
# Euclidean (L2) loss between the network output ("conv7") and the
# ground-truth "target" blob.
# Fix: the original restricted this layer to phase TRAIN, which left the
# TEST net with no loss to compute or report even though a TEST-phase
# "target" data layer exists -- test iterations produced no evaluation
# signal. Running the loss in both phases makes validation meaningful.
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "conv7"
  bottom: "target"
  top: "loss"
}

Reply all
Reply to author
Forward
0 new messages