Applying CNN to time series data

HARSHIT KUMAR GUPTA

Jun 9, 2015, 1:50:50 AM
to caffe...@googlegroups.com
This is my Caffe model for time series data:

name: "AusticNet"
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  data_param {
    source: "austiclmdb"
    backend: LMDB
    batch_size: 200
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
}



Following is the error log I got upon training:


I0609 11:10:27.874151 23576 layer_factory.hpp:74] Creating layer data
I0609 11:10:27.874186 23576 net.cpp:84] Creating Layer data
I0609 11:10:27.874202 23576 net.cpp:338] data -> data
I0609 11:10:27.874233 23576 net.cpp:338] data -> label
I0609 11:10:27.874251 23576 net.cpp:113] Setting up data
I0609 11:10:27.874326 23576 db.cpp:34] Opened lmdb austiclmdb
I0609 11:10:27.874443 23576 data_layer.cpp:67] output data size: 200,1,1,65000
I0609 11:10:27.874670 23576 net.cpp:120] Top shape: 200 1 1 65000 (130000)
I0609 11:10:27.874689 23576 net.cpp:120] Top shape: 200
I0609 11:10:27.874707 23576 layer_factory.hpp:74] Creating layer conv1
I0609 11:10:27.874727 23576 net.cpp:84] Creating Layer conv1
I0609 11:10:27.874742 23576 net.cpp:380] conv1 <- data
I0609 11:10:27.874761 23576 net.cpp:338] conv1 -> conv1
I0609 11:10:27.874781 23576 net.cpp:113] Setting up conv1
F0609 11:10:27.875177 23576 blob.cpp:28] Check failed: shape[i] >= 0 (-1 vs. 0)
*** Check failure stack trace: ***
    @     0x7feb1d8b4f7d  google::LogMessage::Fail()
    @     0x7feb1d8b708f  google::LogMessage::SendToLog()
    @     0x7feb1d8b4b6c  google::LogMessage::Flush()
    @     0x7feb1d8b792d  google::LogMessageFatal::~LogMessageFatal()
    @     0x7feb1dbfd5ed  caffe::Blob<>::Reshape()
    @     0x7feb1dbfda8a  caffe::Blob<>::Reshape()
    @     0x7feb1dca7b88  caffe::BaseConvolutionLayer<>::Reshape()
    @     0x7feb1dceae59  caffe::Net<>::Init()
    @     0x7feb1dced5f1  caffe::Net<>::Net()
    @     0x7feb1dd05ccf  caffe::Solver<>::InitTrainNet()
    @     0x7feb1dd06322  caffe::Solver<>::Init()
    @     0x7feb1dd06945  caffe::Solver<>::Solver()
    @           0x40d788  caffe::GetSolver<>()
    @           0x406ca6  train()
    @           0x404d2b  main
    @     0x7feb1cfebec5  (unknown)
    @           0x4051af  (unknown)
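
For reference, the data layer log above reports a top shape of 200 1 1 65000, so each sample reaches conv1 as a 1 x 65000 "image" (height 1, width 65000). Caffe computes the convolution output height as (height + 2*pad - kernel_size) / stride + 1, which here is (1 + 0 - 11) / 4 + 1 = -1; that is presumably the "-1 vs. 0" the shape check reports. One way around it, assuming the data really is a single 65000-sample series per row as the log suggests, is to convolve and pool only along the width (time) axis using the kernel_h/kernel_w and stride_h/stride_w fields. A minimal sketch for conv1 and pool1 (param blocks omitted; the later convolution and pooling layers would need the same treatment):

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_h: 1    # do not slide across the height-1 axis
    kernel_w: 11   # 11-sample window along the time axis
    stride_h: 1
    stride_w: 4
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 1    # pool along the time axis only
    kernel_w: 3
    stride_h: 1
    stride_w: 2
  }
}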

Abhishek Sharma

Jun 22, 2015, 5:24:14 AM
to caffe...@googlegroups.com
How did you convert your time series data into LMDB?
...

Abhishek Sharma

Jun 29, 2015, 5:26:06 AM
to caffe...@googlegroups.com
Hey, I am also stuck with the same problem. Did you figure this out?


ee15res...@iith.ac.in

Sep 9, 2016, 2:19:43 AM
to Caffe Users
Hey Harshit, can I know how you converted the time series data into images?
Thanks

