net1 = nn.Sequential()
net1:add(nn.SpatialConvolution(3, 16, 3, 3))
net1:add(nn.ReLU())
net1:add(nn.SpatialMaxPooling(2, 2))
net1:add(nn.View(16*7*7)) -- flattening out
net2 = nn.Sequential()
net2:add(nn.SpatialConvolution(3, 16, 5, 5))
net2:add(nn.ReLU())
net2:add(nn.SpatialMaxPooling(2, 2))
net2:add(nn.View(16*6*6)) -- flattening out
parallel_model = nn.Parallel(1, 1) -- model that concatenates net1 and net2
parallel_model:add(net1)
parallel_model:add(net2)
-- Putting everything together
model = nn.Sequential()
model:add(parallel_model)
model:add(nn.Linear(1360, 100)) -- the final FC layer
input = torch.Tensor(2, 3, 16, 16)
out = model:forward(input)
out:size()
100
[torch.LongStorage of size 1]
cudnn.convert(model, cudnn);
model = model:cuda();
t = torch.rand(10,2,3,16,16):cuda();
out = model:forward(t)
...torch/install/share/lua/5.1/cudnn/SpatialConvolution.lua:101: assertion failed!
stack traceback:
[C]: in function 'assert'
...torch/install/share/lua/5.1/cudnn/SpatialConvolution.lua:101: in function 'createIODescriptors'
...torch/install/share/lua/5.1/cudnn/SpatialConvolution.lua:349: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Parallel.lua:18: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
[string "out = mlp:forward(t:cuda());"]:1: in main chunk
[C]: in function 'xpcall'
/data/torch/install/share/lua/5.1/trepl/init.lua:648: in function 'repl'
/data/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:185: in main chunk
[C]: at 0x00406670
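Side note on shapes, separate from the assertion above (which is addressed by the cudnn fix below): nn.Parallel(inputDim, outputDim) slices its input along inputDim and joins the branch outputs along outputDim, so once a batch dimension is added the two sub-inputs sit along dimension 2 rather than 1, and the View modules need a -1 so the batch dimension survives. A minimal sketch of the batch-aware variant, reusing the layer sizes above:

require 'nn'

net1 = nn.Sequential()
net1:add(nn.SpatialConvolution(3, 16, 3, 3))
net1:add(nn.ReLU())
net1:add(nn.SpatialMaxPooling(2, 2))
net1:add(nn.View(-1, 16*7*7))   -- keep the batch dimension

net2 = nn.Sequential()
net2:add(nn.SpatialConvolution(3, 16, 5, 5))
net2:add(nn.ReLU())
net2:add(nn.SpatialMaxPooling(2, 2))
net2:add(nn.View(-1, 16*6*6))

model = nn.Sequential()
model:add(nn.Parallel(2, 2):add(net1):add(net2))  -- slice input dim 2, join output dim 2
model:add(nn.Linear(1360, 100))

out = model:forward(torch.rand(10, 2, 3, 16, 16))
print(out:size())   -- 10 x 100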
let me push a fix for this.
Great, thanks!
I guess I'll wait for the fix and then update cudnn once it's done?
Ok, I pushed a fix. Now do:
luarocks install cudnn
After that, no error.
Great, that solved the cudnn/SpatialConvolution problem! Thanks!
By the way, I'm also getting a strange issue. If I explicitly code my network with cudnn modules (cudnn.SpatialConvolution ... cudnn.SpatialBatchNormalization ... etc.), it works fine now after your fix.
However, if my network is coded with nn.xxx modules and then converted with cudnn.convert(), I get the following error:
...h/install/share/lua/5.1/nn/SpatialBatchNormalization.lua:84: attempt to index field 'THNN' (a nil value)
stack traceback:
...h/install/share/lua/5.1/nn/SpatialBatchNormalization.lua:84: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Parallel.lua:18: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
[string "out = mlp:forward(t);"]:1: in main chunk
[C]: in function 'xpcall'
/data/torch/install/share/lua/5.1/trepl/init.lua:648: in function 'repl'
/data/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:185: in main chunk
[C]: at 0x00406670
Any idea what is causing that? I did update cutorch, cunn, nn, and cudnn to the latest versions...
I also noticed that cudnn.convert did not convert nn.SpatialBatchNormalization into cudnn.SpatialBatchNormalization; is that something that might also need to be updated in cudnn? Thanks.
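For reference, the two routes being compared look roughly like this; this is only a sketch, since cudnn.convert's coverage of module types depends on the installed cudnn bindings (modules without a cudnn counterpart are simply left as nn modules):

require 'cunn'
require 'cudnn'

-- route 1: build directly with cudnn modules
m1 = nn.Sequential()
m1:add(cudnn.SpatialConvolution(3, 16, 3, 3))
m1:add(cudnn.SpatialBatchNormalization(16, 1e-3))
m1:add(cudnn.ReLU())
m1 = m1:cuda()

-- route 2: build with nn modules, then convert
m2 = nn.Sequential()
m2:add(nn.SpatialConvolution(3, 16, 3, 3))
m2:add(nn.SpatialBatchNormalization(16, 1e-3))
m2:add(nn.ReLU())
m2 = m2:cuda()
cudnn.convert(m2, cudnn)   -- any module without a cudnn counterpart stays as nn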
Yes, reinstall nn and cunn, that is definitely the reason for the error:
luarocks install nn
luarocks install cunn
Hmm, I followed your suggestion and the same error is still coming up.
In fact, it was working before (it only failed sometimes), but after I ran your two update commands I'm now getting a bunch of /nn/THNN.lua errors. Do you have any other suggestions?
input = torch.rand(1,2,3,16,16):cuda();
out = model:forward(input)
/data/torch/install/share/lua/5.1/nn/Parallel.lua:20: bad argument #1 to 'size' (out of range)
stack traceback:
[C]: in function 'size'
/data/torch/install/share/lua/5.1/nn/Parallel.lua:20: in function 'updateOutput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
[string "out = mlp:forward(t);"]:1: in main chunk
[C]: in function 'xpcall'
/data/torch/install/share/lua/5.1/trepl/init.lua:648: in function 'repl'
/data/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:185: in main chunk
[C]: at 0x00406670
So the 1st dimension of my input must be greater than 1 (it cannot be 1). How should I correctly pass a single input through a forward pass with the nn.Parallel container? (See the sketch below.)

Do you have multiple or old torch installs on your machine?
I suggest running: https://github.com/torch/ezinstall/blob/master/clean-old.sh
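On the single-example question above, a minimal sketch, assuming the batch-aware variant of the model (nn.Parallel(2, 2) with nn.View(-1, n) in each branch): a single example is simply a batch of size one.

x = torch.rand(2, 3, 16, 16):cuda()          -- one example
out = model:forward(x:view(1, 2, 3, 16, 16)) -- out is 1 x 100

With the original nn.Parallel(1, 1) / nn.View(n) version, which was written for a single 4-D example, the input should instead be exactly that 4-D 2x3x16x16 tensor with no batch dimension.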
Passing "-1 for one of the dimensions" of nn.View got the forward pass working, but the backward pass now fails. Here is the full code and the error:
net1 = nn.Sequential()
net1:add(nn.SpatialConvolution(3, 16, 3, 3))
net1:add(nn.SpatialBatchNormalization(16, 1e-3))
net1:add(nn.ReLU())
net1:add(nn.SpatialMaxPooling(2, 2))
--net1:add(nn.View(-1):setNumInputDims(3)) -- works well for forward pass
net1:add(nn.View(-1, 16*7*7)) -- also works well for forward pass

net2 = nn.Sequential()
net2:add(nn.SpatialConvolution(3, 16, 3, 3))
net2:add(nn.SpatialBatchNormalization(16, 1e-3))
net2:add(nn.ReLU())
net2:add(nn.SpatialMaxPooling(2, 2))
--net2:add(nn.View(-1):setNumInputDims(3))
net2:add(nn.View(-1, 16*7*7))

parallel_model = nn.Parallel(2, 2)
parallel_model:add(net1)
parallel_model:add(net2)

model = nn.Sequential()
model:add(parallel_model)
model:add(nn.Linear(16*7*7*2, 100))
model:add(nn.LogSoftMax())
model = model:cuda()
cudnn.convert(model, cudnn)

inputs = torch.rand(10, 2, 3, 16, 16):cuda();
outputs = model:forward(inputs)

-- getting the error gradients
criterion = nn.ClassNLLCriterion():cuda()
parameters, gradParameters = model:getParameters()
gradParameters:zero();
targets = torch.ones(10):cuda();
f = criterion:forward(outputs, targets)
df_do = criterion:backward(outputs, targets)
model:backward(inputs, df_do)
/data/torch/install/share/lua/5.1/torch/Tensor.lua:457: expecting a contiguous tensor
stack traceback:
[C]: in function 'assert'
/data/torch/install/share/lua/5.1/torch/Tensor.lua:457: in function 'view'
/data/torch/install/share/lua/5.1/nn/View.lua:90: in function 'updateGradInput'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:55: in function 'updateGradInput'
/data/torch/install/share/lua/5.1/nn/Parallel.lua:53: in function 'updateGradInput'
/data/torch/install/share/lua/5.1/nn/Module.lua:30: in function 'backward'
/data/torch/install/share/lua/5.1/nn/Sequential.lua:88: in function 'backward'
[string "_RESULT={model:backward(inputs, df_do) }"]:1: in main chunk
[C]: in function 'xpcall'
/data/torch/install/share/lua/5.1/trepl/init.lua:630: in function 'repl'
/data/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:185: in main chunk
[C]: at 0x00406670
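The "expecting a contiguous tensor" assertion comes from nn.View's backward: nn.Parallel narrows the incoming gradient along its join dimension, and the resulting slice is not contiguous, so View cannot call :view() on it. A sketch of one possible workaround under that assumption (not necessarily the only fix): end each branch with something that yields a contiguous gradient, e.g. an nn.Contiguous() after the View (if your nn version has it), or an nn.Reshape, which copies instead of viewing:

net1:add(nn.SpatialMaxPooling(2, 2))
net1:add(nn.View(-1, 16*7*7))
net1:add(nn.Contiguous())        -- makes the incoming gradOutput contiguous during backward
-- or, in place of the View + Contiguous pair:
-- net1:add(nn.Reshape(16*7*7))  -- Reshape copies, so it tolerates non-contiguous gradients

and the same for net2.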
Hi all,

I've been trying to combine 2 networks into one with concat, but I keep running into problems and can't seem to get it implemented. The network structure is attached; it is a very simple network. Basically, I have 2 separate networks, each with its own conv layers. After reshaping/viewing their last conv layers, I'd like to concatenate the two 1-D vectors into a single 1-D vector, which then feeds the subsequent fully connected layers. Does anyone have a good way of implementing that? Thanks!
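For completeness, one way to implement this that avoids nn.Parallel's slicing is a table pipeline. A minimal sketch, assuming the two sub-inputs can be fed as a table {x1, x2} (if they arrive stacked in one N x 2 x 3 x 16 x 16 tensor, an nn.SplitTable(2) in front would split them) and reusing the layer sizes from earlier in the thread; nn.JoinTable copies the narrowed gradient slices in backward, which should also sidestep the contiguity assertion above:

require 'nn'

branch1 = nn.Sequential()
branch1:add(nn.SpatialConvolution(3, 16, 3, 3))
branch1:add(nn.ReLU())
branch1:add(nn.SpatialMaxPooling(2, 2))
branch1:add(nn.View(-1, 16*7*7))        -- N x 784

branch2 = nn.Sequential()
branch2:add(nn.SpatialConvolution(3, 16, 5, 5))
branch2:add(nn.ReLU())
branch2:add(nn.SpatialMaxPooling(2, 2))
branch2:add(nn.View(-1, 16*6*6))        -- N x 576

model = nn.Sequential()
model:add(nn.ParallelTable():add(branch1):add(branch2))  -- {x1, x2} -> {N x 784, N x 576}
model:add(nn.JoinTable(2))                               -- N x 1360
model:add(nn.Linear(16*7*7 + 16*6*6, 100))               -- the final FC layer

out = model:forward({torch.rand(10, 3, 16, 16), torch.rand(10, 3, 16, 16)})
print(out:size())                                        -- 10 x 100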