Why does my feature extraction script fail?


jaba marwen

May 30, 2017, 5:37:57 AM5/30/17
to Caffe Users
Hi everyone,

I have used the following script to extract feature vectors from my dataset with the pretrained AlexNet model.


import numpy as np
import hickle as hkl
import caffe

caffe.set_mode_gpu()

def feature_extract(img):

    model_file = '/home/jaba/caffe/data/diota_model/feature_extractor/bvlc_reference_caffenet.caffemodel'
    deploy_file = '/home/jaba/caffe/data/diota_model/feature_extractor/alex.deployprototxt'

    net = caffe.Net(deploy_file, model_file, caffe.TEST)

    mean_values = np.array([103.939, 116.779, 123.68])

    # setting the transformer
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_mean('data', mean_values)        # subtract by mean
    transformer.set_transpose('data', (2, 0, 1))     # (H,W,C) => (C,H,W)
    transformer.set_raw_scale('data', 255.0)         # [0.0, 1.0] => [0.0, 255.0]
    transformer.set_channel_swap('data', (2, 1, 0))  # RGB => BGR

    # img = caffe.io.load_image(img)

    net.blobs['data'].data[...] = transformer.preprocess('data', img)
    output = net.forward()
    feat = net.blobs['fc7'].data.copy()

    return feat





def create_dataset(datalist, db_prefix):
    with open(datalist) as fr:
        lines = fr.readlines()
    lines = [line.rstrip() for line in lines]

    feats = []
    labels = []

    for line_i, line in enumerate(lines):
        # each line ends with a single-character label, preceded by a space
        a = len(line)
        label = line[a-1]
        img_path = line[0:a-2]
        img = caffe.io.load_image(img_path)
        feat = feature_extract(img)
        feats.append(feat)
        label = int(label)
        labels.append(label)

        if (line_i + 1) % 100 == 0:
            print "processed", line_i + 1

    feats = np.asarray(feats)
    labels = np.asarray(labels)

    hkl.dump(feats, db_prefix + "_features.hkl", mode="w")
    hkl.dump(labels, db_prefix + "_labels.hkl", mode="w")


create_dataset('train.txt', 'vgg_fc7_train')
create_dataset('val.txt', 'vgg_fc7_test')


After a few iterations I get the following error: error == cudaSuccess (2 vs. 0) out of memory.

I know that my GPU is out of memory. Would you help me correct this issue?

As for the list files, train.txt contains the paths of 5000 images and val.txt the paths of 1000 images.
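
For reference, this is roughly how the script expects each line of train.txt/val.txt to look (an image path, a space, then a single-character label); the path and label below are made up for illustration:

line = 'images/cat_001.jpg 3'   # hypothetical line from train.txt
a = len(line)
label = line[a-1]       # '3'  (last character)
img_path = line[0:a-2]  # 'images/cat_001.jpg' (drops the trailing space and label)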
 
Thank you.




