def classDeconvnet(): lossModelFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_deploy.prototxt'# has SIGMOID_CROSS_ENTROPY_LOSS trainedModel = '/home/dkkim930122/caffe-master/examples/deconvnet/caffe_reference_imagenet_model' meanFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_mean.npy'
# Create net (loss) caffe.set_mode_gpu() net = caffe.Classifier(lossModelFile, trainedModel) #net.set_mean('data', np.load(meanFile)) net.set_raw_scale('data', 255) net.set_channel_swap('data', (2,1,0))
# Display Net structure print 'Net structure:' for blobName, v in net.blobs.items(): print (blobName, v.data.shape)
lastBlobName = blobName
maxIteration = 100 learning_rate = 0.01 label_index = 281
caffeInput = np.random.random((1,3,227,227))
caffeLabel = np.zeros((1,1000,1,1))# for SIGMOID_CROSS_ENTROPY_LOSS (https://groups.google.com/forum/#!searchin/caffe-users/SIGMOID_CROSS_ENTROPY_LOSS/caffe-users/19XfmJqg34Q/Id_jnm_L0iIJ) caffeLabel[0][label_index] = 1; for iteration in range(maxIteration): print 'iteration at:', iteration # forward forwardOutput = net.forward(data=caffeInput, label=caffeLabel)
# backward backwardOutput = net.backward(**{net.outputs[0]: forwardOutput[lastBlobName]}) diff = backwardOutput['data']
caffeInput = caffeInput - learning_rate * caffeInput * diff# multiplication is element wise multiplication
plt.figure() plt.imshow(caffeInput[0].transpose(1,2,0))
plt.show()
# NOTE(review): mailing-list snippet; assumes a loaded `net` (caffe.Classifier)
# is already in scope — it does not run standalone.
target = 4 # the class you want to visualise
# One-hot gradient injected at the output blob; shape (1,1000,1,1) presumably
# matches the net's output blob — confirm against the deploy prototxt.
diff = np.zeros((1,1000,1,1))
diff[0,target,0,0] = 1 # loss
# Backpropagate the injected diff; by analogy with the rest of this file,
# back['data'] then holds the gradient w.r.t. the input image.
back = net.backward(**{net.outputs[0]:diff})
# IMPORT LIBRARY
import numpy as np
#np.set_printoptions(threshold=np.nan)
import matplotlib.pyplot as plt
import sys, caffe, os, operator
import caffe.io
import scipy
from caffe.proto import caffe_pb2
from PIL import Image
import matplotlib.image as mpimg

# GLOBAL VARIABLES: machine-specific caffe model / mean-image paths.
imagenetProbModelFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_prob_deploy.prototxt'
imagenetfc8ModelFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_fc8_deploy.prototxt'
imagenetTrainedModel = '/home/dkkim930122/caffe-master/examples/deconvnet/caffe_reference_imagenet_model'
imagenetMeanFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_mean.npy'
# VISSQUARE
# Take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n).
def visSquare(data, padsize=1, padval=0):
    """Tile the first axis of `data` into a roughly square grid and show it.

    Returns the tiled 2-D (or 2-D + channels) array that was displayed.
    Fixes vs. the original: works on a float copy so the caller's array is
    not mutated in place, integer input no longer floor-divides to zeros
    under Python 2, and a constant input no longer divides by zero.
    """
    # Normalize a float copy to [0, 1] for display.
    data = np.array(data, dtype=np.float64)
    data -= data.min()
    peak = data.max()
    if peak > 0:  # guard: constant input would otherwise divide by zero
        data /= peak
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data)
    plt.show()
    return data
# CLASSDECONVNET# Class specific deconvnetdef classDeconvnet(): print "In classDeconvnet() function"
# Create net (fc8) print "Creating fc8..." caffe.set_mode_gpu() netfc8 = caffe.Classifier(imagenetfc8ModelFile, imagenetTrainedModel) netfc8.set_mean('data', np.load(imagenetMeanFile))# Search the following for solution: https://github.com/BVLC/caffe/issues/420 netfc8.set_raw_scale('data', 255) netfc8.set_channel_swap('data', (2,1,0))
# Display Net structure (loss) print 'Net structure (loss):' for blobName, v in netfc8.blobs.items():
print (blobName, v.data.shape)
lastBlobName = blobName
maxIteration = 2 learning_rate = 10000 labelIndex = 100 # 100: goose
# Caffe input: zero image caffeInput = np.zeros((1,3,227,227))
# Caffe label caffeLabel = np.zeros((1,1000,1,1)) caffeLabel[0,labelIndex,0,0] = 1;
for iteration in range(maxIteration): print '< iteration at:', iteration,'>'
# forward forwardOutput = netfc8.forward(data=caffeInput) print 'forward Output is:', forwardOutput[lastBlobName][0,labelIndex,0,0] print 'forward max is :', forwardOutput[lastBlobName].max()
# backward backwardOutput = netfc8.backward(**{netfc8.outputs[0]: caffeLabel}) diff = backwardOutput['data']
caffeInput = caffeInput + learning_rate * diff # gradient ascent: http://spin.atomicobject.com/2014/06/24/gradient-descent-linear-regression/
# Normalize & visualize caffeInput caffeInput -= caffeInput.min() caffeInput /= caffeInput.max()
caffeInputVisualize = visSquare(caffeInput.transpose(0,2,3,1))# figure: notAddedMeanImage.png
# Normalize & visualize caffeInput meanImage meanImage = np.load(imagenetMeanFile) #(3,256,256) meanImage = meanImage.transpose(1,2,0) # (256,256,3)
mpimg.imsave('/home/dkkim930122/Desktop/meanImage.png', arr=meanImage, format='png')
meanImage = Image.open('/home/dkkim930122/Desktop/meanImage.png')
meanImageResize = meanImage.resize((227,227),Image.ANTIALIAS) meanImageResize.save('/home/dkkim930122/Desktop/meanImageResize.jpg', 'JPEG', quality=100)
meanImageResize = plt.imread('/home/dkkim930122/Desktop/meanImageResize.jpg') #Issue: meanImageResize(:,:,0) (:,:,1) (:,:,2) have different figure than meanImage (:,:,0) (:,:,1) (:,:,2)
meanImageResize -= meanImageResize.min() meanImageResize /= meanImageResize.max()
plt.imshow(meanImageResize) plt.show()
# Normalize & visualize meanmeanAddedImage meanAddedImage = caffeInput[0] + meanImageResize.transpose(2,0,1) #(3,227,227) + (3,227,227)
meanAddedImage -= meanAddedImage.min() meanAddedImage /= meanAddedImage.max()
plt.imshow(meanAddedImage.transpose(1,2,0)) plt.show()# figure: addedMeanImage.png
classDeconvnet()
# IMPORT LIBRARY
import numpy as np
import matplotlib.pyplot as plt
import sys, caffe, os, operator
import caffe.io
from caffe.proto import caffe_pb2
from PIL import Image
import matplotlib.animation as animation
# GLOBAL VARIABLES: machine-specific caffe model / mean-image paths.
imagenetfc8ModelFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_fc8_deploy.prototxt'
imagenetTrainedModel = '/home/dkkim930122/caffe-master/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
imagenetMeanFile = '/home/dkkim930122/caffe-master/examples/deconvnet/imagenet_mean.npy'

# Gradient-ascent parameters.
maxIteration = 250     # number of ascent steps
learning_rate = 12500  # step size applied to the input-image gradient
# Target ImageNet class (100: goose, 281: cat, 544: dumbbell); the -1
# presumably converts a 1-based class id to the net's 0-based output
# index — confirm against the synset list.
labelIndex = 544 - 1
# CLASS DECONVNET
def classDeconvnet():
print "In classDeconvnet() function"
# Create net (fc8)
caffe.set_mode_cpu()
netfc8 = caffe.Classifier(imagenetfc8ModelFile, imagenetTrainedModel,
mean=np.load(imagenetMeanFile).mean(1).mean(1),
channel_swap=(2,1,0),
raw_scale=255,
image_dims=(256, 256))
# Display Net structure (fc8)
print 'Net structure (loss):'
for blobName, v in netfc8.blobs.items():
print (blobName, v.data.shape)
lastBlobName =
blobName
# Define caffe input: zero image
caffeInput = np.zeros((1,3,227,227))
# Caffe label
caffeLabel = np.zeros((1,1,1,1000))# for SIGMOID_CROSS_ENTROPY_LOSS (https://groups.google.com/forum/#!searchin/caffe-users/SIGMOID_CROSS_ENTROPY_LOSS/caffe-users/19XfmJqg34Q/Id_jnm_L0iIJ)
caffeLabel[0,0,0,labelIndex] = 1;
# Define array for storing forwardOutput (used for chaning lr)
storeForwardOutput = np.array([])
# load mean image and center crop
meanImage = np.load(imagenetMeanFile) #(3,256,256)
cropMeanImage = meanImage[:,14:241,14:241]# Center crop
for iteration in range(maxIteration):
print '< iteration at:', iteration,'>'
# forward
forwardOutput = netfc8.forward(data=caffeInput)
storeForwardOutput = np.append(storeForwardOutput,forwardOutput[lastBlobName][0,labelIndex])
print 'forward Output is:', forwardOutput[lastBlobName][0,labelIndex]
print 'forward max is :', forwardOutput[lastBlobName].max()
print 'learning_rate is :', learning_rate
# # After some iteration, reduce lr if forward output change is small
# if iteration > 50:
# if abs(storeForwardOutput[-1] - storeForwardOutput[-2]) < 1:
# learning_rate = learning_rate * 0.99
# backward
backwardOutput = netfc8.backward(**{netfc8.outputs[0]: caffeLabel})
diff = backwardOutput['data']
# Check if diff is zero. If yes, then break
if np.linalg.norm(diff) == 0:
print 'ERROR: diff norm is zero! Not good deconvnet parms!'
break;
caffeInput = caffeInput + learning_rate * diff # gradient ascent: http://spin.atomicobject.com/2014/06/24/gradient-descent-linear-regression/
addedImage = caffeInput[0] + cropMeanImage
addedImage -= addedImage.min()
addedImage /= addedImage.max()
plt.imshow(addedImage.transpose(1,2,0))
plt.savefig(str(iteration) + '.png', bbox_inches='tight')
# Final iteration image visualization
addedImage = caffeInput[0] + cropMeanImage
addedImage -= addedImage.min()
addedImage /= addedImage.max()
plt.imshow(addedImage.transpose(1,2,0))
plt.show()
classDeconvnet()
To view this discussion on the web visit https://groups.google.com/d/msgid/caffe-users/6f77537e-719f-41be-a408-66e8ad794162%40googlegroups.com.
--
You received this message because you are subscribed to a topic in the Google Groups "Caffe Users" group.
To unsubscribe from this topic, visit https://groups.google.com/d/topic/caffe-users/5iwXJ3NmHJA/unsubscribe.
To unsubscribe from this group and all its topics, send an email to caffe-users...@googlegroups.com.
To post to this group, send email to caffe...@googlegroups.com.
name: "CaffeNet"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 227
input_dim: 227
force_backward: true