I am doing a classification problem using Caffe. All I can see is the training and testing accuracy — how do I see the predicted output label for each sample, so that I can tell where it goes wrong?
By the way, how can I inspect the learned kernel weights/features of the inner layers?
Thank you!
"""Evaluate a trained Caffe classifier over an LMDB dataset.

Prints a misclassification line for each wrong prediction, a running
accuracy figure, and a final confusion matrix.

Usage:
    python eval.py --proto deploy.prototxt --model snapshot.caffemodel --lmdb test_lmdb
"""
import sys
import argparse
from collections import defaultdict

import numpy as np
import lmdb
import caffe

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--proto', type=str, required=True)  # deploy prototxt
    parser.add_argument('--model', type=str, required=True)  # trained .caffemodel
    parser.add_argument('--lmdb', type=str, required=True)   # LMDB with test data
    args = parser.parse_args()

    count = 0
    correct = 0
    matrix = defaultdict(int)  # (real, predicted) -> count
    labels_set = set()

    # Select CPU mode *before* constructing the net so the net is built on CPU.
    caffe.set_mode_cpu()
    net = caffe.Net(args.proto, args.model, caffe.TEST)
    lmdb_env = lmdb.open(args.lmdb)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()

    for key, value in lmdb_cursor:
        # Each LMDB value is a serialized Datum protobuf (label + image bytes).
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum)
        image = image.astype(np.uint8)
        # NOTE(review): raw LMDB pixels are fed straight into the net here.
        # If training applied mean subtraction or input scaling (check the
        # training prototxt's transform_param), the same preprocessing must
        # be applied before the forward pass — TODO confirm.

        out = net.forward_all(data=np.asarray([image]))
        # Predicted class = index of the highest softmax probability.
        plabel = int(out['prob'][0].argmax(axis=0))

        count += 1
        iscorrect = label == plabel
        correct += 1 if iscorrect else 0
        matrix[(label, plabel)] += 1
        labels_set.update([label, plabel])

        if not iscorrect:
            print("\rError: key=%s, expected %i but predicted %i"
                  % (key, label, plabel))

        sys.stdout.write("\rAccuracy: %.1f%%" % (100. * correct / count))
        sys.stdout.flush()

    print(str(correct) + " out of " + str(count) + " were classified correctly")

    # Print the confusion matrix in deterministic (sorted-label) order.
    print("")
    print("Confusion matrix:")
    print("(r , p) | count")
    for l in sorted(labels_set):
        for pl in sorted(labels_set):
            print("(%i , %i) | %i" % (l, pl, matrix[(l, pl)]))
@Axel Angel: I am using your code for confusion matrix. Unfortunately I am getting error like
Error: 10.jpg, expected 1 but predicted 0
Accuracy: 96.5%1378 out of 1428 were classified correctly
Confusion matrix:
(real , predicted) | count
(0 , 0) | 1378
(0 , 1) | 0
(1 , 0) | 50
(1 , 1) | 0
This means all the positive-class images are misclassified. Could you kindly suggest what the issue may be? The training and testing code work fine. Here the LMDB images are used directly — do I need to subtract the mean image, as I did during training? Please help.