#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 20:24:18 2017

@author: hu-tom
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import photo_data_generator
import cv2
from PIL import Image
camera_id = 0
istrained = True
model = "save/model.ckpt"
checkpoint_dir = "save"
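# istrained=True restores the checkpoint at save/model.ckpt before training
# continues; set it to False to start from freshly initialized weights.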
def weight_variable(shape, name):
    # small random values break symmetry between units
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name):
    # small positive bias keeps ReLU units active early in training
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)

def conv2d(x, W):
    # strides[0] = strides[3] = 1; stride 1 over height and width
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 pooling with stride 2 halves height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
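# With SAME padding, each 2x2/stride-2 pool gives ceil(side/2), so the four
# pooling layers below take the spatial size 28 -> 14 -> 7 -> 4 -> 2.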
def compute_accuracy(v_xs, v_ys):
    global prediction
    # keep_prob=1.0 disables dropout: dropout is only wanted during training
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1.0})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1.0})
    return result
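# (Note: compute_accuracy adds new equal/reduce_mean ops to the graph on every
# call; harmless in this short script, but it slowly grows the default graph.)
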
xs = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 grayscale input
ys = tf.placeholder(tf.float32, [None, 2])    # one-hot labels; index 1 is the "Me" class
keep_prob = tf.placeholder(tf.float32)        # dropout keep probability
x_image = tf.reshape(xs, [-1, 28, 28, 1])     # [n_samples, 28, 28, 1]

# conv1 layer
W_conv1 = weight_variable([3, 3, 1, 32], name='wc1')     # 3x3 patch, in channels 1, out 32
b_conv1 = bias_variable([32], name='bc1')
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                          # output size 14x14x32

# conv2 layer
W_conv2 = weight_variable([3, 3, 32, 64], name='wc2')    # 3x3 patch, in 32, out 64
b_conv2 = bias_variable([64], name='bc2')
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                          # output size 7x7x64

# conv3 layer
W_conv3 = weight_variable([5, 5, 64, 128], name='wc3')   # 5x5 patch, in 64, out 128
b_conv3 = bias_variable([128], name='bc3')
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3) # output size 7x7x128
h_pool3 = max_pool_2x2(h_conv3)                          # output size 4x4x128
#print(h_pool3)

# conv4 layer
W_conv4 = weight_variable([5, 5, 128, 256], name='wc4')  # 5x5 patch, in 128, out 256
b_conv4 = bias_variable([256], name='bc4')
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4) # output size 4x4x256
h_pool4 = max_pool_2x2(h_conv4)                          # output size 2x2x256
#print(h_pool4)
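# Shape flow: 28x28x1 -> 14x14x32 -> 7x7x64 -> 4x4x128 -> 2x2x256; flattened,
# that is 2*2*256 = 1024 features for the first fully connected layer.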

## conv5 layer (disabled)
#W_conv5 = weight_variable([5, 5, 256, 512])  # 5x5 patch, in 256, out 512
#b_conv5 = bias_variable([512])
#h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
#h_pool5 = max_pool_2x2(h_conv5)  # would give 1x1x512
#print(h_pool5)

# fc1 layer
W_f1 = weight_variable([2 * 2 * 256, 1024], name='wf1')
b_f1 = bias_variable([1024], name='bf1')
# [n_samples, 2, 2, 256] ->> [n_samples, 2*2*256]
h_pool4_flat = tf.reshape(h_pool4, [-1, 2 * 2 * 256])
h_f1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_f1) + b_f1)
h_f1_drop = tf.nn.dropout(h_f1, keep_prob)

# fc2 layer
W_f2 = weight_variable([1024, 2], name='wf2')
b_f2 = bias_variable([2], name='bf2')

prediction = tf.nn.softmax(tf.matmul(h_f1_drop, W_f2) + b_f2)

# Clip the softmax output so log() never sees an exact zero.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(tf.clip_by_value(prediction, 1e-8, 1.0)), axis=1))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
#train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
#cost_func = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=ys))
#train_step = tf.train.AdamOptimizer(1e-5).minimize(cost_func)
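# Note on the commented-out alternative above: tf.nn.softmax_cross_entropy_with_logits
# applies softmax internally, so it must be fed the raw pre-softmax logits
# (tf.matmul(h_f1_drop, W_f2) + b_f2), not the already-softmaxed `prediction`.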

init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
sess = tf.Session()

sess.run(init)
if istrained:
    # The graph is already built above, so restoring the variables is enough;
    # re-importing the meta graph would create a duplicate copy of every op.
    saver.restore(sess, model)
mnist = photo_data_generator.dataresize("./tr_data")
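# Assumed structure of the return value (inferred from the indexing below,
# not verified against photo_data_generator): mnist[0] = (train_images,
# train_labels) and mnist[2] = (test_images, test_labels), with images
# flattened to 784 floats and labels one-hot of length 2.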
for i in range(100):
    batch_xs, batch_ys = mnist[0]  # the whole training set is used as one batch
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        accu = compute_accuracy(mnist[2][0], mnist[2][1])
        print(accu)
        if accu >= 0.98:
            break
    '''
    for j in range(5):
        im_test = mnist[0][0][j]
        im_test = im_test.reshape(1, 784)
        print('pre=', prediction.eval(feed_dict={xs: im_test, keep_prob: 1.0}, session=sess))
        print('answer=', mnist[0][1][j])
    '''
    #print(sess.run(y_pre))
saver_path = saver.save(sess, model)  # save the model to save/model.ckpt
print("Model saved in file:", saver_path)
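# Live recognition: grab webcam frames, find faces with a Haar cascade,
# and classify each face crop with the CNN trained above.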
cap = cv2.VideoCapture(camera_id)
classfier = cv2.CascadeClassifier(r'/PATH/TO/YOUR/OpenCV-tmp/opencv/data/haarcascades/haarcascade_frontalface_alt.xml')
if cap.isOpened():
    print('camera opened')
else:
    print('failed to open the camera!')
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    # OpenCV frames are BGR, so convert with COLOR_BGR2GRAY
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceRects = classfier.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3,
                                           flags=cv2.CASCADE_SCALE_IMAGE, minSize=(32, 32))
    if len(faceRects) > 0:
        for faceRect in faceRects:
            x, y, w, h = faceRect
            img = frame[y:y + h, x:x + w]
            img = Image.fromarray(img)
            img = img.convert('L').resize((28, 28))  # grayscale 28x28, matching the training crops
            width, height = img.size
            # The normalization must match what photo_data_generator applies to
            # the training images; /256. is kept from the original code.
            img = np.asarray(img, dtype='float64') / 256.
            img = img.reshape(1, height * width)
            # keep_prob=1.0: no dropout at inference time
            pre = prediction.eval(feed_dict={xs: img, keep_prob: 1.0}, session=sess)
            if pre[0][1] >= 0.7:  # class 1 ("Me") needs at least 70% confidence
                color = (0, 255, 0)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, thickness=2)
                cv2.putText(frame, 'Me', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            else:
                color = (255, 0, 0)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, thickness=2)
                cv2.putText(frame, 'Others', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.imshow('identify', frame)
    c = cv2.waitKey(1)
    if c & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
#sess.close()