Building a WeChat Mini Program for Face Attributes (Ubuntu 16.04 + TensorFlow)
Published: 2019-03-04


I have been learning WeChat mini-program development recently; this post walks through the following steps:

1. Data packaging

## creat_data.py -- pack the CelebA images and attribute labels into TFRecords
import cv2
import dlib  # dlib is used to detect and crop the face region
import tensorflow as tf

## Read the attribute annotation file; each column index maps to one attribute
anno_file = "/home/lsy/CelebA/Anno/list_attr_celeba.txt"
ff = open(anno_file)
anno_info = ff.readlines()
attribute_class = anno_info[1].split(" ")
print(attribute_class)

## Print the column indices of the four attributes we keep:
## Eyeglasses, Male, Young, Smiling
idx = 0
for i in attribute_class:
    if i == "Eyeglasses":
        print("Eyeglasses", idx)
    elif i == "Male":
        print("Male", idx)
    elif i == "Young":
        print("Young", idx)
    elif i == "Smiling":
        print("Smiling", idx)
    idx += 1

## TFRecord writers for the train/test split
writer_train = tf.python_io.TFRecordWriter("train.tfrecords")
writer_test = tf.python_io.TFRecordWriter("test.tfrecords")

## dlib frontal face detector
detector = dlib.get_frontal_face_detector()

for idx in range(2, len(anno_info)):
    info = anno_info[idx]
    ## Some rows use two spaces as a separator; collapse them to one
    attr_val = info.replace("  ", " ").split(" ")
    ## Column 0 is the file name, so every attribute index is shifted by +1:
    ## Eyeglasses=16, Male=21, Smiling=32, Young=40
    print(attr_val[0])
    print(attr_val[16])
    print(attr_val[21])
    print(attr_val[32])
    print(attr_val[40])
    ## Path of the downloaded dataset
    im_data = cv2.imread("/home/lsy/CelebA/Img/img_celeba.7z/img_celeba/" + attr_val[0])
    rects = detector(im_data, 0)
    ## Skip images where no face is detected
    if len(rects) == 0:
        continue
    x1 = rects[0].left()
    y1 = rects[0].top()
    x2 = rects[0].right()
    y2 = rects[0].bottom()
    ## Extend the box upward by 30% so the crop keeps the whole face
    y1 = int(max(y1 - 0.3 * (y2 - y1), 0))
    ## Visualization (optional)
    # cv2.rectangle(im_data, (x1, y1), (x2, y2), (255, 0, 0), 2)
    # cv2.imshow("11", im_data)
    # cv2.waitKey(0)
    ## Data cleaning: drop boxes that are too small or out of bounds
    if y2 - y1 < 50 or x2 - x1 < 50 or x1 < 0 or y1 < 0:
        continue
    im_data = im_data[y1:y2, x1:x2]
    im_data = cv2.resize(im_data, (128, 128))
    ex = tf.train.Example(
        features=tf.train.Features(
            feature={
                "image": tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[im_data.tobytes()])
                ),
                "Eyeglasses": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(attr_val[16])])
                ),
                "Male": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(attr_val[21])])
                ),
                ## In list_attr_celeba.txt column 32 is Smiling and column 40 is Young
                "Smiling": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(attr_val[32])])
                ),
                "Young": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(attr_val[40])])
                )
            }
        )
    )
    ## Serialize; the last 5% of rows form the test set
    if idx > len(anno_info) * 0.95:
        writer_test.write(ex.SerializeToString())
    else:
        writer_train.write(ex.SerializeToString())

writer_train.close()
writer_test.close()
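To make sure the packing step worked, here is a small sanity check I would run afterwards (this is my own addition, not part of the original pipeline; it only assumes the train.tfrecords file and feature names produced by the script above). It reads one example back and decodes the image bytes and a raw label:

import numpy as np
import tensorflow as tf

## Iterate over the serialized records and inspect the first one
it = tf.python_io.tf_record_iterator("train.tfrecords")
example = tf.train.Example()
example.ParseFromString(next(it))
feat = example.features.feature
im = np.frombuffer(feat["image"].bytes_list.value[0], dtype=np.uint8)
print(im.reshape(128, 128, 3).shape)     # should print (128, 128, 3)
print(feat["Male"].int64_list.value[0])  # raw CelebA label, still -1 or 1 here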

2. Model training

## train.py -- build the model and train it
import tensorflow as tf
from tensorflow.contrib.layers import *
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base

slim = tf.contrib.slim

## slim.nets contains many backbones; you can swap in a different one
def inception_v3(images, drop_out=0.5, is_training=True):
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        "decay": 0.9997,
        "epsilon": 0.00001,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_var"]
        }
    }
    weights_regularizer = tf.contrib.layers.l2_regularizer(0.00004)
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_regularizer=weights_regularizer,
            trainable=True):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                activation_fn=tf.nn.relu,
                normalizer_fn=batch_norm,
                normalizer_params=batch_norm_params):
            ## Backbone network
            nets, endpoints = inception_v3_base(images)
            print(nets)
            print(endpoints)
            net = tf.reduce_mean(nets, axis=[1, 2])  # global average pooling over H, W (NHWC)
            net = tf.nn.dropout(net, drop_out, name="droplast")
            net = flatten(net, scope="flatten")
    ## One fully connected head per attribute
    net_eyeglass = slim.fully_connected(net, 2, activation_fn=None)
    net_young = slim.fully_connected(net, 2, activation_fn=None)
    net_male = slim.fully_connected(net, 2, activation_fn=None)
    net_smiling = slim.fully_connected(net, 2, activation_fn=None)
    return net_eyeglass, net_young, net_male, net_smiling

## Inputs
input_x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])
label_eyeglasses = tf.placeholder(tf.int64, shape=[None, 1])
label_young = tf.placeholder(tf.int64, shape=[None, 1])
label_male = tf.placeholder(tf.int64, shape=[None, 1])
label_smiling = tf.placeholder(tf.int64, shape=[None, 1])

logits_eyeglasses, logits_young, logits_male, logits_smiling = inception_v3(input_x, 0.5, True)

loss_eyeglasses = tf.losses.sparse_softmax_cross_entropy(labels=label_eyeglasses,
                                                         logits=logits_eyeglasses)
loss_young = tf.losses.sparse_softmax_cross_entropy(labels=label_young,
                                                    logits=logits_young)
loss_male = tf.losses.sparse_softmax_cross_entropy(labels=label_male,
                                                   logits=logits_male)
loss_smiling = tf.losses.sparse_softmax_cross_entropy(labels=label_smiling,
                                                      logits=logits_smiling)

logits_eyeglasses = tf.nn.softmax(logits_eyeglasses)
logits_young = tf.nn.softmax(logits_young)
logits_male = tf.nn.softmax(logits_male)
logits_smiling = tf.nn.softmax(logits_smiling)

loss = loss_eyeglasses + loss_young + loss_male + loss_smiling

## L2 regularization
reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_set)

## Learning rate schedule
global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.0001, global_step, decay_steps=1000,
                                decay_rate=0.98, staircase=False)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss + l2_loss, global_step)

## Input pipeline
def get_one_batch(batch_size, type):
    if type == 0:
        file_list = tf.gfile.Glob("train.tfrecords")
    else:
        file_list = tf.gfile.Glob("test.tfrecords")
    reader = tf.TFRecordReader()
    ## num_epochs=None: keep looping over the data until the loss converges
    file_queue = tf.train.string_input_producer(
        file_list, num_epochs=None, shuffle=True
    )
    _, se = reader.read(file_queue)
    if type == 0:
        batch = tf.train.shuffle_batch([se], batch_size, capacity=batch_size,
                                       min_after_dequeue=batch_size // 2)
    else:
        batch = tf.train.batch([se], batch_size, capacity=batch_size)
    features = tf.parse_example(batch, features={
        "image": tf.FixedLenFeature([], tf.string),
        "Eyeglasses": tf.FixedLenFeature([1], tf.int64),
        "Male": tf.FixedLenFeature([1], tf.int64),
        "Young": tf.FixedLenFeature([1], tf.int64),
        "Smiling": tf.FixedLenFeature([1], tf.int64),
    })
    ## Decode, and map the labels from {-1, 1} to {0, 1}
    batch_im = features["image"]
    batch_eye = (features["Eyeglasses"] + 1) // 2
    batch_male = (features["Male"] + 1) // 2
    batch_young = (features["Young"] + 1) // 2
    batch_smiling = (features["Smiling"] + 1) // 2
    batch_im = tf.decode_raw(batch_im, tf.uint8)
    batch_im = tf.cast(tf.reshape(batch_im, (batch_size, 128, 128, 3)), tf.float32)
    return batch_im, batch_eye, batch_male, batch_young, batch_smiling

## Training
tr_im_batch, tr_label_eye_batch, tr_label_male, \
tr_label_young, tr_label_smiling = get_one_batch(64, 0)
te_im_batch, te_label_eye_batch, te_label_male, \
te_label_young, te_label_smiling = get_one_batch(64, 1)

saver = tf.train.Saver()

with tf.Session() as session:
    init_ops = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    session.run(init_ops)
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(sess=session, coord=coord)
    ## Optionally resume from the latest checkpoint
    # ckpt = tf.train.get_checkpoint_state("models")
    # if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    #     saver.restore(session, ckpt.model_checkpoint_path)
    for step in range(100000):
        batch_x, batch_eye, batch_male, batch_young, batch_smiling = session.run(
            [tr_im_batch, tr_label_eye_batch, tr_label_male, tr_label_young, tr_label_smiling])
        _, loss_val, l2_loss_val, lr_val, global_step_val, \
        loss_eyeglasses_val, loss_male_val, loss_smiling_val, loss_young_val = \
            session.run([train_op, loss, l2_loss, lr, global_step,
                         loss_eyeglasses, loss_male, loss_smiling, loss_young],
                        feed_dict={
                            input_x: batch_x,
                            label_eyeglasses: batch_eye,
                            label_male: batch_male,
                            label_smiling: batch_smiling,
                            label_young: batch_young
                        })
        print("ite:{}, loss:{}, l2_loss:{}, loss_eye:{}, loss_male:{}, loss_young:{}, loss_smiling:{}, lr:{}".format(
            step, loss_val, l2_loss_val, loss_eyeglasses_val, loss_male_val,
            loss_young_val, loss_smiling_val, lr_val))
        ## Save a checkpoint every 10,000 steps
        if step % 10000 == 0:
            saver.save(sess=session, save_path="models/models-{}.ckpt".format(step))
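If you want a rough feel for how well the heads are doing during training, a small accuracy op can be bolted onto the graph above. The sketch below is my own illustration, not part of the original script; it reuses the names defined above (logits_male after the softmax, label_male, and the te_* test batches):

## Per-head accuracy: argmax over the 2 classes vs. the 0/1 label
correct_male = tf.equal(tf.argmax(logits_male, axis=1),
                        tf.reshape(label_male, [-1]))
acc_male = tf.reduce_mean(tf.cast(correct_male, tf.float32))

## Inside the session loop, every few hundred steps:
# te_x, te_eye, te_male, te_young, te_smiling = session.run(
#     [te_im_batch, te_label_eye_batch, te_label_male,
#      te_label_young, te_label_smiling])
# print("male acc:", session.run(acc_male,
#                                feed_dict={input_x: te_x, label_male: te_male}))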

3. Model testing

## test.py -- run the frozen model on CelebA images
import glob

import cv2
import dlib
import numpy as np
import tensorflow as tf

pb_path = "face_attribute.pb"
detector = dlib.get_frontal_face_detector()

sess = tf.Session()
with sess.as_default():
    with tf.gfile.FastGFile(pb_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")

pred_eyeglasses = sess.graph.get_tensor_by_name("Softmax:0")
pred_young = sess.graph.get_tensor_by_name("Softmax_1:0")
pred_male = sess.graph.get_tensor_by_name("Softmax_2:0")
pred_smiling = sess.graph.get_tensor_by_name("Softmax_3:0")

im_list = glob.glob("/home/lsy/CelebA/Img/img_celeba.7z/img_celeba/*")
for im_path in im_list:
    im_data = cv2.imread(im_path)
    rects = detector(im_data, 0)
    if len(rects) == 0:
        continue
    x1 = rects[0].left()
    y1 = rects[0].top()
    x2 = rects[0].right()
    y2 = rects[0].bottom()
    ## Same crop rule as in creat_data.py
    y1 = int(max(y1 - 0.3 * (y2 - y1), 0))
    im_data = im_data[y1:y2, x1:x2]
    im_data = cv2.resize(im_data, (128, 128))
    print(im_data.shape)
    [eye, young, male, smiling] = sess.run(
        [pred_eyeglasses, pred_young, pred_male, pred_smiling],
        {"Placeholder:0": np.expand_dims(im_data, 0)})
    print("eye,young,male,smiling", eye, young, male, smiling)
    cv2.imshow("111", im_data)
    cv2.waitKey(0)
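Each of the four outputs is a 1x2 probability vector. A tiny helper like the following (my own sketch, assuming the same output order as the script above) turns them into a readable line; under the (x+1)//2 label mapping, class 1 means the attribute is present:

def describe(eye, young, male, smiling):
    ## Probability of class 1 ("has the attribute") for each head
    names = ["Eyeglasses", "Young", "Male", "Smiling"]
    probs = [eye[0][1], young[0][1], male[0][1], smiling[0][1]]
    return ", ".join("{}: {:.2f}".format(n, p) for n, p in zip(names, probs))

## e.g. print(describe(eye, young, male, smiling))
## -> "Eyeglasses: 0.02, Young: 0.91, Male: 0.13, Smiling: 0.78"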

4. Model conversion (freezing to a .pb file)

## freeze.py -- rebuild the graph in inference mode, restore the latest
## checkpoint, and export a frozen face_attribute.pb
import tensorflow as tf
from tensorflow.contrib.layers import *
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base

slim = tf.contrib.slim

def inception_v3(images, drop_out=0.5, is_training=True):
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        "decay": 0.9997,
        "epsilon": 0.00001,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_var"]
        }
    }
    weights_regularizer = tf.contrib.layers.l2_regularizer(0.00004)
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_regularizer=weights_regularizer,
            trainable=True):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                activation_fn=tf.nn.relu,
                normalizer_fn=batch_norm,
                normalizer_params=batch_norm_params):
            nets, endpoints = inception_v3_base(images)
            print(nets)
            print(endpoints)
            net = tf.reduce_mean(nets, axis=[1, 2])
            net = tf.nn.dropout(net, drop_out, name="droplast")
            net = flatten(net, scope="flatten")
    net_eyeglass = slim.fully_connected(net, 2, activation_fn=None)
    net_young = slim.fully_connected(net, 2, activation_fn=None)
    net_male = slim.fully_connected(net, 2, activation_fn=None)
    net_smiling = slim.fully_connected(net, 2, activation_fn=None)
    return net_eyeglass, net_young, net_male, net_smiling

input_x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])
label_eyeglasses = tf.placeholder(tf.int64, shape=[None, 1])
label_young = tf.placeholder(tf.int64, shape=[None, 1])
label_male = tf.placeholder(tf.int64, shape=[None, 1])
label_smiling = tf.placeholder(tf.int64, shape=[None, 1])

## Inference mode: keep_prob = 1.0, is_training = False
logits_eyeglasses, logits_young, logits_male, logits_smiling = inception_v3(input_x, 1.0, False)

loss_eyeglasses = tf.losses.sparse_softmax_cross_entropy(labels=label_eyeglasses, logits=logits_eyeglasses)
loss_young = tf.losses.sparse_softmax_cross_entropy(labels=label_young, logits=logits_young)
loss_male = tf.losses.sparse_softmax_cross_entropy(labels=label_male, logits=logits_male)
loss_smiling = tf.losses.sparse_softmax_cross_entropy(labels=label_smiling, logits=logits_smiling)

logits_eyeglasses = tf.nn.softmax(logits_eyeglasses)
logits_young = tf.nn.softmax(logits_young)
logits_male = tf.nn.softmax(logits_male)
logits_smiling = tf.nn.softmax(logits_smiling)

## The argmax ops become the named output nodes of the frozen graph
logits_eyeglasses = tf.argmax(logits_eyeglasses, axis=1)
logits_young = tf.argmax(logits_young, axis=1)
logits_male = tf.argmax(logits_male, axis=1)
logits_smiling = tf.argmax(logits_smiling, axis=1)
print(logits_eyeglasses, logits_young, logits_male, logits_smiling)

loss = loss_eyeglasses + loss_young + loss_male + loss_smiling
reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_set)

global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.0001, global_step, decay_steps=1000,
                                decay_rate=0.98, staircase=False)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss + l2_loss, global_step)

def get_one_batch(batch_size, type):
    if type == 0:
        file_list = tf.gfile.Glob("train.tfrecords")
    else:
        file_list = tf.gfile.Glob("test.tfrecords")
    reader = tf.TFRecordReader()
    file_queue = tf.train.string_input_producer(
        file_list, num_epochs=None, shuffle=True
    )
    _, se = reader.read(file_queue)
    if type == 0:
        batch = tf.train.shuffle_batch([se], batch_size, capacity=batch_size,
                                       min_after_dequeue=batch_size // 2)
    else:
        batch = tf.train.batch([se], batch_size, capacity=batch_size)
    features = tf.parse_example(batch, features={
        "image": tf.FixedLenFeature([], tf.string),
        "Eyeglasses": tf.FixedLenFeature([1], tf.int64),
        "Male": tf.FixedLenFeature([1], tf.int64),
        "Young": tf.FixedLenFeature([1], tf.int64),
        "Smiling": tf.FixedLenFeature([1], tf.int64),
    })
    batch_im = features["image"]
    batch_eye = (features["Eyeglasses"] + 1) // 2
    batch_male = (features["Male"] + 1) // 2
    batch_young = (features["Young"] + 1) // 2
    batch_smiling = (features["Smiling"] + 1) // 2
    batch_im = tf.decode_raw(batch_im, tf.uint8)
    batch_im = tf.cast(tf.reshape(batch_im, (batch_size, 128, 128, 3)), tf.float32)
    return batch_im, batch_eye, batch_male, batch_young, batch_smiling

tr_im_batch, tr_label_eye_batch, tr_label_male, \
tr_label_young, tr_label_smiling = get_one_batch(64, 0)
te_im_batch, te_label_eye_batch, te_label_male, \
te_label_young, te_label_smiling = get_one_batch(64, 1)

saver = tf.train.Saver()

with tf.Session() as session:
    init_ops = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    session.run(init_ops)
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(sess=session, coord=coord)
    ckpt = tf.train.get_checkpoint_state("models")
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print(ckpt.model_checkpoint_path)
        saver.restore(session, ckpt.model_checkpoint_path)
    ## Freeze: bake the variables into constants, keeping the four ArgMax outputs
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        session, session.graph.as_graph_def(),
        ['ArgMax', 'ArgMax_1', 'ArgMax_2', 'ArgMax_3'])
    with tf.gfile.FastGFile("face_attribute.pb", "wb") as f:
        f.write(output_graph_def.SerializeToString())
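Before wiring the .pb file into a server it is worth confirming the output node names. A minimal check I would run (my own sketch, assuming face_attribute.pb sits in the working directory) lists the relevant ops:

import tensorflow as tf

gd = tf.GraphDef()
with tf.gfile.FastGFile("face_attribute.pb", "rb") as f:
    gd.ParseFromString(f.read())
tf.import_graph_def(gd, name="")
for op in tf.get_default_graph().get_operations():
    if op.type in ("ArgMax", "Softmax", "Placeholder"):
        print(op.type, op.name)
## Expect: Placeholder, Softmax..Softmax_3, ArgMax..ArgMax_3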

5. Wrapping the models in a Flask service

## Flask server -- face detection, recognition, attributes, and landmarks
## behind one HTTP API
from flask import Flask, request
from object_detection.utils import ops as utils_ops
import os
import numpy as np

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import cv2
from gevent import monkey
monkey.patch_all()
import tensorflow as tf
import dlib

app = Flask(__name__)

PATH_TO_FROZEN_GRAPH = "/home/lsy/flask_server_facerecognition/frozen_inference_graph.pb"
PATH_TO_LABELS = "/home/lsy/flask_server_facerecognition/object_detection/face_label_map.pbtxt"
IMAGE_SIZE = (256, 256)

## Face detection graph
detection_sess = tf.Session()
with detection_sess.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
            'num_detections', 'detection_boxes', 'detection_scores',
            'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
        if 'detection_masks' in tensor_dict:
            # The following processing is only for a single image
            detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
            detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
            # Reframing translates the mask from box coordinates to image
            # coordinates and fits the image size
            real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
            detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
            detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
            detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                detection_masks, detection_boxes, IMAGE_SIZE[0], IMAGE_SIZE[1])
            detection_masks_reframed = tf.cast(
                tf.greater(detection_masks_reframed, 0.5), tf.uint8)
            # Follow the convention by adding back the batch dimension
            tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

## Face feature (recognition) graph
face_feature_sess = tf.Session()
ff_pb_path = "face_recognition_model.pb"
with face_feature_sess.as_default():
    ff_graph_def = tf.GraphDef()
    with tf.gfile.GFile(ff_pb_path, 'rb') as fid:
        serialized_graph = fid.read()
        ff_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(ff_graph_def, name='')
        ff_images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        ff_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        ff_embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

## Face attribute graph (the face_attribute.pb exported in step 4)
face_attribute_sess = tf.Session()
fa_pb_path = "/home/lsy/flask_server_facerecognition/face_attribute.pb"
with face_attribute_sess.as_default():
    fa_graph_def = tf.GraphDef()
    with tf.gfile.GFile(fa_pb_path, 'rb') as fid:
        serialized_graph = fid.read()
        fa_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(fa_graph_def, name='')
        pred_eyeglasses = tf.get_default_graph().get_tensor_by_name("ArgMax:0")
        pred_young = tf.get_default_graph().get_tensor_by_name("ArgMax_1:0")
        pred_male = tf.get_default_graph().get_tensor_by_name("ArgMax_2:0")
        pred_smiling = tf.get_default_graph().get_tensor_by_name("ArgMax_3:0")
        face_attribute_image_tensor = tf.get_default_graph().get_tensor_by_name("Placeholder:0")

## Face landmark graph
face_landmark_sess = tf.Session()
fl_pb_path = "/home/lsy/facial_landmark/landmark.pb"
with face_landmark_sess.as_default():
    fl_graph_def = tf.GraphDef()
    with tf.gfile.GFile(fl_pb_path, 'rb') as fid:
        serialized_graph = fid.read()
        fl_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(fl_graph_def, name='')
    landmark_tensor = tf.get_default_graph().get_tensor_by_name("fully_connected_1/Relu:0")

## dlib detector and 68-point shape predictor
predictor = dlib.shape_predictor('/home/lsy/facial_landmark/shape_predictor_68_face_landmarks.dat')
detector = dlib.get_frontal_face_detector()


@app.route('/face_attribute', methods=['POST', 'GET'])
def face_attribute():
    f = request.files.get("file")
    upload_path = os.path.join("tmp/tmp_attribute." + f.filename.split(".")[-1])
    f.save(upload_path)
    print(upload_path)
    im_data = cv2.imread(upload_path)
    rects = detector(im_data, 0)
    if len(rects) == 0:
        return "error"
    x1 = rects[0].left()
    y1 = rects[0].top()
    x2 = rects[0].right()
    y2 = rects[0].bottom()
    y1 = int(max(y1 - 0.3 * (y2 - y1), 0))
    im_data = im_data[y1:y2, x1:x2]
    im_data = cv2.resize(im_data, (128, 128))
    [eye_glass, young, male, smiling] = face_attribute_sess.run(
        [pred_eyeglasses, pred_young, pred_male, pred_smiling],
        feed_dict={face_attribute_image_tensor: np.expand_dims(im_data, 0)}
    )
    return "{},{},{},{}".format(eye_glass[0], young[0], male[0], smiling[0])


@app.route('/face_landmark', methods=['POST', 'GET'])
def face_landmark():
    ## Image upload
    f = request.files.get('file')
    print(f)
    ## Note: the tmp/ folder must exist beforehand, otherwise saving fails
    upload_path = os.path.join("tmp/tmp_landmark." + f.filename.split(".")[-1])
    print(upload_path)
    f.save(upload_path)
    ## Face detection
    im_raw = cv2.imread(upload_path)
    sp = im_raw.shape
    im_data = cv2.resize(im_raw, IMAGE_SIZE)
    output_dict = detection_sess.run(tensor_dict,
                                     feed_dict={image_tensor: np.expand_dims(im_data, 0)})
    # All outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    for i in range(len(output_dict['detection_scores'])):
        if output_dict['detection_scores'][i] > 0.1:
            bbox = output_dict['detection_boxes'][i]
            y1 = bbox[0]
            x1 = bbox[1]
            y2 = bbox[2]
            x2 = bbox[3]
            print(output_dict['detection_scores'][i], x1, y1, x2, y2)
            ## Crop the face region from the original-resolution image
            ## (the normalized box coords are scaled by the original shape sp)
            y1 = int((y1 + (y2 - y1) * 0.2) * sp[0])
            x1 = int(x1 * sp[1])
            y2 = int(y2 * sp[0])
            x2 = int(x2 * sp[1])
            face_data = im_raw[y1:y2, x1:x2]
            cv2.imwrite("face_landmark.jpg", face_data)
            face_data = cv2.resize(face_data, (128, 128))
            pred = face_landmark_sess.run(landmark_tensor,
                                          {"Placeholder:0": np.expand_dims(face_data, 0)})
            pred = pred[0]
            ## Map the 68 predicted points back to full-image coordinates
            res = []
            for i in range(0, 136, 2):
                res.append(str((pred[i] * (x2 - x1) + x1) / sp[1]))
                res.append(str((pred[i + 1] * (y2 - y1) + y1) / sp[0]))
            return ",".join(res)
    return "error"


@app.route('/face_landmark_tf', methods=['POST', 'GET'])
def face_landmark_tf():
    f = request.files.get("file")
    upload_path = os.path.join("tmp/tmp_landmark." + f.filename.split(".")[-1])
    f.save(upload_path)
    print(upload_path)
    im_data = cv2.imread(upload_path)
    im_data = cv2.cvtColor(im_data, cv2.COLOR_BGR2GRAY)
    sp = im_data.shape
    rects = detector(im_data, 0)
    res = []
    for face in rects:
        shape = predictor(im_data, face)
        for pt in shape.parts():
            x1 = face.left()
            y1 = face.top()
            x2 = face.right()
            y2 = face.bottom()
            ## Each point: normalized inside the box, then inside the image
            ptx = (pt.x - x1) * 1.0 / (x2 - x1)
            pty = (pt.y - y1) * 1.0 / (y2 - y1)
            res.append(str(ptx))
            res.append(str(pty))
            res.append(str(pt.x * 1.0 / sp[1]))
            res.append(str(pt.y * 1.0 / sp[0]))
        if len(res) == 136 * 2:
            res = ",".join(res)
            print(res)
            return res
    return "error"


@app.route("/")
def helloword():
    return 'Hello World!'


@app.route('/upload', methods=['POST', 'GET'])
def upload():
    f = request.files.get('file')
    print(f)
    ## Note: the tmp/ folder must exist beforehand
    upload_path = os.path.join("tmp/tmp." + f.filename.split(".")[-1])
    print(upload_path)
    f.save(upload_path)
    return upload_path


@app.route("/face_detect")
def inference():
    im_url = request.args.get("url")
    im_data = cv2.imread(im_url)
    sp = im_data.shape
    im_data = cv2.resize(im_data, IMAGE_SIZE)
    output_dict = detection_sess.run(tensor_dict,
                                     feed_dict={image_tensor: np.expand_dims(im_data, 0)})
    # All outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    x1 = y1 = x2 = y2 = 0
    for i in range(len(output_dict['detection_scores'])):
        if output_dict['detection_scores'][i] > 0.1:
            bbox = output_dict['detection_boxes'][i]
            y1 = bbox[0]
            x1 = bbox[1]
            y2 = bbox[2]
            x2 = bbox[3]
            print(output_dict['detection_scores'][i], x1, y1, x2, y2)
    return str([x1, y1, x2, y2])


## Image standardization (prewhitening)
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1 / std_adj)
    return y


def read_image(path):
    im_data = cv2.imread(path)
    im_data = prewhiten(im_data)
    im_data = cv2.resize(im_data, (160, 160))
    # 1 * h * w * 3
    return im_data


@app.route("/face_feature")
def face_feature():
    im_data1 = read_image("/home/lsy/64_CASIA-FaceV5/"
                          "crop_image_160/0/0_0000.jpg")
    im_data1 = np.expand_dims(im_data1, axis=0)
    emb1 = face_feature_sess.run(ff_embeddings,
                                 feed_dict={ff_images_placeholder: im_data1,
                                            ff_train_placeholder: False})
    strr = ",".join(str(i) for i in emb1[0])
    return strr


@app.route('/face_register', methods=['POST', 'GET'])
def face_register():
    ## Image upload
    f = request.files.get('file')
    print(f)
    upload_path = os.path.join("tmp/tmp." + f.filename.split(".")[-1])
    print(upload_path)
    f.save(upload_path)
    ## Face detection
    im_raw = cv2.imread(upload_path)
    sp = im_raw.shape
    im_data = cv2.resize(im_raw, IMAGE_SIZE)
    output_dict = detection_sess.run(tensor_dict,
                                     feed_dict={image_tensor: np.expand_dims(im_data, 0)})
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    mess = "fail"
    for i in range(len(output_dict['detection_scores'])):
        if output_dict['detection_scores'][i] > 0.1:
            bbox = output_dict['detection_boxes'][i]
            y1 = bbox[0]
            x1 = bbox[1]
            y2 = bbox[2]
            x2 = bbox[3]
            print(output_dict['detection_scores'][i], x1, y1, x2, y2)
            ## Crop the face region from the original-resolution image
            y1 = int(y1 * sp[0])
            x1 = int(x1 * sp[1])
            y2 = int(y2 * sp[0])
            x2 = int(x2 * sp[1])
            face_data = im_raw[y1:y2, x1:x2]
            im_data = prewhiten(face_data)  # preprocessing
            im_data = cv2.resize(im_data, (160, 160))
            im_data1 = np.expand_dims(im_data, axis=0)
            ## Feature extraction
            emb1 = face_feature_sess.run(ff_embeddings,
                                         feed_dict={ff_images_placeholder: im_data1,
                                                    ff_train_placeholder: False})
            strr = ",".join(str(i) for i in emb1[0])
            ## Write the feature vector to a txt file
            with open("face/feature.txt", "w") as fw:
                fw.writelines(strr)
            mess = "success"
            break
    return mess


@app.route('/face_login', methods=['POST', 'GET'])
def face_login():
    ## Steps: upload -> face detection -> feature extraction -> load the
    ## registered face (for large-scale sign-in, load at startup or use a
    ## search engine such as ES instead of reading it here) -> measure the
    ## similarity to the registered face -> return the result
    f = request.files.get('file')
    print(f)
    upload_path = os.path.join("tmp/login_tmp." + f.filename.split(".")[-1])
    print(upload_path)
    f.save(upload_path)
    ## Face detection
    im_raw = cv2.imread(upload_path)
    sp = im_raw.shape
    im_data = cv2.resize(im_raw, IMAGE_SIZE)
    output_dict = detection_sess.run(tensor_dict,
                                     feed_dict={image_tensor: np.expand_dims(im_data, 0)})
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    for i in range(len(output_dict['detection_scores'])):
        if output_dict['detection_scores'][i] > 0.1:
            bbox = output_dict['detection_boxes'][i]
            y1 = bbox[0]
            x1 = bbox[1]
            y2 = bbox[2]
            x2 = bbox[3]
            print(output_dict['detection_scores'][i], x1, y1, x2, y2)
            ## Crop the face region from the original-resolution image
            y1 = int(y1 * sp[0])
            x1 = int(x1 * sp[1])
            y2 = int(y2 * sp[0])
            x2 = int(x2 * sp[1])
            face_data = im_raw[y1:y2, x1:x2]
            cv2.imwrite("face.jpg", face_data)
            im_data = prewhiten(face_data)  # preprocessing
            im_data = cv2.resize(im_data, (160, 160))
            im_data1 = np.expand_dims(im_data, axis=0)
            ## Feature extraction
            emb1 = face_feature_sess.run(ff_embeddings,
                                         feed_dict={ff_images_placeholder: im_data1,
                                                    ff_train_placeholder: False})
            ## Load the registered feature and measure the L2 distance
            with open("face/feature.txt") as fr:
                fea_str = fr.readlines()
            emb2 = np.array([float(ss) for ss in fea_str[0].split(",")])
            dist = np.linalg.norm(emb1 - emb2)
            print("dist---->", dist)
            if dist < 0.3:
                return "success"
            else:
                return "fail"
    return "fail"


@app.route("/face_dis")
def face_dis():
    im_data1 = read_image("/home/lsy/flask_server_facedetection/tmp/crop_face.jpg")
    im_data1 = np.expand_dims(im_data1, axis=0)
    emb1 = face_feature_sess.run(ff_embeddings,
                                 feed_dict={ff_images_placeholder: im_data1,
                                            ff_train_placeholder: False})
    im_data2 = read_image("tmp/face.jpg")
    im_data2 = np.expand_dims(im_data2, axis=0)
    emb2 = face_feature_sess.run(ff_embeddings,
                                 feed_dict={ff_images_placeholder: im_data2,
                                            ff_train_placeholder: False})
    dist = np.linalg.norm(emb1 - emb2)
    return str(dist)


if __name__ == '__main__':
    ## Use your own machine's IP; the mini-program front end talks to this address
    app.run(host="192.168.0.1", port=90, debug=True)
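Once the service is up, the mini-program (or any HTTP client) simply POSTs an image to one of the endpoints. Here is a minimal Python client sketch for trying /face_attribute from another machine; it is my own illustration, and the host, port, and test_face.jpg are placeholders matching the app.run call above:

import requests

with open("test_face.jpg", "rb") as f:
    r = requests.post("http://192.168.0.1:90/face_attribute",
                      files={"file": ("test_face.jpg", f, "image/jpeg")})
## The route returns "eyeglasses,young,male,smiling" as 0/1 flags
print(r.text)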

 

6. WeChat mini-program front-end code

This part isn't fully working yet; I'll update the post once it's done!

Reposted from: http://pfde.baihongyu.com/
