Vivado + Jupyter Notebook Pattern Recognition

  • Load the base overlay and import OpenCV, NumPy, Matplotlib, and the PYNQ video libraries
 {
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "from matplotlib import pyplot as plt\n",
    "from pynq.overlays.base import BaseOverlay\n",
    "from pynq.lib.video import *"
   ]
  },
  • Configure the HDMI output format (a quick sanity check follows the cell below)
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "base = BaseOverlay(\"base.bit\")\n",
    "\n",
    "hdmi_out = base.video.hdmi_out\n",
    "Mode = VideoMode(640,480,24)\n",
    "hdmi_out = base.video.hdmi_out\n",
    "hdmi_out.configure(Mode,PIXEL_BGR)\n",
    "hdmi_out.start()"
   ]
  },
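  • A quick sanity check of the HDMI output path (a minimal sketch, assuming the cell above has already run; it uses only the newframe()/writeframe() calls that the final cell also relies on):
    outframe = hdmi_out.newframe()
    outframe[:, :, :] = [0, 0, 255]  # solid red frame in BGR order
    hdmi_out.writeframe(outframe)    # the connected monitor should show red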
  • Processing functions
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def SkinDetect(frame):\n",
    "    ycrcb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb) \n",
    "    (_, cr, cb) = cv2.split(ycrcb)\n",
    "    _, cr = cv2.threshold(cr, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n",
    "    res = cv2.bitwise_and(frame,frame,mask = cr)\n",
    "    res = cv2.dilate(res, None, iterations=2)\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def CalRatio(cnts):\n",
    "    area1 = cv2.contourArea(cnts)\n",
    "    x, y, w, h = cv2.boundingRect(cnts)\n",
    "    area2 = w * h\n",
    "    ratio = area1 / area2\n",
    "    return x,y,w,h,ratio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def HandExtract(frame):\n",
    "    stan = 1.0  \n",
    "    HandFlag = False\n",
    "\n",
    "    frame1 = SkinDetect(frame)  \n",
    "    rows, cols, channels = frame.shape\n",
    "    res = np.zeros((((rows, cols, channels))), dtype=np.uint8) \n",
    "    \n",
    "    \n",
    "    frame_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n",
    "    _, thres = cv2.threshold(frame_gray, 20, 255, cv2.THRESH_BINARY)\n",
    "    thres = cv2.dilate(thres, None, iterations=1)\n",
    "    img,cnts, _ = cv2.findContours(thres.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
    "    c = 0\n",
    "    ratio = 0\n",
    "    if (len(cnts) > 0):\n",
    "        cnts1 = sorted(cnts, key=cv2.contourArea, reverse=True)  \n",
    "        _, _, _, _, ratio1 = CalRatio(cnts1[0])\n",
    "        ratio = ratio1\n",
    "        if (len(cnts) > 1):  \n",
    "            _, _, _, _, ratio2 = CalRatio(cnts1[1])\n",
    "            if ratio1 < ratio2:\n",
    "                c = cnts1[0]\n",
    "            else:\n",
    "                c = cnts1[1]\n",
    "                ratio = ratio2\n",
    "        else:\n",
    "            c = cnts1[0]\n",
    "       \n",
    "        if (cv2.contourArea(c) > 300 and ratio < stan):\n",
    "            HandFlag = True\n",
    "            x, y, w, h, ratio = CalRatio(c)\n",
    "            res[y:y + h, x:x + w] = frame[y:y + h, x:x + w]\n",
    "            res = cv2.bitwise_and(res, res, mask=thres)\n",
    "    return c, HandFlag, res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calSiftFeature(frame):\n",
    "    img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n",
    "    _, thres = cv2.threshold(img,50,255,cv2.THRESH_BINARY)\n",
    "    cnts, _ = cv2.findContours(thres.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
    "    c = max(cnts,key=cv2.contourArea)\n",
    "    hull = cv2.convexHull(c, returnPoints=False)\n",
    "    des = cv2.convexityDefects(c,hull)\n",
    "    #print(des.shape)\n",
    "    return des"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calFeatVec(features, centers):\n",
    "    featVec = np.zeros((1, 100))\n",
    "    for i in range(0, features.shape[0]):\n",
    "        fi = features[i]\n",
    "        diffMat = np.tile(fi, (100, 1)) - centers\n",
    "        sqSum = (diffMat ** 2).sum(axis=1)\n",
    "        dist = sqSum ** 0.5\n",
    "        sortedIndices = dist.argsort()\n",
    "        idx = sortedIndices[0]  # index of the nearest center\n",
    "        featVec[0][idx] += 1\n",
    "    return featVec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def testImg(img):\n",
    "    _, centers = np.load(\"./vocabulary.npy\", allow_pickle=True)\n",
    "    #svm = cv2.ml.SVM_load(\"./svm.xml\")\n",
    "    svm = cv2.ml.SVM_create()\n",
    "    svm.load(\"./svm.xml\")\n",
    "    features = calSiftFeature(img)\n",
    "    featVec = calFeatVec(features, centers)\n",
    "    case = np.float32(featVec)\n",
    "    dict_svm = svm.predict(case)\n",
    "    dict_svm = int(dict_svm[1])\n",
    "    return dict_svm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cap = cv2.VideoCapture(0) \n",
    "cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n",
    "cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n",
    "\n",
    "text = \"The Result is: No Gesture\"\n",
    "result = 0\n",
    "res = []\n",
    "frame_count = 0\n",
    "\n",
    "print(cap.isOpened())\n",
    "\n",
    "while (cap.isOpened()):\n",
    "    ret, frame = cap.read()\n",
    "    if (ret):\n",
    "        img = frame.copy()\n",
    "        class_num = 0\n",
    "        \n",
    "        roi = img[120:360, 160:480]\n",
    "        gesture, HandFlag, hand_img = HandExtract(roi)\n",
    "        \n",
    "        if (HandFlag):\n",
    "            hull = cv2.convexHull(gesture, returnPoints=True)\n",
    "#             try:\n",
    "#                 class_num = testImg(hand_img)\n",
    "#                 print(class_num)\n",
    "#             except:\n",
    "#                 pass\n",
    "            class_num = testImg(hand_img)\n",
    "\n",
    "        \n",
    "        if (frame_count < 5):\n",
    "            res.append(class_num)\n",
    "            frame_count += 1\n",
    "        else:\n",
    "            count = np.bincount(res)\n",
    "            result = np.argmax(count)\n",
    "            frame_count = 0\n",
    "            res = []\n",
    "\n",
    "        if (result == 0):\n",
    "            text = \"The Result is: No Gesture\"\n",
    "        else:\n",
    "            text = \"The Result is: Gesture \" + str(result)\n",
    "    \n",
    "        cv2.putText(img, text, (10, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (0, 0, 255), 1, 4)\n",
    "        outframe = hdmi_out.newframe()\n",
    "        outframe[:,:,:] = img[:,:,:]\n",
    "        hdmi_out.writeframe(outframe)\n",
    "    else: \n",
    "        print(\"Failed to read from camera.\")\n",
    "        \n",
    "        \n",
    "cap.release()\n",
    "hdmi_out.close()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
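
  • Offline training (not part of the notebook): testImg loads ./vocabulary.npy and ./svm.xml, which have to be produced beforehand. The sketch below shows one plausible way to build them, reusing calSiftFeature and calFeatVec from the cells above; the train/<label>/*.png folder layout and the 100-center vocabulary size (matching calFeatVec) are assumptions, not part of the original project.

    import glob, os
    import cv2
    import numpy as np

    K = 100  # vocabulary size; must match the histogram length in calFeatVec

    # 1. Collect convexity-defect features from every training image
    all_feats, per_image = [], []
    for path in sorted(glob.glob("train/*/*.png")):       # hypothetical layout: train/<label>/<name>.png
        label = int(os.path.basename(os.path.dirname(path)))
        des = calSiftFeature(cv2.imread(path))            # (N, 1, 4) defects, or None
        if des is None:
            continue
        feats = des.reshape(-1, 4).astype(np.float32)
        all_feats.append(feats)
        per_image.append((label, feats))

    # 2. Cluster all features into K vocabulary centers with k-means
    data = np.vstack(all_feats)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.1)
    _, labels, centers = cv2.kmeans(data, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    voc = np.empty(2, dtype=object)                       # testImg unpacks this as (_, centers)
    voc[0], voc[1] = labels, centers
    np.save("vocabulary.npy", voc)

    # 3. Encode each image as a bag-of-words histogram and train the SVM
    samples = np.vstack([calFeatVec(f, centers) for _, f in per_image]).astype(np.float32)
    responses = np.array([lab for lab, _ in per_image], dtype=np.int32)
    svm = cv2.ml.SVM_create()
    svm.setType(cv2.ml.SVM_C_SVC)
    svm.setKernel(cv2.ml.SVM_RBF)
    svm.train(samples, cv2.ml.ROW_SAMPLE, responses)
    svm.save("svm.xml")

  Running this once (on the board, or on a PC with the same OpenCV version) leaves vocabulary.npy and svm.xml next to the notebook, which is all testImg needs.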