
Sample code for accessing the camera in real time with Python

Updated: November 25, 2020 09:46:03   Author: StarZhai
This article presents sample code for accessing the camera in real time with Python, to help you understand and use Python better. Interested readers can follow along.

Accessing the camera

import numpy as np
import cv2

cap = cv2.VideoCapture(0)    # 0 opens the local camera; a URL opens a network camera; a file path opens a local video

while True:
  # read one frame
  ret, frame = cap.read()

  # grayscale version
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  cv2.imshow('gray', gray)

  # original color frame
  cv2.imshow('frame', frame)

  # press q to quit
  if cv2.waitKey(1) & 0xFF == ord('q'):
    break

cap.release()
cv2.destroyAllWindows()
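
The comment in the block above notes that cv2.VideoCapture accepts more than the device index 0. Here is a minimal sketch of the other two sources; the RTSP address is a made-up placeholder, and why.mp4 is the sample video used further down in this article:

# open a network camera by its stream URL; the address below is a placeholder, not a real endpoint
ip_cam = cv2.VideoCapture('rtsp://192.168.1.10:554/stream')
# open a local video file instead of a camera
video = cv2.VideoCapture('why.mp4')
if not ip_cam.isOpened():
  print('Could not open the network stream')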

OpenCV code

# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

# set the working directory
import os
os.chdir('E:\\0yfl\\SH-spyder-workspace\\')
os.path.abspath('.')


import numpy as np
import cv2

#1.1 read an image with imread; display it with imshow; write it out with imwrite
# grayscale only
img=cv2.imread('Myhero.jpg',cv2.IMREAD_GRAYSCALE)
# color
img=cv2.imread('Myhero.jpg',cv2.IMREAD_COLOR)
# color including the alpha channel
img=cv2.imread('Myhero.jpg',cv2.IMREAD_UNCHANGED)
print(img)
# create a window whose size can be adjusted
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow('image',img)
k=cv2.waitKey(0)
# if Esc is pressed
if k==27:
  # exit
  cv2.destroyAllWindows()
# if s is pressed
elif k==ord('s'):
  # save the picture and exit
  cv2.imwrite('Myhero_out.png',img)
  cv2.destroyAllWindows()


#1.2 reading video
# open the built-in camera
cap=cv2.VideoCapture(0)
# or open a video file
cap=cv2.VideoCapture('why.mp4')
# get the frame rate (frames per second) of the video
fps=cap.get(cv2.CAP_PROP_FPS)
i=0
while(True):
  # read one frame
  ret,frame=cap.read()
  # stop once the whole video has been read
  if not ret:
    break
  # convert to grayscale
  gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
  # frame counter used to number the exported files
  i = i + 1
  # export one frame per second
  if i/fps==int(i/fps):
    # the output file name is why + number + .png
    # to export the grayscale version, replace frame with gray below
    cv2.imwrite("why"+str(int(i/fps))+".png",frame)
  # press q to quit early
  if cv2.waitKey(1)==ord('q'):
    break

cap.release()
cv2.destroyAllWindows()


#1.3 modifying image pixels
rangexmin=100
rangexmax=120
rangeymin=90
rangeymax=100
img=cv2.imread('Myhero.jpg',0)
# set the selected rectangle of pixels to white
img[rangexmin:rangexmax,rangeymin:rangeymax]=255
cv2.imwrite('Myhero_out2.png',img)

# splitting and merging image channels (method 1)
img=cv2.imread('Myhero.jpg',cv2.IMREAD_COLOR)   # split needs a 3-channel image
b,g,r=cv2.split(img)
img=cv2.merge((b,g,r))

# convert PNG to EPS; the result is quite blurry
from matplotlib import pyplot as plt
img=cv2.imread('wechat1.png',cv2.IMREAD_COLOR)
# OpenCV loads images as BGR, so convert to RGB before saving with matplotlib
plt.imsave('wechat_out.eps',cv2.cvtColor(img,cv2.COLOR_BGR2RGB))

# blend two images in a given ratio
img1=cv2.imread('Myhero.jpg',cv2.IMREAD_COLOR)
img2=cv2.imread('Myhero_out.png',cv2.IMREAD_COLOR)
dst=cv2.addWeighted(img1,0.5,img2,0.5,0)
cv2.imwrite("Myhero_combi.jpg",dst)


#1.4 bitwise operations
# load the images
img1=cv2.imread("Myhero.jpg")
img2=cv2.imread("why1.png")
# the second image is the larger one
rows,cols,channels=img1.shape
# take an ROI of img2 with the same size as img1
ROI=img2[0:rows,0:cols]
img2gray=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
# pixels below 175 become 0, pixels above 175 become 255
ret,mask=cv2.threshold(img2gray,175,255,cv2.THRESH_BINARY)
cv2.imwrite("Myhero_mask.jpg",mask)
# mask_inv = 255 - mask
mask_inv=cv2.bitwise_not(mask)
cv2.imwrite("Myhero_mask_inv.jpg",mask_inv)
# in the white area of the mask, keep the ROI (this becomes the background image)
img2_bg=cv2.bitwise_and(ROI,ROI,mask=mask)
cv2.imwrite("Myhero_pic2_backgroud.jpg",img2_bg)
# everywhere outside the mask, keep img1 (this becomes the foreground image)
img1_fg=cv2.bitwise_and(img1,img1,mask=mask_inv)
cv2.imwrite("Myhero_pic2_frontgroud.jpg",img1_fg)
# add the foreground image to the background image
dst = cv2.add(img2_bg,img1_fg)
img2[0:rows, 0:cols ] = dst
cv2.imwrite("Myhero_pic2_addgroud.jpg",dst)
#finished

# method 2 for building a mask: threshold a colour range in HSV
# grab a frame
ret,frame=cap.read()
# convert to HSV
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
# set the threshold range for blue
lower_blue=np.array([110,50,50])
upper_blue=np.array([130,255,255])
# build the mask from the threshold range
mask=cv2.inRange(hsv,lower_blue,upper_blue)
# apply the mask to the original frame with a bitwise AND
res=cv2.bitwise_and(frame,frame,mask=mask)


# scale the image up; cubic interpolation is used, so the loss of sharpness is limited
res=cv2.resize(img1,None,fx=2,fy=2,interpolation=cv2.INTER_CUBIC)
cv2.imwrite("Myhero_bigger.jpg",res)
# a second way to call resize: specify the output size explicitly
height,width=img.shape[:2]
res=cv2.resize(img,(2*width,2*height),interpolation=cv2.INTER_CUBIC)

# Canny edge detection does not work well on real-world photos, but is acceptable on hand-drawn images
img = cv2.imread('why3.png',0)
edges = cv2.Canny(img,50,100)
cv2.imwrite("why3_edge.png",edges)

# find contours and keep the contour points in contours
img=cv2.imread('why129.png')
imgray=cv2.imread('why129.png',cv2.IMREAD_GRAYSCALE)
ret,thresh = cv2.threshold(imgray,127,255,0)
cv2.imwrite("2.jpg",thresh)
# note: this three-value return is the OpenCV 3.x signature; in OpenCV 4.x findContours returns only contours and hierarchy
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(img, contours, -1, (0,255,0), 3)
cv2.imwrite("3.jpg",img)


# contour approximation
img = cv2.imread('why3.png',0)
ret,thresh = cv2.threshold(img,127,255,0)
contours,hierarchy = cv2.findContours(thresh, 1, 2)
cnt = contours[0] 
# approximate the contour with approxPolyDP
epsilon = 0.1*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)

img = cv2.drawContours(img, [approx], -1, (0,255,0), 3)
cv2.imwrite("4.jpg",img)

from matplotlib import pyplot as plt
# image recognition / template matching
img_rgb = cv2.imread('why174.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
img2=img_gray.copy()
template = cv2.imread('0temp.png',0)
w, h = template.shape[::-1]
# six matching methods are available
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

for meth in methods:
  img = img2.copy()
  # eval turns the method name string into the corresponding OpenCV constant
  method = eval(meth)
  # run template matching with the chosen method
  res = cv2.matchTemplate(img,template,method)
  min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
  # for the SQDIFF methods the best match is the minimum; for the others it is the maximum
  if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
    top_left = min_loc
  else:
    top_left = max_loc
  bottom_right = (top_left[0] + w, top_left[1] + h)
  # draw a rectangle around the match
  cv2.rectangle(img,top_left, bottom_right, 255, 2)

  plt.subplot(121),plt.imshow(res,cmap = 'gray')
  plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
  plt.subplot(122),plt.imshow(img,cmap = 'gray')
  plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
  plt.suptitle(meth)

  plt.show()

# the matching results here are rather poor
# methods 3, 5 and 6 work slightly better: cv2.TM_CCORR, cv2.TM_SQDIFF and cv2.TM_SQDIFF_NORMED
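
If a single best match per method is not useful, a common alternative (a sketch, not part of the original article) is to threshold the normalized score map and keep every location above the cutoff; the 0.8 threshold and the output file name below are assumptions chosen for illustration.

# keep every match whose TM_CCOEFF_NORMED score exceeds the assumed 0.8 threshold
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= 0.8)
# loc holds (rows, cols); reverse it to iterate over (x, y) points
for pt in zip(*loc[::-1]):
  cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
cv2.imwrite("why174_matches.png", img_rgb)   # hypothetical output name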

# face detection in video
# reference: https://blog.csdn.net/wsywb111/article/details/79152425
import cv2
from PIL import Image
cap=cv2.VideoCapture("why.mp4")
# tell OpenCV to use the face detection cascade classifier
classfier=cv2.CascadeClassifier("E:\\0yfl\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_alt2.xml")
count=0
while cap.isOpened():
  ret,frame=cap.read()
  if not ret:
    break
  grey=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
  faceRect=classfier.detectMultiScale(grey,scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
  if len(faceRect)>0:
    count=count+1
print(count)


# at the quality of frame 137 the face can be detected; frame 111 was not detected, probably because the face is in profile
# crop out the detected faces
image_name="why111.png"
frame=cv2.imread(image_name,0)
if not (frame is None):
  # load the cascade classifier
  classfier=cv2.CascadeClassifier("E:\\0yfl\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_alt2.xml")
  # run the classifier to get the face positions, stored in faceRect; several faces can be detected
  faceRect=classfier.detectMultiScale(frame,scaleFactor=3.0, minNeighbors=3, minSize=(32, 32))
  count=0
  for (x1,y1,w,h) in faceRect:
    count=count+1
    # crop the face region out of the picture and save each detected face
    Image.open(image_name).crop((x1,y1,x1+w,y1+h)).save(image_name.split(".")[0]+"_face_"+str(count)+".png")
  if count==0:
    print ("No face detected!")
else:
  print ("Picture "+ image_name +" does not exist in "+os.path.abspath("."))
# draw rectangles around the faces
from PIL import Image,ImageDraw
image_name="why111.png"
frame=cv2.imread(image_name,0)
if not (frame is None):
  classfier=cv2.CascadeClassifier("E:\\0yfl\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_alt2.xml")
  faceRect=classfier.detectMultiScale(frame,scaleFactor=3.0, minNeighbors=3, minSize=(32, 32))
  # draw the rectangles
  img = Image.open(image_name)
  draw_instance = ImageDraw.Draw(img)
  count=0
  for (x1,y1,w,h) in faceRect:
    draw_instance.rectangle((x1,y1,x1+w,y1+h), outline=(255, 0,0))
    img.save('drawfaces_'+image_name)
    count=count+1
  if count==0:
    print ("No face detected!")
else:
  print ("Picture "+ image_name +" does not exist in "+os.path.abspath("."))


# detectFaces() returns the rectangle coordinates of every face in the image (top-left and bottom-right corners of each rectangle)
# it uses the Haar-feature cascade classifier haarcascade_frontalface_default.xml; other pre-trained xml files in the haarcascades directory can be used instead
# note: the pre-trained classifiers in the haarcascades directory must be given a grayscale image as input
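
As a minimal sketch of that note (assuming the opencv-python wheel, which ships the pre-trained cascades under cv2.data.haarcascades; 'frame.png' and the output name are placeholder file names), a color image is converted to grayscale before detectMultiScale is called:

# cascades must be fed a grayscale image; the file names here are placeholders
color = cv2.imread('frame.png', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32)):
  cv2.rectangle(color, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imwrite('frame_faces.png', color)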


# full-body detection: the same pattern as above, but with the haarcascade_fullbody.xml classifier
from PIL import Image,ImageDraw
image_name="why63.png"
frame=cv2.imread(image_name,0)
if not (frame is None):
  classfier=cv2.CascadeClassifier("E:\\0yfl\\opencv-master\\data\\haarcascades\\haarcascade_fullbody.xml")
  faceRect=classfier.detectMultiScale(frame,scaleFactor=3.0, minNeighbors=3, minSize=(32, 32))
  # draw the rectangles
  img = Image.open(image_name)
  draw_instance = ImageDraw.Draw(img)
  count=0
  for (x1,y1,w,h) in faceRect:
    draw_instance.rectangle((x1,y1,x1+w,y1+h), outline=(255, 0,0))
    img.save('drawfaces_'+image_name)
    count=count+1
  if count==0:
    print ("No person detected!")
else:
  print ("Picture "+ image_name +" does not exist in "+os.path.abspath("."))

That is the full sample code for accessing the camera in real time with Python. For more material on accessing the camera from Python, please check out the other related articles on 脚本之家!
