How to control computer volume with Python
Controlling computer volume with Python
There are two files in total. Put them in the same directory and run the second one; save file 1 as 主函數(shù) (the second script imports it by that name), and name the second file whatever you like.
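The reason file 1 must be named 主函數(shù) is that the second script loads it as a module. As a minimal sketch of that relationship (the two code lines are taken from file 2), assuming the usual pip packages for the imports used here (opencv-python, mediapipe, numpy, comtypes, pycaw) are installed:

```python
# File 2 finds file 1 by its module name, so file 1 must be saved as 主函數(shù).py
import 主函數(shù) as htm

detector = htm.handDect()  # the hand-tracking helper class defined in 主函數(shù).py
```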
1、
```python
import cv2
import mediapipe as mp
import time


class handDect():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.myhand = mp.solutions.hands
        self.hands = self.myhand.Hands(static_image_mode=self.mode,
                                       max_num_hands=self.maxHands,
                                       min_detection_confidence=self.detectionCon,
                                       min_tracking_confidence=self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        # Run the MediaPipe hand model on an RGB copy of the frame and draw the landmarks
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.myhand.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        # Convert the normalised landmarks of one hand into pixel coordinates
        lmList = []
        if self.results.multi_hand_landmarks:
            if self.results.multi_hand_landmarks[handNo]:
                myHand = self.results.multi_hand_landmarks[handNo]
                for id, lm in enumerate(myHand.landmark):
                    h, w, c = img.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    lmList.append([id, cx, cy])
                    # if draw:
                    #     cv2.putText(img, str(int(id)), (cx, cy), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        return lmList


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDect()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            # x-distance between thumb tip (4) and index tip (8); y-distance between index tip (8) and middle tip (12)
            mm = abs(lmList[4][1] - lmList[8][1])
            kk = abs(lmList[8][2] - lmList[12][2])
            if mm < 6 and kk > 50:
                nn = "hello, world"
                cv2.putText(img, str(nn), (60, 80), cv2.FONT_HERSHEY_PLAIN, 3, (60, 60, 255), 3)
                # Optional: speak a greeting through Windows SAPI
                # import win32com.client
                # speaker = win32com.client.Dispatch("SAPI.SpVoice")
                # speaker.Speak("你好, 許軻, 你今天看起來(lái)真帥")
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
```
2、
```python
import cv2
import mediapipe as mp
import time
from ctypes import cast, POINTER
import numpy as np
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import 主函數(shù) as htm
import math

pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = htm.handDect()

# Get the default speaker endpoint through pycaw and query its volume range (in dB)
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volume.GetMute()
volume.GetMasterVolumeLevel()
volRange = volume.GetVolumeRange()
print(volRange)
minVol = volRange[0]
maxVol = volRange[1]
vol = 0
volBar = 400
volPer = 0

while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmlist = detector.findPosition(img, draw=False)
    if len(lmlist) != 0:
        # Thumb tip (landmark 4) and index-finger tip (landmark 8) in pixel coordinates
        x1, y1 = lmlist[4][1], lmlist[4][2]
        x2, y2 = lmlist[8][1], lmlist[8][2]
        cv2.circle(img, (x1, y1), 10, (255, 0, 255), cv2.FILLED)
        cv2.circle(img, (x2, y2), 10, (255, 0, 255), cv2.FILLED)
        cx = int((x1 + x2) / 2)
        cy = int((y1 + y2) / 2)
        # Distance between the two fingertips
        length = math.hypot(x2 - x1, y2 - y1)
        print(length)
        # Map the fingertip distance to the volume level, the bar height and the percentage text
        vol = np.interp(length, [0, 200], [minVol, maxVol])
        volBar = np.interp(length, [50, 300], [400, 150])
        volPer = np.interp(length, [50, 300], [0, 100])
        volume.SetMasterVolumeLevel(vol, None)
        cv2.rectangle(img, (50, 150), (85, 400), (0, 255, 0), 3)
        cv2.rectangle(img, (50, int(volBar)), (85, 400), (0, 255, 0), cv2.FILLED)
        if length < 10:
            cv2.circle(img, (cx, cy), 10, (0, 255, 0), cv2.FILLED)
        else:
            cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
    cv2.putText(img, "music " + str(int(volPer)), (170, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
    cv2.imshow("image", img)
    cv2.waitKey(1)
```
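Both scripts index the landmark list with 4, 8 and 12; in MediaPipe's 21-point hand model these are the thumb tip, the index-finger tip and the middle-finger tip. A minimal sketch to confirm the indices from the HandLandmark enum:

```python
import mediapipe as mp

# MediaPipe numbers the 21 hand landmarks; the HandLandmark enum maps names to indices.
hand_lm = mp.solutions.hands.HandLandmark
print(hand_lm.THUMB_TIP.value,
      hand_lm.INDEX_FINGER_TIP.value,
      hand_lm.MIDDLE_FINGER_TIP.value)  # expected output: 4 8 12
```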
Controlling computer volume with hand gestures
I saw the computer-vision expert 恩培 on TikTok and followed along to complete this simple little computer-vision project.
"""
Date: 2021-11-16
功能:手勢(shì)操作電腦音量
1、使用OpenCV讀取攝像頭視頻流;
2、識(shí)別手掌關(guān)鍵點(diǎn)像素坐標(biāo);
3、根據(jù)拇指和食指指尖的坐標(biāo),利用勾股定理計(jì)算距離;
4、將距離等比例轉(zhuǎn)為音量大小,控制電腦音量
"""
# 導(dǎo)入OpenCV
import cv2
# 導(dǎo)入mediapipe
import mediapipe as mp
# 導(dǎo)入電腦音量控制模塊
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
# 導(dǎo)入其他依賴包
import time
import math
import numpy as np
class HandControlVolume:
    def __init__(self):
        # Initialise MediaPipe
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        self.mp_hands = mp.solutions.hands
        # Get the speaker endpoint and its volume range
        devices = AudioUtilities.GetSpeakers()
        interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
        self.volume = cast(interface, POINTER(IAudioEndpointVolume))
        self.volume.SetMute(0, None)
        self.volume_range = self.volume.GetVolumeRange()

    # Main entry point
    def recognize(self):
        # Used to compute the refresh rate (FPS)
        fpsTime = time.time()
        # Open the webcam with OpenCV
        cap = cv2.VideoCapture(0)
        # Display resolution
        resize_w = 640
        resize_h = 480
        # Initial values for the on-screen volume bar
        rect_height = 0
        rect_percent_text = 0

        with self.mp_hands.Hands(min_detection_confidence=0.7,
                                 min_tracking_confidence=0.5,
                                 max_num_hands=2) as hands:
            while cap.isOpened():
                success, image = cap.read()
                if not success:
                    print("Empty frame.")
                    continue
                image = cv2.resize(image, (resize_w, resize_h))

                # Improve performance: mark the frame read-only while it is processed
                image.flags.writeable = False
                # Convert to RGB
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                # Mirror the image
                image = cv2.flip(image, 1)
                # Run the MediaPipe hand model
                results = hands.process(image)

                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                # Is at least one hand visible?
                if results.multi_hand_landmarks:
                    # Iterate over every detected hand
                    for hand_landmarks in results.multi_hand_landmarks:
                        # Draw the hand landmarks on the frame
                        self.mp_drawing.draw_landmarks(
                            image,
                            hand_landmarks,
                            self.mp_hands.HAND_CONNECTIONS,
                            self.mp_drawing_styles.get_default_hand_landmarks_style(),
                            self.mp_drawing_styles.get_default_hand_connections_style())

                        # Collect the coordinates of all landmarks
                        landmark_list = []
                        for landmark_id, finger_axis in enumerate(hand_landmarks.landmark):
                            landmark_list.append([
                                landmark_id, finger_axis.x, finger_axis.y, finger_axis.z
                            ])
                        if landmark_list:
                            # Thumb tip in pixel coordinates
                            thumb_finger_tip = landmark_list[4]
                            thumb_finger_tip_x = math.ceil(thumb_finger_tip[1] * resize_w)
                            thumb_finger_tip_y = math.ceil(thumb_finger_tip[2] * resize_h)
                            # Index-finger tip in pixel coordinates
                            index_finger_tip = landmark_list[8]
                            index_finger_tip_x = math.ceil(index_finger_tip[1] * resize_w)
                            index_finger_tip_y = math.ceil(index_finger_tip[2] * resize_h)
                            # Midpoint between the two fingertips
                            finger_middle_point = (thumb_finger_tip_x + index_finger_tip_x) // 2, (
                                thumb_finger_tip_y + index_finger_tip_y) // 2
                            thumb_finger_point = (thumb_finger_tip_x, thumb_finger_tip_y)
                            index_finger_point = (index_finger_tip_x, index_finger_tip_y)
                            # Draw the two fingertips and the midpoint
                            image = cv2.circle(image, thumb_finger_point, 10, (255, 0, 255), -1)
                            image = cv2.circle(image, index_finger_point, 10, (255, 0, 255), -1)
                            image = cv2.circle(image, finger_middle_point, 10, (255, 0, 255), -1)
                            # Draw the line between the fingertips
                            image = cv2.line(image, thumb_finger_point, index_finger_point, (255, 0, 255), 5)
                            # Fingertip distance (Pythagorean theorem)
                            line_len = math.hypot(index_finger_tip_x - thumb_finger_tip_x,
                                                  index_finger_tip_y - thumb_finger_tip_y)
                            # Minimum and maximum system volume (in dB)
                            min_volume = self.volume_range[0]
                            max_volume = self.volume_range[1]
                            # Map the fingertip distance to the volume level
                            vol = np.interp(line_len, [50, 300], [min_volume, max_volume])
                            # Map the fingertip distance to the volume bar display
                            rect_height = np.interp(line_len, [50, 300], [0, 200])
                            rect_percent_text = np.interp(line_len, [50, 300], [0, 100])
                            # Set the system volume
                            self.volume.SetMasterVolumeLevel(vol, None)

                # Draw the volume bar and percentage
                cv2.putText(image, str(math.ceil(rect_percent_text)) + "%", (10, 350),
                            cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                image = cv2.rectangle(image, (30, 100), (70, 300), (255, 0, 0), 3)
                image = cv2.rectangle(image, (30, math.ceil(300 - rect_height)), (70, 300), (255, 0, 0), -1)

                # Show the refresh rate (FPS)
                cTime = time.time()
                fps_text = 1 / (cTime - fpsTime)
                fpsTime = cTime
                cv2.putText(image, "FPS: " + str(int(fps_text)), (10, 70),
                            cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                # Show the frame
                cv2.imshow('MediaPipe Hands', image)
                # Press Esc to quit
                if cv2.waitKey(5) & 0xFF == 27:
                    break
        cap.release()


# Start the program
control = HandControlVolume()
control.recognize()
```
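To make step 4 of the docstring concrete, the distance-to-volume mapping can be tried on its own. The sketch below assumes a typical GetVolumeRange() result of roughly -65.25 dB to 0 dB; the actual range depends on the sound device, and the [50, 300] pixel range is the one used in the script above:

```python
import numpy as np

# Assumed volume range in dB; on a real machine query volume.GetVolumeRange() instead.
min_volume, max_volume = -65.25, 0.0

for line_len in (50, 175, 300):  # fingertip distances in pixels
    vol = np.interp(line_len, [50, 300], [min_volume, max_volume])
    percent = np.interp(line_len, [50, 300], [0, 100])
    print(f"distance {line_len:>3} px -> {vol:7.2f} dB ({percent:.0f}%)")
```

Because SetMasterVolumeLevel works in decibels, this mapping is linear in dB rather than in perceived loudness; pycaw's IAudioEndpointVolume also exposes SetMasterVolumeLevelScalar, which takes a 0.0 to 1.0 value, if a more linear feel is preferred.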

Summary
The above is based on my own experience. I hope it serves as a useful reference, and I hope you will keep supporting 腳本之家.