# Importing Libraries
import cv2
import mediapipe as mp

# Used to convert a protobuf message to a dictionary.
from google.protobuf.json_format import MessageToDict

# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
    static_image_mode=False,
    model_complexity=1,
    min_detection_confidence=0.75,
    min_tracking_confidence=0.75,
    max_num_hands=2)

# Start capturing video from the webcam
cap = cv2.VideoCapture(0)

while True:
    # Read the video frame by frame
    success, img = cap.read()

    # Skip the frame if the read failed
    if not success:
        continue

    # Flip the image (frame) horizontally for a mirror view
    img = cv2.flip(img, 1)

    # Convert the BGR image to RGB
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Process the RGB image
    results = hands.process(imgRGB)

    # If hands are present in the image (frame)
    if results.multi_hand_landmarks:

        # Both hands are present in the image (frame)
        if len(results.multi_handedness) == 2:
            # Display 'Both Hands' on the image
            cv2.putText(img, 'Both Hands', (250, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 0.9,
                        (0, 255, 0), 2)

        # If only one hand is present
        else:
            for i in results.multi_handedness:
                # Determine whether it is the Right or Left hand
                label = MessageToDict(i)['classification'][0]['label']

                if label == 'Left':
                    # Display 'Left Hand' on the
                    # left side of the window
                    cv2.putText(img, label + ' Hand', (20, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9,
                                (0, 255, 0), 2)

                if label == 'Right':
                    # Display 'Right Hand' on the
                    # right side of the window
                    cv2.putText(img, label + ' Hand', (460, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9,
                                (0, 255, 0), 2)

    # Display the video and, when 'q' is
    # pressed, destroy the window
    cv2.imshow('Image', img)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

# Release the webcam and close the window
cap.release()
cv2.destroyAllWindows()
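As an aside, the MessageToDict conversion is optional: each entry of results.multi_handedness is a protobuf ClassificationList, so the label and score can be read directly from its fields. A minimal sketch (the helper name hand_labels is illustrative, not part of MediaPipe):

# Read handedness labels directly from the protobuf fields instead of
# converting each message to a dictionary with MessageToDict.
# hand_labels is a hypothetical helper name for illustration only.
def hand_labels(results):
    # Return the predicted labels ('Left'/'Right'), one per detected hand;
    # multi_handedness is None when no hands are found, hence the "or []".
    return [hand.classification[0].label
            for hand in (results.multi_handedness or [])]

Inside the loop above, hand_labels(results) would return e.g. ['Left'] or ['Left', 'Right'] for the current frame.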