
pyresearch/pyresearch


Installation

You can use pip to install the latest version of pyresearch:

pip install pyresearch
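
To confirm the install, a quick import check (the module path follows the examples below):

import pyresearch
from pyresearch.FaceDetectionModule import FaceDetector

print("pyresearch imported OK")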


60 FPS Face Detection

from pyresearch.FaceDetectionModule import FaceDetector
import cv2

cap = cv2.VideoCapture(0)
detector = FaceDetector()

while True:
    success, img = cap.read()
    img, bboxs = detector.findFaces(img)

    if bboxs:
        # bboxInfo keys: "id", "bbox", "score", "center"
        center = bboxs[0]["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)

    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Demo video: demo.1.mov
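
The same detector also works on a single image. A minimal sketch, assuming the dict keys from the bboxInfo comment above; "photo.jpg" is a placeholder path:

from pyresearch.FaceDetectionModule import FaceDetector
import cv2

img = cv2.imread("photo.jpg")  # placeholder input path
detector = FaceDetector()
img, bboxs = detector.findFaces(img)

for face in bboxs:
    x, y, w, h = face["bbox"]  # keys per the bboxInfo comment above
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)

cv2.imwrite("photo_out.jpg", img)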


Face Mesh Detection


from pyresearch.FaceMeshModule import FaceMeshDetector
import cv2

cap = cv2.VideoCapture(0)
detector = FaceMeshDetector(maxFaces=2)
while True:
    success, img = cap.read()
    img, faces = detector.findFaceMesh(img)
    if faces:
        print(faces[0])
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Demo video: facemesh.mov
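
Each entry of faces is the list of landmark points for one face. A plain-math helper (not a pyresearch API) for measuring the pixel distance between two mesh landmarks, assuming each landmark is an (x, y) or (x, y, z) point:

import math

def landmark_distance(face, i, j):
    # face is one entry of the faces list above; i and j are mesh indices
    ax, ay = face[i][0], face[i][1]
    bx, by = face[j][0], face[j][1]
    return math.hypot(bx - ax, by - ay)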


FPS


import pyresearch
import cv2

fpsReader = pyresearch.FPS()

cap = cv2.VideoCapture(0)
cap.set(3, 1280)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, 720)   # 4 == cv2.CAP_PROP_FRAME_HEIGHT

while True:
    success, img = cap.read()
    fps, img = fpsReader.update(img, pos=(50, 80), color=(0, 255, 0), scale=5, thickness=5)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Demo video: fps.mov
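
If you only need the frame rate as a number, without the overlay that fpsReader.update() draws, a minimal counter in plain Python (not a pyresearch API):

import time

class SimpleFPS:
    # Minimal frame-rate counter; call update() once per frame.
    def __init__(self):
        self._prev = time.time()

    def update(self):
        now = time.time()
        fps = 1.0 / max(now - self._prev, 1e-6)  # guard against division by zero
        self._prev = now
        return fps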


Stack Images


import pyresearch
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)

while True:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgList = [img, img, imgGray, img, imgGray, img, imgGray, img, img]
    stackedImg = pyresearch.stackImages(imgList, 3, 0.4)

    cv2.imshow("stackedImg", stackedImg)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()


Demo video: stacke.mov
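
Judging from the call above, stackImages(imgList, 3, 0.4) lays the nine images out in three columns at 0.4 scale. To keep track of which tile is which, you can label each image first with plain OpenCV (not a pyresearch API), for example:

import cv2

def label_tiles(images):
    # Draw an index label on each tile; grayscale tiles are converted to
    # BGR so mixed color/gray lists keep consistent channels.
    labeled = []
    for i, im in enumerate(images):
        tile = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) if im.ndim == 2 else im.copy()
        cv2.putText(tile, f"img {i}", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        labeled.append(tile)
    return labeled

Then stack with pyresearch.stackImages(label_tiles(imgList), 3, 0.4) as above.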


Hand Tracking


Basic Code Example

from pyresearch.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=2)

while True:
    # Get image frame
    success, img = cap.read()

    # Find the hand and its landmarks
    hands, img = detector.findHands(img)  # with draw
    # hands = detector.findHands(img, draw=False)  # without draw

    if hands:
        # Hand 1
        hand1 = hands[0]
        lmList1 = hand1["lmList"]  # List of 21 landmark points
        bbox1 = hand1["bbox"]  # Bounding box info x, y, w, h
        centerPoint1 = hand1['center']  # Center of the hand cx, cy
        handType1 = hand1["type"]  # Hand type "Left" or "Right"

        fingers1 = detector.fingersUp(hand1)

        if len(hands) == 2:
            # Hand 2
            hand2 = hands[1]
            lmList2 = hand2["lmList"]  # List of 21 landmark points
            bbox2 = hand2["bbox"]  # Bounding box info x, y, w, h
            centerPoint2 = hand2['center']  # Center of the hand cx, cy
            handType2 = hand2["type"]  # Hand type "Left" or "Right"

            fingers2 = detector.fingersUp(hand2)

            # Find distance between two landmarks (same hand or different hands)
            length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)  # with draw
            # length, info = detector.findDistance(lmList1[8], lmList2[8])  # without draw

    # Display
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Demo video: hand.1.mov
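
The fingersUp() output can be turned into simple gesture labels. A sketch assuming the 5-element [thumb, index, middle, ring, pinky] 0/1 layout of the cvzone-style API above; verify against your installed version:

def gesture_name(fingers):
    # fingers is the 0/1 list returned by detector.fingersUp(hand)
    count = sum(fingers)
    if count == 0:
        return "fist"
    if count == 5:
        return "open palm"
    if fingers == [0, 1, 1, 0, 0]:
        return "peace"
    return f"{count} finger(s) up"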


Pose Estimation


from pyresearch.PoseModule import PoseDetector
import cv2

cap = cv2.VideoCapture(0)
detector = PoseDetector()
while True:
    success, img = cap.read()
    img = detector.findPose(img)
    lmList, bboxInfo = detector.findPosition(img, bboxWithHands=False)
    if bboxInfo:
        center = bboxInfo["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)

    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Demo video: pose.mov
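
With the landmark list you can compute joint angles yourself. A plain-math sketch (not a pyresearch API) for the angle at landmark b formed by points a, b, c; it assumes each point exposes (x, y) pixel coordinates at indices 0 and 1, which may differ if your version prepends a landmark id:

import math

def joint_angle(a, b, c):
    # Angle at b (in degrees) between rays b->a and b->c
    ang = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) -
                       math.atan2(a[1] - b[1], a[0] - b[0])) % 360
    return ang if ang <= 180 else 360 - ang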


Real-Time Background Remover through Human Segmentation


import cv2
from pyresearch.SelfiSegmentationModule import SelfiSegmentation

# connect the internal camera (index 0 is the default)
cap = cv2.VideoCapture(0)

# extract the camera capture size
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# load and resize the background image
background_image = cv2.resize(cv2.imread("bg_image.jpeg"), (width, height))

# create the segmentation instance that extracts the foreground (the person)
segmentor = SelfiSegmentation()

# iterate over the camera captures
while True:
    # read the captured image from the camera
    ret, frame = cap.read()

    # segment the image
    segmented_img = segmentor.removeBG(frame, background_image, threshold=0.9)

    # concatenate the images horizontally
    concatenated_img = cv2.hconcat([frame, segmented_img])
    cv2.imshow("Camera Live", concatenated_img)

    # exit condition
    if cv2.waitKey(1) == ord('q'):
        break

# release the resources
cap.release()
cv2.destroyAllWindows()

Demo video: My.Video.mov
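
A green-screen variant of the same script: instead of loading a background image, build a solid-color frame with NumPy and pass it to the same removeBG() call. A sketch under that assumption only; it uses no pyresearch API beyond the one shown above:

import cv2
import numpy as np
from pyresearch.SelfiSegmentationModule import SelfiSegmentation

cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# solid green background built in memory instead of loading a file
green = np.full((height, width, 3), (0, 255, 0), np.uint8)  # BGR

segmentor = SelfiSegmentation()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    segmented_img = segmentor.removeBG(frame, green, threshold=0.9)
    cv2.imshow("Green screen", segmented_img)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()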


Heatmap & Object Tracking using YOLOv8, ByteTrack & Supervision


import argparse
from pyresearch.heatmap_and_track import process_video

def main():
    # Define the arguments as a dictionary
    args = {
        "source_weights_path": "yolov8s.pt",
        "source_video_path": "people-walking.mp4",
        "target_video_path": "output.mp4",
        "confidence_threshold": 0.35,
        "iou_threshold": 0.5,
        "heatmap_alpha": 0.5,
        "radius": 25,
        "track_threshold": 0.35,
        "track_seconds": 5,
        "match_threshold": 0.99,
        "display": True,
    }

    # Convert the dictionary to an argparse Namespace object
    args_namespace = argparse.Namespace(**args)

    # Call the process_video function with the Namespace object
    process_video(args_namespace)

if __name__ == "__main__":
    main()
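
The same settings can also be exposed as command-line flags instead of a hard-coded dict. A sketch that mirrors the dict keys above; the fields process_video expects are taken from that dict and nothing else:

import argparse
from pyresearch.heatmap_and_track import process_video

def main():
    p = argparse.ArgumentParser(description="Heatmap & tracking with YOLOv8")
    p.add_argument("--source_weights_path", default="yolov8s.pt")
    p.add_argument("--source_video_path", default="people-walking.mp4")
    p.add_argument("--target_video_path", default="output.mp4")
    p.add_argument("--confidence_threshold", type=float, default=0.35)
    p.add_argument("--iou_threshold", type=float, default=0.5)
    p.add_argument("--heatmap_alpha", type=float, default=0.5)
    p.add_argument("--radius", type=int, default=25)
    p.add_argument("--track_threshold", type=float, default=0.35)
    p.add_argument("--track_seconds", type=int, default=5)
    p.add_argument("--match_threshold", type=float, default=0.99)
    # --display / --no-display toggle (requires Python 3.9+)
    p.add_argument("--display", action=argparse.BooleanOptionalAction, default=True)
    process_video(p.parse_args())

if __name__ == "__main__":
    main()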
  


Buy Us a Beer!

The Pyresearch platform is funded by donations only. Please support us so we can maintain and further improve our computer vision solutions!

Donate using Liberapay

Much more information about the self-sufficiency challenge: Pyresearch
contact@pyresearch.org