How to stream OpenCV-processed frames from an IP camera and play them in VLC (or any player)

April 20, 2021, at 5:50 PM

I'm importing an IP camera stream into OpenCV with Python and displaying it locally (on an Ubuntu server), working on a face mask detector project. The script already processes the imported frames, checks whether the target is wearing a mask or not, and shows the output in a window; the capture itself works: vs = VideoStream('rtsp://CAMERA_USER:PASSWORD@IP_SERVER:554/videoMain').start(). What I need now is to stream that processed frame at the same time over RTSP, TCP, or RTP so it can be played in VLC (an external client). Thanks. It's my final degree project. Checklist:

1. Capture frames from the IP camera using OpenCV-Python. ✓
2. Preprocess the image and run inference with TensorFlow/Keras. ✓
3. Draw the detected box on the original frame. ✓
4. Stream these frames via GStreamer RTSP, using OpenCV (see the build check below and the RTSP server sketch after the code). ✗
5. Open VLC player to watch the real-time frames. ✗
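A note on step 4 before the code: cv2.VideoWriter with cv2.CAP_GSTREAMER only works when OpenCV itself was compiled with GStreamer support, which the stock pip wheels usually lack; a silent failure here is the most common reason nothing ever reaches VLC. A quick check, a minimal sketch using only the standard cv2 API:

    import cv2

    # Print the GStreamer line of OpenCV's build report; it must say "YES"
    # for cv2.VideoWriter(..., cv2.CAP_GSTREAMER, ...) to be usable.
    for line in cv2.getBuildInformation().splitlines():
        if "GStreamer" in line:
            print(line.strip())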

# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import gi
def detect_and_predict_mask(frame, faceNet, maskNet):
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()
    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))
    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)
    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", type=str,
    default="face_detector",
    help="path to face detector model directory")
ap.add_argument("-m", "--model", type=str,
    default="mask_detector.model",
    help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized face detector model from disk
print("[INFO]Cargando modelos de deteccion de Andres Daza...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
    "res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] Iniciando detector de mascarillas...")
maskNet = load_model(args["model"])
# initialize the video stream and allow the camera sensor to warm up
# this is where the IP or USB camera stream is initialized
print("[NOTICE] Starting Andres Daza's final project...")
#STREAM from usb camera
vs = VideoStream(src=0).start()
#stream from ip camera (it works)
#vs = VideoStream('rtsp://user:pass@192.168.1.70:554/videoMain').start()

time.sleep(2.0)
# output stream to the client: H.264 in RTP over UDP via GStreamer
out = cv2.VideoWriter(
    "appsrc ! video/x-raw, format=BGR ! queue ! videoconvert "
    "! video/x-raw, format=BGRx ! nvvidconv ! omxh264enc "
    "! video/x-h264, stream-format=byte-stream ! h264parse "
    "! rtph264pay pt=96 config-interval=1 ! udpsink host=127.0.0.1 port=50001",
    cv2.CAP_GSTREAMER, 0, 25.0, (640, 480))
if not out.isOpened():
    print("[WARNING] GStreamer writer pipeline failed to open")
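# NOTE (assumption): VLC cannot open this raw RTP stream as udp://@:50001,
# because a bare RTP session carries no description of its payload. Save an
# SDP file like the one below and open that file with VLC instead
# (e.g. vlc mask.sdp, where mask.sdp is a hypothetical file name):
#   v=0
#   m=video 50001 RTP/AVP 96
#   c=IN IP4 127.0.0.1
#   a=rtpmap:96 H264/90000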
print("[Atencion!] Iniciando Transmision en Vivo")
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=800)
    # detect faces in the frame and determine if they are wearing a
    # face mask or not
    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
    # loop over the detected face locations and their corresponding
    # locations
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
        # determine the class label and color we'll use to draw
        # the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            
        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(frame, label, (startX, startY - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    # show the annotated frame in a local window
    cv2.imshow("Final Project [ ASIXII ]", frame)
    
    # write the annotated frame to the output stream; it must match the
    # (640, 480) size declared in the VideoWriter pipeline above, otherwise
    # the GStreamer caps negotiation fails and nothing is sent
    out.write(cv2.resize(frame, (640, 480)))
    
    # if the camera initializes, the transmission also starts
    # (earlier attempt, left commented out)
    # while vs.isOpened():
    #     ret, frame = vs.read()
    #     if ret:
    #         if out.isOpened():
    #             out.write(frame)
    #             print("Streaming live")
    #         if cv2.waitKey(1) & 0xFF == ord('q'):
    #             break
    #     else:
    #         break
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
out.release()
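For step 4 done properly over RTSP (so VLC can simply open an rtsp:// URL for step 5), here is a minimal sketch using gst-rtsp-server's Python bindings (package gir1.2-gst-rtsp-server-1.0 on Ubuntu, matching the import gi already in the script). Everything in it is illustrative rather than part of the original code: the MaskFactory class, the latest_frame variable (which the detection loop would update instead of calling out.write), the /mask mount point, and the software x264enc encoder standing in for omxh264enc:

    import cv2
    import gi
    gi.require_version("Gst", "1.0")
    gi.require_version("GstRtspServer", "1.0")
    from gi.repository import Gst, GstRtspServer, GLib

    Gst.init(None)

    FPS = 25
    latest_frame = None  # the detection loop stores its annotated BGR frame here

    class MaskFactory(GstRtspServer.RTSPMediaFactory):
        # builds one pipeline per connecting client; appsrc asks for data
        # and we feed it the most recent processed frame
        def __init__(self):
            super().__init__()
            self.pts = 0
            self.duration = Gst.SECOND // FPS
            self.launch = (
                "appsrc name=source is-live=true block=true format=time "
                "caps=video/x-raw,format=BGR,width=640,height=480,framerate=25/1 "
                "! videoconvert ! x264enc tune=zerolatency speed-preset=ultrafast "
                "! rtph264pay name=pay0 pt=96 config-interval=1")

        def do_create_element(self, url):
            return Gst.parse_launch(self.launch)

        def do_configure(self, media):
            source = media.get_element().get_child_by_name("source")
            source.connect("need-data", self.on_need_data)

        def on_need_data(self, src, length):
            if latest_frame is None:
                return
            # resize to the caps declared above and timestamp the buffer
            data = cv2.resize(latest_frame, (640, 480)).tobytes()
            buf = Gst.Buffer.new_allocate(None, len(data), None)
            buf.fill(0, data)
            buf.pts = self.pts
            buf.duration = self.duration
            self.pts += self.duration
            src.emit("push-buffer", buf)

    server = GstRtspServer.RTSPServer()  # serves rtsp:// on port 8554 by default
    server.get_mount_points().add_factory("/mask", MaskFactory())
    server.attach(None)
    GLib.MainLoop().run()  # blocks; in the real script, run it in a thread

With that running alongside the detection loop, step 5 is just opening rtsp://SERVER_IP:8554/mask in VLC's "Open Network Stream" dialog.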