添加链接
link之家
链接快照平台
  • 输入网页链接,自动生成快照
  • 标签化管理网页链接
# Producer/consumer RTSP display demo.  The original was collapsed onto a
# single line (a SyntaxError as Python); reformatted into a valid script.
# Receive() pulls frames from the RTSP stream into a shared queue while
# Display() pops and renders them, decoupling capture rate from display rate.
import queue
import threading

import cv2

q = queue.Queue()


def Receive():
    """Producer thread: read frames from the RTSP stream and enqueue them."""
    print("start Receive")  # typo fixed: was "start Reveive"
    cap = cv2.VideoCapture("rtsp://admin:admin_123@172.0.0.0")
    ret, frame = cap.read()
    q.put(frame)
    while ret:
        ret, frame = cap.read()
        q.put(frame)


def Display():
    """Consumer thread: show queued frames until 'q' is pressed."""
    print("Start Displaying")
    while True:
        if not q.empty():
            frame = q.get()
            cv2.imshow("frame1", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


if __name__ == '__main__':
    p1 = threading.Thread(target=Receive)
    p2 = threading.Thread(target=Display)
    p1.start()
    p2.start()

抓取与跳帧

import cv2
import queue
import time
import threading
# Frame buffer shared by the capture thread (producer) and the display
# thread (consumer).  Unbounded: Receive() enqueues faster than Display()
# can drain if processing is slow.
q=queue.Queue()
def Receive():
    """Producer thread: pull frames from the RTSP stream, archive every frame
    to a video file, and enqueue every 2nd frame for display (skip-frame).

    Fixes vs. the original:
      * ``vidcap`` was never defined (NameError) -- the size query uses ``cap``.
      * Mixed tabs/spaces in the loop body raised a TabError -- spaces only now.
      * FourCC codes are upper-case; ``'mjpg'`` is not a valid MJPG code.
      * The final failed read no longer writes a ``None`` frame to the file.
    """
    print("start Receive")  # typo fixed: was "start Reveive"
    cap = cv2.VideoCapture("rtsp://admin:admin_123@172.0.0.0")
    ret, frame = cap.read()
    # NOTE(review): path is empty, so VideoWriter cannot open an output file;
    # set a real filename (e.g. "capture.avi") before using the archive.
    path = ""
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    fps = 25
    out_video = cv2.VideoWriter(path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                fps, size)
    q.put(frame)
    fp = 0  # frame counter driving the every-other-frame skip
    while ret:
        fp += 1
        ret, frame = cap.read()
        if not ret:
            break  # do not write/enqueue the failed (None) frame
        out_video.write(frame)
        if fp % 2 == 0:
            # Enqueue only every 2nd frame so the consumer does not fall behind.
            q.put(frame)
    # Release the stream and the writer once the source is exhausted.
    cap.release()
    out_video.release()
def Display():
    """Consumer thread: render frames popped from the shared queue.

    Loops forever, showing whatever Receive() has enqueued; pressing 'q'
    in the OpenCV window exits the loop.
    """
    print("Start Displaying")
    while True:
        if not q.empty():
            cv2.imshow("frame1", q.get())
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
if __name__=='__main__':
    # Launch the capture (producer) and display (consumer) threads.
    receiver = threading.Thread(target=Receive)
    displayer = threading.Thread(target=Display)
    for worker in (receiver, displayer):
        worker.start()
read() and grab() in OpenCV VideoCapture take different amounts of time, since
grab() performs no decoding. Let's try to find out how much these two methods
differ.
import cv2
def main():
    """Time three strategies for pulling frames with cv2.VideoCapture:

    1. ``cap.read()``  -- grab + decode every frame.
    2. ``cap.grab()``  -- grab only, no decoding (much cheaper per frame).
    3. Two captures decoding the same file concurrently through a thread
       pool (reported to fail with some FFMPEG builds).
    """
    tm = cv2.TickMeter()
    # =========================================================================
    # Test 1, grab and decode each frame.
    cap = cv2.VideoCapture()
    cap.open("/home/robin/Videos/clip1-ssd.mp4", cv2.CAP_FFMPEG)
    total_frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    tm.start()
    f_count = 0
    while True:
        r, img = cap.read()
        if not r:
            break
        # Count only frames that actually decoded; the original also counted
        # the final failed read, over-reporting by one.
        f_count += 1
    tm.stop()
    cap.release()
    print("Read and decode {} frames takes {} seconds.".format(
        f_count, tm.getTimeSec() / tm.getCounter()))
    # =========================================================================
    # Test 2, grab only without decoding.
    cap = cv2.VideoCapture()
    cap.open("/home/robin/Videos/clip1-ssd.mp4", cv2.CAP_FFMPEG)
    total_frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    tm.reset()
    tm.start()
    while cap.grab():
        pass
    tm.stop()
    cap.release()
    print("Read {} frames takes {} seconds.".format(
        total_frame_count, tm.getTimeSec() / tm.getCounter()))
    # =========================================================================
    # Test 3, reading same file with two decoders. This block failed on my PC.
    # Maybe it's not supported by FFMPEG?
    from multiprocessing.pool import ThreadPool
    thread_num = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=thread_num)

    def get_frame_at(src, location):
        # Open the source, seek to `location`, decode a single frame.
        cap = cv2.VideoCapture()
        cap.open(src, cv2.CAP_FFMPEG)
        # Bug fix: seeking uses CAP_PROP_POS_FRAMES.  CAP_PROP_FRAME_COUNT is
        # a read-only property, so setting it never moved the read position.
        cap.set(cv2.CAP_PROP_POS_FRAMES, location)
        rst, frame = cap.read()
        return rst, frame

    def get_frame_next(cap):
        # Decode the next frame from an already-open capture.
        rst, frame = cap.read()
        return rst, frame

    video_src = "/home/robin/Videos/clip1-ssd.mp4"
    cap1 = cv2.VideoCapture()
    cap1.open(video_src, cv2.CAP_FFMPEG)
    cap2 = cv2.VideoCapture()
    cap2.open(video_src, cv2.CAP_FFMPEG)
    tm.reset()
    tm.start()
    while True:
        t1 = pool.apply_async(get_frame_next, (cap1, ))
        t2 = pool.apply_async(get_frame_next, (cap2, ))
        print(t1.get()[0])
        # Stop only once BOTH captures are exhausted.
        if (t1.get()[0] is False) and (t2.get()[0] is False):
            break
    tm.stop()
    # Release the pool and both captures before reporting.
    pool.close()
    pool.join()
    cap1.release()
    cap2.release()
    print("Decode same file with 2 decoder takes {} seconds.".format(
        tm.getTimeSec() / tm.getCounter()))
# Run the benchmark suite only when executed as a script.
if __name__ == "__main__":
    main()
然后清楚了 grab 的含义,我们就可以对开头的抓取循环做如下跳帧改造:
    	fp += 1
        success = cap.grab()
        if (fp % 2 == 0):
        	continue
        _,image = cap.retrive()

参考文献:

  • https://www.icode9.com/content-4-816043.html
用队列将同步转为异步 import cv2 import queue import time import threading q=queue.Queue() def Receive(): print("start Reveive") cap = cv2.VideoCapture("rtsp://admin:admin_123@172.0.0.0") ret, frame = cap.read() q.put(frame) while ret: …(摘录截断)
目前项目需要对网络摄像头进行实时视觉算法检测。 遇到问题: 在使用抓取VideoCaptureRTSP流的过程中,若需要对图像进行处理,则最后的显示结果画面就会表现出高延迟,然后卡住,很快崩溃; 直观感受: 图像处理速度远不如VideoCapture抓取速度,然后感觉是有内置缓存的,导致卡顿,延迟;对VideoCapture进行等待处理,效果不佳; 解决思路: 采用多线程的方式,为VideoCapture单独开一个线程,自动抓取图像,并且存储到一个容器中,容器若满了自动更新容器,清理掉前面的图像,存储最新的
# Channels: 实时数据 # 1: 通道 cap = cv2.VideoCapture("rtsp://admin:12345@192.168.1.64/main/Channels/1") print (cap.isOpened()) while cap.isOpened(): success,frame = cap.read() cv2.imshow("frame", frame) cv2.waitKey(1)
使用opencv源库进行摄像头的采集,opencv默认使用的解码库也是ffmpeg,其中,由于ffmpeg默认解码摄像头采集格式为YUY2(我的摄像头仅支持YUY2以及MJPG两种格式解码),但项目需求在高分辨率下流畅度也要最高,我的摄像头YUY2的解码格式不能满足要求,必须使用MJPG。 //1.打开摄像机设备(默认第一个) cv::VideoCapture cap= cv::VideoCapture(0); //2.一定要先设置采集格式!!! cap.set(CV_CAP
cv2.VideoCapture从摄像头获取视频流并处理但是处理速度慢 在做深度学习视觉相关项目的时候,经常会遇到无法做到实时的情况。比如处理一帧图像要3s,但是摄像头采集的视频里在这3s内,可能已经读了好几十帧(假如fps是20)。那么处理完这一帧之后,下一次cap.read()返回的是第二帧还是第62帧。 source:.. # Channels: 实时数据 # 1: 通道 cap = cv2.VideoCapture("rtsp://admin:12345@192.168.1.64/main/Channels/1") print (cap.isOpened())
最近在使用python版的opencv调用网络摄像头做人脸识别时遇到了一些问题,命令行报错导致rtsp断流。 报错如下: [h264 @ 0x7fd990026040] left block unavailable for requested intra4x4 mode -1 [h264 @ 0x7fd990026040] error while decoding MB 0 18, bytestr...
摄像头与电脑的连接 首先,你需要获得hikvision摄像头的密码以及用户名(不知道的可以去打客服电话进行咨询),这里不做介绍; 其次,将电脑的ip设置与hikvision同频段,一般来说,海康威视的ip为192.168.1.64,电脑设置如下: 最后,使用IE浏览器(其他可能不支持),输入ip:192.168.1.64并登陆 输入用户名和密码即可获取视频画面(可以观察到,视频有畸变) 使用python+openCV获取监控画面 在使用openCV获取监控画面,具体代码如下 url格式为:“rtsp: