bump
run_me.py
@@ -2,7 +2,6 @@ import numpy as np
from functools import partial
from hailo_platform import VDevice, HailoSchedulingAlgorithm, FormatType
import cv2

import numpy as np

@@ -33,7 +32,6 @@ def resize_image(img_in, reshape_to_final=True):
    return data


def model_scoring_callback(completion_info, bindings, data):
    if completion_info.exception:
        # handle exception
@@ -83,9 +81,11 @@ import multiprocessing
import numpy as np
import ctypes
import shutil
from utils import *

cameras = {
    "camera_side": {
        'url': "rtsp://admin:marybear@192.168.1.151:554/h264Preview_01_sub",
    "camera_sidefeeder": {
        'url': "rtsp://admin:marybear@192.168.1.157:554/h264Preview_01_sub",
        'resolution': (480, 640, 3)
    },
    "camera_driveway": {
@@ -111,66 +111,93 @@ cameras = {
    },
}

# # %%
# import os
# os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'rtsp_transport;udp'
# cap = cv2.VideoCapture(cameras['camera_railing']['url'])
# # %%
# while True:
#     _, frame = cap.read()

# # %%
# _, frame = cap.read()
# cv2.imwrite('FRAME.jpg', frame)

# # %%
def format_gst_url(rtsp_url):
    gst_pipeline = f"rtspsrc location={rtsp_url} latency=50 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink max-buffers=1 drop=true"
    return gst_pipeline
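
# Note on format_gst_url above: the pipeline pulls the RTSP stream (rtspsrc,
# with a 50 ms jitter buffer via latency=50), strips the RTP framing
# (rtph264depay), parses and software-decodes the H.264 bitstream
# (h264parse ! avdec_h264), converts to a raw format OpenCV can consume
# (videoconvert), and hands frames to an appsink that keeps only the newest
# frame (max-buffers=1 drop=true), so a slow consumer always reads a fresh image.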

from functools import partial

cameras = dict()
for cam_name, details in cameras.items():
    array_len = np.prod(details['resolution'])
    rtsp_url = details['url']
    resolution = details['resolution']
    cameras[cam_name] = StreamManager( rtsp_url, resolution, cam_name)


# %%
    details['cam_name'] = cam_name
    details['img_array'] = multiprocessing.Array(ctypes.c_uint8,
                                                 int(array_len), lock = True)
                                                 int(array_len),
                                                 lock=True)
    details['img_timestamp'] = multiprocessing.Value(ctypes.c_double)
    details['queue'] = multiprocessing.Queue()
    details['gst_pipeline_str'] = format_gst_url(details['url'])
    details['rtsp_url'] = format_ffmpeg_decode_url(details['url']).split(" ")
    details['process_func'] = partial(stream_wrapper,
                                      details['resolution'],
                                      details['rtsp_url'],
                                      camera_name=cam_name,
                                      queue=details['queue'],
                                      img_array=details['img_array'],
                                      img_timestamp=details['img_timestamp'])
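
# Per-camera IPC plumbing built above: a lock-protected multiprocessing.Array
# of uint8 holds the most recent raw frame (its length is the product of the
# camera's resolution), a multiprocessing.Value(c_double) carries the capture
# timestamp, and a multiprocessing.Queue delivers commands to the capture
# process. functools.partial bakes the per-camera arguments into process_func
# so the Process target can be started without extra arguments.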

for cam_name, details in cameras.items():
    p = multiprocessing.Process(target = details['process_func'])
    details['process'] = p
    p.start()
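
# One capture process per camera: a stalled or reconnecting RTSP stream then
# only blocks its own process, and the parent keeps polling the other cameras'
# shared arrays undisturbed.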

# %%
img_array = details['img_array']
img_timestamp = details['img_timestamp']

import time
with img_timestamp.get_lock():
    img_timestamp.value = -1
details['queue'].put('get')
for i in range(1000):
    val = img_timestamp.value
    print(val)
    if val > 0:

        print('Done')
        break
    time.sleep(0.001)
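
# Smoke test for one camera: reset the shared timestamp to -1, ask the capture
# process for a frame with 'get', then poll (up to ~1 s in 1 ms steps) until
# the process writes a positive timestamp, which signals the frame has landed
# in img_array.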


import datetime as dt
def rtsp_stream_manager( camera_name, gst_pipeline_str, queue, img_array, img_timestamp):
    capture_handle = cv2.VideoCapture(gst_pipeline_str, cv2.CAP_GSTREAMER)
    while True:
        if not queue.empty():
            msg = queue.get_nowait()
            if msg == 'get':
                ret, frame = capture_handle.read()
                with img_timestamp.get_lock(), img_array.get_lock():
                    if frame is None:
                        print(f"Read empty frame for {camera_name}")
                        img_array[:] = 0
                        img_timestamp.value = 0
                    else:
                        print(f"Read frame for {camera_name} at {dt.datetime.now()}")
                        img_array[:] = frame.flatten()[:]
                        img_timestamp.value = time.time()
            elif msg == 'restart':
                print('Restarting')
                capture_handle = cv2.VideoCapture(gst_pipeline_str, cv2.CAP_GSTREAMER)
            elif msg == 'exit':
                print('Exiting')
                return
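
# Command protocol for rtsp_stream_manager: 'get' reads one frame and, under
# both locks, publishes the flattened pixels plus a time.time() timestamp
# (or zeroes the buffer and timestamp on a failed read); 'restart' reopens the
# GStreamer capture; 'exit' ends the process. Note the worker busy-polls
# queue.empty() rather than blocking on queue.get().
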
# %%
for cam_name, details in cameras.items():
    details['queue'].put('get')
    img_array = details['img_array']
    img_timestamp = details['img_timestamp']
    with img_array.get_lock(), img_timestamp.get_lock():
        reshaped_image = np.frombuffer(details['img_array'].get_obj(),
                                       dtype=np.uint8).reshape(
                                           details['resolution'])
        timestamp = img_timestamp.value
        img_timestamp.value = -1
    print('Writing for ' + cam_name + f' for {reshaped_image.shape}')
    cv2.imwrite('images/'+ cam_name + '.jpg', reshaped_image)
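
# np.frombuffer gives a zero-copy view of the shared Array's memory, reshaped
# to the camera's (H, W, 3) resolution; the timestamp is read and reset to -1
# under the locks so a consumed frame is not mistaken for a fresh one.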


# %%
import asyncio

img_scoring_queue = multiprocessing.Queue()

for cam_name, details in cameras.items():
    p = multiprocessing.Process(target=rtsp_stream_manager,
                                args=(cam_name, details['gst_pipeline_str'],
                                      details['queue'], details['img_array'], details['img_timestamp']))
                                      details['queue'], details['img_array'],
                                      details['img_timestamp']))
    details['process'] = p


asyncio.create_task(details['async_task'])

shape = (512, 896, 3)

asyncio.run(read_stream(shape, cmd))

import datetime as dt

for cam_name, details in cameras.items():
    details['process'].start()
# %%
@@ -180,60 +207,49 @@ for cam_name, details in cameras.items():
for cam_name, details in cameras.items():
    details['queue'].put('get')


if os.path.exists('images/'):
    shutil.rmtree('images/')

os.makedirs('images/')
os.makedirs('images/')

def create_score_message( details, reshaped_image, timestamp):
def create_score_message(details, reshaped_image, timestamp):
    frames = list()

    msg = list()
    if details.get('split_into_two', False):
        split_point = int(reshaped_image.shape[1]/2)
        left_frame = resize_image(reshaped_image[:,:split_point,:], reshape_to_final = False)
        right_frame = resize_image(reshaped_image[:,split_point:,:], reshape_to_final = False)
        split_point = int(reshaped_image.shape[1] / 2)
        left_frame = resize_image(reshaped_image[:, :split_point, :],
                                  reshape_to_final=False)
        right_frame = resize_image(reshaped_image[:, split_point:, :],
                                   reshape_to_final=False)

        left_frame = cv2.cvtColor(left_frame, cv2.COLOR_BGR2RGB)
        right_frame = cv2.cvtColor(right_frame, cv2.COLOR_BGR2RGB)
        msg.append({'camera_name': details['cam_name']+'_left', 'frame': left_frame, 'image_timestamp': timestamp})
        msg.append({'camera_name': details['cam_name']+'_right', 'frame': right_frame, 'image_timestamp': timestamp})
        right_frame = cv2.cvtColor(right_frame, cv2.COLOR_BGR2RGB)
        msg.append({
            'camera_name': details['cam_name'] + '_left',
            'frame': left_frame,
            'image_timestamp': timestamp
        })
        msg.append({
            'camera_name': details['cam_name'] + '_right',
            'frame': right_frame,
            'image_timestamp': timestamp
        })

    else:
        frame = resize_image(reshaped_image, reshape_to_final = False)
        frame = resize_image(reshaped_image, reshape_to_final=False)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        msg.append({'camera_name': details['cam_name'], 'frame': frame, 'image_timestamp': timestamp})

        msg.append({
            'camera_name': details['cam_name'],
            'frame': frame,
            'image_timestamp': timestamp
        })

    return msg
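
# create_score_message turns a raw BGR frame into one or two scoring-queue
# entries: cameras flagged 'split_into_two' are cut at the horizontal midpoint
# and each half is resized and converted BGR -> RGB (OpenCV decodes to BGR)
# before being tagged '<cam_name>_left' / '<cam_name>_right'; otherwise the
# whole frame is resized, converted, and queued under the camera's own name,
# with the capture timestamp carried in 'image_timestamp'.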


for cam_name, details in cameras.items():
    img_array = details['img_array']
    img_timestamp = details['img_timestamp']
    with img_array.get_lock(), img_timestamp.get_lock():
        reshaped_image = np.frombuffer(details['img_array'].get_obj(), dtype=np.uint8).reshape(details['resolution'])
        timestamp = img_timestamp.value
    for msg in create_score_message(details, reshaped_image, timestamp):
        img_scoring_queue.put(msg)


    print('Writing for ' + cam_name + f' for {reshaped_image.shape}')
    # cv2.imwrite('images/'+ cam_name + '.jpg', reshaped_image)



for x in range(img_scoring_queue.qsize()):
    qu = img_scoring_queue.get()
    print(qu['camera_name'],qu['frame'].shape)
    cv2.imwrite(str(x)+'.jpg', qu['frame'])


    qu = img_scoring_queue.get()
    print(qu['camera_name'], qu['frame'].shape)
    cv2.imwrite(str(x) + '.jpg', qu['frame'])