I'm trying to write a Python object-tracking script, and I need my reticle to stay centered on the mouse cursor. When I wrote the code using a black screen as the initial frame, it worked: the reticle was centered on my mouse and followed it perfectly. But when I changed the code to open my webcam so I can actually see and track objects, the reticle is no longer centered on my mouse (it sits off toward the bottom left).
I'll provide the reticle code and images so you can understand better.
Code 1 and Image 1 (black screen) show the result I'm after, but I need the webcam open to actually track objects.
Code 2 and Image 2 (webcam) show the cursor and reticle misaligned.
Code 1:
import cv2
import numpy as np
import time
# THIS CODE HAS THE MOUSE MOVEMENTS INTEGRATED INTO THE RETICLE ("MOUSE IS CENTERED ON THE RETICLE DOT").
# Define screen dimensions (adjust according to your display)
screen_width = 800
screen_height = 600
# Define the gap between the parallel lines for each reticle size
reticle_sizes = {
    1: 150,  # Close-range reticle size
    2: 100,  # Medium-range reticle size
    3: 30    # Long-range reticle size
}
# Define reticle size mode names
reticle_size_names = {
1: "Short Range",
2: "Medium Range",
3: "Long Range"
}
# Initialize reticle size and position
current_reticle_size = 2 # Medium-range reticle size initially
reticle_x = screen_width // 2
reticle_y = screen_height // 2
# Initialize mouse position
mouse_x = reticle_x
mouse_y = reticle_y
# Initialize object tracker
tracker = None
# Define mouse event callback function
def mouse_event(event, x, y, flags, param):
    global reticle_x, reticle_y, mouse_x, mouse_y, tracker
    if event == cv2.EVENT_MOUSEMOVE:
        mouse_x = x
        mouse_y = y
        reticle_x = x
        reticle_y = y
    elif event == cv2.EVENT_LBUTTONDOWN:
        if abs(x - reticle_x) <= 2 and abs(y - reticle_y) <= 2:
            # Initialize object tracker at the position of the center dot
            bbox_size = reticle_sizes[current_reticle_size] // 2
            bbox = (reticle_x - bbox_size // 2, reticle_y - bbox_size // 2, bbox_size, bbox_size)
            tracker = cv2.TrackerKCF_create()
            # Use black frame as the input for tracker initialization
            init_frame = np.zeros((screen_height, screen_width, 3), dtype=np.uint8)
            ok = tracker.init(init_frame, bbox)
# Create a black screen as the initial frame
black_screen = np.zeros((screen_height, screen_width, 3), dtype=np.uint8)
# Create a window and set the mouse event callback function
cv2.namedWindow('Camera Feed')
cv2.setMouseCallback('Camera Feed', mouse_event)
# Initialize variables for FPS calculation
start_time = time.time()
frame_counter = 0
while True:
    # Use black screen as the frame
    webcam_frame = black_screen.copy()
    # Calculate FPS
    frame_counter += 1
    elapsed_time = time.time() - start_time
    fps = frame_counter / elapsed_time
    # Draw FPS on the frame
    cv2.putText(webcam_frame, f"FPS: {fps:.2f}", (8, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Object tracking
    if tracker is not None:
        ok, bbox = tracker.update(webcam_frame)
        if ok:
            # Tracking success
            x, y, w, h = [int(v) for v in bbox]
            cv2.rectangle(webcam_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        else:
            # Tracking failure
            cv2.putText(webcam_frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
    # Create a black frame for the reticle and HUD
    hud_frame = np.zeros((screen_height, screen_width, 3), dtype=np.uint8)
    # Draw reticle based on current size
    line_gap = reticle_sizes[current_reticle_size]
    cv2.line(hud_frame, (0, reticle_y - line_gap), (screen_width, reticle_y - line_gap), (255, 255, 255), 1)
    cv2.line(hud_frame, (0, reticle_y + line_gap), (screen_width, reticle_y + line_gap), (255, 255, 255), 1)
    cv2.line(hud_frame, (reticle_x - line_gap, 0), (reticle_x - line_gap, screen_height), (255, 255, 255), 1)
    cv2.line(hud_frame, (reticle_x + line_gap, 0), (reticle_x + line_gap, screen_height), (255, 255, 255), 1)
    # Draw a small filled circle at the center of the reticle
    cv2.circle(hud_frame, (reticle_x, reticle_y), 2, (255, 255, 255), -1)
    # Display reticle size mode in the HUD
    cv2.putText(hud_frame, f"Reticle Size: {reticle_size_names[current_reticle_size]}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Display reticle size selection commands in the HUD
    cv2.putText(hud_frame, "C-Range: '1' M-Range: '2' L-Range: '3'", (10, screen_height - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Resize hud_frame to match webcam_frame
    hud_frame_resized = cv2.resize(hud_frame, (webcam_frame.shape[1], webcam_frame.shape[0]))
    # Combine the webcam frame and the HUD frame
    combined_frame = cv2.addWeighted(webcam_frame, 0.9, hud_frame_resized, 0.9, 0)
    # Display the combined frame
    cv2.imshow('Camera Feed', combined_frame)
    # Check for key press to switch reticle size
    key = cv2.waitKey(1)
    if key == ord('1'):
        current_reticle_size = 1
    elif key == ord('2'):
        current_reticle_size = 2
    elif key == ord('3'):
        current_reticle_size = 3
    # Check for key press to exit
    if key & 0xFF == ord('q'):
        break
# Close all OpenCV windows (this black-screen version has no VideoCapture to release)
cv2.destroyAllWindows()
Image 1: Working reticle (centered)
Code 2:
import cv2
import numpy as np
import time
# Define screen dimensions (adjust according to your display)
screen_width = 800
screen_height = 600
# Define the gap between the parallel lines for each reticle size
reticle_sizes = {
    1: 150,  # Close-range reticle size
    2: 100,  # Medium-range reticle size
    3: 30    # Long-range reticle size
}
# Define reticle size mode names
reticle_size_names = {
1: "Short Range",
2: "Medium Range",
3: "Long Range"
}
# Initialize reticle size and position
current_reticle_size = 2 # Medium-range reticle size initially
reticle_x = screen_width // 2
reticle_y = screen_height // 2
# Initialize mouse position relative to HUD frame
mouse_x = reticle_x
mouse_y = reticle_y
# Initialize object tracker
tracker = None
# Define mouse event callback function
def mouse_event(event, x, y, flags, param):
    global reticle_x, reticle_y, mouse_x, mouse_y, tracker
    if event == cv2.EVENT_MOUSEMOVE:
        mouse_x = x
        mouse_y = y
        reticle_x = x
        reticle_y = y
    elif event == cv2.EVENT_LBUTTONDOWN:
        if abs(x - reticle_x) <= 2 and abs(y - reticle_y) <= 2:
            # Initialize object tracker at the position of the center dot
            bbox_size = reticle_sizes[current_reticle_size] // 2
            bbox = (reticle_x - bbox_size // 2, reticle_y - bbox_size // 2, bbox_size, bbox_size)
            tracker = cv2.TrackerKCF_create()
            # Use black frame as the input for tracker initialization
            init_frame = np.zeros((screen_height, screen_width, 3), dtype=np.uint8)
            ok = tracker.init(init_frame, bbox)
# Create a window and set the mouse event callback function
cv2.namedWindow('Camera Feed')
cv2.setMouseCallback('Camera Feed', mouse_event)
# Initialize variables for FPS calculation
start_time = time.time()
frame_counter = 0
# Create a VideoCapture object to capture video from the camera
cap = cv2.VideoCapture(0) # Use 0 for the default camera, you can change it if you have multiple cameras
# Check if the camera is opened successfully
if not cap.isOpened():
print("Error: Unable to open camera")
exit()
# Set the width and height of the camera frame
cap.set(cv2.CAP_PROP_FRAME_WIDTH, screen_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, screen_height)
while True:
    # Read a frame from the camera
    ret, webcam_frame = cap.read()
    # Check if the frame is read successfully
    if not ret:
        print("Error: Unable to read frame")
        break
    # Calculate FPS
    frame_counter += 1
    elapsed_time = time.time() - start_time
    fps = frame_counter / elapsed_time
    # Draw FPS on the frame
    cv2.putText(webcam_frame, f"FPS: {fps:.2f}", (8, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Object tracking
    if tracker is not None:
        ok, bbox = tracker.update(webcam_frame)
        if ok:
            # Tracking success
            x, y, w, h = [int(v) for v in bbox]
            cv2.rectangle(webcam_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        else:
            # Tracking failure
            cv2.putText(webcam_frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
    # Create a black frame for the reticle and HUD
    hud_frame = np.zeros((screen_height, screen_width, 3), dtype=np.uint8)
    # Draw reticle based on current size
    line_gap = reticle_sizes[current_reticle_size]
    cv2.line(hud_frame, (0, reticle_y - line_gap), (screen_width, reticle_y - line_gap), (255, 255, 255), 1)
    cv2.line(hud_frame, (0, reticle_y + line_gap), (screen_width, reticle_y + line_gap), (255, 255, 255), 1)
    cv2.line(hud_frame, (reticle_x - line_gap, 0), (reticle_x - line_gap, screen_height), (255, 255, 255), 1)
    cv2.line(hud_frame, (reticle_x + line_gap, 0), (reticle_x + line_gap, screen_height), (255, 255, 255), 1)
    # Draw a small filled circle at the center of the reticle
    cv2.circle(hud_frame, (reticle_x, reticle_y), 2, (255, 255, 255), -1)
    # Display reticle size mode in the HUD
    cv2.putText(hud_frame, f"Reticle Size: {reticle_size_names[current_reticle_size]}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Display reticle size selection commands in the HUD
    cv2.putText(hud_frame, "C-Range: '1' M-Range: '2' L-Range: '3'", (10, screen_height - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # Resize hud_frame to match webcam_frame
    hud_frame_resized = cv2.resize(hud_frame, (webcam_frame.shape[1], webcam_frame.shape[0]))
    # Combine the webcam frame and HUD frame
    combined_frame = cv2.addWeighted(webcam_frame, 0.9, hud_frame_resized, 0.9, 0)
    # Display the combined frame
    cv2.imshow('Camera Feed', combined_frame)
    # Check for key press to switch reticle size
    key = cv2.waitKey(1)
    if key == ord('1'):
        current_reticle_size = 1
    elif key == ord('2'):
        current_reticle_size = 2
    elif key == ord('3'):
        current_reticle_size = 3
    # Check for key press to exit
    if key & 0xFF == ord('q'):
        break
# Release the VideoCapture and close all OpenCV windows
cap.release()
cv2.destroyAllWindows()
Image 2: Cursor not centered; the reticle sits off toward the bottom right
I want the reticle's center box to act as the ROI / bounding box: when I click on an object inside that box, the reticle should follow/track the object.
I tried using the small center dot as the "mouse reference", but couldn't get it to work.
I think I need to handle the mouse input relative to the HUD frame rather than the webcam frame, but I still got the same result.
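To make that last idea concrete, here is a minimal sketch of the kind of change I have in mind (an assumption on my part, not something from my working code): build the HUD at whatever size cap.read() actually returns, since the camera may not honor the requested 800x600, so the mouse-callback coordinates and the drawing coordinates share one space and the HUD never needs cv2.resize. The names frame_h and frame_w are ones I'm introducing just for this sketch:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, webcam_frame = cap.read()
if not ret:
    raise SystemExit("Error: Unable to read frame")

# Use the dimensions the camera actually delivers (it may ignore the
# requested 800x600), so the HUD, the mouse coordinates, and the
# displayed frame all live in the same coordinate space.
frame_h, frame_w = webcam_frame.shape[:2]
reticle_x, reticle_y = frame_w // 2, frame_h // 2

def mouse_event(event, x, y, flags, param):
    global reticle_x, reticle_y
    if event == cv2.EVENT_MOUSEMOVE:
        # x, y arrive in the displayed frame's coordinates; no rescaling
        # is needed because the HUD matches the frame size exactly.
        reticle_x, reticle_y = x, y

cv2.namedWindow('Camera Feed')
cv2.setMouseCallback('Camera Feed', mouse_event)

while True:
    ret, webcam_frame = cap.read()
    if not ret:
        break
    # Draw the HUD at the same size as the webcam frame (no cv2.resize).
    hud_frame = np.zeros((frame_h, frame_w, 3), dtype=np.uint8)
    cv2.circle(hud_frame, (reticle_x, reticle_y), 2, (255, 255, 255), -1)
    combined_frame = cv2.addWeighted(webcam_frame, 0.9, hud_frame, 0.9, 0)
    cv2.imshow('Camera Feed', combined_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
In my full code I would draw the reticle lines, text, and tracker box on this same-size hud_frame instead of the fixed 800x600 one. Is matching the sizes like this the right direction, or is there a better way to line up the mouse coordinates with the webcam frame?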