ERROR cudaFromNumpy #1919

Open
XuanHoai02 opened this issue Dec 9, 2024 · 0 comments
XuanHoai02 commented Dec 9, 2024

Hi @dusty-nv,

When I install jetson-inference and jetson-utils, I get this warning message:

jetson.utils -- compiled without NumPy array conversion support (warning)
jetson.utils -- if you wish to have support for converting NumPy arrays,
jetson.utils -- first run 'sudo apt-get install python-numpy python3-numpy'
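
For reference, this is the quick check I use after installing numpy and rebuilding jetson-utils, to confirm whether the Python bindings actually picked up NumPy support (just my own minimal diagnostic sketch, not part of the jetson-utils API):

import numpy as np
import jetson_utils

# try converting a tiny dummy array; if the bindings were built without
# NumPy support, cudaFromNumpy is missing or raises an exception here
try:
    test_img = jetson_utils.cudaFromNumpy(np.zeros((4, 4, 3), dtype=np.uint8))
    print("NumPy conversion OK:", test_img)
except Exception as e:
    print("NumPy conversion not available:", e)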

Currently, I need to use cudaFromNumpy to convert NumPy arrays to CUDA memory so that my Jetson Nano can process them. Below are the hardware and code I am using.

Hardware:
Jetson Nano - JetPack 4.6 - Ubuntu 20.04
Raspberry Pi Camera Module v2

Code:

import cv2
import jetson_inference
import jetson_utils
import pyglet


class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def getPoint(self):
        return (self.x, self.y)

    def translate(self, dx, dy):
        return Point(self.x + dx, self.y + dy)


class Zone:
    def __init__(self, top_left_point, bottom_right_point):
        # top-left point
        self.tl_point = top_left_point
        # bottom-right point
        self.br_point = bottom_right_point

    def getCenterPoint(self):
        return Point(
            (self.tl_point.x + self.br_point.x) / 2,
            (self.tl_point.y + self.br_point.y) / 2
        )

    def getWidth(self):
        return abs(self.tl_point.x - self.br_point.x)

    def getHeight(self):
        return abs(self.tl_point.y - self.br_point.y)

    def isOverlap(self, otherZone, adjustment=0):
        center = self.getCenterPoint()
        otherZoneCenter = otherZone.getCenterPoint()
        # distance of 2 center points
        distanceX = abs(otherZoneCenter.x - center.x)
        distanceY = abs(otherZoneCenter.y - center.y)
        return (
            distanceX < (self.getWidth() / 2 + otherZone.getWidth() / 2) - adjustment
        ) and (
            distanceY < (self.getHeight() / 2 + otherZone.getHeight() / 2) - adjustment
        )


def draw_warning_zone(warning_zone, img):
    FPS_string = "FPS: {:.0f}".format(net.GetNetworkFPS())
    top_left = warning_zone.tl_point.getPoint()
    bottom_right = warning_zone.br_point.getPoint()
    cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
    cv2.putText(
        img, FPS_string, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 0, 0), 2
    )


def play_sound(sound_name):
    WarningSound = pyglet.resource.media(sound_name)
    WarningSound.play()


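# load the SSD-Mobilenet-v2 detector and open the CSI camera through a GStreamer pipeline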
net = jetson_inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
cap = cv2.VideoCapture(
    'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=940, height=480, format=(string)NV12, framerate=(fraction)60/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink',
    cv2.CAP_GSTREAMER,
)

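# frame counters used to throttle how often the warning sounds play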
count1 = 0
count2 = 0

while True:
    success, img = cap.read()
    if not success:  # stop if the camera read fails
        break
    imgCuda = jetson_utils.cudaFromNumpy(img)
    detections = net.Detect(imgCuda)

    warning_zone = Zone(Point(240, 130), Point(660, 490))
    draw_warning_zone(warning_zone, img)

    for d in detections:
        count2 += 1

        # position of detected object
        top_left_point = Point(int(d.Left), int(d.Top))
        bottom_right_point = Point(int(d.Right), int(d.Bottom))
        object_zone = Zone(top_left_point, bottom_right_point)

        center_x = int(d.Center[0])
        center_y = int(d.Center[1])

        # Draw cross-line of object
        cv2.line(
            img,
            (top_left_point.x, center_y),
            (bottom_right_point.x, center_y),
            (255, 0, 255),
            1,
        )
        cv2.line(
            img,
            (center_x, top_left_point.y),
            (center_x, bottom_right_point.y),
            (255, 0, 255),
            1,
        )
        # Draw center point
        cv2.circle(img, (center_x, center_y), 5, (255, 0, 255), 2)
        # Draw bounding box
        cv2.rectangle(
            img,
            (int(d.Left), int(d.Top)),
            (int(d.Right), int(d.Bottom)),
            (255, 0, 255),
            2,
        )
        # Put the object name in bounding box
        object_name = net.GetClassDesc(d.ClassID)
        cv2.putText(
            img,
            object_name,
            object_zone.tl_point.translate(5, 15).getPoint(),
            cv2.FONT_HERSHEY_DUPLEX,
            0.75,
            (255, 0, 255),
            2,
        )

        objectTypeId = d.ClassID

        if count2 == 40:
            count2 = 0
            if objectTypeId == 10:
                play_sound('ngatu.wav')
            elif objectTypeId == 13:
                play_sound('stop.wav')

        if warning_zone.isOverlap(object_zone, 5):
            count1 += 1
            cv2.putText(
                img,
                'NGUY HIEM',
                warning_zone.tl_point.getPoint(),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (5, 0, 255),
                3,
            )
            if count1 == 50:
                count1 = 0
                if objectTypeId == 1:
                    play_sound('nguoi.wav')
                elif objectTypeId in [2, 3, 4, 6, 10]:
                    play_sound('xe.wav')

    cv2.imshow("Detection frame", img)
    if cv2.waitKey(1) == ord('q'):
        break

cv2.destroyAllWindows()
cap.release()
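
For completeness, the minimal conversion path I am trying to get working looks like this (a sketch that assumes NumPy support is compiled in; I reorder BGR to RGB with OpenCV first since, as far as I understand, detectNet expects RGB-ordered data):

import cv2
import numpy as np
import jetson_inference
import jetson_utils

net = jetson_inference.detectNet("ssd-mobilenet-v2", threshold=0.5)

# dummy BGR frame standing in for a cap.read() result
frame = np.zeros((480, 940, 3), dtype=np.uint8)

# OpenCV delivers BGR; reorder to RGB on the CPU before uploading to CUDA memory
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cuda_img = jetson_utils.cudaFromNumpy(rgb)

detections = net.Detect(cuda_img)
print(len(detections), "objects detected")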
