Skip to main content

3.4.2 JDK Reference Examples

1. Basic Examples

1.1 JDK Frame Structure

JdkFrame is the unified data structure for image transmission between modules:

// Unified image container passed between all JDK modules.
// A frame wraps a DMA buffer (file descriptor) plus its byte size and
// pixel dimensions; the pixel data is NV12 in all examples shown here.
class JdkFrame {
public:
// dma_fd_: DMA buffer file descriptor (-1 presumably means
//          "allocate internally" — see Example 4; TODO confirm)
// size_:   buffer size in bytes (NV12: width * height * 3 / 2)
// w, h:    image dimensions in pixels
JdkFrame(int dma_fd_, size_t size_, int w, int h);
~JdkFrame();

// Copy DMA buffer data to host memory and return pointer
unsigned char* toHost() const;

// Clone data to vector
std::vector<unsigned char> Clone() const;

// Save the raw frame as a YUV NV12 file
bool saveToFile(const std::string& filename) const;
// Load raw data from a file; expected_size guards against a size
// mismatch with the underlying buffer
bool loadFromFile(const std::string& filename, size_t expected_size);

// Underlying DMA buffer file descriptor
int getDMAFd() const;
size_t getSize() const { return size_; }
int getWidth() const { return width_; }
int getHeight() const { return height_; }

// Copy nalu_size bytes from `nalu` into the buffer at `offset`
// (used e.g. to fill a display frame from decoded output)
int MemCopy(const uint8_t* nalu, int nalu_size, int offset = 0);

private:
size_t size_;
int width_;
int height_;
JdkDma dma_;                         // DMA allocator/owner handle
std::shared_ptr<JdkDmaBuffer> data;  // mapped buffer storage
};

1.2 MIPI Camera Capture

// Open the MIPI camera node (NV12 output).
// FIX: V4L2 device nodes live under /dev, not /device — this now matches
// the other examples in this document (e.g. "/dev/video50").
auto camera = JdkCamera::create("/dev/video50", 1920, 1080, V4L2_PIX_FMT_NV12);

// Capture one frame (returns a JdkFrame handle)
auto frame = camera->getFrame();

1.3 Encoding with JdkEncoder

// Create an H.264 encoder that consumes NV12 input
auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Encode one frame; the result holds the encoded bitstream
auto encodedFrame = encoder->Encode(jdkFrame);

1.4 Decoding with JdkDecoder

// Create an H.264 decoder that produces NV12 output
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Decode one encoded frame back to a raw NV12 frame
auto decodedFrame = decoder->Decode(jdkFrame);

1.5 Image Processing with JdkV2D

// Create V2D instance (2D hardware block: format conversion, scaling, OSD)
auto v2d = std::make_shared<JdkV2D>();

// NV12 to RGB888 color-space conversion
auto rgba_frame = v2d->convert_format(input_nv12, V2D_FMT_RGB888);

// Draw a rectangle (presumably 0xAARRGGBB: opaque yellow — TODO confirm), line width 4
v2d->draw_rect(input_nv12, box, 0xFFFFFF00, 4);

// Draw multiple rectangles; each box is {x, y, width, height}
v2d->draw_rects(input_nv12, {{30, 20, 100, 80}, {60, 40, 200, 160}}, 0x00ffcc66, 4);

// Resize frame to 1920x1080
auto resized_frame = v2d->resize(jdkFrame, 1920, 1080);

// Resize and convert format in a single pass
auto converted_frame = v2d->resize_and_convert(jdkFrame, 1920, 1080, V2D_FMT_RGB888);

1.6 SDL Display with JdkVo

// Create a video output object (SDL-backed display, NV12 input)
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);

// Display one frame; `ret` reports the send result
auto ret = jdkvo->sendFrame(jdkFrame);

1.7 DRM Display with JdkDrm

// Create a DRM display object. The third argument (here equal to width)
// is presumably the line stride in pixels — TODO confirm against JdkDrm.hpp.
auto drm = std::make_shared<JdkDrm>(width, height, width, PixelFmt::NV12, "/dev/dri/card1");

// Display one frame via the DRM output
auto ret = drm->sendFrame(jdkFrame1);

1.8 AI Inference with YOLOv8

// Create an inference engine instance from a quantized YOLOv8 ONNX model
auto engine = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");

// Submit a frame and block for the detection result (future-based API)
auto result = engine->commit(jdkFrame).get();

// Overlay the detection boxes onto the NV12 frame
draw_nv12(jdkFrame, std::any_cast<YOLOV8Det::Objects>(result));

2. Comprehensive Examples

2.1 Download & Install SDK

wget https://archive.spacemit.com/ros2/code/jdk_sdk.tar.gz
sudo tar xvf jdk_sdk.tar.gz
jdk_sdk
├── include
│   ├── data_type.hpp
│   ├── IConver.hpp
│   ├── IEngine.hpp
│   ├── IPlugin.hpp
│   ├── ITensor.hpp
│   ├── JdkCamera.hpp
│   ├── JdkDecoder.hpp
│   ├── JdkDma.hpp
│   ├── JdkDrm.hpp
│   ├── JdkEncoder.hpp
│   ├── JdkFrame.hpp
│   ├── jdk_log.h
│   ├── JdkUsbCam.hpp
│   ├── jdkV2d.hpp
│   ├── JdkVo.hpp
│   ├── json.hpp
│   └── Tensor.hpp
├── jdk_examples
│   ├── jdk_cam
│   ├── jdk_client
│   ├── jdk_drm
│   ├── jdk_frame
│   ├── jdk_infer
│   ├── jdk_infer@rtsp
│   ├── jdk_server
│   ├── jdk_usbcam
│   ├── jdk_v2d
│   ├── jdk_vdec
│   ├── jdk_venc
│   └── jdk_vo
├── ko
│   └── jdk_dma.ko
├── lib
│   ├── libengine.so
│   ├── libjdk_cam.so
│   ├── libjdk_dma.so
│   ├── libjdk_drm.so
│   ├── libjdk_frame.so
│   ├── libjdk_usbcam.so
│   ├── libjdk_v2d.so
│   ├── libjdk_vdec.so
│   ├── libjdk_venc.so
│   ├── libjdk_vo.so
│   ├── libnet_client.so
│   └── libnet_server.so
├── Makefile
└── README.md

2.2 Example Modules

jdk_examples
├── jdk_cam
├── jdk_client
├── jdk_drm
├── jdk_frame
├── jdk_infer
├── jdk_infer@rtsp
├── jdk_server
├── jdk_v2d
├── jdk_vdec
├── jdk_venc
└── jdk_vo

2.3 Compile Examples

Enter the camera module directory and compile the example:

cd jdk_sdk
make all

Sample compilation output:

make -C jdk_examples/jdk_cam all
make[1]: Entering directory '/home/work/jdk_sdk/jdk_examples/jdk_cam'
Compile depends C++ src/main.cpp
Compile CXX src/main.cpp
Link workspace/jdk_cam

2.3 Run Examples

Before running the jdk example, you need to install the jdk dma driver.

insmod ./ko/jdk_dma.ko
cd jdk_examples/jdk_usbcam
./workspace/jdk_usbcam /dev/video20

🚨 Tip: Please confirm the device node path according to the actual situation, such as /dev/video20. Device information can be viewed through the v4l2-ctl --list-devices command.

After running, the terminal will output the log information of module initialization and image acquisition.

2.4 Example of startup log

VIDIOC_STREAMON succeeded
[MPP-DEBUG] 10800:module_init:159 +++++++++++++++ module init, module type = 9
[MPP-DEBUG] 10800:find_v4l2_linlonv5v7_plugin:83 yeah! we have v4l2_linlonv5v7_codec plugin---------------
[MPP-DEBUG] 10800:module_init:199 ++++++++++ V4L2_LINLONV5V7 (/usr/lib/libv4l2_linlonv5v7_codec.so)
[MPP-DEBUG] 10800:module_init:199 ++++++++++ open (/usr/lib/libv4l2_linlonv5v7_codec.so) success !
[MPP-DEBUG] 10800:al_dec_create:337 init create
[MPP-DEBUG] 10800:al_dec_init:398 input para check: foramt:0x4745504a output format:0x3231564e input buffer num:12 output buffer num:8
[MPP-DEBUG] 10800:al_dec_init:421 video fd = 4, device path = '/dev/video0'
[MPP-DEBUG] 10800:createCodec:115 create a codec, width=1280 height=720 align=1 inputtype=2 outputtype=9 inputformat=4745504a outputformat=3231564e inputbufnum=12 outputbufnum=8
[MPP-DEBUG] 10800:createPort:80 create a port, type=2 format_fourcc=1195724874
[MPP-DEBUG] 10800:createPort:80 create a port, type=9 format_fourcc=842094158
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=4745504a
[MPP-DEBUG] 10800:printFormat:294 PRINTFORMAT ===== type: 2, format: 1195724874, width: 1280, height: 720, bytesperline: 0, sizeimage: 1048576
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=3231564e
[MPP-DEBUG] 10800:printFormat:283 PRINTFORMAT ===== type: 9, format: 842094158, width: 1280, height: 720, nplanes: 2, bytesperline: [1280 1280 0], sizeimage: [921600 460800 0]
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:2 count:12(12) memory:1
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:8(8) memory:4
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956058513
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956058513
[MPP-DEBUG] 10800:al_dec_init:449 init finish
[MPP-DEBUG] 10800:VO_CreateChannel:43 create VO Channel success!
[MPP-DEBUG] 10800:module_init:159 +++++++++++++++ module init, module type = 101
[MPP-DEBUG] 10800:check_vo_sdl2:121 yeah! have vo_sdl2---------------
[MPP-DEBUG] 10800:find_vo_sdl2_plugin:86 yeah! we have vo_sdl2_plugin plugin---------------
[MPP-DEBUG] 10800:module_init:207 ++++++++++ VO_SDL2 (/usr/lib/libvo_sdl2_plugin.so)
[MPP-DEBUG] 10800:module_init:207 ++++++++++ open (/usr/lib/libvo_sdl2_plugin.so) success !
[MPP-ERROR] 10800:al_vo_init:93 SDL could not initialize! SDL_Error: wayland not available
[MPP-ERROR] 10800:al_vo_init:128 k1 vo_sdl2 init fail
[MPP-DEBUG] 10800:VO_Init:66 init VO Channel, ret = -400
[MPP-ERROR] 10800:JdkVo:32 VO_init failed, please check!
[MPP-INFO] 10801:runpoll:321 Now k1 hardware decoding ...
select: Resource temporarily unavailable
Failed to capture frame 0
NO data, return.
[MPP-DEBUG] 10801:handleEvent:453 get V4L2_EVENT_SOURCE_CHANGE event, do notify!
[MPP-DEBUG] 10800:handleOutputBuffer:1509 Resolution changed:0 new size: 1280 x 720
[MPP-DEBUG] 10800:streamoff:571 Stream off 1751956060839
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:0(0) memory:4
[MPP-DEBUG] 10800:getTrySetFormat:196 width=1280 height=720 align=1 pixel_format=3231564e
[MPP-DEBUG] 10800:printFormat:283 PRINTFORMAT ===== type: 9, format: 842094158, width: 1280, height: 720, nplanes: 2, bytesperline: [1280 1280 0], sizeimage: [921600 460800 0]
[MPP-DEBUG] 10800:allocateBuffers:340 Request buffers. type:9 count:12(12) memory:4
[MPP-DEBUG] 10800:streamon:558 Stream on 1751956060850
[MPP-ERROR] 10800:queueBuffer:461 Failed to queue buffer. type = 9 (Invalid argument)
[MPP-ERROR] 10800:al_dec_return_output_frame:652 queueBuffer failed, this should not happen, please check!
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:1,dma_fd:33 width:1280,height:720,size:1382400
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:2,dma_fd:33 width:1280,height:720,size:1382400
[MPP-DEBUG] 10800:VO_Process:82 vo one packet, ret = 0
index:3,dma_fd:33 width:1280,height:720,size:1382400

2.5 Example 1: USB camera → decoding → display

// Open the USB camera node (MJPEG output)
auto camera = JdkUsbCam::create(device, width, height, V4L2_PIX_FMT_MJPEG);
// Create a JPEG decoder (MJPEG in, NV12 out)
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_MJPEG, PIXEL_FORMAT_NV12);
// Initialize the display module (vo)
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);
// Get a frame of image data
auto frame = camera->getFrame();
// Decode the JPEG image (FIX: statement was missing its semicolon)
auto decFrame = decoder->Decode(frame);
// Send the decoded frame to the display module
auto ret = jdkvo->sendFrame(decFrame);

2.6 Example 2: MIPI camera acquisition → real-time display

// Open the MIPI camera node (FIX: V4L2 nodes live under /dev, not /device)
auto camera = JdkCamera::create("/dev/video50", 1920, 1080, V4L2_PIX_FMT_NV12);
// Initialize the display module (vo)
auto jdkvo = std::make_shared<JdkVo>(1920, 1080, PIXEL_FORMAT_NV12);
// Get a frame of image data
auto frame = camera->getFrame();
// Send the image frame to the display module
auto ret = jdkvo->sendFrame(frame);

2.7 Example 3: MIPI Camera → Coding → RTSP Streaming

// Create a camera acquisition object (MIPI, NV12 output)
auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);

// Create an H.264 encoder object
auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Create an RTSP server (stream name "test", channel 1, port 8554)
auto rtsp_ = std::make_shared<RTSPServer>("test", 1, 8554, VideoCodecType::H264);

// Capture -> encode -> push loop
while (running) {
auto frame = camera->getFrame(); // Get one frame of image data
if (!frame) {
std::cerr << "Failed to capture frame\n";
continue;
}
auto encFrame = encoder->Encode(frame); // Encode the frame to H.264
if (encFrame) { // Encode may yield no output (e.g. while buffering)
size_t sz = encFrame->getSize();
uint8_t* data = (uint8_t*)encFrame->toHost();
rtsp_->send_nalu(data, sz, getTimestamp()); // Push the encoded NALU with a 90 kHz timestamp
}
}

2.8 Example 4: RTSP → Decoding → DRM Display

// Create an RTSP client (device_id / channel_id identify the stream source)
auto netclient = std::make_shared<NetClient>(device_id, channel_id, 0, "");

// Create a JDK decoder (H.264 in, NV12 out)
auto decoder = std::make_shared<JdkDecoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);

// Start receiving the code stream from the RTSP URL
netclient->start("rtsp://admin:123456@169.254.202.148:8554/stream_8554");

// Bind the video callback function (invoked per received video packet)
video_cb_ = std::bind(&NetClient::rtsp_video_cb, this,
std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4,
std::placeholders::_5);

// Video callback: lazily creates the decoder / DRM display / staging frame
// on the first packet, then decodes each packet and pushes it to DRM.
// NOTE(review): the packet that triggers initialization is itself not
// decoded — confirm this is acceptable for the stream's first NALU.
int NetClient::rtsp_video_cb(LPBYTE pdata, int len, unsigned int ts,
unsigned short seq, void* puser) {
if (!decoder_) {
// Initialize the decoder and DRM display module from the stream info
decoder_ = std::make_shared<JdkDecoder>(info_tmp->video.width,
info_tmp->video.height,
CODING_H264, PIXEL_FORMAT_NV12);

drm_ = std::make_shared<JdkDrm>(info_tmp->video.width,
info_tmp->video.height,
info_tmp->video.width,
PixelFmt::NV12, "/dev/dri/card1");

// Staging frame for display; NV12 needs width * height * 3/2 bytes
drmframe = std::make_shared<JdkFrame>(-1,
info_tmp->video.width * info_tmp->video.height * 3 / 2,
info_tmp->video.width,
info_tmp->video.height);
} else {
frame = decoder_->Decode(pdata, len); // Decode one packet
if (frame) { // FIX: guard — Decode may produce no output frame
drmframe->MemCopy(frame->toHost(), frame->getSize()); // Copy into the display buffer
drm_->sendFrame(drmframe); // Display the image
}
}
return 0; // FIX: function is declared int but had no return statement (UB)
}

2.9 Example 5: MIPI Camera → YOLOv8 Reasoning → Picture Frame → Display

// Create a camera acquisition object (MIPI, NV12 output)
auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);
// Initialize the display module (vo)
auto jdkvo = std::make_shared<JdkVo>(width, height, PIXEL_FORMAT_NV12);
// Create an instance of the inference engine (quantized YOLOv8 ONNX model)
auto engine = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");
// Initialize the v2d module (hardware 2D: scaling / box overlay)
auto v2d = std::make_shared<JdkV2D>();
// Get a frame of image data
auto jdkFrame = camera->getFrame();
// Run inference on the frame and block for the result
auto result = engine->commit(jdkFrame).get();
// Overlay the detection boxes via the v2d module
draw_nv12(jdkFrame, std::any_cast<YOLOV8Det::Objects>(result));
// Resize the frame to the 320x320 inference resolution (so the boxes,
// which are in model coordinates, line up) and draw one rectangle per
// detected object using the V2D hardware module.
// frame:      input NV12 frame
// box_result: per-image detection results; only the first image is used
// Returns 0 (also when there is nothing to draw).
int draw_nv12(std::shared_ptr<JdkFrame> frame, YOLOV8Det::Objects box_result)
{
    // FIX: guard against empty results — indexing box_result[0] on an
    // empty container was undefined behavior.
    if (!frame || box_result.empty())
        return 0;
    auto v2d = std::make_shared<JdkV2D>();
    auto result = v2d->resize(frame, 320, 320);
    result->saveToFile("320x320_resize.nv12");
    // Draw each detection box (unused label/confidence locals removed).
    for (const auto& ibox : box_result[0].boxs) {
        v2d->draw_rect(result,
                       {ibox.rect.x, ibox.rect.y, ibox.rect.width, ibox.rect.height},
                       0xFFFF00, 2); // yellow, line width 2
    }
    return 0;
}
// Finally, send the (annotated) frame to the display module
auto ret = jdkvo->sendFrame(jdkFrame);

2.10 Example 6: MIPI camera → YOLOv8 reasoning + RTSP real-time coding streaming (parallel output)

std::atomic<bool>					  running(true);  // cleared by input_thread to stop all loops
safe_queue<std::shared_ptr<JdkFrame>> queue;  // producer: main (capture); consumer: infer_thread

// Block until the user presses Enter, then signal every worker loop
// to stop by clearing the global `running` flag.
void input_thread() {
    std::string line;
    std::getline(std::cin, line);
    running = false;
}

// Worker thread: pull frames from the global `queue` and run YOLOv8
// detection on each one until the global `running` flag is cleared.
void infer_thread() {
    auto detector = YOLOV8Det::create_infer("yolov8n.q.onnx", "onnx");
    if (detector == nullptr) {
        printf("create_infer error!!!!\r\n");
        return;
    }

    while (running) {
        // get() is interruptible via &running so shutdown is not blocked.
        auto frame = queue.get(&running);
        if (frame == nullptr) {
            printf("Warning: frame is NULL!\n");
            continue;
        }
        // Synchronously wait for this frame's detection result.
        auto result = detector->commit(frame).get();
    }
}

// Return the current wall-clock time as a 90 kHz RTP timestamp.
// FIX: widen tv_sec to 64 bits before multiplying — on platforms with a
// 32-bit time_t, `tv.tv_sec * 1000` overflows signed arithmetic (UB).
// The final value intentionally truncates to 32 bits (RTP wraps).
static uint32_t getTimestamp() {
    struct timeval tv = {0};
    gettimeofday(&tv, NULL);
    // milliseconds since the epoch, rounded to the nearest ms
    uint64_t ms = (uint64_t)tv.tv_sec * 1000ULL + ((uint64_t)tv.tv_usec + 500) / 1000;
    return (uint32_t)(ms * 90); // 90 kHz clock => clockRate/1000 = 90 ticks per ms
}

// Example 6 entry point: capture from the MIPI camera, feed every frame
// both to the inference thread (via the shared queue) and to the H.264
// encoder / RTSP server, until the user presses Enter.
int main(int argc, char *argv[]) {
    int width = 1920;
    int height = 1080;
    auto camera = JdkCamera::create("/dev/video50", width, height, V4L2_PIX_FMT_NV12);
    if (!camera) {
        std::cerr << "Failed to create camera\n";
        return -1;
    }
    auto encoder = std::make_shared<JdkEncoder>(width, height, CODING_H264, PIXEL_FORMAT_NV12);
    auto rtsp_ = std::make_shared<RTSPServer>("test", 1, 8554, VideoCodecType::H264);

    std::thread th(input_thread);        // waits for Enter, clears `running`
    std::thread th_infer(infer_thread);  // consumes frames from `queue`

    while (running) {
        auto frame = camera->getFrame();
        if (!frame) {
            std::cerr << "Failed to capture frame " << "\n";
            continue;
        }
        // Hand the frame to the inference thread (shared_ptr, no copy).
        queue.push(frame, &running);
        // Encode and stream the same frame in parallel.
        if (auto encFrame = encoder->Encode(frame); encFrame) {
            size_t sz = encFrame->getSize();
            uint8_t *data = (uint8_t *)encFrame->toHost();
            rtsp_->send_nalu(data, sz, getTimestamp());
        }
        // Progress log every 5000 frames.
        // FIX: `i` was never incremented, so this printed every iteration;
        // also use %zu — getSize() returns size_t, %d was a format mismatch.
        static int i = 0;
        if ((i++ % 5000) == 0) {
            printf("dma_fd:%d width:%d,height:%d,size:%zu\r\n", frame->getDMAFd(), frame->getWidth(), frame->getHeight(), frame->getSize());
        }
    }

    if (th.joinable())
        th.join();
    if (th_infer.joinable())
        th_infer.join();
    std::cout << "✅ Exited by user input." << std::endl;

    return 0;
}

3.Common Issues & Tips

| Issue | Description |
| --- | --- |
| Camera not detected | Check USB connection, try other ports, verify camera model |
| Camera won't open | May be unrecognized or unsupported driver |
| SDL error | Missing system graphics libs, doesn't affect image capture |
| No image display | Check VO init logs, confirm display channel and permissions |