diff --git a/doc/build/dts/api/api.rst b/doc/build/dts/api/api.rst index 31cfb3842edc6..7352317e9dfa5 100644 --- a/doc/build/dts/api/api.rst +++ b/doc/build/dts/api/api.rst @@ -466,6 +466,8 @@ device. WS2812 GPIO driver * - zephyr,touch - touchscreen controller device node. + * - zephyr,videoenc + - Video encoder device, typically an H264 or MJPEG video encoder. * - mcuboot,ram-load-dev - When a Zephyr application is built to be loaded to RAM by MCUboot, with :kconfig:option:`CONFIG_MCUBOOT_BOOTLOADER_MODE_SINGLE_APP_RAM_LOAD`, diff --git a/samples/drivers/video/tcpserversink/Kconfig b/samples/drivers/video/tcpserversink/Kconfig new file mode 100644 index 0000000000000..d8ab8be1c7c13 --- /dev/null +++ b/samples/drivers/video/tcpserversink/Kconfig @@ -0,0 +1,78 @@ +# Copyright (c) 2024 Espressif Systems (Shanghai) Co., Ltd. +# Copyright (c) 2025 STMicroelectronics. +# SPDX-License-Identifier: Apache-2.0 + +mainmenu "TCP camera streaming sample application" + +menu "Video capture configuration" + +config VIDEO_SOURCE_CROP_LEFT + int "Crop area left value" + default 0 + help + Left value of the crop area within the video source. + +config VIDEO_SOURCE_CROP_TOP + int "Crop area top value" + default 0 + help + Top value of the crop area within the video source. + +config VIDEO_SOURCE_CROP_WIDTH + int "Crop area width value" + default 0 + help + Width value of the crop area within the video source. + If set to 0, the crop is not applied. + +config VIDEO_SOURCE_CROP_HEIGHT + int "Crop area height value" + default 0 + help + Height value of the crop area within the video source. + If set to 0, the crop is not applied. + +config VIDEO_FRAME_HEIGHT + int "Height of the video frame" + default 0 + help + Height of the video frame. If set to 0, the default height is used. + +config VIDEO_FRAME_WIDTH + int "Width of the video frame" + default 0 + help + Width of the video frame. If set to 0, the default width is used. 
+ +config VIDEO_PIXEL_FORMAT + string "Pixel format of the video frame" + help + Pixel format of the video frame. If not set, the default pixel format is used. + +config VIDEO_CAPTURE_N_BUFFERING + int "Capture N-buffering" + default 2 + help + Framerate versus memory usage tradeoff. + "2" allows to capture while sending data (optimal framerate). + "1" allows to reduce memory usage but capture framerate is lower. + If not set defaults to "2". + +config VIDEO_CTRL_HFLIP + bool "Mirror the video frame horizontally" + help + If set, mirror the video frame horizontally + +config VIDEO_CTRL_VFLIP + bool "Mirror the video frame vertically" + help + If set, mirror the video frame vertically + +config VIDEO_ENCODED_PIXEL_FORMAT + string "Pixel format of the encoded frame" + help + Pixel format of the encoded frame. + +endmenu + +source "Kconfig.zephyr" diff --git a/samples/drivers/video/tcpserversink/README.rst b/samples/drivers/video/tcpserversink/README.rst index b44d9f3105ad7..4ff4c03573981 100644 --- a/samples/drivers/video/tcpserversink/README.rst +++ b/samples/drivers/video/tcpserversink/README.rst @@ -18,6 +18,9 @@ This samples requires a video capture device and network support. - :zephyr:board:`mimxrt1064_evk` - `MT9M114 camera module`_ +- :zephyr:board:`stm32n6570_dk` +- `MB1854 camera module`_ + Wiring ****** @@ -26,6 +29,12 @@ J35 camera connector. A USB cable should be connected from a host to the micro USB debug connector (J41) in order to get console output via the freelink interface. Ethernet cable must be connected to RJ45 connector. +On :zephyr:board:`stm32n6570_dk`, the MB1854 IMX335 camera module must be plugged in +the CSI-2 camera connector. An RJ45 ethernet cable must be plugged in the ethernet CN6 +connector. For an optimal image experience, it is advised to embed STM32 image signal +processing middleware: https://github.com/stm32-hotspot/zephyr-stm32-mw-isp. 
+ + Building and Running ******************** @@ -49,6 +58,15 @@ a video software pattern generator is supported by using :ref:`snippet-video-sw- :goals: build :compact: +For :zephyr:board:`stm32n6570_dk`, the sample can be built with the following command: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/tcpserversink + :board: stm32n6570_dk + :shield: st_b_cams_imx_mb1854 + :goals: build + :compact: + Sample Output ============= @@ -71,6 +89,13 @@ Example with gstreamer: For video software generator, the default resolution should be width=320 and height=160. +When using compression support, use this GStreamer command line: + +.. code-block:: console + + gst-launch-1.0 tcpclientsrc host=192.0.2.1 port=5000 \ + ! queue ! decodebin ! queue ! fpsdisplaysink sync=false + References ********** diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf new file mode 100644 index 0000000000000..8bd71ecc50267 --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf @@ -0,0 +1,24 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +# Camera interface +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_PIXEL_FORMAT="pRAA" +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_WIDTH=2592 +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_HEIGHT=1944 +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_FRAME_WIDTH=1920 +CONFIG_VIDEO_FRAME_HEIGHT=1080 +CONFIG_VIDEO_PIXEL_FORMAT="NV12" +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_VIDEO_STM32_VENC=y +CONFIG_MAIN_STACK_SIZE=4096 +CONFIG_VIDEO_ENCODED_PIXEL_FORMAT="H264" + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8 diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf new file mode 100644 index 0000000000000..8bd71ecc50267 --- /dev/null +++ 
b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf @@ -0,0 +1,24 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +# Camera interface +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_PIXEL_FORMAT="pRAA" +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_WIDTH=2592 +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_HEIGHT=1944 +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_FRAME_WIDTH=1920 +CONFIG_VIDEO_FRAME_HEIGHT=1080 +CONFIG_VIDEO_PIXEL_FORMAT="NV12" +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_VIDEO_STM32_VENC=y +CONFIG_MAIN_STACK_SIZE=4096 +CONFIG_VIDEO_ENCODED_PIXEL_FORMAT="H264" + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8 diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.overlay b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.overlay new file mode 100644 index 0000000000000..23d7b74de9fce --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.overlay @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2025 STMicroelectronics. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + chosen { + zephyr,videoenc = &venc; + }; +}; + +&venc { + status = "okay"; +}; diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf new file mode 100644 index 0000000000000..8bd71ecc50267 --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf @@ -0,0 +1,24 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +# Camera interface +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_PIXEL_FORMAT="pRAA" +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_WIDTH=2592 +CONFIG_VIDEO_STM32_DCMIPP_SENSOR_HEIGHT=1944 +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_FRAME_WIDTH=1920 +CONFIG_VIDEO_FRAME_HEIGHT=1080 +CONFIG_VIDEO_PIXEL_FORMAT="NV12" +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_VIDEO_STM32_VENC=y +CONFIG_MAIN_STACK_SIZE=4096 +CONFIG_VIDEO_ENCODED_PIXEL_FORMAT="H264" + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8 diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.overlay b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.overlay new file mode 100644 index 0000000000000..23d7b74de9fce --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.overlay @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2025 STMicroelectronics. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + chosen { + zephyr,videoenc = &venc; + }; +}; + +&venc { + status = "okay"; +}; diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index 640a728f64eea..c3192741fc8ac 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -1,11 +1,14 @@ /* * Copyright (c) 2019 Linaro Limited + * Copyright 2025 NXP + * Copyright (c) 2025 STMicroelectronics. * * SPDX-License-Identifier: Apache-2.0 */ #include #include +#include #include #include #include @@ -15,6 +18,9 @@ LOG_MODULE_REGISTER(main, CONFIG_LOG_DEFAULT_LEVEL); #define MY_PORT 5000 #define MAX_CLIENT_QUEUE 1 +/* Assuming that video encoder will at least compress to this ratio */ +#define ESTIMATED_COMPRESSION_RATIO 10 + static ssize_t sendall(int sock, const void *buf, size_t len) { while (len) { @@ -30,17 +36,157 @@ static ssize_t sendall(int sock, const void *buf, size_t len) return 0; } +#if DT_HAS_CHOSEN(zephyr_videoenc) +const struct device *encoder_dev = NULL; + +int configure_encoder() +{ + struct video_buffer *buffer; + struct video_format fmt; + struct video_caps caps; + uint32_t size; + int i = 0; + + encoder_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_videoenc)); + if (!device_is_ready(encoder_dev)) { + LOG_ERR("%s: encoder video device not ready.", + encoder_dev->name); + return -1; + } + + /* Get capabilities */ + caps.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_get_caps(encoder_dev, &caps)) { + LOG_ERR("Unable to retrieve video capabilities"); + return -1; + } + + LOG_INF("- Capabilities:"); + while (caps.format_caps[i].pixelformat) { + const struct video_format_cap *fcap = &caps.format_caps[i]; + /* fourcc to string */ + LOG_INF(" %s width [%u; %u; %u] height [%u; %u; %u]", + VIDEO_FOURCC_TO_STR(fcap->pixelformat), + fcap->width_min, fcap->width_max, fcap->width_step, + fcap->height_min, fcap->height_max, fcap->height_step); + i++; + } + + /* 
Get default/native format */ + fmt.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_get_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to retrieve video format"); + return -1; + } + + printk("Video encoder device detected, format: %s %ux%u\n", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); + +#if CONFIG_VIDEO_FRAME_HEIGHT + fmt.height = CONFIG_VIDEO_FRAME_HEIGHT; +#endif + +#if CONFIG_VIDEO_FRAME_WIDTH + fmt.width = CONFIG_VIDEO_FRAME_WIDTH; +#endif + + if (strcmp(CONFIG_VIDEO_ENCODED_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_ENCODED_PIXEL_FORMAT); + } + + LOG_INF("- Video encoded format: %s %ux%u", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); + + fmt.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_set_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to set format"); + return -1; + } + + /* Set input format */ + if (strcmp(CONFIG_VIDEO_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_PIXEL_FORMAT); + } + + LOG_INF("- Video input format: %s %ux%u", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); + + fmt.type = VIDEO_BUF_TYPE_INPUT; + if (video_set_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to set input format"); + return 0; + } + + /* Alloc output buffer */ + size = fmt.width * fmt.height / ESTIMATED_COMPRESSION_RATIO; + buffer = video_buffer_aligned_alloc(size, CONFIG_VIDEO_BUFFER_POOL_ALIGN, + K_FOREVER); + if (buffer == NULL) { + LOG_ERR("Unable to alloc compressed video buffer size=%d", size); + return -1; + } + buffer->type = VIDEO_BUF_TYPE_OUTPUT; + video_enqueue(encoder_dev, buffer); + + /* Start video encoder */ + buffer->type = VIDEO_BUF_TYPE_OUTPUT; + if (video_stream_start(encoder_dev, VIDEO_BUF_TYPE_OUTPUT)) { + LOG_ERR("Unable to start video encoder"); + return -1; + } + + return 0; +} + +int encode_frame(struct video_buffer *in, struct video_buffer **out) +{ + int ret; + + in->type = VIDEO_BUF_TYPE_INPUT; + video_enqueue(encoder_dev, in); + + 
(*out)->type = VIDEO_BUF_TYPE_OUTPUT; + ret = video_dequeue(encoder_dev, out, K_FOREVER); + if (ret) { + LOG_ERR("Unable to dequeue encoder buf"); + return ret; + } + + return 0; +} + +void stop_encoder(void) +{ + if (video_stream_stop(encoder_dev, VIDEO_BUF_TYPE_OUTPUT)) + LOG_ERR("Unable to stop encoder"); +} +#endif + int main(void) { struct sockaddr_in addr, client_addr; socklen_t client_addr_len = sizeof(client_addr); - struct video_buffer *buffers[2]; + struct video_buffer *buffers[CONFIG_VIDEO_CAPTURE_N_BUFFERING]; struct video_buffer *vbuf = &(struct video_buffer){}; - int i, ret, sock, client; +#if DT_HAS_CHOSEN(zephyr_videoenc) + struct video_buffer *vbuf_out = &(struct video_buffer){}; +#endif + int ret, sock, client; struct video_format fmt; struct video_caps caps; + struct video_frmival frmival; + struct video_frmival_enum fie; enum video_buf_type type = VIDEO_BUF_TYPE_OUTPUT; const struct device *video_dev; +#if (CONFIG_VIDEO_SOURCE_CROP_WIDTH && CONFIG_VIDEO_SOURCE_CROP_HEIGHT) || \ + CONFIG_VIDEO_FRAME_HEIGHT || CONFIG_VIDEO_FRAME_WIDTH + struct video_selection sel = { + .type = VIDEO_BUF_TYPE_OUTPUT, + }; +#endif + size_t bsize; + int i = 0; + int err; video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); if (!device_is_ready(video_dev)) { @@ -80,6 +226,17 @@ int main(void) return 0; } + LOG_INF("- Capabilities:"); + while (caps.format_caps[i].pixelformat) { + const struct video_format_cap *fcap = &caps.format_caps[i]; + /* fourcc to string */ + LOG_INF(" %s width [%u; %u; %u] height [%u; %u; %u]", + VIDEO_FOURCC_TO_STR(fcap->pixelformat), + fcap->width_min, fcap->width_max, fcap->width_step, + fcap->height_min, fcap->height_max, fcap->height_step); + i++; + } + /* Get default/native format */ fmt.type = type; if (video_get_format(video_dev, &fmt)) { @@ -95,9 +252,139 @@ int main(void) return 0; } + /* Set the crop setting if necessary */ +#if CONFIG_VIDEO_SOURCE_CROP_WIDTH && CONFIG_VIDEO_SOURCE_CROP_HEIGHT + sel.target = VIDEO_SEL_TGT_CROP; + 
sel.rect.left = CONFIG_VIDEO_SOURCE_CROP_LEFT; + sel.rect.top = CONFIG_VIDEO_SOURCE_CROP_TOP; + sel.rect.width = CONFIG_VIDEO_SOURCE_CROP_WIDTH; + sel.rect.height = CONFIG_VIDEO_SOURCE_CROP_HEIGHT; + if (video_set_selection(video_dev, &sel)) { + LOG_ERR("Unable to set selection crop"); + return 0; + } + LOG_INF("Selection crop set to (%u,%u)/%ux%u", + sel.rect.left, sel.rect.top, sel.rect.width, sel.rect.height); +#endif + +#if CONFIG_VIDEO_FRAME_HEIGHT || CONFIG_VIDEO_FRAME_WIDTH +#if CONFIG_VIDEO_FRAME_HEIGHT + fmt.height = CONFIG_VIDEO_FRAME_HEIGHT; +#endif + +#if CONFIG_VIDEO_FRAME_WIDTH + fmt.width = CONFIG_VIDEO_FRAME_WIDTH; +#endif + + /* + * Check (if possible) if targeted size is same as crop + * and if compose is necessary + */ + sel.target = VIDEO_SEL_TGT_CROP; + err = video_get_selection(video_dev, &sel); + if (err < 0 && err != -ENOSYS) { + LOG_ERR("Unable to get selection crop"); + return 0; + } + + if (err == 0 && (sel.rect.width != fmt.width || sel.rect.height != fmt.height)) { + sel.target = VIDEO_SEL_TGT_COMPOSE; + sel.rect.left = 0; + sel.rect.top = 0; + sel.rect.width = fmt.width; + sel.rect.height = fmt.height; + err = video_set_selection(video_dev, &sel); + if (err < 0 && err != -ENOSYS) { + LOG_ERR("Unable to set selection compose"); + return 0; + } + } +#endif + + if (strcmp(CONFIG_VIDEO_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_PIXEL_FORMAT); + } + + LOG_INF("- Video format: %s %ux%u", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); + + if (video_set_format(video_dev, &fmt)) { + LOG_ERR("Unable to set format"); + return 0; + } + + if (!video_get_frmival(video_dev, &frmival)) { + LOG_INF("- Default frame rate : %f fps", + 1.0 * frmival.denominator / frmival.numerator); + } + + LOG_INF("- Supported frame intervals for the default format:"); + memset(&fie, 0, sizeof(fie)); + fie.format = &fmt; + while (video_enum_frmival(video_dev, &fie) == 0) { + if (fie.type == VIDEO_FRMIVAL_TYPE_DISCRETE) { 
+ LOG_INF(" %u/%u", fie.discrete.numerator, fie.discrete.denominator); + } else { + LOG_INF(" [min = %u/%u; max = %u/%u; step = %u/%u]", + fie.stepwise.min.numerator, fie.stepwise.min.denominator, + fie.stepwise.max.numerator, fie.stepwise.max.denominator, + fie.stepwise.step.numerator, fie.stepwise.step.denominator); + } + fie.index++; + } + + /* Get supported controls */ + LOG_INF("- Supported controls:"); + const struct device *last_dev = NULL; + struct video_ctrl_query cq = {.dev = video_dev, .id = VIDEO_CTRL_FLAG_NEXT_CTRL}; + + while (!video_query_ctrl(&cq)) { + if (cq.dev != last_dev) { + last_dev = cq.dev; + LOG_INF("\t\tdevice: %s", cq.dev->name); + } + video_print_ctrl(&cq); + cq.id |= VIDEO_CTRL_FLAG_NEXT_CTRL; + } + + /* Set controls */ + struct video_control ctrl = {.id = VIDEO_CID_HFLIP, .val = 1}; + int tp_set_ret = -ENOTSUP; + + if (IS_ENABLED(CONFIG_VIDEO_CTRL_HFLIP)) { + video_set_ctrl(video_dev, &ctrl); + } + + if (IS_ENABLED(CONFIG_VIDEO_CTRL_VFLIP)) { + ctrl.id = VIDEO_CID_VFLIP; + video_set_ctrl(video_dev, &ctrl); + } + + if (IS_ENABLED(CONFIG_TEST)) { + ctrl.id = VIDEO_CID_TEST_PATTERN; + tp_set_ret = video_set_ctrl(video_dev, &ctrl); + } + + /* Size to allocate for each buffer */ + if (caps.min_line_count == LINE_COUNT_HEIGHT) { + if (fmt.pixelformat == VIDEO_PIX_FMT_NV12) { + bsize = fmt.width * fmt.height * + video_bits_per_pixel(fmt.pixelformat) / BITS_PER_BYTE; + } else { + bsize = fmt.pitch * fmt.height; + } + } else { + bsize = fmt.pitch * caps.min_line_count; + } + /* Alloc Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { - buffers[i] = video_buffer_alloc(fmt.pitch * fmt.height, K_FOREVER); + /* + * For some hardwares, such as the PxP used on i.MX RT1170 to do image rotation, + * buffer alignment is needed in order to achieve the best performance + */ + buffers[i] = video_buffer_aligned_alloc(bsize, CONFIG_VIDEO_BUFFER_POOL_ALIGN, + K_FOREVER); if (buffers[i] == NULL) { LOG_ERR("Unable to alloc video buffer"); return 0; @@ 
-117,6 +404,13 @@ int main(void) printk("TCP: Accepted connection\n"); +#if DT_HAS_CHOSEN(zephyr_videoenc) + if (configure_encoder()) { + LOG_ERR("Unable to configure video encoder"); + return 0; + } +#endif + /* Enqueue Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { video_enqueue(video_dev, buffers[i]); @@ -140,16 +434,27 @@ int main(void) return 0; } - printk("\rSending frame %d\n", i++); +#if DT_HAS_CHOSEN(zephyr_videoenc) + encode_frame(vbuf, &vbuf_out); + printk("\rSending compressed frame %d (size=%d bytes)\n", i++, vbuf_out->bytesused); + /* Send compressed video buffer to TCP client */ + ret = sendall(client, vbuf_out->buffer, vbuf_out->bytesused); + + vbuf_out->type = VIDEO_BUF_TYPE_OUTPUT; + video_enqueue(encoder_dev, vbuf_out); +#else + printk("\rSending frame %d\n", i++); /* Send video buffer to TCP client */ ret = sendall(client, vbuf->buffer, vbuf->bytesused); +#endif if (ret && ret != -EAGAIN) { /* client disconnected */ printk("\nTCP: Client disconnected %d\n", ret); close(client); } + vbuf->type = VIDEO_BUF_TYPE_INPUT; (void)video_enqueue(video_dev, vbuf); } while (!ret); @@ -159,8 +464,13 @@ int main(void) return 0; } +#if DT_HAS_CHOSEN(zephyr_videoenc) + stop_encoder(); +#endif + /* Flush remaining buffers */ do { + vbuf->type = VIDEO_BUF_TYPE_INPUT; ret = video_dequeue(video_dev, &vbuf, K_NO_WAIT); } while (!ret);