From 3634db32aeffb0a92ddb5572b2238db2ade429a8 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 16 May 2021 01:44:39 +0200 Subject: [PATCH 01/10] WIP: Device configuration --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/device/Device.hpp | 61 +++-- include/depthai/device/DeviceBootloader.hpp | 15 +- include/depthai/pipeline/Pipeline.hpp | 28 ++- shared/depthai-bootloader-shared | 2 +- shared/depthai-shared | 2 +- src/device/Device.cpp | 108 ++++++-- src/device/DeviceBootloader.cpp | 46 ++-- src/pipeline/Pipeline.cpp | 36 ++- src/utility/Resources.cpp | 265 ++++++++++++-------- src/utility/Resources.hpp | 9 +- 11 files changed, 375 insertions(+), 199 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 88d2756dd9..2bb81b1e3e 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "fddbe1b7868bdb67945b50ddc9dfaeb15bba2d60") +set(DEPTHAI_DEVICE_SIDE_COMMIT "8641723d7f46be11969c96d5346636cba2f2334b") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/device/Device.hpp b/include/depthai/device/Device.hpp index 9d575bc17b..aa79d241d6 100644 --- a/include/depthai/device/Device.hpp +++ b/include/depthai/device/Device.hpp @@ -20,6 +20,7 @@ #include "depthai-shared/common/ChipTemperature.hpp" #include "depthai-shared/common/CpuUsage.hpp" #include "depthai-shared/common/MemoryInfo.hpp" +#include "depthai-shared/device/PrebootConfig.hpp" #include "depthai-shared/log/LogLevel.hpp" #include "depthai-shared/log/LogMessage.hpp" @@ -44,6 +45,18 @@ class Device { static constexpr std::size_t EVENT_QUEUE_MAXIMUM_SIZE{2048}; /// Default rate at which system information is logged static constexpr float DEFAULT_SYSTEM_INFORMATION_LOGGING_RATE_HZ{1.0f}; + /// Default 
UsbSpeed for device connection + static constexpr UsbSpeed DEFAULT_USB_SPEED{UsbSpeed::SUPER}; + + // Structures + + /** + * Device specific configuration + */ + struct Config { + OpenVINO::Version version; + PrebootConfig preboot; + }; // static API @@ -106,15 +119,16 @@ class Device { /** * Connects to any available device with a DEFAULT_SEARCH_TIME timeout. * @param pipeline Pipeline to be executed on the device - * @param pathToCmd Path to custom device firmware + * @param maxUsbSpeed Maximum allowed USB speed */ - Device(const Pipeline& pipeline, const char* pathToCmd); + Device(const Pipeline& pipeline, UsbSpeed maxUsbSpeed); /** * Connects to any available device with a DEFAULT_SEARCH_TIME timeout. * @param pipeline Pipeline to be executed on the device * @param pathToCmd Path to custom device firmware */ + Device(const Pipeline& pipeline, const char* pathToCmd); Device(const Pipeline& pipeline, const std::string& pathToCmd); /** @@ -129,16 +143,17 @@ class Device { * Connects to device specified by devInfo. * @param pipeline Pipeline to be executed on the device * @param devInfo DeviceInfo which specifies which device to connect to - * @param pathToCmd Path to custom device firmware + * @param maxUsbSpeed Maximum allowed USB speed */ - Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const char* pathToCmd); + Device(const Pipeline& pipeline, const DeviceInfo& devInfo, UsbSpeed maxUsbSpeed); /** * Connects to device specified by devInfo. 
* @param pipeline Pipeline to be executed on the device * @param devInfo DeviceInfo which specifies which device to connect to - * @param usb2Mode Path to custom device firmware + * @param pathToCmd Path to custom device firmware */ + Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const char* pathToCmd); Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const std::string& pathToCmd); /** @@ -157,15 +172,16 @@ class Device { /** * Connects to any available device with a DEFAULT_SEARCH_TIME timeout. * @param version OpenVINO version which the device will be booted with - * @param pathToCmd Path to custom device firmware + * @param maxUsbSpeed Maximum allowed USB speed */ - Device(OpenVINO::Version version, const char* pathToCmd); + Device(OpenVINO::Version version, UsbSpeed maxUsbSpeed); /** * Connects to any available device with a DEFAULT_SEARCH_TIME timeout. * @param version OpenVINO version which the device will be booted with * @param pathToCmd Path to custom device firmware */ + Device(OpenVINO::Version version, const char* pathToCmd); Device(OpenVINO::Version version, const std::string& pathToCmd); /** @@ -180,18 +196,26 @@ class Device { * Connects to device specified by devInfo. * @param version OpenVINO version which the device will be booted with * @param devInfo DeviceInfo which specifies which device to connect to - * @param pathToCmd Path to custom device firmware + * @param maxUsbSpeed Maximum USB speed */ - Device(OpenVINO::Version version, const DeviceInfo& devInfo, const char* pathToCmd); + Device(OpenVINO::Version version, const DeviceInfo& devInfo, UsbSpeed maxUsbSpeed); /** * Connects to device specified by devInfo. 
* @param version OpenVINO version which the device will be booted with * @param devInfo DeviceInfo which specifies which device to connect to - * @param usb2Mode Path to custom device firmware + * @param pathToCmd Path to custom device firmware */ + Device(OpenVINO::Version version, const DeviceInfo& devInfo, const char* pathToCmd); Device(OpenVINO::Version version, const DeviceInfo& devInfo, const std::string& pathToCmd); + /** + * Connects to device 'devInfo' with custom config. + * @param devInfo DeviceInfo which specifies which device to connect to + * @param config Device custom configuration to boot with + */ + Device(const DeviceInfo& devInfo, Config config); + /** * Device destructor. Closes the connection and data queues. */ @@ -451,6 +475,13 @@ class Device { */ CpuUsage getLeonMssCpuUsage(); + /** + * Retrieves USB connection speed + * + * @returns USB connection speed of connected device if applicable. Unknown otherwise. + */ + UsbSpeed getUsbSpeed(); + /** * Explicitly closes connection to device. 
* @note This function does not need to be explicitly called @@ -464,10 +495,12 @@ class Device { bool isClosed() const; private: - // private static + // private void init(OpenVINO::Version version, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd); void init(const Pipeline& pipeline, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd); - void init2(bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd, tl::optional pipeline); + void init(OpenVINO::Version version, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); + void init(const Pipeline& pipeline, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); + void init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcmd, tl::optional pipeline); void checkClosed() const; std::shared_ptr connection; @@ -514,8 +547,8 @@ class Device { class Impl; Pimpl pimpl; - // OpenVINO version device was booted with - OpenVINO::Version openvinoVersion; + // Device config + Config config; }; } // namespace dai diff --git a/include/depthai/device/DeviceBootloader.hpp b/include/depthai/device/DeviceBootloader.hpp index f4e5d04b42..c9f5827c05 100644 --- a/include/depthai/device/DeviceBootloader.hpp +++ b/include/depthai/device/DeviceBootloader.hpp @@ -1,6 +1,7 @@ #pragma once // std +#include #include #include #include @@ -8,6 +9,7 @@ // project #include "CallbackHandler.hpp" #include "DataQueue.hpp" +#include "Device.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/xlink/XLinkConnection.hpp" #include "depthai/xlink/XLinkStream.hpp" @@ -41,6 +43,11 @@ class DeviceBootloader { unsigned versionMajor, versionMinor, versionPatch; }; + struct Config { + Config() : timeout(3) {} + std::chrono::milliseconds timeout; + }; + // Static API /** * Searches for connected devices in either UNBOOTED or BOOTLOADER states and returns first available. 
@@ -60,7 +67,7 @@ class DeviceBootloader { * @param pathToCmd Optional path to custom device firmware * @returns Depthai application package */ - static std::vector createDepthaiApplicationPackage(Pipeline& pipeline, std::string pathToCmd = ""); + static std::vector createDepthaiApplicationPackage(Pipeline& pipeline, UsbSpeed maxUsbSpeed = Device::DEFAULT_USB_SPEED); /** * Saves application package to a file which can be flashed to depthai device. @@ -68,7 +75,7 @@ class DeviceBootloader { * @param pipeline Pipeline from which to create the application package * @param pathToCmd Optional path to custom device firmware */ - static void saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, std::string pathToCmd = ""); + static void saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, UsbSpeed maxUsbSpeed = Device::DEFAULT_USB_SPEED); /** * @returns Embedded bootloader version @@ -78,7 +85,7 @@ class DeviceBootloader { /** * @returns Embedded bootloader binary */ - static std::vector getEmbeddedBootloaderBinary(); + static std::vector getEmbeddedBootloaderBinary(Config config); // DeviceBootloader() = delete; @@ -121,7 +128,7 @@ class DeviceBootloader { * @param progressCallback Callback that sends back a value between 0..1 which signifies current flashing progress * @param path Optional parameter to custom bootloader to flash */ - std::tuple flashBootloader(std::function progressCallback, std::string path = ""); + std::tuple flashBootloader(std::function progressCallback, Config config = {}); /** * @returns Version of current running bootloader diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 31a937bea4..a45545fee0 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -12,6 +12,7 @@ #include "depthai/openvino/OpenVINO.hpp" // shared +#include "depthai-shared/device/PrebootConfig.hpp" #include "depthai-shared/pipeline/PipelineSchema.hpp" #include 
"depthai-shared/properties/GlobalProperties.hpp" @@ -33,9 +34,11 @@ class PipelineImpl { // Functions Node::Id getNextUniqueId(); PipelineSchema getPipelineSchema() const; - OpenVINO::Version getPipelineOpenVINOVersion() const; + tl::optional getPipelineOpenVINOVersion() const; + bool isOpenVINOVersionCompatible(OpenVINO::Version version) const; AssetManager getAllAssets() const; void setCameraTuningBlobPath(const std::string& path); + PrebootConfig getDevicePrebootConfig() const; // Access to nodes std::vector> getAllNodes() const; @@ -43,7 +46,7 @@ class PipelineImpl { std::shared_ptr getNode(Node::Id id) const; std::shared_ptr getNode(Node::Id id); - void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, OpenVINO::Version& version) const; + void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const; void remove(std::shared_ptr node); std::vector getConnections() const; @@ -118,8 +121,8 @@ class Pipeline { PipelineSchema getPipelineSchema(); // void loadAssets(AssetManager& assetManager); - void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, OpenVINO::Version& version) const { - impl()->serialize(schema, assets, assetStorage, version); + void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const { + impl()->serialize(schema, assets, assetStorage); } /** @@ -216,8 +219,13 @@ class Pipeline { impl()->forceRequiredOpenVINOVersion = version; } - /// Get required OpenVINO version to run this pipeline + /// Get possible OpenVINO version to run this pipeline OpenVINO::Version getOpenVINOVersion() const { + return impl()->getPipelineOpenVINOVersion().value_or(Pipeline::DEFAULT_OPENVINO_VERSION); + } + + /// Get required OpenVINO version to run this pipeline. 
Can be none + tl::optional getRequiredOpenVINOVersion() const { return impl()->getPipelineOpenVINOVersion(); } @@ -225,6 +233,16 @@ class Pipeline { void setCameraTuningBlobPath(const std::string& path) { impl()->setCameraTuningBlobPath(path); } + + /// Checks whether a given OpenVINO version is compatible with the pipeline + bool isOpenVINOVersionCompatible(OpenVINO::Version version) const { + return impl()->isOpenVINOVersionCompatible(version); + } + + /// Checks whether a given OpenVINO version is compatible with the pipeline + PrebootConfig getDevicePrebootConfig() const { + return impl()->getDevicePrebootConfig(); + } }; } // namespace dai diff --git a/shared/depthai-bootloader-shared b/shared/depthai-bootloader-shared index c8779ca581..8e9965a34a 160000 --- a/shared/depthai-bootloader-shared +++ b/shared/depthai-bootloader-shared @@ -1 +1 @@ -Subproject commit c8779ca581659803b6f3e465cc2bde7b22203be0 +Subproject commit 8e9965a34a2c8de02a151f111e2397657e2ddf52 diff --git a/shared/depthai-shared b/shared/depthai-shared index cd7a857f4b..e23302538d 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit cd7a857f4bc4a4d0e5348a6cc6a536c0a3abc7b7 +Subproject commit e23302538d867042e1ec6c2eb6ff723e7c4d538a diff --git a/src/device/Device.cpp b/src/device/Device.cpp index ed3c425c50..641618b31a 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -84,6 +84,7 @@ template std::tuple Device::getAnyAvailableDevice(std::chrono: constexpr std::chrono::seconds Device::DEFAULT_SEARCH_TIME; constexpr std::size_t Device::EVENT_QUEUE_MAXIMUM_SIZE; constexpr float Device::DEFAULT_SYSTEM_INFORMATION_LOGGING_RATE_HZ; +constexpr UsbSpeed Device::DEFAULT_USB_SPEED; template std::tuple Device::getAnyAvailableDevice(std::chrono::duration timeout) { @@ -214,6 +215,10 @@ Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, bool usb2Mod init(pipeline, true, usb2Mode, ""); } +Device::Device(const Pipeline& pipeline, const 
DeviceInfo& devInfo, UsbSpeed maxUsbSpeed) : deviceInfo(devInfo) { + init(pipeline, true, maxUsbSpeed, ""); +} + Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const char* pathToCmd) : deviceInfo(devInfo) { init(pipeline, false, false, std::string(pathToCmd)); } @@ -254,6 +259,16 @@ Device::Device(const Pipeline& pipeline, const std::string& pathToCmd) { init(pipeline, false, false, pathToCmd); } +Device::Device(const Pipeline& pipeline, UsbSpeed maxUsbSpeed) { + // Searches for any available device for 'default' timeout + bool found = false; + std::tie(found, deviceInfo) = getAnyAvailableDevice(); + + // If no device found, throw + if(!found) throw std::runtime_error("No available devices"); + init(pipeline, true, maxUsbSpeed, ""); +} + Device::Device(const Pipeline& pipeline, bool usb2Mode) { // Searches for any available device for 'default' timeout bool found = false; @@ -268,6 +283,10 @@ Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, bool usb2Mo init(version, true, usb2Mode, ""); } +Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, UsbSpeed maxUsbSpeed) : deviceInfo(devInfo) { + init(version, true, maxUsbSpeed, ""); +} + Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, const char* pathToCmd) : deviceInfo(devInfo) { init(version, false, false, std::string(pathToCmd)); } @@ -318,6 +337,20 @@ Device::Device(OpenVINO::Version version, bool usb2Mode) { init(version, true, usb2Mode, ""); } +Device::Device(OpenVINO::Version version, UsbSpeed maxUsbSpeed) { + // Searches for any available device for 'default' timeout + bool found = false; + std::tie(found, deviceInfo) = getAnyAvailableDevice(); + + // If no device found, throw + if(!found) throw std::runtime_error("No available devices"); + init(version, true, maxUsbSpeed, ""); +} + +Device::Device(const DeviceInfo& devInfo, Config config) { + init2(config, false, {}, {}); +} + void Device::close() { // Only allow to close once 
if(closed.exchange(true)) return; @@ -372,35 +405,56 @@ Device::~Device() { } void Device::init(OpenVINO::Version version, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd) { - // Initalize depthai library if not already - initialize(); - + Config cfg; + // Specify usb speed + cfg.preboot.maxUsbSpeed = usb2Mode ? UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; // Specify the OpenVINO version - openvinoVersion = version; - - spdlog::debug("Device - OpenVINO version: {}", OpenVINO::getVersionName(openvinoVersion)); - - init2(embeddedMvcmd, usb2Mode, pathToMvcmd, tl::nullopt); + cfg.version = version; + init2(cfg, embeddedMvcmd, pathToMvcmd, {}); } - void Device::init(const Pipeline& pipeline, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd) { + Config cfg; + // Specify usb speed + cfg.preboot.maxUsbSpeed = usb2Mode ? UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; + // Specify the OpenVINO version + cfg.version = pipeline.getOpenVINOVersion(); + init2(cfg, embeddedMvcmd, pathToMvcmd, pipeline); +} +void Device::init(OpenVINO::Version version, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { + Config cfg; + // Specify usb speed + cfg.preboot.maxUsbSpeed = maxUsbSpeed; + // Specify the OpenVINO version + cfg.version = version; + init2(cfg, embeddedMvcmd, pathToMvcmd, {}); +} +void Device::init(const Pipeline& pipeline, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { + Config cfg; + // Specify usb speed + cfg.preboot.maxUsbSpeed = maxUsbSpeed; + // Specify the OpenVINO version + cfg.version = pipeline.getOpenVINOVersion(); + init2(cfg, embeddedMvcmd, pathToMvcmd, pipeline); +} + +void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcmd, tl::optional pipeline) { // Initalize depthai library if not already initialize(); - // Mark the OpenVINO version - openvinoVersion = pipeline.getOpenVINOVersion(); - - spdlog::debug("Device - pipeline serialized, OpenVINO version: 
{}", OpenVINO::getVersionName(openvinoVersion)); + // Specify cfg + config = cfg; - init2(embeddedMvcmd, usb2Mode, pathToMvcmd, pipeline); -} + if(pipeline) { + spdlog::debug("Device - pipeline serialized, OpenVINO version: {}", OpenVINO::getVersionName(config.version)); + } else { + spdlog::debug("Device - OpenVINO version: {}", OpenVINO::getVersionName(config.version)); + } -void Device::init2(bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd, tl::optional pipeline) { // Set logging pattern of device (device id + shared pattern) pimpl->setPattern(fmt::format("[{}] {}", deviceInfo.getMxId(), LOG_DEFAULT_PATTERN)); // Get embedded mvcmd - std::vector embeddedFw = Resources::getInstance().getDeviceFirmware(usb2Mode, openvinoVersion); + std::vector embeddedFw = Resources::getInstance().getDeviceFirmware(config); // Init device (if bootloader, handle correctly - issue USB boot command) if(deviceInfo.state == X_LINK_UNBOOTED) { @@ -838,6 +892,12 @@ CpuUsage Device::getLeonMssCpuUsage() { return client->call("getLeonMssCpuUsage").as(); } +UsbSpeed Device::getUsbSpeed() { + checkClosed(); + + return client->call("getUsbSpeed").as(); +} + bool Device::isPipelineRunning() { checkClosed(); @@ -923,16 +983,16 @@ bool Device::startPipeline(const Pipeline& pipeline) { throw std::runtime_error("Pipeline is already running"); } + // Check openvino version + if(!pipeline.isOpenVINOVersionCompatible(config.version)) { + throw std::runtime_error("Device booted with different OpenVINO version that pipeline requires"); + } + + // Serialize the pipeline PipelineSchema schema; Assets assets; std::vector assetStorage; - - // Mark the OpenVINO version and serialize the pipeline - OpenVINO::Version pipelineOpenvinoVersion; - pipeline.serialize(schema, assets, assetStorage, pipelineOpenvinoVersion); - if(openvinoVersion != pipelineOpenvinoVersion) { - throw std::runtime_error("Device booted with different OpenVINO version that pipeline requires"); - } + 
pipeline.serialize(schema, assets, assetStorage); // Open queues upfront, let queues know about data sizes (input queues) // Go through Pipeline and check for 'XLinkIn' and 'XLinkOut' nodes diff --git a/src/device/DeviceBootloader.cpp b/src/device/DeviceBootloader.cpp index c578afd007..c9872fc160 100644 --- a/src/device/DeviceBootloader.cpp +++ b/src/device/DeviceBootloader.cpp @@ -51,24 +51,21 @@ std::vector DeviceBootloader::getAllAvailableDevices() { return availableDevices; } -std::vector DeviceBootloader::createDepthaiApplicationPackage(Pipeline& pipeline, std::string pathToCmd) { +std::vector DeviceBootloader::createDepthaiApplicationPackage(Pipeline& pipeline, UsbSpeed maxUsbSpeed) { // Serialize the pipeline PipelineSchema schema; Assets assets; std::vector assetStorage; - OpenVINO::Version version; - pipeline.serialize(schema, assets, assetStorage, version); + pipeline.serialize(schema, assets, assetStorage); + + // Prepare Device::Config + Device::Config cfg; + cfg.version = pipeline.getOpenVINOVersion(); + cfg.preboot = pipeline.getDevicePrebootConfig(); + cfg.preboot.maxUsbSpeed = maxUsbSpeed; // Prepare device firmware - std::vector deviceFirmware; - if(pathToCmd != "") { - std::ifstream fwStream(pathToCmd, std::ios::binary); - if(!fwStream.is_open()) throw std::runtime_error("Cannot create application package, device firmware at path: " + pathToCmd + " doesn't exist"); - deviceFirmware = std::vector(std::istreambuf_iterator(fwStream), {}); - } else { - // TODO(themarpe) - specify OpenVINO version - deviceFirmware = Resources::getInstance().getDeviceFirmware(false, version); - } + std::vector deviceFirmware = Resources::getInstance().getDeviceFirmware(cfg); // Create msgpacks std::vector pipelineBinary, assetsBinary; @@ -155,7 +152,11 @@ void DeviceBootloader::init(bool embeddedMvcmd, const std::string& pathToMvcmd) if(deviceInfo.state == X_LINK_UNBOOTED) { // Unbooted device found, boot to BOOTLOADER and connect with XLinkConnection constructor 
if(embeddedMvcmd) { - connection = std::make_shared(deviceInfo, getEmbeddedBootloaderBinary(), X_LINK_BOOTLOADER); + Config cfg; + // Never timeout + cfg.timeout = std::chrono::milliseconds(-1); + + connection = std::make_shared(deviceInfo, getEmbeddedBootloaderBinary(cfg), X_LINK_BOOTLOADER); } else { connection = std::make_shared(deviceInfo, pathToMvcmd, X_LINK_BOOTLOADER); } @@ -272,8 +273,8 @@ std::tuple DeviceBootloader::flash(std::function return flashDepthaiApplicationPackage(progressCb, createDepthaiApplicationPackage(pipeline)); } -void DeviceBootloader::saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, std::string pathToCmd) { - auto dap = createDepthaiApplicationPackage(pipeline, pathToCmd); +void DeviceBootloader::saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, UsbSpeed maxUsbSpeed) { + auto dap = createDepthaiApplicationPackage(pipeline, maxUsbSpeed); std::ofstream outfile(path, std::ios::binary); outfile.write(reinterpret_cast(dap.data()), dap.size()); } @@ -317,15 +318,8 @@ std::tuple DeviceBootloader::flashDepthaiApplicationPackage(s return {result.success, result.errorMsg}; } -std::tuple DeviceBootloader::flashBootloader(std::function progressCb, std::string path) { - std::vector package; - if(path != "") { - std::ifstream fwStream(path, std::ios::binary); - if(!fwStream.is_open()) throw std::runtime_error("Cannot flash bootloader, binary at path: " + path + " doesn't exist"); - package = std::vector(std::istreambuf_iterator(fwStream), {}); - } else { - package = getEmbeddedBootloaderBinary(); - } +std::tuple DeviceBootloader::flashBootloader(std::function progressCb, DeviceBootloader::Config config) { + std::vector package = getEmbeddedBootloaderBinary(config); // get streamId streamId_t streamId = stream->getStreamId(); @@ -371,8 +365,8 @@ bool DeviceBootloader::isEmbeddedVersion() { return isEmbedded; } -std::vector DeviceBootloader::getEmbeddedBootloaderBinary() { - return 
Resources::getInstance().getBootloaderFirmware(); +std::vector DeviceBootloader::getEmbeddedBootloaderBinary(Config config) { + return Resources::getInstance().getBootloaderFirmware(config); } DeviceBootloader::Version::Version(const std::string& v) : versionMajor(0), versionMinor(0), versionPatch(0) { diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 853dd2fc14..984b7773b1 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -101,15 +101,12 @@ std::vector> PipelineImpl::getAllNodes() { return nodes; } -void PipelineImpl::serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, OpenVINO::Version& version) const { +void PipelineImpl::serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const { // Set schema schema = getPipelineSchema(); // set assets and generate asset storage getAllAssets().serialize(assets, assetStorage); - - // detect and set openvino version - version = getPipelineOpenVINOVersion(); } AssetManager PipelineImpl::getAllAssets() const { @@ -200,7 +197,16 @@ PipelineSchema PipelineImpl::getPipelineSchema() const { return schema; } -OpenVINO::Version PipelineImpl::getPipelineOpenVINOVersion() const { +bool PipelineImpl::isOpenVINOVersionCompatible(OpenVINO::Version version) const { + auto ver = getPipelineOpenVINOVersion(); + if(ver) { + return OpenVINO::areVersionsBlobCompatible(version, *ver); + } else { + return true; + } +} + +tl::optional PipelineImpl::getPipelineOpenVINOVersion() const { // Loop over nodes, and get the required information tl::optional version; std::string lastNodeNameWithRequiredVersion = ""; @@ -240,17 +246,23 @@ OpenVINO::Version PipelineImpl::getPipelineOpenVINOVersion() const { } } - // After iterating over, set openvinoVersion - OpenVINO::Version openvinoVersion = DEFAULT_OPENVINO_VERSION; + // After iterating over, return appropriate version if(forceRequiredOpenVINOVersion) { - // set to forced version - openvinoVersion = 
*forceRequiredOpenVINOVersion; + // Return forced version + return forceRequiredOpenVINOVersion; } else if(version) { - // set to detected version - openvinoVersion = *version; + // Return detected version + return version; + } else { + // Return null + return tl::nullopt; } +} - return openvinoVersion; +PrebootConfig PipelineImpl::getDevicePrebootConfig() const { + PrebootConfig cfg{}; + // TODO - rest of preboot config + return cfg; } void PipelineImpl::setCameraTuningBlobPath(const std::string& path) { diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index 62d1227b81..071daaa537 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -16,6 +16,9 @@ #include "spdlog/fmt/chrono.h" #include "spdlog/spdlog.h" +// shared +#include "depthai-bootloader-shared/PrebootConfig.hpp" + extern "C" { #include "bspatch/bspatch.h" } @@ -28,46 +31,42 @@ CMRC_DECLARE(depthai); namespace dai { -static std::vector getEmbeddedBootloaderBinary(); +static std::vector createPrebootHeader(const std::vector& payload); #ifdef DEPTHAI_RESOURCES_TAR_XZ constexpr static auto CMRC_DEPTHAI_DEVICE_TAR_XZ = "depthai-device-fwp-" DEPTHAI_DEVICE_VERSION ".tar.xz"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_1_PATH = "depthai-device-openvino-2020.1-" DEPTHAI_DEVICE_VERSION ".cmd"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_3_PATH = "depthai-device-openvino-2020.3-" DEPTHAI_DEVICE_VERSION ".cmd"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_2_PATH = DEPTHAI_CMD_OPENVINO_2020_3_PATH; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_4_PATH = "depthai-device-openvino-2020.4-" DEPTHAI_DEVICE_VERSION ".cmd"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2021_1_PATH = "depthai-device-openvino-2021.1-" DEPTHAI_DEVICE_VERSION ".cmd"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2021_2_PATH = "depthai-device-openvino-2021.2-" DEPTHAI_DEVICE_VERSION ".cmd"; + +// Main FW constexpr static auto DEPTHAI_CMD_OPENVINO_2021_3_PATH = "depthai-device-openvino-2021.3-" 
DEPTHAI_DEVICE_VERSION ".cmd"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_1_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2020.1-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_3_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2020.3-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_2_USB2_PATCH_PATH = DEPTHAI_CMD_OPENVINO_2020_3_USB2_PATCH_PATH; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_4_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2020.4-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2021_1_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2021.1-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2021_2_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2021.2-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2021_3_USB2_PATCH_PATH = "depthai-device-usb2-patch-openvino-2021.3-" DEPTHAI_DEVICE_VERSION ".patch"; - -constexpr static std::array resourcesListTarXz = { - DEPTHAI_CMD_OPENVINO_2020_1_PATH, - DEPTHAI_CMD_OPENVINO_2020_2_PATH, - DEPTHAI_CMD_OPENVINO_2020_3_PATH, - DEPTHAI_CMD_OPENVINO_2020_4_PATH, - DEPTHAI_CMD_OPENVINO_2021_1_PATH, - DEPTHAI_CMD_OPENVINO_2021_2_PATH, - DEPTHAI_CMD_OPENVINO_2021_3_PATH, - DEPTHAI_CMD_OPENVINO_2020_1_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2020_2_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2020_3_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2020_4_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2021_1_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2021_2_USB2_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2021_3_USB2_PATCH_PATH, -}; - -std::vector Resources::getDeviceBinary(OpenVINO::Version version, bool usb2Mode) { - std::vector finalCmd; +// Patches from Main FW +constexpr static auto DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH = "depthai-device-openvino-2020.1-" DEPTHAI_DEVICE_VERSION ".patch"; +constexpr static auto DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH = 
"depthai-device-openvino-2020.3-" DEPTHAI_DEVICE_VERSION ".patch"; +constexpr static auto DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH = DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH; +constexpr static auto DEPTHAI_CMD_OPENVINO_2020_4_PATCH_PATH = "depthai-device-openvino-2020.4-" DEPTHAI_DEVICE_VERSION ".patch"; +constexpr static auto DEPTHAI_CMD_OPENVINO_2021_1_PATCH_PATH = "depthai-device-openvino-2021.1-" DEPTHAI_DEVICE_VERSION ".patch"; +constexpr static auto DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH = "depthai-device-openvino-2021.2-" DEPTHAI_DEVICE_VERSION ".patch"; + +// Creates std::array without explicitly needing to state the size +template +static constexpr auto array_of(T&&... t) -> std::array { + return {{std::forward(t)...}}; +} + +constexpr static auto resourcesListTarXz = array_of(DEPTHAI_CMD_OPENVINO_2021_3_PATH, + DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH, + DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH, + DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH, + DEPTHAI_CMD_OPENVINO_2020_4_PATCH_PATH, + DEPTHAI_CMD_OPENVINO_2021_1_PATCH_PATH, + DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH); + +std::vector Resources::getDeviceBinary(Device::Config config) { + std::vector depthaiBinary; + + // Get OpenVINO version + auto& version = config.version; // Check if env variable DEPTHAI_DEVICE_BINARY is set auto fwBinaryPath = spdlog::details::os::getenv("DEPTHAI_DEVICE_BINARY"); @@ -80,83 +79,82 @@ std::vector Resources::getDeviceBinary(OpenVINO::Version version, throw std::runtime_error(fmt::format("File at path {} pointed to by DEPTHAI_DEVICE_BINARY doesn't exist.", fwBinaryPath)); } - return std::vector(std::istreambuf_iterator(stream), {}); - } - + depthaiBinary = std::vector(std::istreambuf_iterator(stream), {}); + } else { // Binaries are resource compiled #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES - std::vector& depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_PATH]; - std::vector& depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_USB2_PATCH_PATH]; - - switch(version) { - case 
OpenVINO::VERSION_2020_1: - spdlog::warn("OpenVino version 2020.1 is deprecated and will be removed in the next release!"); - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2020_1_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_1_USB2_PATCH_PATH]; - break; - - case OpenVINO::VERSION_2020_2: - spdlog::warn("OpenVino version 2020.2 is deprecated and will be removed in the next release!"); - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2020_2_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_2_USB2_PATCH_PATH]; - break; - - case OpenVINO::VERSION_2020_3: - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2020_3_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_3_USB2_PATCH_PATH]; - break; - - case OpenVINO::VERSION_2020_4: - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2020_4_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_4_USB2_PATCH_PATH]; - break; - - case OpenVINO::VERSION_2021_1: - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_1_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_1_USB2_PATCH_PATH]; - break; - case OpenVINO::VERSION_2021_2: - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_2_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_2_USB2_PATCH_PATH]; - break; - case OpenVINO::VERSION_2021_3: - depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_PATH]; - depthaiUsb2Patch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_USB2_PATCH_PATH]; - break; - } - - if(usb2Mode) { - #ifdef DEPTHAI_PATCH_ONLY_MODE - - // Get new size - int64_t patchedSize = bspatch_mem_get_newsize(depthaiUsb2Patch.data(), depthaiUsb2Patch.size()); - - // Reserve space for patched binary - finalCmd.resize(patchedSize); + // Temporary binary + std::vector tmpDepthaiBinary; + // Main FW + depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_PATH]; + // Patch from main to specified + std::vector& depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH]; + + 
switch(version) { + case OpenVINO::VERSION_2020_1: + spdlog::warn("OpenVino version 2020.1 is deprecated and will be removed in the next release!"); + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2020_2: + spdlog::warn("OpenVino version 2020.2 is deprecated and will be removed in the next release!"); + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2020_3: + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2020_4: + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_4_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2021_1: + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_1_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2021_2: + depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH]; + break; + + case OpenVINO::VERSION_2021_3: + depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_PATH]; + break; + } - // Patch - int error = bspatch_mem(depthaiBinary.data(), depthaiBinary.size(), depthaiUsb2Patch.data(), depthaiUsb2Patch.size(), finalCmd.data()); + // is patching required? 
+ if(version != OpenVINO::VERSION_2021_3) { + spdlog::debug("Patching OpenVINO FW version from {} to {}", OpenVINO::getVersionName(OpenVINO::VERSION_2021_3), OpenVINO::getVersionName(version)); - // if patch not successful - if(error > 0) throw std::runtime_error("Error while patching cmd for usb2 mode"); + // Get new size + int64_t patchedSize = bspatch_mem_get_newsize(depthaiPatch.data(), depthaiPatch.size()); - #else + // Reserve space for patched binary + tmpDepthaiBinary.resize(patchedSize); - static_assert("Unsupported currently"); + // Patch + int error = bspatch_mem(depthaiBinary.data(), depthaiBinary.size(), depthaiPatch.data(), depthaiPatch.size(), tmpDepthaiBinary.data()); - #endif + // if patch not successful + if(error > 0) throw std::runtime_error("Error while patching cmd for usb2 mode"); - } else { - return depthaiBinary; - } + // Change depthaiBinary to tmpDepthaiBinary + depthaiBinary = tmpDepthaiBinary; + } #else - // Binaries from default path (TODO) + // Binaries from default path (TODO) #endif + } - return finalCmd; + // Prepend preboot config + auto prebootHeader = createPrebootHeader(nlohmann::json::to_msgpack(config.preboot)); + depthaiBinary.insert(depthaiBinary.begin(), prebootHeader.begin(), prebootHeader.end()); + + // Return created firmware + return depthaiBinary; } #else @@ -343,8 +341,7 @@ Resources::~Resources() { if(lazyThread.joinable()) lazyThread.join(); } -// Get device firmware -std::vector Resources::getDeviceFirmware(bool usb2Mode, OpenVINO::Version version) { +std::vector Resources::getDeviceFirmware(Device::Config config) { // Acquire mutex (this mutex signifies that lazy load is complete) // It is necessary when accessing resourceMap variable std::unique_lock lock(mtx); @@ -352,25 +349,34 @@ std::vector Resources::getDeviceFirmware(bool usb2Mode, OpenVINO:: #ifdef DEPTHAI_RESOURCES_TAR_XZ // Return device firmware - return getDeviceBinary(version, usb2Mode); + return getDeviceBinary(config); #else // Return device 
firmware - return getEmbeddedDeviceBinary(usb2Mode); + return getEmbeddedDeviceBinary(config); #endif } -std::vector Resources::getBootloaderFirmware() { - // Acquire mutex (this mutex signifies that lazy load is complete) - // It is necessary when accessing resourceMap variable - std::unique_lock lock(mtx); +// Get device firmware +std::vector Resources::getDeviceFirmware(bool usb2Mode, OpenVINO::Version version) { + Device::Config cfg; + if(usb2Mode) { + cfg.preboot.maxUsbSpeed = UsbSpeed::HIGH; + } else { + cfg.preboot.maxUsbSpeed = Device::DEFAULT_USB_SPEED; + } + cfg.version = version; - return getEmbeddedBootloaderBinary(); + return getDeviceFirmware(cfg); } -std::vector getEmbeddedBootloaderBinary() { +std::vector Resources::getBootloaderFirmware(DeviceBootloader::Config config) { + // Convert Config to PrebootConfig + bootloader::PrebootConfig prebootCfg; + prebootCfg.timeoutMs = config.timeout.count(); + // Binaries are resource compiled #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES @@ -379,8 +385,17 @@ std::vector getEmbeddedBootloaderBinary() { // Get binaries from internal sources auto fs = cmrc::depthai::get_filesystem(); + // Load the bootloader FW auto bootloaderBinary = fs.open(CMRC_DEPTHAI_BOOTLOADER_PATH); - return std::vector(bootloaderBinary.begin(), bootloaderBinary.end()); + std::vector bootloaderFw{bootloaderBinary.begin(), bootloaderBinary.end()}; + + // Create prebootCfg header + const uint8_t* pCfg = reinterpret_cast(&prebootCfg); + auto header = createPrebootHeader({pCfg, pCfg + sizeof(prebootCfg)}); + bootloaderFw.insert(bootloaderFw.begin(), header.begin(), header.end()); + + // Return final bootloader fw + return bootloaderFw; #else static_assert(0 && "Unsupported"); @@ -388,4 +403,38 @@ std::vector getEmbeddedBootloaderBinary() { #endif } +std::vector createPrebootHeader(const std::vector& payload) { + constexpr const std::uint8_t HEADER[] = {77, 65, 50, 120, 0x8A, 0x00, 0x00, 0x00, 0x70}; + + // Store the constructed preboot information 
+ std::vector prebootHeader; + + // Store initial header + prebootHeader.insert(prebootHeader.begin(), std::begin(HEADER), std::end(HEADER)); + + // Calculate size + std::size_t totalPayloadSize = payload.size() + sizeof(std::uint32_t); + std::size_t toAddBytes = 4 - (totalPayloadSize % 4); + std::size_t totalSize = totalPayloadSize + toAddBytes; + std::size_t totalSizeWord = totalSize / 4; + + // Write size in words in little endian + prebootHeader.push_back((totalSizeWord >> 0) & 0xFF); + prebootHeader.push_back((totalSizeWord >> 8) & 0xFF); + + // Write payload size (uint32_t LE) + for(int i = 0; i < 4; i++) { + prebootHeader.push_back((payload.size() >> (i * 8)) & 0xFF); + } + + // Copy payload + prebootHeader.insert(prebootHeader.end(), payload.begin(), payload.end()); + // Add missing bytes + for(std::size_t i = 0; i < toAddBytes; i++) { + prebootHeader.push_back(0x00); + } + + return prebootHeader; +} + } // namespace dai diff --git a/src/utility/Resources.hpp b/src/utility/Resources.hpp index a4b841d238..b1e1e3756e 100644 --- a/src/utility/Resources.hpp +++ b/src/utility/Resources.hpp @@ -7,11 +7,13 @@ #include #include +// project #include +#include +#include namespace dai { - class Resources { // private constructor Resources(); @@ -21,7 +23,7 @@ class Resources { std::thread lazyThread; std::unordered_map> resourceMap; - std::vector getDeviceBinary(OpenVINO::Version version, bool usb2Mode); + std::vector getDeviceBinary(Device::Config config); public: static Resources& getInstance(); @@ -30,7 +32,8 @@ class Resources { // Available resources std::vector getDeviceFirmware(bool usb2Mode, OpenVINO::Version version = OpenVINO::VERSION_2020_1); - std::vector getBootloaderFirmware(); + std::vector getDeviceFirmware(Device::Config config); + std::vector getBootloaderFirmware(DeviceBootloader::Config config); }; From 00d4dc4526c1fab101aa1d42d0d0186fbceca49e Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 28 May 2021 13:02:32 +0200 Subject: [PATCH 02/10] 
Refactored and added preboot config --- CMakeLists.txt | 2 +- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- cmake/Hunter/config.cmake | 60 ++--- examples/CMakeLists.txt | 162 ++++++++----- examples/src/camera_mobilenet_example.cpp | 131 ----------- .../src/camera_mobilenet_sync_example.cpp | 18 +- examples/src/camera_preview_example.cpp | 63 ----- examples/src/camera_video_example.cpp | 63 ----- examples/src/depth_crop_control.cpp | 110 +++++++++ examples/src/depth_preview.cpp | 68 ++++++ examples/src/device_queue_event.cpp | 50 ++++ examples/src/device_queue_event_example.cpp | 64 ------ examples/src/encoding_max_limit.cpp | 86 +++++++ examples/src/h264_encoding_example.cpp | 70 ------ examples/src/image_manip_example.cpp | 32 ++- examples/src/mjpeg_encoding_example.cpp | 45 ++-- ...mobilenet_device_side_decoding_example.cpp | 124 ---------- examples/src/mono_camera_control.cpp | 149 ++++++++++++ examples/src/mono_camera_example.cpp | 57 ----- examples/src/mono_depth_mobilenetssd.cpp | 153 +++++++++++++ examples/src/mono_full_resolution_saver.cpp | 53 +++++ examples/src/mono_mobilenet.cpp | 116 ++++++++++ examples/src/mono_preview.cpp | 55 +++++ ...tracker_example.cpp => object_tracker.cpp} | 122 +++++----- examples/src/opencv_support.cpp | 52 +++++ examples/src/opencv_support_example.cpp | 56 ----- ...rol_example.cpp => rgb_camera_control.cpp} | 144 ++++++------ ...gned_example.cpp => rgb_depth_aligned.cpp} | 60 ++--- examples/src/rgb_encoding.cpp | 55 +++++ examples/src/rgb_encoding_mobilenet.cpp | 127 ++++++++++ examples/src/rgb_encoding_mono_mobilenet.cpp | 162 +++++++++++++ .../src/rgb_encoding_mono_mobilenet_depth.cpp | 216 ++++++++++++++++++ examples/src/rgb_full_resolution_saver.cpp | 64 ++++++ examples/src/rgb_mobilenet.cpp | 145 ++++++++++++ examples/src/rgb_mobilenet_4k.cpp | 130 +++++++++++ examples/src/rgb_mono_encoding.cpp | 82 +++++++ examples/src/rgb_preview.cpp | 55 +++++ ...p_warp_example.cpp => rgb_rotate_warp.cpp} | 94 ++++---- 
examples/src/rgb_video.cpp | 45 ++++ ...le.cpp => spatial_location_calculator.cpp} | 74 +++--- ...enet_example.cpp => spatial_mobilenet.cpp} | 164 +++++++------ ...example.cpp => spatial_mobilenet_mono.cpp} | 165 +++++++------ ...example.cpp => spatial_object_tracker.cpp} | 127 +++++----- ...yolo_example.cpp => spatial_tiny_yolo.cpp} | 163 +++++++------ ...reo_example.cpp => stereo_depth_video.cpp} | 72 +++--- ...ion_example.cpp => system_information.cpp} | 71 +++--- ... => tiny_yolo_v3_device_side_decoding.cpp} | 173 +++++++------- ...y_yolo_v3_device_side_decoding_example.cpp | 137 ----------- .../src/tiny_yolo_v4_device_side_decoding.cpp | 159 +++++++++++++ examples/src/utility.cpp | 38 ++- examples/src/utility.hpp | 1 + examples/src/video_mobilenet.cpp | 112 +++++++++ examples/src/webcam_mobilenet_example.cpp | 32 +-- include/depthai/common/CameraBoardSocket.hpp | 25 ++ include/depthai/common/UsbSpeed.hpp | 31 +++ include/depthai/device/Device.hpp | 3 +- include/depthai/device/DeviceBootloader.hpp | 15 +- include/depthai/pipeline/node/MonoCamera.hpp | 11 +- include/depthai/pipeline/node/StereoDepth.hpp | 5 +- shared/depthai-bootloader-shared | 2 +- shared/depthai-shared | 2 +- shared/depthai-shared.cmake | 5 +- src/device/CallbackHandler.cpp | 2 +- src/device/DataQueue.cpp | 2 +- src/device/Device.cpp | 12 +- src/device/DeviceBootloader.cpp | 44 ++-- src/opencv/ImgFrame.cpp | 11 +- src/pipeline/node/MonoCamera.cpp | 2 +- src/utility/Resources.cpp | 49 ++-- src/utility/Resources.hpp | 2 +- tests/src/image_manip_node_test.cpp | 2 +- tests/src/neural_network_test.cpp | 2 +- 72 files changed, 3329 insertions(+), 1728 deletions(-) delete mode 100644 examples/src/camera_mobilenet_example.cpp delete mode 100644 examples/src/camera_preview_example.cpp delete mode 100644 examples/src/camera_video_example.cpp create mode 100644 examples/src/depth_crop_control.cpp create mode 100644 examples/src/depth_preview.cpp create mode 100644 examples/src/device_queue_event.cpp 
delete mode 100644 examples/src/device_queue_event_example.cpp create mode 100644 examples/src/encoding_max_limit.cpp delete mode 100644 examples/src/h264_encoding_example.cpp delete mode 100644 examples/src/mobilenet_device_side_decoding_example.cpp create mode 100644 examples/src/mono_camera_control.cpp delete mode 100644 examples/src/mono_camera_example.cpp create mode 100644 examples/src/mono_depth_mobilenetssd.cpp create mode 100644 examples/src/mono_full_resolution_saver.cpp create mode 100644 examples/src/mono_mobilenet.cpp create mode 100644 examples/src/mono_preview.cpp rename examples/src/{object_tracker_example.cpp => object_tracker.cpp} (55%) create mode 100644 examples/src/opencv_support.cpp delete mode 100644 examples/src/opencv_support_example.cpp rename examples/src/{color_camera_control_example.cpp => rgb_camera_control.cpp} (57%) rename examples/src/{rgb_depth_aligned_example.cpp => rgb_depth_aligned.cpp} (71%) create mode 100644 examples/src/rgb_encoding.cpp create mode 100644 examples/src/rgb_encoding_mobilenet.cpp create mode 100644 examples/src/rgb_encoding_mono_mobilenet.cpp create mode 100644 examples/src/rgb_encoding_mono_mobilenet_depth.cpp create mode 100644 examples/src/rgb_full_resolution_saver.cpp create mode 100644 examples/src/rgb_mobilenet.cpp create mode 100644 examples/src/rgb_mobilenet_4k.cpp create mode 100644 examples/src/rgb_mono_encoding.cpp create mode 100644 examples/src/rgb_preview.cpp rename examples/src/{image_manip_warp_example.cpp => rgb_rotate_warp.cpp} (72%) create mode 100644 examples/src/rgb_video.cpp rename examples/src/{spatial_location_calculator_example.cpp => spatial_location_calculator.cpp} (79%) rename examples/src/{spatial_mobilenet_example.cpp => spatial_mobilenet.cpp} (61%) rename examples/src/{spatial_mobilenet_mono_example.cpp => spatial_mobilenet_mono.cpp} (60%) rename examples/src/{spatial_object_tracker_example.cpp => spatial_object_tracker.cpp} (67%) rename 
examples/src/{spatial_tiny_yolo_example.cpp => spatial_tiny_yolo.cpp} (68%) rename examples/src/{stereo_example.cpp => stereo_depth_video.cpp} (70%) rename examples/src/{system_information_example.cpp => system_information.cpp} (65%) rename examples/src/{tiny_yolo_v4_device_side_decoding_example.cpp => tiny_yolo_v3_device_side_decoding.cpp} (51%) delete mode 100644 examples/src/tiny_yolo_v3_device_side_decoding_example.cpp create mode 100644 examples/src/tiny_yolo_v4_device_side_decoding.cpp create mode 100644 examples/src/video_mobilenet.cpp create mode 100644 include/depthai/common/CameraBoardSocket.hpp create mode 100644 include/depthai/common/UsbSpeed.hpp mode change 100755 => 100644 src/opencv/ImgFrame.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index ea0ae3be7c..669541110c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,7 +26,7 @@ if(WIN32) endif() # Create depthai project -project(depthai VERSION "2.3.0" LANGUAGES CXX C) +project(depthai VERSION "2.4.0" LANGUAGES CXX C) get_directory_property(has_parent PARENT_DIRECTORY) if(has_parent) set(DEPTHAI_VERSION ${PROJECT_VERSION} PARENT_SCOPE) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 2bb81b1e3e..ef84e670f4 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "8641723d7f46be11969c96d5346636cba2f2334b") +set(DEPTHAI_DEVICE_SIDE_COMMIT "67acb9bdda92ca767d26ce5a264740bb72524372") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake index 3b96418293..4aaf3f142c 100644 --- a/cmake/Hunter/config.cmake +++ b/cmake/Hunter/config.cmake @@ -8,8 +8,8 @@ hunter_config( hunter_config( XLink VERSION "luxonis-2021.2-develop" - URL 
"https://github.com/luxonis/XLink/archive/ee361ecba950335390ad539e509f8ab96313b6b4.tar.gz" - SHA1 "72108319bf2289d91157a3933663ed5fb2b6eb18" + URL "https://github.com/luxonis/XLink/archive/48549564792b32cfff9292b3f3cd89710151775e.tar.gz" + SHA1 "c8893d59051da5abf99bc09cf818877d9e4fa88f" ) hunter_config( @@ -34,33 +34,33 @@ hunter_config( URL "https://github.com/luxonis/libarchive/archive/cf2caf0588fc5e2af22cae37027d3ff6902e096f.tar.gz" SHA1 "e99477d32ce14292fe652dc5f4f460d3af8fbc93" CMAKE_ARGS - ENABLE_ACL=OFF - ENABLE_BZip2=OFF - ENABLE_CAT=OFF - ENABLE_CAT_SHARED=OFF - ENABLE_CNG=OFF - ENABLE_COVERAGE=OFF - ENABLE_CPIO=OFF - ENABLE_CPIO_SHARED=OFF - ENABLE_EXPAT=OFF - ENABLE_ICONV=OFF - ENABLE_INSTALL=ON - ENABLE_LIBB2=OFF - ENABLE_LIBXML2=OFF - ENABLE_LZ4=OFF - ENABLE_LZMA=ON - ENABLE_LZO=OFF - ENABLE_LibGCC=OFF - ENABLE_MBEDTLS=OFF - ENABLE_NETTLE=OFF - ENABLE_OPENSSL=OFF - ENABLE_PCREPOSIX=OFF - ENABLE_SAFESEH=AUTO - ENABLE_TAR=OFF - ENABLE_TAR_SHARED=OFF - ENABLE_TEST=OFF - ENABLE_WERROR=OFF - ENABLE_XATTR=OFF - ENABLE_ZLIB=OFF + ENABLE_ACL=OFF + ENABLE_BZip2=OFF + ENABLE_CAT=OFF + ENABLE_CAT_SHARED=OFF + ENABLE_CNG=OFF + ENABLE_COVERAGE=OFF + ENABLE_CPIO=OFF + ENABLE_CPIO_SHARED=OFF + ENABLE_EXPAT=OFF + ENABLE_ICONV=OFF + ENABLE_INSTALL=ON + ENABLE_LIBB2=OFF + ENABLE_LIBXML2=OFF + ENABLE_LZ4=OFF + ENABLE_LZMA=ON + ENABLE_LZO=OFF + ENABLE_LibGCC=OFF + ENABLE_MBEDTLS=OFF + ENABLE_NETTLE=OFF + ENABLE_OPENSSL=OFF + ENABLE_PCREPOSIX=OFF + ENABLE_SAFESEH=AUTO + ENABLE_TAR=OFF + ENABLE_TAR_SHARED=OFF + ENABLE_TEST=OFF + ENABLE_WERROR=OFF + ENABLE_XATTR=OFF + ENABLE_ZLIB=OFF ENABLE_ZSTD=OFF ) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index aa63e340d1..d20fb20d16 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -56,17 +56,6 @@ endmacro() # Hunter test data download -## message(STATUS "Location of test1.data: ${test1_data}") - -# Color camera preview output example -dai_add_example(camera_preview 
src/camera_preview_example.cpp) - -# Color camera video output example -dai_add_example(camera_video src/camera_video_example.cpp) - -# Mono camera video output example -dai_add_example(mono_camera src/mono_camera_example.cpp) - # NeuralNetwork node, mobilenet example hunter_private_data( URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob" @@ -91,6 +80,14 @@ hunter_private_data( LOCATION tiny_yolo_v4_blob ) +# NeuralNetwork node, mobilenet example, 5 shaves +hunter_private_data( + URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob" + SHA1 "d715f85e474609cf3f696d7a2e3750804ed6c726" + FILE "mobilenet-ssd_openvino_2021.2_5shave.blob" + LOCATION mobilenet_5shaves_blob +) + # NeuralNetwork node, mobilenet example, 8 shaves hunter_private_data( URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob" @@ -99,80 +96,133 @@ hunter_private_data( LOCATION mobilenet_8shaves_blob ) -dai_add_example(camera_mobilenet src/camera_mobilenet_example.cpp) -target_compile_definitions(camera_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +# Video file with objects to detect +hunter_private_data( + URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4" + SHA1 "271d8d0b702e683ce02957db7c100843de5ceaec" + FILE "construction_vest.mp4" + LOCATION construction_vest +) -# NeuralNetwork node, webcam input (from host) -dai_add_example(webcam_mobilenet src/webcam_mobilenet_example.cpp) -target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}") +## message(STATUS "Location of test1.data: ${test1_data}") -# MJPEG encoding -dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) +# RGB camera preview output example +dai_add_example(rgb_preview src/rgb_preview.cpp) -# h264 encoding 
-dai_add_example(h264_encoding src/h264_encoding_example.cpp) +# Mono camera preview output example +dai_add_example(mono_preview src/mono_preview.cpp) -# StereoDepth example -dai_add_example(stereo src/stereo_example.cpp) +# Depth preview output example +dai_add_example(depth_preview src/depth_preview.cpp) -# Image Manip node examples -dai_add_example(image_manip src/image_manip_example.cpp) -dai_add_example(image_manip_warp src/image_manip_warp_example.cpp) +# rgb encoding +dai_add_example(rgb_encoding src/rgb_encoding.cpp) -# Color Camera config example -dai_add_example(color_camera_control src/color_camera_control_example.cpp) +# rgb+mono encoding +dai_add_example(rgb_mono_encoding src/rgb_mono_encoding.cpp) -# System information example -dai_add_example(system_information src/system_information_example.cpp) +dai_add_example(rgb_full_resolution_saver src/rgb_full_resolution_saver.cpp) -# Device getQueueEvent example -dai_add_example(device_queue_event src/device_queue_event_example.cpp) +dai_add_example(mono_full_resolution_saver src/mono_full_resolution_saver.cpp) -# OpenCV support example -dai_add_example(opencv_support src/opencv_support_example.cpp) +dai_add_example(rgb_mobilenet src/rgb_mobilenet.cpp) +target_compile_definitions(rgb_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -# RGB-depth alignment example -dai_add_example(rgb_depth_aligned src/rgb_depth_aligned_example.cpp) +dai_add_example(mono_mobilenet src/mono_mobilenet.cpp) +target_compile_definitions(mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -# Device side decoding example for mobilenet-ssd -dai_add_example(mobilenet_device_side_decoding src/mobilenet_device_side_decoding_example.cpp) -target_compile_definitions(mobilenet_device_side_decoding PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(mono_depth_mobilenetssd src/mono_depth_mobilenetssd.cpp) +target_compile_definitions(mono_depth_mobilenetssd PRIVATE BLOB_PATH="${mobilenet_blob}") -# Device side decoding example for 
mobilenet-ssd with 3d coordinates on RGB camera -dai_add_example(spatial_mobilenet src/spatial_mobilenet_example.cpp) -target_compile_definitions(spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(rgb_encoding_mono_mobilenet src/rgb_encoding_mono_mobilenet.cpp) +target_compile_definitions(rgb_encoding_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -# Device side decoding example for mobilenet-ssd with 3d coordinates on right camera -dai_add_example(spatial_mobilenet_mono src/spatial_mobilenet_mono_example.cpp) -target_compile_definitions(spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(rgb_encoding_mono_mobilenet_depth src/rgb_encoding_mono_mobilenet_depth.cpp) +target_compile_definitions(rgb_encoding_mono_mobilenet_depth PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(encoding_max_limit src/encoding_max_limit.cpp) + +# RGB Camera config example +dai_add_example(rgb_camera_control src/rgb_camera_control.cpp) + +dai_add_example(mono_camera_control src/mono_camera_control.cpp) + +dai_add_example(depth_crop_control src/depth_crop_control.cpp) + +dai_add_example(rgb_mobilenet_4k src/rgb_mobilenet_4k.cpp) +target_compile_definitions(rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_5shaves_blob}") + +# Device getQueueEvent example +dai_add_example(device_queue_event src/device_queue_event.cpp) + +dai_add_example(video_mobilenet src/video_mobilenet.cpp) +target_compile_definitions(video_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}" VIDEO_PATH="${construction_vest}") + +dai_add_example(rgb_encoding_mobilenet src/rgb_encoding_mobilenet.cpp) +target_compile_definitions(rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") + +# Image Manip node examples +dai_add_example(image_manip src/image_manip_example.cpp) + +# Image manip node example with warping +dai_add_example(rgb_rotate_warp src/rgb_rotate_warp.cpp) # Device side decoding example for tiny-yolo-v3 
-dai_add_example(tiny_yolo_v3_device_side_decoding src/tiny_yolo_v3_device_side_decoding_example.cpp) +dai_add_example(tiny_yolo_v3_device_side_decoding src/tiny_yolo_v3_device_side_decoding.cpp) target_compile_definitions(tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") # Device side decoding example for tiny-yolo-v4 -dai_add_example(tiny_yolo_v4_device_side_decoding src/tiny_yolo_v4_device_side_decoding_example.cpp) +dai_add_example(tiny_yolo_v4_device_side_decoding src/tiny_yolo_v4_device_side_decoding.cpp) target_compile_definitions(tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") -# Sync example between NN and camera frames -dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) -target_compile_definitions(camera_mobilenet_sync PRIVATE BLOB_PATH="${mobilenet_blob}") +# OpenCV support example +dai_add_example(opencv_support src/opencv_support.cpp) + +# System information example +dai_add_example(system_information src/system_information.cpp) -dai_add_example(spatial_location_calculator src/spatial_location_calculator_example.cpp) +# Device side decoding example for mobilenet-ssd with 3d coordinates on RGB camera +dai_add_example(spatial_mobilenet src/spatial_mobilenet.cpp) +target_compile_definitions(spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") + +# Device side decoding example for mobilenet-ssd with 3d coordinates on right camera +dai_add_example(spatial_mobilenet_mono src/spatial_mobilenet_mono.cpp) +target_compile_definitions(spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") # Device side decoding example for tiny-yolo-v3 with 3d coordinates on RGB camera -dai_add_example(spatial_tiny_yolo_v3 src/spatial_tiny_yolo_example.cpp) +dai_add_example(spatial_tiny_yolo_v3 src/spatial_tiny_yolo.cpp) target_compile_definitions(spatial_tiny_yolo_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") # Device side decoding example for tiny-yolo-v4 with 3d coordinates on RGB camera 
-dai_add_example(spatial_tiny_yolo_v4 src/spatial_tiny_yolo_example.cpp) +dai_add_example(spatial_tiny_yolo_v4 src/spatial_tiny_yolo.cpp) target_compile_definitions(spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") +dai_add_example(spatial_location_calculator src/spatial_location_calculator.cpp) + +# RGB camera video output example +dai_add_example(rgb_video src/rgb_video.cpp) + # Object tracker example on mobilenet SSD output -dai_add_example(object_tracker_example src/object_tracker_example.cpp) -target_compile_definitions(object_tracker_example PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(object_tracker src/object_tracker.cpp) +target_compile_definitions(object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") # Spatial Object tracker example on mobilenet SSD output -dai_add_example(spatial_object_tracker_example src/spatial_object_tracker_example.cpp) -target_compile_definitions(spatial_object_tracker_example PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(spatial_object_tracker src/spatial_object_tracker.cpp) +target_compile_definitions(spatial_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") + +# Stereo Depth example +dai_add_example(stereo_depth_video src/stereo_depth_video.cpp) + +# NeuralNetwork node, webcam input (from host) +dai_add_example(webcam_mobilenet src/webcam_mobilenet_example.cpp) +target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}") +# MJPEG encoding +dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) + +# RGB-depth alignment example +dai_add_example(rgb_depth_aligned src/rgb_depth_aligned.cpp) + +# Sync example between NN and camera frames +dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) +target_compile_definitions(camera_mobilenet_sync PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/camera_mobilenet_example.cpp b/examples/src/camera_mobilenet_example.cpp deleted file mode 100644 index 5f094058d2..0000000000 --- 
a/examples/src/camera_mobilenet_example.cpp +++ /dev/null @@ -1,131 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -static bool syncNN = true; - -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto nn1 = p.create(); - auto nnOut = p.create(); - - nn1->setBlobPath(nnPath); - - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(nn1->input); - if(syncNN) { - nn1->passthrough.link(xlinkOut->input); - } else { - colorCam->preview.link(xlinkOut->input); - } - - nn1->out.link(nnOut->input); - - return p; -} - -int main(int argc, char** argv) { - using namespace std; - - // Default blob path provided by Hunter private data download - // Applicable for easier example usage only - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - cv::Mat frame; - auto preview = d.getOutputQueue("preview"); - auto detections = d.getOutputQueue("detections"); - - while(1) { - auto imgFrame = preview->get(); - if(imgFrame) { - printf("Frame - w: %d, h: %d\n", imgFrame->getWidth(), imgFrame->getHeight()); - frame = toMat(imgFrame->getData(), imgFrame->getWidth(), imgFrame->getHeight(), 3, 1); - } - - struct Detection { - unsigned int label; - float score; - float x_min; - float 
y_min; - float x_max; - float y_max; - }; - - vector dets; - - auto det = detections->get(); - std::vector detData = det->getFirstLayerFp16(); - if(detData.size() > 0) { - int i = 0; - while(detData[i * 7] != -1.0f) { - Detection d; - d.label = detData[i * 7 + 1]; - d.score = detData[i * 7 + 2]; - d.x_min = detData[i * 7 + 3]; - d.y_min = detData[i * 7 + 4]; - d.x_max = detData[i * 7 + 5]; - d.y_max = detData[i * 7 + 6]; - i++; - dets.push_back(d); - } - } - - for(const auto& d : dets) { - int x1 = d.x_min * frame.cols; - int y1 = d.y_min * frame.rows; - int x2 = d.x_max * frame.cols; - int y2 = d.y_max * frame.rows; - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), cv::Scalar(255, 255, 255)); - } - - printf("===================== %lu detection(s) =======================\n", dets.size()); - for(unsigned det = 0; det < dets.size(); ++det) { - printf("%5d | %6.4f | %7.4f | %7.4f | %7.4f | %7.4f\n", - dets[det].label, - dets[det].score, - dets[det].x_min, - dets[det].y_min, - dets[det].x_max, - dets[det].y_max); - } - - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } - - return 0; -} diff --git a/examples/src/camera_mobilenet_sync_example.cpp b/examples/src/camera_mobilenet_sync_example.cpp index e09d5fbe87..e6220f0a8a 100644 --- a/examples/src/camera_mobilenet_sync_example.cpp +++ b/examples/src/camera_mobilenet_sync_example.cpp @@ -21,9 +21,11 @@ int main(int argc, char** argv) { nnPath = std::string(argv[1]); } + // Create pipeline dai::Pipeline pipeline; - auto colorCam = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto nn = pipeline.create(); auto camOut = pipeline.create(); auto passthroughMeta = pipeline.create(); @@ -35,11 +37,11 @@ int main(int argc, char** argv) { resultOut->setStreamName("resultOut"); // ColorCamera options - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); 
- colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(CAMERA_FPS); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(CAMERA_FPS); // NN input options nn->input.setBlocking(false); @@ -49,8 +51,8 @@ int main(int argc, char** argv) { nn->setNumInferenceThreads(2); // Link nodes CAM -> XLINK - colorCam->preview.link(nn->input); - colorCam->preview.link(camOut->input); + camRgb->preview.link(nn->input); + camRgb->preview.link(camOut->input); nn->passthrough.link(passthroughMeta->input); nn->out.link(resultOut->input); diff --git a/examples/src/camera_preview_example.cpp b/examples/src/camera_preview_example.cpp deleted file mode 100644 index 82b7c99c7b..0000000000 --- a/examples/src/camera_preview_example.cpp +++ /dev/null @@ -1,63 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -dai::Pipeline createCameraPipeline() { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("preview"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - - // Link plugins CAM -> XLINK - colorCam->preview.link(xlinkOut->input); - - return p; -} - -int main() { - using namespace std; - - dai::Pipeline p = createCameraPipeline(); - - // Start the pipeline - dai::Device d; - - cout << "Connected cameras: "; - for(const auto& cam : d.getConnectedCameras()) { - cout << static_cast(cam) << " "; - } - cout << endl; - - // Start the pipeline - d.startPipeline(p); - - cv::Mat frame; - auto preview = d.getOutputQueue("preview"); - - while(1) { - auto imgFrame = 
preview->get(); - if(imgFrame) { - printf("Frame - w: %d, h: %d\n", imgFrame->getWidth(), imgFrame->getHeight()); - frame = cv::Mat(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC3, imgFrame->getData().data()); - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; - } - } - - return 0; -} diff --git a/examples/src/camera_video_example.cpp b/examples/src/camera_video_example.cpp deleted file mode 100644 index c610446b93..0000000000 --- a/examples/src/camera_video_example.cpp +++ /dev/null @@ -1,63 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -dai::Pipeline createCameraFullPipeline() { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("video"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - - // Link plugins CAM -> XLINK - colorCam->video.link(xlinkOut->input); - - return p; -} - -int main() { - using namespace std; - using namespace std::chrono; - - // Create pipeline - dai::Pipeline p = createCameraFullPipeline(); - - // Connect and start the pipeline - dai::Device d(p); - - cv::Mat frame; - auto video = d.getOutputQueue("video"); - - while(1) { - auto imgFrame = video->get(); - if(imgFrame) { - auto dur = steady_clock::now() - imgFrame->getTimestamp(); - - printf("Frame - w: %d, h: %d, latency: %ldms\n", imgFrame->getWidth(), imgFrame->getHeight(), duration_cast(dur).count()); - - frame = cv::Mat(imgFrame->getHeight() * 3 / 2, imgFrame->getWidth(), CV_8UC1, imgFrame->getData().data()); - - cv::Mat rgb(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC3); - - cv::cvtColor(frame, rgb, cv::COLOR_YUV2BGR_NV12); - - cv::imshow("video", rgb); - int key = cv::waitKey(1); - if(key == 
'q') { - return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; - } - } - return 0; -} \ No newline at end of file diff --git a/examples/src/depth_crop_control.cpp b/examples/src/depth_crop_control.cpp new file mode 100644 index 0000000000..4265bffbd8 --- /dev/null +++ b/examples/src/depth_crop_control.cpp @@ -0,0 +1,110 @@ +/** + * This example shows usage of depth camera in crop mode with the possibility to move the crop. + * Use 'WASD' in order to do it. + */ +#include + +// Includes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Step size ('W','A','S','D' controls) +static constexpr float stepSize = 0.02; + +static std::atomic sendCamConfig{false}; + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto manip = pipeline.create(); + auto stereo = pipeline.create(); + + auto configIn = pipeline.create(); + auto xout = pipeline.create(); + + configIn->setStreamName("config"); + xout->setStreamName("depth"); + + // Crop range + dai::Point2f topLeft(0.2, 0.2); + dai::Point2f bottomRight(0.8, 0.8); + + // Properties + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + + manip->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + manip->setMaxOutputFrameSize(monoRight->getResolutionHeight() * monoRight->getResolutionWidth() * 3); + stereo->setConfidenceThreshold(200); + + // Linking + configIn->out.link(manip->inputConfig); + stereo->depth.link(manip->inputImage); + manip->out.link(xout->input); + monoRight->out.link(stereo->left); + monoLeft->out.link(stereo->right); + + // Connect to device and start pipeline + 
dai::Device device(pipeline); + + // Queues + auto q = device.getOutputQueue(xout->getStreamName(), 4, false); + auto configQueue = device.getInputQueue(configIn->getStreamName()); + + while(true) { + auto inDepth = q->get(); + cv::Mat depthFrame = inDepth->getFrame(); + // Frame is transformed, the color map will be applied to highlight the depth info + cv::Mat depthFrameColor; + cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); + cv::equalizeHist(depthFrameColor, depthFrameColor); + cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); + + // Frame is ready to be shown + cv::imshow("depth", depthFrameColor); + + // Update screen (10ms pooling rate) + int key = cv::waitKey(10); + if(key == 'q') { + break; + } else if(key == 'w') { + if(topLeft.y - stepSize >= 0) { + topLeft.y -= stepSize; + bottomRight.y -= stepSize; + sendCamConfig = true; + } + } else if(key == 'a') { + if(topLeft.x - stepSize >= 0) { + topLeft.x -= stepSize; + bottomRight.x -= stepSize; + sendCamConfig = true; + } + } else if(key == 's') { + if(bottomRight.y + stepSize <= 1) { + topLeft.y += stepSize; + bottomRight.y += stepSize; + sendCamConfig = true; + } + } else if(key == 'd') { + if(bottomRight.x + stepSize <= 1) { + topLeft.x += stepSize; + bottomRight.x += stepSize; + sendCamConfig = true; + } + } + + // Send new config to camera + if(sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + configQueue->send(cfg); + sendCamConfig = false; + } + } + return 0; +} diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp new file mode 100644 index 0000000000..12054b0800 --- /dev/null +++ b/examples/src/depth_preview.cpp @@ -0,0 +1,68 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Closer-in minimum depth, disparity range is doubled (from 95 to 190): +static std::atomic 
extended_disparity{false}; +// Better accuracy for longer distance, fractional disparity 32-levels: +static std::atomic subpixel{false}; +// Better handling for occlusions: +static std::atomic lr_check{false}; + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto depth = pipeline.create(); + auto xout = pipeline.create(); + + xout->setStreamName("disparity"); + + // Properties + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + + // Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) + depth->setConfidenceThreshold(200); + // Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) + depth->setMedianFilter(dai::StereoDepthProperties::MedianFilter::KERNEL_7x7); + depth->setLeftRightCheck(lr_check); + depth->setExtendedDisparity(extended_disparity); + depth->setSubpixel(subpixel); + + // Linking + monoLeft->out.link(depth->left); + monoRight->out.link(depth->right); + depth->disparity.link(xout->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queue will be used to get the disparity frames from the outputs defined above + auto q = device.getOutputQueue("disparity", 4, false); + + while(true) { + auto inDepth = q->get(); + auto frame = inDepth->getFrame(); + // Normalization for better visualization + frame.convertTo(frame, CV_8UC1, 255 / depth->getMaxDisparity()); + + cv::imshow("disparity", frame); + + // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + cv::applyColorMap(frame, frame, cv::COLORMAP_JET); + cv::imshow("disparity_color", frame); + + int key = 
cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/device_queue_event.cpp b/examples/src/device_queue_event.cpp new file mode 100644 index 0000000000..056cb54cbc --- /dev/null +++ b/examples/src/device_queue_event.cpp @@ -0,0 +1,50 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto camMono = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto xoutMono = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + xoutMono->setStreamName("mono"); + + // Properties + camRgb->setInterleaved(true); + camRgb->setPreviewSize(300, 300); + + // Linking + camRgb->preview.link(xoutRgb->input); + camMono->out.link(xoutMono->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Clear queue events + device.getQueueEvents(); + + while(true) { + auto ev = device.getQueueEvent(); + + if(ev == "rgb") { + auto rgb = device.getOutputQueue(ev)->get(); + cv::imshow("rgb", rgb->getFrame()); + } else if(ev == "mono") { + auto mono = device.getOutputQueue(ev)->get(); + cv::imshow("mono", mono->getFrame()); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/device_queue_event_example.cpp b/examples/src/device_queue_event_example.cpp deleted file mode 100644 index 4bfaf70b2f..0000000000 --- a/examples/src/device_queue_event_example.cpp +++ /dev/null @@ -1,64 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - - dai::Pipeline p; - - auto colorCam = p.create(); - auto xout = p.create(); - auto xout2 = 
p.create(); - auto videnc = p.create(); - - // XLinkOut - xout->setStreamName("mjpeg"); - xout2->setStreamName("preview"); - - // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); - - // VideoEncoder - videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::MJPEG); - - // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); - videnc->bitstream.link(xout->input); - - // Connect and start the pipeline - dai::Device d(p); - - // Sets queues size and behaviour - d.getOutputQueue("mjpeg", 8, false); - d.getOutputQueue("preview", 8, false); - - while(1) { - auto ev = d.getQueueEvent(); - if(ev == "preview") { - auto preview = d.getOutputQueue(ev)->get(); - cv::imshow("preview", cv::Mat(preview->getHeight(), preview->getWidth(), CV_8UC3, preview->getData().data())); - } else if(ev == "mjpeg") { - auto mjpeg = d.getOutputQueue(ev)->get(); - cv::Mat decodedFrame = cv::imdecode(cv::Mat(mjpeg->getData()), cv::IMREAD_COLOR); - cv::imshow("mjpeg", decodedFrame); - } - - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } - - return 0; -} diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp new file mode 100644 index 0000000000..8d29a53926 --- /dev/null +++ b/examples/src/encoding_max_limit.cpp @@ -0,0 +1,86 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Keyboard interrupt (Ctrl + C) detected +static std::atomic alive{true}; +static void sigintHandler(int signum) { + alive = false; +} + +int main() { + using namespace std; + using namespace std::chrono; + std::signal(SIGINT, &sigintHandler); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto 
monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto ve1 = pipeline.create(); + auto ve2 = pipeline.create(); + auto ve3 = pipeline.create(); + + auto ve1Out = pipeline.create(); + auto ve2Out = pipeline.create(); + auto ve3Out = pipeline.create(); + + ve1Out->setStreamName("ve1Out"); + ve2Out->setStreamName("ve2Out"); + ve3Out->setStreamName("ve3Out"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + + // Setting to 26fps will trigger error + ve1->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); + ve2->setDefaultProfilePreset(3840, 2160, 25, dai::VideoEncoderProperties::Profile::H265_MAIN); + ve3->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); + + // Linking + monoLeft->out.link(ve1->input); + camRgb->video.link(ve2->input); + monoRight->out.link(ve3->input); + + ve1->bitstream.link(ve1Out->input); + ve2->bitstream.link(ve2Out->input); + ve3->bitstream.link(ve3Out->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the encoded data from the output defined above + auto outQ1 = device.getOutputQueue("ve1Out", 30, true); + auto outQ2 = device.getOutputQueue("ve2Out", 30, true); + auto outQ3 = device.getOutputQueue("ve3Out", 30, true); + + // The .h264 / .h265 files are raw stream files (not playable yet) + auto videoFile1 = ofstream("mono1.h264", ios::binary); + auto videoFile2 = ofstream("color.h265", ios::binary); + auto videoFile3 = ofstream("mono2.h264", ios::binary); + cout << "Press Ctrl+C to stop encoding..." 
<< endl; + + while(alive) { + auto out1 = outQ1->get(); + videoFile1.write((char*)out1->getData().data(), out1->getData().size()); + auto out2 = outQ2->get(); + videoFile2.write((char*)out2->getData().data(), out2->getData().size()); + auto out3 = outQ3->get(); + videoFile3.write((char*)out3->getData().data(), out3->getData().size()); + } + + cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 25 -i mono1.h264 -c copy mono1.mp4" << endl; + cout << "ffmpeg -framerate 25 -i mono2.h264 -c copy mono2.mp4" << endl; + cout << "ffmpeg -framerate 25 -i color.h265 -c copy color.mp4" << endl; + + return 0; +} diff --git a/examples/src/h264_encoding_example.cpp b/examples/src/h264_encoding_example.cpp deleted file mode 100644 index ee16592267..0000000000 --- a/examples/src/h264_encoding_example.cpp +++ /dev/null @@ -1,70 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - - std::string h264Path("video.h264"); - - // If path specified, use that - if(argc > 1) { - h264Path = std::string(argv[1]); - } - - dai::Pipeline p; - - auto colorCam = p.create(); - auto xout = p.create(); - auto xout2 = p.create(); - auto videnc = p.create(); - - // XLinkOut - xout->setStreamName("h264"); - xout2->setStreamName("preview"); - - // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); - - // VideoEncoder - videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); - - // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); - 
videnc->bitstream.link(xout->input); - - // Connect and start the pipeline - dai::Device d(p); - - auto myfile = std::fstream(h264Path, std::ios::out | std::ios::binary); - - auto h264Queue = d.getOutputQueue("h264", 8, false); - auto previewQueue = d.getOutputQueue("preview", 8, false); - while(1) { - auto preview = previewQueue->get(); - cv::imshow("preview", cv::Mat(preview->getHeight(), preview->getWidth(), CV_8UC3, preview->getData().data())); - auto h264 = h264Queue->get(); - myfile.write((char*)h264->getData().data(), h264->getData().size()); - - int key = cv::waitKey(1); - if(key == 'q') { - break; - } - } - myfile.close(); - - std::cout << "To view the encoded data, convert the stream file " << h264Path << " into a video file (.mp4) using a command below:" << std::endl; - std::cout << "ffmpeg -framerate " << colorCam->getFps() << " -i " << h264Path << " -c copy video.mp4" << std::endl; - - return 0; -} diff --git a/examples/src/image_manip_example.cpp b/examples/src/image_manip_example.cpp index a3707b7b07..c6278529a6 100644 --- a/examples/src/image_manip_example.cpp +++ b/examples/src/image_manip_example.cpp @@ -1,6 +1,3 @@ - - -#include #include #include "utility.hpp" @@ -9,9 +6,11 @@ #include "depthai/depthai.hpp" int main() { + // Create pipeline dai::Pipeline pipeline; - auto colorCam = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto imageManip = pipeline.create(); auto imageManip2 = pipeline.create(); auto camOut = pipeline.create(); @@ -24,10 +23,11 @@ int main() { manipOut2->setStreamName("manip2"); manip2In->setStreamName("manip2In"); - colorCam->setPreviewSize(304, 304); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(304, 304); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + 
camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); // Create a center crop image manipulation imageManip->initialConfig.setCenterCrop(0.7f); @@ -37,20 +37,12 @@ int main() { imageManip2->initialConfig.setCropRect(0.1, 0.1, 0.3, 0.3); imageManip2->setWaitForConfigInput(true); - // Link nodes CAM -> XLINK - colorCam->preview.link(camOut->input); - - // Link nodes CAM -> imageManip -> XLINK - colorCam->preview.link(imageManip->inputImage); + // Linking + camRgb->preview.link(camOut->input); + camRgb->preview.link(imageManip->inputImage); imageManip->out.link(manipOut->input); - - // ImageManip -> ImageManip 2 imageManip->out.link(imageManip2->inputImage); - - // ImageManip2 -> XLinkOut imageManip2->out.link(manipOut2->input); - - // Host config -> image manip 2 manip2In->out.link(imageManip2->inputConfig); // Connect to device and start pipeline @@ -66,6 +58,7 @@ int main() { int frameCounter = 0; float xmin = 0.1f; float xmax = 0.3f; + while(true) { xmin += 0.003f; xmax += 0.003f; @@ -99,4 +92,5 @@ int main() { frameCounter++; } + return 0; } diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index 075978feee..8e1daacc1f 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -1,47 +1,43 @@ - -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -int main(int argc, char** argv) { - using namespace std; +int main() { using namespace std::chrono; - dai::Pipeline p; + // Create pipeline + dai::Pipeline pipeline; - auto colorCam = p.create(); - auto xout = p.create(); - auto xout2 = p.create(); - auto videnc = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto xout = pipeline.create(); + auto xout2 = pipeline.create(); + auto videnc = pipeline.create(); - // XLinkOut xout->setStreamName("mjpeg"); 
xout2->setStreamName("preview"); // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + // camRgb->setFps(5.0); + camRgb->setInterleaved(true); // VideoEncoder videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::MJPEG); - // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); + // Linking + camRgb->video.link(videnc->input); + camRgb->preview.link(xout2->input); videnc->bitstream.link(xout->input); - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto mjpegQueue = d.getOutputQueue("mjpeg", 8, false); - auto previewQueue = d.getOutputQueue("preview", 8, false); - while(1) { + auto mjpegQueue = device.getOutputQueue("mjpeg", 8, false); + auto previewQueue = device.getOutputQueue("preview", 8, false); + while(true) { auto t1 = steady_clock::now(); auto preview = previewQueue->get(); auto t2 = steady_clock::now(); @@ -69,6 +65,5 @@ int main(int argc, char** argv) { return 0; } } - return 0; } diff --git a/examples/src/mobilenet_device_side_decoding_example.cpp b/examples/src/mobilenet_device_side_decoding_example.cpp deleted file mode 100644 index 29347768f0..0000000000 --- a/examples/src/mobilenet_device_side_decoding_example.cpp +++ /dev/null @@ -1,124 +0,0 @@ -#include -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", - "car", "cat", "chair", "cow", "diningtable", "dog", "horse", - "motorbike", "person", "pottedplant", 
"sheep", "sofa", "train", "tvmonitor"}; - -static bool syncNN = true; - -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto detectionNetwork = p.create(); - auto nnOut = p.create(); - - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); - - // testing MobileNet DetectionNetwork - detectionNetwork->setConfidenceThreshold(0.5f); - detectionNetwork->setBlobPath(nnPath); - - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); - if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); - } else { - colorCam->preview.link(xlinkOut->input); - } - - detectionNetwork->out.link(nnOut->input); - - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); - - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - cv::Mat frame = 
imgFrame->getCvFrame(); - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; - } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); - } - - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); - - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } - - return 0; -} \ No newline at end of file diff --git a/examples/src/mono_camera_control.cpp b/examples/src/mono_camera_control.cpp new file mode 100644 index 0000000000..8636c0f43a --- /dev/null +++ b/examples/src/mono_camera_control.cpp @@ -0,0 +1,149 @@ +/** + * This example shows usage of mono camera in crop mode with the possibility to move the crop. + * Uses 'WASD' controls to move the crop window, 'T' to trigger autofocus, 'IOKL,.' 
for manual exposure/focus: + * Control: key[dec/inc] min..max + * exposure time: I O 1..33000 [us] + * sensitivity iso: K L 100..1600 + * To go back to auto controls: + * 'E' - autoexposure + */ +#include + +// Includes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Step size ('W','A','S','D' controls) +static constexpr float stepSize = 0.02; + +// Manual exposure/focus set step +static constexpr int EXP_STEP = 500; // us +static constexpr int ISO_STEP = 50; + +static int clamp(int num, int v0, int v1) { + return std::max(v0, std::min(num, v1)); +} + +static std::atomic sendCamConfig{false}; + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto manipRight = pipeline.create(); + auto manipLeft = pipeline.create(); + + auto controlIn = pipeline.create(); + auto configIn = pipeline.create(); + auto manipOutRight = pipeline.create(); + auto manipOutLeft = pipeline.create(); + + controlIn->setStreamName("control"); + configIn->setStreamName("config"); + manipOutRight->setStreamName("right"); + manipOutLeft->setStreamName("left"); + + // Crop range + dai::Point2f topLeft(0.2, 0.2); + dai::Point2f bottomRight(0.8, 0.8); + + // Properties + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + manipRight->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + manipLeft->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + + // Linking + monoRight->out.link(manipRight->inputImage); + monoLeft->out.link(manipLeft->inputImage); + controlIn->out.link(monoRight->inputControl); + 
controlIn->out.link(monoLeft->inputControl); + configIn->out.link(manipRight->inputConfig); + configIn->out.link(manipLeft->inputConfig); + manipRight->out.link(manipOutRight->input); + manipLeft->out.link(manipOutLeft->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the grayscale frames + auto qRight = device.getOutputQueue(manipOutRight->getStreamName(), 4, false); + auto qLeft = device.getOutputQueue(manipOutLeft->getStreamName(), 4, false); + auto controlQueue = device.getInputQueue(controlIn->getStreamName()); + auto configQueue = device.getInputQueue(configIn->getStreamName()); + + // Defaults and limits for manual focus/exposure controls + int exp_time = 20000; + int exp_min = 1; + int exp_max = 33000; + + int sens_iso = 800; + int sens_min = 100; + int sens_max = 1600; + + while(true) { + auto inRight = qRight->get(); + auto inLeft = qLeft->get(); + cv::imshow("right", inRight->getCvFrame()); + cv::imshow("left", inLeft->getCvFrame()); + + // Update screen (1ms pooling rate) + int key = cv::waitKey(1); + if(key == 'q') { + break; + } else if(key == 'e') { + printf("Autoexposure enable\n"); + dai::CameraControl ctrl; + ctrl.setAutoExposureEnable(); + controlQueue->send(ctrl); + } else if(key == 'i' || key == 'o' || key == 'k' || key == 'l') { + if(key == 'i') exp_time -= EXP_STEP; + if(key == 'o') exp_time += EXP_STEP; + if(key == 'k') sens_iso -= ISO_STEP; + if(key == 'l') sens_iso += ISO_STEP; + exp_time = clamp(exp_time, exp_min, exp_max); + sens_iso = clamp(sens_iso, sens_min, sens_max); + printf("Setting manual exposure, time: %d, iso: %d\n", exp_time, sens_iso); + dai::CameraControl ctrl; + ctrl.setManualExposure(exp_time, sens_iso); + controlQueue->send(ctrl); + } else if(key == 'w') { + if(topLeft.y - stepSize >= 0) { + topLeft.y -= stepSize; + bottomRight.y -= stepSize; + sendCamConfig = true; + } + } else if(key == 'a') { + if(topLeft.x - stepSize >= 0) { + topLeft.x -= 
stepSize; + bottomRight.x -= stepSize; + sendCamConfig = true; + } + } else if(key == 's') { + if(bottomRight.y + stepSize <= 1) { + topLeft.y += stepSize; + bottomRight.y += stepSize; + sendCamConfig = true; + } + } else if(key == 'd') { + if(bottomRight.x + stepSize <= 1) { + topLeft.x += stepSize; + bottomRight.x += stepSize; + sendCamConfig = true; + } + } + + // Send new config to camera + if(sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + configQueue->send(cfg); + sendCamConfig = false; + } + } + return 0; +} diff --git a/examples/src/mono_camera_example.cpp b/examples/src/mono_camera_example.cpp deleted file mode 100644 index 730b736d92..0000000000 --- a/examples/src/mono_camera_example.cpp +++ /dev/null @@ -1,57 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -dai::Pipeline createMonoPipeline() { - dai::Pipeline p; - - auto monoCam = p.create(); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("mono"); - - // Set camera socket - monoCam->setBoardSocket(dai::CameraBoardSocket::RIGHT); - - // Link plugins CAM -> XLINK - monoCam->out.link(xlinkOut->input); - - return p; -} - -int main() { - using namespace std; - using namespace std::chrono; - - // Create pipeline - dai::Pipeline p = createMonoPipeline(); - - // Connect and start the pipeline - dai::Device d(p); - - cv::Mat frame; - auto monoQueue = d.getOutputQueue("mono"); - - while(1) { - auto imgFrame = monoQueue->get(); - if(imgFrame) { - int latencyMs = duration_cast(steady_clock::now() - imgFrame->getTimestamp()).count(); - printf("Frame - w: %d, h: %d, latency %dms\n", imgFrame->getWidth(), imgFrame->getHeight(), latencyMs); - - frame = cv::Mat(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC1, imgFrame->getData().data()); - - cv::imshow("video", frame); - int key = cv::waitKey(1); - if(key == 'q') { - 
return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; - } - } - return 0; -} \ No newline at end of file diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp new file mode 100644 index 0000000000..cfa5b5812c --- /dev/null +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -0,0 +1,153 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +static std::atomic flipRectified{true}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto stereo = pipeline.create(); + auto manip = pipeline.create(); + auto nn = pipeline.create(); + + auto disparityOut = pipeline.create(); + auto xoutRight = pipeline.create(); + auto nnOut = pipeline.create(); + + disparityOut->setStreamName("disparity"); + xoutRight->setStreamName("rectifiedRight"); + nnOut->setStreamName("nn"); + + // Properties + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + // Produce the 
depth map (using disparity output as it's easier to visualize depth this way) + stereo->setConfidenceThreshold(255); + stereo->setRectifyEdgeFillColor(0); // Black, to better see the cutout from rectification (black stripe on the edges) + // Convert the grayscale frame into the nn-acceptable form + manip->initialConfig.setResize(300, 300); + // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + + // Define a neural network that will make predictions based on the source frames + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + monoRight->out.link(stereo->right); + monoLeft->out.link(stereo->left); + stereo->rectifiedRight.link(manip->inputImage); + stereo->disparity.link(disparityOut->input); + manip->out.link(nn->input); + manip->out.link(xoutRight->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the grayscale / depth frames and nn data from the outputs defined above + auto qRight = device.getOutputQueue("rectifiedRight", 4, false); + auto qDisparity = device.getOutputQueue("disparity", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + cv::Mat rightFrame; + cv::Mat disparityFrame; + std::vector detections; + + // Add bounding boxes and text to the frame and show it to the user + auto show = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 192, 203); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = 
detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + float disparityMultiplier = 255 / stereo->getMaxDisparity(); + + while(true) { + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inRight = qRight->tryGet(); + auto inDet = qDet->tryGet(); + auto inDisparity = qDisparity->tryGet(); + + if(inRight) { + rightFrame = inRight->getCvFrame(); + if(flipRectified) { + cv::flip(rightFrame, rightFrame, 1); + } + } + + if(inDet) { + detections = inDet->detections; + if(flipRectified) { + for(auto& detection : detections) { + auto swap = detection.xmin; + detection.xmin = 1 - detection.xmax; + detection.xmax = 1 - swap; + } + } + } + + if(inDisparity) { + // Frame is transformed, normalized, and color map will be applied to highlight the depth info + disparityFrame = inDisparity->getFrame(); + disparityFrame.convertTo(disparityFrame, CV_8UC1, disparityMultiplier); + // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); + show("disparity", disparityFrame, detections); + } + + if(!rightFrame.empty()) { + show("rectified right", rightFrame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') return 0; + } + return 0; +} diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp new 
file mode 100644 index 0000000000..a3e98346a0 --- /dev/null +++ b/examples/src/mono_full_resolution_saver.cpp @@ -0,0 +1,53 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" +#include "utility.hpp" + +int main() { + using namespace std::chrono; + // Create pipeline + dai::Pipeline pipeline; + + // Define source and output + auto monoRight = pipeline.create(); + auto xoutRight = pipeline.create(); + + xoutRight->setStreamName("right"); + + // Properties + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + + // Linking + monoRight->out.link(xoutRight->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queue will be used to get the grayscale frames from the output defined above + auto qRight = device.getOutputQueue("right", 4, false); + + std::string dirName = "mono_data"; + createDirectory(dirName); + + while(true) { + auto inRight = qRight->get(); + // Data is originally represented as a flat 1D array, it needs to be converted into HxW form + // Frame is transformed and ready to be shown + cv::imshow("right", inRight->getCvFrame()); + + uint64_t time = duration_cast(system_clock::now().time_since_epoch()).count(); + std::stringstream videoStr; + videoStr << dirName << "/" << time << ".png"; + // After showing the frame, it's being stored inside a target directory as a PNG image + cv::imwrite(videoStr.str(), inRight->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp new file mode 100644 index 0000000000..bb16d56034 --- /dev/null +++ b/examples/src/mono_mobilenet.cpp @@ -0,0 +1,116 @@ +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include 
"depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoRight = pipeline.create(); + auto manip = pipeline.create(); + auto nn = pipeline.create(); + auto manipOut = pipeline.create(); + auto nnOut = pipeline.create(); + + manipOut->setStreamName("right"); + nnOut->setStreamName("nn"); + + // Properties + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + + // Convert the grayscale frame into the nn-acceptable form + manip->initialConfig.setResize(300, 300); + // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + monoRight->out.link(manip->inputImage); + manip->out.link(nn->input); + manip->out.link(manipOut->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the grayscale frames and nn data from the outputs defined above + auto qRight = device.getOutputQueue("right", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + cv::Mat frame; + std::vector detections; + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + while(true) { + // Instead of get (blocking), we use tryGet (nonblocking) which will 
return the available data or None otherwise + auto inRight = qRight->tryGet(); + auto inDet = qDet->tryGet(); + + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + displayFrame("right", frame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') return 0; + } + return 0; +} diff --git a/examples/src/mono_preview.cpp b/examples/src/mono_preview.cpp new file mode 100644 index 0000000000..44558145ed --- /dev/null +++ b/examples/src/mono_preview.cpp @@ -0,0 +1,55 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto xoutLeft = pipeline.create(); + auto xoutRight = pipeline.create(); + + xoutLeft->setStreamName("left"); + xoutRight->setStreamName("right"); + + // Properties + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + + // Linking + monoRight->out.link(xoutRight->input); + monoLeft->out.link(xoutLeft->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the grayscale frames from the outputs defined above + auto qLeft = device.getOutputQueue("left", 4, false); + auto qRight = device.getOutputQueue("right", 4, false); + + while(true) { + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inLeft = qLeft->tryGet(); + auto inRight = qRight->tryGet(); + + if(inLeft) { + cv::imshow("left", inLeft->getCvFrame()); + } + + if(inRight) { + 
cv::imshow("right", inRight->getCvFrame()); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/object_tracker_example.cpp b/examples/src/object_tracker.cpp similarity index 55% rename from examples/src/object_tracker_example.cpp rename to examples/src/object_tracker.cpp index c691642ad7..dc43559c2b 100644 --- a/examples/src/object_tracker_example.cpp +++ b/examples/src/object_tracker.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -11,76 +8,78 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic fullFrameTracking{false}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto detectionNetwork = p.create(); - auto objectTracker = p.create(); - auto trackerOut = p.create(); + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto objectTracker = pipeline.create(); + + auto xlinkOut = pipeline.create(); + auto trackerOut = pipeline.create(); xlinkOut->setStreamName("preview"); trackerOut->setStreamName("tracklets"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - 
colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); // testing MobileNet DetectionNetwork - detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); - - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); - if(syncNN) { - objectTracker->passthroughTrackerFrame.link(xlinkOut->input); - } else { - colorCam->preview.link(xlinkOut->input); - } + detectionNetwork->setConfidenceThreshold(0.5f); + detectionNetwork->input.setBlocking(false); objectTracker->setDetectionLabelsToTrack({15}); // track only person + // possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + // take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); - detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); - detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); - detectionNetwork->out.link(objectTracker->inputDetections); - objectTracker->out.link(trackerOut->input); - - return p; -} + // Linking + camRgb->preview.link(detectionNetwork->input); + objectTracker->passthroughTrackerFrame.link(xlinkOut->input); -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); + if(fullFrameTracking) { + camRgb->video.link(objectTracker->inputTrackerFrame); + } else { + 
detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); } - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); + detectionNetwork->out.link(objectTracker->inputDetections); + objectTracker->out.link(trackerOut->input); - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto preview = d.getOutputQueue("preview", 4, false); - auto tracklets = d.getOutputQueue("tracklets", 4, false); + auto preview = device.getOutputQueue("preview", 4, false); + auto tracklets = device.getOutputQueue("tracklets", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - while(1) { + + while(true) { auto imgFrame = preview->get(); auto track = tracklets->get(); @@ -94,10 +93,10 @@ int main(int argc, char** argv) { } auto color = cv::Scalar(255, 0, 0); - cv::Mat trackletFrame = imgFrame->getCvFrame(); + cv::Mat frame = imgFrame->getCvFrame(); auto trackletsData = track->tracklets; for(auto& t : trackletsData) { - auto roi = t.roi.denormalize(trackletFrame.cols, trackletFrame.rows); + auto roi = t.roi.denormalize(frame.cols, frame.rows); int x1 = roi.topLeft().x; int y1 = roi.topLeft().y; int x2 = roi.bottomRight().x; @@ -108,29 +107,28 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(trackletFrame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream idStr; idStr << "ID: " << t.id; - cv::putText(trackletFrame, idStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, 
color); std::stringstream statusStr; statusStr << "Status: " << t.status; - cv::putText(trackletFrame, statusStr.str(), cv::Point(x1 + 10, y1 + 60), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 60), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(trackletFrame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(trackletFrame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + fpsStr << "NN fps:" << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); - cv::imshow("tracker", trackletFrame); + cv::imshow("tracker", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} \ No newline at end of file +} diff --git a/examples/src/opencv_support.cpp b/examples/src/opencv_support.cpp new file mode 100644 index 0000000000..e55af8c384 --- /dev/null +++ b/examples/src/opencv_support.cpp @@ -0,0 +1,52 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Include OpenCV +#include + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define source and outputs + auto camRgb = pipeline.create(); + auto xoutVideo = pipeline.create(); + auto xoutPreview = pipeline.create(); + + xoutVideo->setStreamName("video"); + xoutPreview->setStreamName("preview"); + + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(true); + 
camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + + // Linking + camRgb->video.link(xoutVideo->input); + camRgb->preview.link(xoutPreview->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + auto video = device.getOutputQueue("video"); + auto preview = device.getOutputQueue("preview"); + + while(true) { + auto videoFrame = video->get(); + auto previewFrame = preview->get(); + + // Get BGR frame from NV12 encoded video frame to show with opencv + cv::imshow("video", videoFrame->getCvFrame()); + + // Show 'preview' frame as is (already in correct format, no copy is made) + cv::imshow("preview", previewFrame->getFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') return 0; + } + return 0; +} diff --git a/examples/src/opencv_support_example.cpp b/examples/src/opencv_support_example.cpp deleted file mode 100644 index 4b8ca55410..0000000000 --- a/examples/src/opencv_support_example.cpp +++ /dev/null @@ -1,56 +0,0 @@ -#include -#include - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -// Include OpenCV -#include - -dai::Pipeline createCameraFullPipeline() { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto xlinkOut2 = p.create(); - xlinkOut->setStreamName("video"); - xlinkOut2->setStreamName("preview"); - - colorCam->setPreviewSize(320, 180); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - - // Link plugins CAM -> XLINK - colorCam->video.link(xlinkOut->input); - colorCam->preview.link(xlinkOut2->input); - - return p; -} - -int main() { - using namespace std; - using namespace std::chrono; - - // Create pipeline - dai::Pipeline p = createCameraFullPipeline(); - - // Connect and start the pipeline - dai::Device d(p); - - auto video = d.getOutputQueue("video"); - auto preview = d.getOutputQueue("preview"); - - while(1) { - // 
Retrieves video ImgFrame and converts a cv::Mat copy in BGR format (suitable for opencv usage) - auto videoFrame = video->get(); - cv::imshow("video", videoFrame->getCvFrame()); - - // Retrieves preview ImgFrame and returns (without copying deepCopy = false) cv::Mat - auto previewFrame = preview->get(); - cv::imshow("preview", previewFrame->getFrame()); - - // Waits a bit and updates windows - if(cv::waitKey(1) == 'q') break; - } - return 0; -} \ No newline at end of file diff --git a/examples/src/color_camera_control_example.cpp b/examples/src/rgb_camera_control.cpp similarity index 57% rename from examples/src/color_camera_control_example.cpp rename to examples/src/rgb_camera_control.cpp index 5bd33ade47..791b4b900a 100644 --- a/examples/src/color_camera_control_example.cpp +++ b/examples/src/rgb_camera_control.cpp @@ -10,7 +10,6 @@ * 'E' - autoexposure * 'F' - autofocus (continuous) */ -#include #include #include "utility.hpp" @@ -19,7 +18,7 @@ #include "depthai/depthai.hpp" // Step size ('W','A','S','D' controls) -static constexpr int STEP_SIZE = 16; +static constexpr int STEP_SIZE = 8; // Manual exposure/focus set step static constexpr int EXP_STEP = 500; // us @@ -31,68 +30,72 @@ static int clamp(int num, int v0, int v1) { } int main() { + // Create pipeline dai::Pipeline pipeline; - // Nodes - auto colorCam = pipeline.create(); - auto controlIn = pipeline.create(); - auto configIn = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); auto stillEncoder = pipeline.create(); + + auto controlIn = pipeline.create(); + auto configIn = pipeline.create(); auto videoMjpegOut = pipeline.create(); auto stillMjpegOut = pipeline.create(); auto previewOut = pipeline.create(); - // Properties - colorCam->setVideoSize(640, 360); - colorCam->setPreviewSize(300, 300); controlIn->setStreamName("control"); configIn->setStreamName("config"); - videoEncoder->setDefaultProfilePreset(colorCam->getVideoSize(), 
colorCam->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); - stillEncoder->setDefaultProfilePreset(colorCam->getStillSize(), 1, dai::VideoEncoderProperties::Profile::MJPEG); videoMjpegOut->setStreamName("video"); stillMjpegOut->setStreamName("still"); previewOut->setStreamName("preview"); - // Link nodes - colorCam->video.link(videoEncoder->input); - colorCam->still.link(stillEncoder->input); - colorCam->preview.link(previewOut->input); - controlIn->out.link(colorCam->inputControl); - configIn->out.link(colorCam->inputConfig); + // Properties + camRgb->setVideoSize(640, 360); + camRgb->setPreviewSize(300, 300); + videoEncoder->setDefaultProfilePreset(camRgb->getVideoSize(), camRgb->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); + stillEncoder->setDefaultProfilePreset(camRgb->getStillSize(), 1, dai::VideoEncoderProperties::Profile::MJPEG); + + // Linking + camRgb->video.link(videoEncoder->input); + camRgb->still.link(stillEncoder->input); + camRgb->preview.link(previewOut->input); + controlIn->out.link(camRgb->inputControl); + configIn->out.link(camRgb->inputConfig); videoEncoder->bitstream.link(videoMjpegOut->input); stillEncoder->bitstream.link(stillMjpegOut->input); - // Connect and start the pipeline - dai::Device dev(pipeline); + // Connect to device and start pipeline + dai::Device device(pipeline); - // Create data queues - auto controlQueue = dev.getInputQueue("control"); - auto configQueue = dev.getInputQueue("config"); - auto previewQueue = dev.getOutputQueue("preview"); - auto videoQueue = dev.getOutputQueue("video"); - auto stillQueue = dev.getOutputQueue("still"); + // Get data queues + auto controlQueue = device.getInputQueue("control"); + auto configQueue = device.getInputQueue("config"); + auto previewQueue = device.getOutputQueue("preview"); + auto videoQueue = device.getOutputQueue("video"); + auto stillQueue = device.getOutputQueue("still"); - // Max crop_x & crop_y - float max_crop_x = (colorCam->getResolutionWidth() - 
colorCam->getVideoWidth()) / (float)colorCam->getResolutionWidth(); - float max_crop_y = (colorCam->getResolutionHeight() - colorCam->getVideoHeight()) / (float)colorCam->getResolutionHeight(); + // Max cropX & cropY + float maxCropX = (camRgb->getResolutionWidth() - camRgb->getVideoWidth()) / (float)camRgb->getResolutionWidth(); + float maxCropY = (camRgb->getResolutionHeight() - camRgb->getVideoHeight()) / (float)camRgb->getResolutionHeight(); // Default crop - float crop_x = 0; - float crop_y = 0; + float cropX = 0; + float cropY = 0; + bool sendCamConfig = true; // Defaults and limits for manual focus/exposure controls - int lens_pos = 150; - int lens_min = 0; - int lens_max = 255; + int lensPos = 150; + int lensMin = 0; + int lensMax = 255; - int exp_time = 20000; - int exp_min = 1; - int exp_max = 33000; + int expTime = 20000; + int expMin = 1; + int expMax = 33000; - int sens_iso = 800; - int sens_min = 100; - int sens_max = 1600; + int sensIso = 800; + int sensMin = 100; + int sensMax = 1600; while(true) { auto previewFrames = previewQueue->tryGetAll(); @@ -107,6 +110,15 @@ int main() { auto frame = cv::imdecode(videoFrame->getData(), cv::IMREAD_UNCHANGED); // Display cv::imshow("video", frame); + + // Send new cfg to camera + if(sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(cropX, cropY, 0, 0); + configQueue->send(cfg); + printf("Sending new crop - x: %f, y: %f\n", cropX, cropY); + sendCamConfig = false; + } } auto stillFrames = stillQueue->tryGetAll(); @@ -117,8 +129,8 @@ int main() { cv::imshow("still", frame); } - // Update screen (10ms pooling rate) - int key = cv::waitKey(10); + // Update screen (1ms pooling rate) + int key = cv::waitKey(1); if(key == 'q') { break; } else if(key == 'c') { @@ -142,44 +154,40 @@ int main() { ctrl.setAutoExposureEnable(); controlQueue->send(ctrl); } else if(key == ',' || key == '.') { - if(key == ',') lens_pos -= LENS_STEP; - if(key == '.') lens_pos += LENS_STEP; - lens_pos = clamp(lens_pos, lens_min, 
lens_max); - printf("Setting manual focus, lens position: %d\n", lens_pos); + if(key == ',') lensPos -= LENS_STEP; + if(key == '.') lensPos += LENS_STEP; + lensPos = clamp(lensPos, lensMin, lensMax); + printf("Setting manual focus, lens position: %d\n", lensPos); dai::CameraControl ctrl; - ctrl.setManualFocus(lens_pos); + ctrl.setManualFocus(lensPos); controlQueue->send(ctrl); } else if(key == 'i' || key == 'o' || key == 'k' || key == 'l') { - if(key == 'i') exp_time -= EXP_STEP; - if(key == 'o') exp_time += EXP_STEP; - if(key == 'k') sens_iso -= ISO_STEP; - if(key == 'l') sens_iso += ISO_STEP; - exp_time = clamp(exp_time, exp_min, exp_max); - sens_iso = clamp(sens_iso, sens_min, sens_max); - printf("Setting manual exposure, time %d us, iso %d\n", exp_time, sens_iso); + if(key == 'i') expTime -= EXP_STEP; + if(key == 'o') expTime += EXP_STEP; + if(key == 'k') sensIso -= ISO_STEP; + if(key == 'l') sensIso += ISO_STEP; + expTime = clamp(expTime, expMin, expMax); + sensIso = clamp(sensIso, sensMin, sensMax); + printf("Setting manual exposure, time: %d, iso: %d\n", expTime, sensIso); dai::CameraControl ctrl; - ctrl.setManualExposure(exp_time, sens_iso); + ctrl.setManualExposure(expTime, sensIso); controlQueue->send(ctrl); } else if(key == 'w' || key == 'a' || key == 's' || key == 'd') { if(key == 'a') { - crop_x -= (max_crop_x / colorCam->getResolutionWidth()) * STEP_SIZE; - if(crop_x < 0) crop_x = max_crop_x; + cropX -= (maxCropX / camRgb->getResolutionWidth()) * STEP_SIZE; + if(cropX < 0) cropX = maxCropX; } else if(key == 'd') { - crop_x += (max_crop_x / colorCam->getResolutionWidth()) * STEP_SIZE; - if(crop_x > max_crop_x) crop_x = 0.0f; + cropX += (maxCropX / camRgb->getResolutionWidth()) * STEP_SIZE; + if(cropX > maxCropX) cropX = 0.0f; } else if(key == 'w') { - crop_y -= (max_crop_y / colorCam->getResolutionHeight()) * STEP_SIZE; - if(crop_y < 0) crop_y = max_crop_y; + cropY -= (maxCropY / camRgb->getResolutionHeight()) * STEP_SIZE; + if(cropY < 0) cropY = 
maxCropY; } else if(key == 's') { - crop_y += (max_crop_y / colorCam->getResolutionHeight()) * STEP_SIZE; - if(crop_y > max_crop_y) crop_y = 0.0f; + cropY += (maxCropY / camRgb->getResolutionHeight()) * STEP_SIZE; + if(cropY > maxCropY) cropY = 0.0f; } - - // Send new cfg to camera - dai::ImageManipConfig cfg; - cfg.setCropRect(crop_x, crop_y, 0, 0); - configQueue->send(cfg); - printf("Sending new crop - x: %f, y: %f\n", crop_x, crop_y); + sendCamConfig = true; } } + return 0; } diff --git a/examples/src/rgb_depth_aligned_example.cpp b/examples/src/rgb_depth_aligned.cpp similarity index 71% rename from examples/src/rgb_depth_aligned_example.cpp rename to examples/src/rgb_depth_aligned.cpp index 851d185844..98c0bffcdb 100644 --- a/examples/src/rgb_depth_aligned_example.cpp +++ b/examples/src/rgb_depth_aligned.cpp @@ -8,65 +8,69 @@ // Optional. If set (true), the ColorCamera is downscaled from 1080p to 720p. // Otherwise (false), the aligned depth is automatically upscaled to 1080p -static constexpr bool downscaleColor = true; +static std::atomic downscaleColor{true}; int main() { using namespace std; - dai::Pipeline p; + // Create pipeline + dai::Pipeline pipeline; std::vector queueNames; - auto cam = p.create(); - cam->setBoardSocket(dai::CameraBoardSocket::RGB); - cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - if(downscaleColor) cam->setIspScale(2, 3); - // For now, RGB needs fixed focus to properly align with depth. 
- // This value was used during calibration - cam->initialControl.setManualFocus(130); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto left = pipeline.create(); + auto right = pipeline.create(); + auto stereo = pipeline.create(); + + auto rgbOut = pipeline.create(); + auto depthOut = pipeline.create(); - auto rgbOut = p.create(); rgbOut->setStreamName("rgb"); queueNames.push_back("rgb"); - cam->isp.link(rgbOut->input); + depthOut->setStreamName("depth"); + queueNames.push_back("depth"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + if(downscaleColor) camRgb->setIspScale(2, 3); + // For now, RGB needs fixed focus to properly align with depth. + // This value was used during calibration + camRgb->initialControl.setManualFocus(130); - auto left = p.create(); left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); left->setBoardSocket(dai::CameraBoardSocket::LEFT); - - auto right = p.create(); right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); right->setBoardSocket(dai::CameraBoardSocket::RIGHT); - auto stereo = p.create(); stereo->setConfidenceThreshold(200); // LR-check is required for depth alignment stereo->setLeftRightCheck(true); stereo->setDepthAlign(dai::CameraBoardSocket::RGB); + + // Linking + camRgb->isp.link(rgbOut->input); left->out.link(stereo->left); right->out.link(stereo->right); - - auto depthOut = p.create(); - depthOut->setStreamName("depth"); - queueNames.push_back("depth"); - // Currently we use the 'disparity' output. 
TODO 'depth' stereo->disparity.link(depthOut->input); // Connect to device and start pipeline - dai::Device d(p); + dai::Device device(pipeline); // Sets queues size and behavior for(const auto& name : queueNames) { - d.getOutputQueue(name, 4, false); + device.getOutputQueue(name, 4, false); } std::unordered_map frame; - while(1) { + while(true) { std::unordered_map> latestPacket; - auto queueEvents = d.getQueueEvents(queueNames); + auto queueEvents = device.getQueueEvents(queueNames); for(const auto& name : queueEvents) { - auto packets = d.getOutputQueue(name)->tryGetAll(); + auto packets = device.getOutputQueue(name)->tryGetAll(); auto count = packets.size(); if(count > 0) { latestPacket[name] = packets[count - 1]; @@ -77,8 +81,9 @@ int main() { if(latestPacket.find(name) != latestPacket.end()) { if(name == "depth") { frame[name] = latestPacket[name]->getFrame(); + auto maxDisparity = stereo->getMaxDisparity(); // Optional, extend range 0..95 -> 0..255, for a better visualisation - if(1) frame[name].convertTo(frame[name], CV_8UC1, 255. / 95); + if(1) frame[name].convertTo(frame[name], CV_8UC1, 255. 
/ maxDisparity); // Optional, apply false colorization if(1) cv::applyColorMap(frame[name], frame[name], cv::COLORMAP_HOT); } else { @@ -102,8 +107,9 @@ int main() { } int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } + return 0; } diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp new file mode 100644 index 0000000000..52a1920030 --- /dev/null +++ b/examples/src/rgb_encoding.cpp @@ -0,0 +1,55 @@ +#include +#include + +// Includes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Keyboard interrupt (Ctrl + C) detected +static std::atomic alive{true}; +static void sigintHandler(int signum) { + alive = false; +} + +int main(int argc, char** argv) { + using namespace std; + std::signal(SIGINT, &sigintHandler); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto videoEnc = pipeline.create(); + auto xout = pipeline.create(); + + xout->setStreamName("h265"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + // Linking + camRgb->video.link(videoEnc->input); + videoEnc->bitstream.link(xout->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queue will be used to get the encoded data from the output defined above + auto q = device.getOutputQueue("h265", 30, true); + + // The .h265 file is a raw stream file (not playable yet) + auto videoFile = std::ofstream("video.h265", std::ios::binary); + cout << "Press Ctrl+C to stop encoding..." 
<< endl; + + while(alive) { + auto h265Packet = q->get(); + videoFile.write((char*)(h265Packet->getData().data()), h265Packet->getData().size()); + } + + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; + + return 0; +} diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp new file mode 100644 index 0000000000..991e13189b --- /dev/null +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -0,0 +1,127 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto nn = pipeline.create(); + + auto xoutRgb = pipeline.create(); + auto videoOut = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + videoOut->setStreamName("h265"); + nnOut->setStreamName("nn"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setPreviewSize(300, 300); + 
camRgb->setInterleaved(false); + + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + camRgb->video.link(videoEncoder->input); + camRgb->preview.link(xoutRgb->input); + camRgb->preview.link(nn->input); + videoEncoder->bitstream.link(videoOut->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Queues + int queueSize = 8; + auto qRgb = device.getOutputQueue("rgb", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + cv::Mat frame; + std::vector detections; + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + auto videoFile = std::ofstream("video.h264", 
std::ios::binary); + + while(true) { + auto inRgb = qRgb->tryGet(); + auto inDet = qDet->tryGet(); + + auto out = qRgbEnc->get(); + videoFile.write((char*)out->getData().data(), out->getData().size()); + + if(inRgb) { + frame = inRgb->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + break; + } + } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << endl; + return 0; +} diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp new file mode 100644 index 0000000000..249459ec97 --- /dev/null +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -0,0 +1,162 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto monoRight = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto nn = pipeline.create(); + auto manip = pipeline.create(); + + auto videoOut = 
pipeline.create(); + auto xoutRight = pipeline.create(); + auto manipOut = pipeline.create(); + auto nnOut = pipeline.create(); + + videoOut->setStreamName("h265"); + xoutRight->setStreamName("right"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + manip->initialConfig.setResize(300, 300); + + // Linking + camRgb->video.link(videoEncoder->input); + videoEncoder->bitstream.link(videoOut->input); + monoRight->out.link(manip->inputImage); + manip->out.link(nn->input); + monoRight->out.link(xoutRight->input); + manip->out.link(manipOut->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Queues + int queueSize = 8; + auto qRight = device.getOutputQueue("right", queueSize); + auto qManip = device.getOutputQueue("manip", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + cv::Mat frame; + cv::Mat frameManip; + std::vector detections; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + + auto videoFile = std::ofstream("video.h265", std::ios::binary); + cv::namedWindow("right", cv::WINDOW_NORMAL); + cv::namedWindow("manip", 
cv::WINDOW_NORMAL); + + while(true) { + auto inRight = qRight->tryGet(); + auto inManip = qManip->tryGet(); + auto inDet = qDet->tryGet(); + + auto out1 = qRgbEnc->get(); + videoFile.write((char*)out1->getData().data(), out1->getData().size()); + + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inManip) { + frameManip = inManip->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow("right", frame); + } + + if(!frameManip.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 
100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow("manip", frameManip); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + break; + } + } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; + return 0; +} diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp new file mode 100644 index 0000000000..139fa6d00d --- /dev/null +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -0,0 +1,216 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +static std::atomic flipRectified{true}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto depth = pipeline.create(); + auto manip = pipeline.create(); + auto nn 
= pipeline.create(); + + auto videoOut = pipeline.create(); + auto xoutRight = pipeline.create(); + auto disparityOut = pipeline.create(); + auto manipOut = pipeline.create(); + auto nnOut = pipeline.create(); + + videoOut->setStreamName("h265"); + xoutRight->setStreamName("right"); + disparityOut->setStreamName("disparity"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + // Note: the rectified streams are horizontally mirrored by default + depth->setConfidenceThreshold(255); + depth->setRectifyMirrorFrame(false); + depth->setRectifyEdgeFillColor(0); // Black, to better see the cutout + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // The NN model expects BGR input-> By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + manip->initialConfig.setResize(300, 300); + + // Linking + camRgb->video.link(videoEncoder->input); + videoEncoder->bitstream.link(videoOut->input); + monoRight->out.link(xoutRight->input); + monoRight->out.link(depth->right); + monoLeft->out.link(depth->left); + depth->disparity.link(disparityOut->input); + depth->rectifiedRight.link(manip->inputImage); + manip->out.link(nn->input); + manip->out.link(manipOut->input); + nn->out.link(nnOut->input); + + // Disparity range is used for normalization + float 
disparityMultiplier = 255 / depth->getMaxDisparity(); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Queues + int queueSize = 8; + auto qRight = device.getOutputQueue("right", queueSize); + auto qDisparity = device.getOutputQueue("disparity", queueSize); + auto qManip = device.getOutputQueue("manip", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + cv::Mat frame; + cv::Mat frameManip; + cv::Mat frameDisparity; + std::vector detections; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + + auto videoFile = std::ofstream("video.h265", std::ios::binary); + cv::namedWindow("right", cv::WINDOW_NORMAL); + cv::namedWindow("manip", cv::WINDOW_NORMAL); + + while(true) { + auto inRight = qRight->tryGet(); + auto inManip = qManip->tryGet(); + auto inDet = qDet->tryGet(); + auto inDisparity = qDisparity->tryGet(); + + auto out1 = qRgbEnc->get(); + videoFile.write((char*)out1->getData().data(), out1->getData().size()); + + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inManip) { + frameManip = inManip->getCvFrame(); + } + + if(inDisparity) { + frameDisparity = inDisparity->getCvFrame(); + if(flipRectified) { + cv::flip(frameDisparity, frameDisparity, 1); + } + frameDisparity.convertTo(frameDisparity, CV_8UC1, disparityMultiplier); + cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + 
if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the right cam frame + cv::imshow("right", frame); + } + + if(!frameDisparity.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameDisparity, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameDisparity, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameDisparity, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the disparity frame + cv::imshow("disparity", frameDisparity); + } + + if(!frameManip.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + 
cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the manip frame + cv::imshow("manip", frameManip); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + break; + } + } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; + return 0; +} diff --git a/examples/src/rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp new file mode 100644 index 0000000000..69b6eaf8f8 --- /dev/null +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -0,0 +1,64 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" +#include "utility.hpp" + +int main() { + using namespace std::chrono; + + // Create pipeline + dai::Pipeline pipeline; + + // Define source and outputs + auto camRgb = pipeline.create(); + auto videoEnc = pipeline.create(); + auto xoutJpeg = pipeline.create(); + auto xoutRgb = pipeline.create(); + + xoutJpeg->setStreamName("jpeg"); + xoutRgb->setStreamName("rgb"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + videoEnc->setDefaultProfilePreset(camRgb->getVideoSize(), camRgb->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); + + // Linking + camRgb->video.link(xoutRgb->input); + camRgb->video.link(videoEnc->input); + videoEnc->bitstream.link(xoutJpeg->input); + + // Connect to device and start pipeline + dai::Device 
device(pipeline); + + // Queues + auto qRgb = device.getOutputQueue("rgb", 30, false); + auto qJpeg = device.getOutputQueue("jpeg", 30, true); + + std::string dirName = "rgb_data"; + createDirectory(dirName); + + while(true) { + auto inRgb = qRgb->tryGet(); + if(inRgb != NULL) { + cv::imshow("rgb", inRgb->getCvFrame()); + } + + auto encFrames = qJpeg->tryGetAll(); + for(const auto& encFrame : encFrames) { + uint64_t time = duration_cast(system_clock::now().time_since_epoch()).count(); + std::stringstream videoStr; + videoStr << dirName << "/" << time << ".jpeg"; + auto videoFile = std::ofstream(videoStr.str(), std::ios::binary); + videoFile.write((char*)encFrame->getData().data(), encFrame->getData().size()); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp new file mode 100644 index 0000000000..4186a94462 --- /dev/null +++ b/examples/src/rgb_mobilenet.cpp @@ -0,0 +1,145 @@ +#include +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +static std::atomic syncNN{true}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and 
outputs + auto camRgb = pipeline.create(); + auto nn = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); + + // Properties + camRgb->setPreviewSize(300, 300); // NN input + camRgb->setInterleaved(false); + camRgb->setFps(40); + // Define a neural network that will make predictions based on the source frames + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + if(syncNN) { + nn->passthrough.link(xoutRgb->input); + } else { + camRgb->preview.link(xoutRgb->input); + } + + camRgb->preview.link(nn->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + cv::Mat frame; + std::vector detections; + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + auto color2 = cv::Scalar(255, 255, 255); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << 
std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + while(true) { + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = qDet->tryGet(); + } + + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + displayFrame("video", frame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp new file mode 100644 index 0000000000..449b793329 --- /dev/null +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -0,0 +1,130 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier 
example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto nn = pipeline.create(); + + auto xoutVideo = pipeline.create(); + auto xoutPreview = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutVideo->setStreamName("video"); + xoutPreview->setStreamName("preview"); + nnOut->setStreamName("nn"); + + // Properties + camRgb->setPreviewSize(300, 300); // NN input + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + camRgb->setInterleaved(false); + camRgb->setPreviewKeepAspectRatio(false); + // Define a neural network that will make predictions based on the source frames + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + camRgb->video.link(xoutVideo->input); + camRgb->preview.link(xoutPreview->input); + camRgb->preview.link(nn->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the frames and nn data from the outputs defined above + auto qVideo = device.getOutputQueue("video", 4, false); + auto qPreview = device.getOutputQueue("preview", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + cv::Mat previewFrame; + cv::Mat videoFrame; + std::vector detections; + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = 
detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + cv::namedWindow("video", cv::WINDOW_NORMAL); + cv::resizeWindow("video", 1280, 720); + cout << "Resize video window with mouse drag!" << endl; + + while(true) { + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inVideo = qVideo->tryGet(); + auto inPreview = qPreview->tryGet(); + auto inDet = qDet->tryGet(); + + if(inVideo) { + videoFrame = inVideo->getCvFrame(); + } + + if(inPreview) { + previewFrame = inPreview->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!videoFrame.empty()) { + displayFrame("video", videoFrame, detections); + } + + if(!previewFrame.empty()) { + displayFrame("preview", previewFrame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp new file mode 100644 index 0000000000..b99efe5a0a --- /dev/null +++ b/examples/src/rgb_mono_encoding.cpp @@ -0,0 +1,82 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Keyboard interrupt (Ctrl + C) detected 
+static std::atomic alive{true}; +static void sigintHandler(int signum) { + alive = false; +} + +int main() { + using namespace std; + std::signal(SIGINT, &sigintHandler); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto ve1 = pipeline.create(); + auto ve2 = pipeline.create(); + auto ve3 = pipeline.create(); + + auto ve1Out = pipeline.create(); + auto ve2Out = pipeline.create(); + auto ve3Out = pipeline.create(); + + ve1Out->setStreamName("ve1Out"); + ve2Out->setStreamName("ve2Out"); + ve3Out->setStreamName("ve3Out"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + // Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding + ve1->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); + ve2->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + ve3->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); + + // Linking + monoLeft->out.link(ve1->input); + camRgb->video.link(ve2->input); + monoRight->out.link(ve3->input); + ve1->bitstream.link(ve1Out->input); + ve2->bitstream.link(ve2Out->input); + ve3->bitstream.link(ve3Out->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the encoded data from the output defined above + auto outQ1 = device.getOutputQueue("ve1Out", 30, true); + auto outQ2 = device.getOutputQueue("ve2Out", 30, true); + auto outQ3 = device.getOutputQueue("ve3Out", 30, true); + + // The .h264 / .h265 files are raw stream files (not playable yet) + auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); + auto videoFile2 = 
std::ofstream("color.h265", std::ios::binary); + auto videoFile3 = std::ofstream("mono2.h264", std::ios::binary); + cout << "Press Ctrl+C to stop encoding..." << endl; + + while(alive) { + auto out1 = outQ1->get(); + videoFile1.write((char*)out1->getData().data(), out1->getData().size()); + auto out2 = outQ2->get(); + videoFile2.write((char*)out2->getData().data(), out2->getData().size()); + auto out3 = outQ3->get(); + videoFile3.write((char*)out3->getData().data(), out3->getData().size()); + } + + cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i mono1.h264 -c copy mono1.mp4" << endl; + cout << "ffmpeg -framerate 30 -i mono2.h264 -c copy mono2.mp4" << endl; + cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << endl; + + return 0; +} diff --git a/examples/src/rgb_preview.cpp b/examples/src/rgb_preview.cpp new file mode 100644 index 0000000000..911f7d1d95 --- /dev/null +++ b/examples/src/rgb_preview.cpp @@ -0,0 +1,55 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + using namespace std; + // Create pipeline + dai::Pipeline pipeline; + + // Define source and output + auto camRgb = pipeline.create(); + auto xoutRgb = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB); + + // Linking + camRgb->preview.link(xoutRgb->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + cout << "Connected cameras: "; + for(const auto& cam : device.getConnectedCameras()) { + cout << static_cast(cam) << " "; + cout << cam << " "; + } + cout << 
endl; + + // Print USB speed + cout << "Usb speed: " << device.getUsbSpeed() << endl; + + // Output queue will be used to get the rgb frames from the output defined above + auto qRgb = device.getOutputQueue("rgb", 4, false); + + while(true) { + auto inRgb = qRgb->get(); + + // Retrieve 'bgr' (opencv format) frame + cv::imshow("rgb", inRgb->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/image_manip_warp_example.cpp b/examples/src/rgb_rotate_warp.cpp similarity index 72% rename from examples/src/image_manip_warp_example.cpp rename to examples/src/rgb_rotate_warp.cpp index 48595fabe9..610cff4f8b 100644 --- a/examples/src/image_manip_warp_example.cpp +++ b/examples/src/rgb_rotate_warp.cpp @@ -1,24 +1,28 @@ -#include #include #include "depthai/depthai.hpp" #include "utility.hpp" -struct warpFourPointTest { - std::vector points; - bool normalizedCoords; - const char* description; -}; +static constexpr auto keyRotateDecr = 'z'; +static constexpr auto keyRotateIncr = 'x'; +static constexpr auto keyResizeInc = 'v'; +static constexpr auto keyWarpTestCycle = 'c'; + +void printControls() { + printf("\n=== Controls:\n"); + printf(" %c -rotated rectangle crop, decrease rate\n", keyRotateDecr); + printf(" %c -rotated rectangle crop, increase rate\n", keyRotateIncr); + printf(" %c -warp 4-point transform, cycle through modes\n", keyWarpTestCycle); + printf(" %c -resize cropped region, or disable resize\n", keyResizeInc); + printf(" h -print controls (help)\n"); +} static constexpr auto ROTATE_RATE_MAX = 5.0; static constexpr auto ROTATE_RATE_INC = 0.1; -static constexpr auto KEY_ROTATE_DECR = 'z'; -static constexpr auto KEY_ROTATE_INCR = 'x'; static constexpr auto RESIZE_MAX_W = 800; static constexpr auto RESIZE_MAX_H = 600; static constexpr auto RESIZE_FACTOR_MAX = 5; -static constexpr auto KEY_RESIZE_INC = 'v'; /* The crop points are specified in clockwise order, * with first point mapped 
to output top-left, as: @@ -30,6 +34,11 @@ static const dai::Point2f P0 = {0, 0}; // top-left static const dai::Point2f P1 = {1, 0}; // top-right static const dai::Point2f P2 = {1, 1}; // bottom-right static const dai::Point2f P3 = {0, 1}; // bottom-left +struct warpFourPointTest { + std::vector points; + bool normalizedCoords; + const char* description; +}; std::vector warpList = { //{{{ 0, 0},{ 1, 0},{ 1, 1},{ 0, 1}}, true, "passthrough"}, @@ -44,42 +53,33 @@ std::vector warpList = { {{{-0.3, 0}, {1, 0}, {1.3, 1}, {0, 1}}, true, "8. parallelogram transform"}, {{{-0.2, 0}, {1.8, 0}, {1, 1}, {0, 1}}, true, "9. trapezoid transform"}, }; -static constexpr auto KEY_WARP_TEST_CYCLE = 'c'; - -void printControls() { - printf("\n=== Controls:\n"); - printf(" %c -rotated rectangle crop, decrease rate\n", KEY_ROTATE_DECR); - printf(" %c -rotated rectangle crop, increase rate\n", KEY_ROTATE_INCR); - printf(" %c -warp 4-point transform, cycle through modes\n", KEY_WARP_TEST_CYCLE); - printf(" %c -resize cropped region, or disable resize\n", KEY_RESIZE_INC); - printf(" h -print controls (help)\n"); -} int main() { + // Create pipeline dai::Pipeline pipeline; - auto colorCam = pipeline.create(); - auto camOut = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto manip = pipeline.create(); - auto manipCfg = pipeline.create(); + + auto camOut = pipeline.create(); auto manipOut = pipeline.create(); + auto manipCfg = pipeline.create(); camOut->setStreamName("preview"); manipOut->setStreamName("manip"); manipCfg->setStreamName("manipCfg"); - colorCam->setPreviewSize(640, 480); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(640, 480); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + 
camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); manip->setMaxOutputFrameSize(2000 * 1500 * 3); - /* Link nodes: CAM --> XLINK(preview) - * \--> manip -> XLINK(manipOut) - * manipCfg ---/ - */ - colorCam->preview.link(camOut->input); - colorCam->preview.link(manip->inputImage); + // Linking + camRgb->preview.link(camOut->input); + camRgb->preview.link(manip->inputImage); manip->out.link(manipOut->input); manipCfg->out.link(manip->inputConfig); @@ -87,17 +87,19 @@ int main() { dai::Device device(pipeline); // Create input & output queues - auto previewQueue = device.getOutputQueue("preview", 8, false); - auto manipQueue = device.getOutputQueue("manip", 8, false); - auto manipInQueue = device.getInputQueue("manipCfg"); + auto qPreview = device.getOutputQueue("preview", 8, false); + auto qManip = device.getOutputQueue("manip", 8, false); + auto qManipCfg = device.getInputQueue("manipCfg"); - std::vector frameQueues{previewQueue, manipQueue}; + std::vector frameQueues{qPreview, qManip}; // keep processing data int key = -1; float angleDeg = 0; float rotateRate = 1.0; - int resizeFactor = 0, resizeX, resizeY; + int resizeFactor = 0; + int resizeX = 0; + int resizeY = 0; bool testFourPt = false; int warpIdx = -1; @@ -106,15 +108,15 @@ int main() { while(key != 'q') { if(key >= 0) { printf("Pressed: %c | ", key); - if(key == KEY_ROTATE_DECR || key == KEY_ROTATE_INCR) { - if(key == KEY_ROTATE_DECR) { + if(key == keyRotateDecr || key == keyRotateIncr) { + if(key == keyRotateDecr) { if(rotateRate > -ROTATE_RATE_MAX) rotateRate -= ROTATE_RATE_INC; - } else if(key == KEY_ROTATE_INCR) { + } else if(key == keyRotateIncr) { if(rotateRate < ROTATE_RATE_MAX) rotateRate += ROTATE_RATE_INC; } testFourPt = false; printf("Crop rotated rectangle, rate: %g degrees", rotateRate); - } else if(key == KEY_RESIZE_INC) { + } else if(key == keyResizeInc) { resizeFactor++; if(resizeFactor > RESIZE_FACTOR_MAX) { resizeFactor = 0; @@ -124,10 +126,10 @@ int main() { resizeY = 
RESIZE_MAX_H / resizeFactor; printf("Crop region resized to: %d x %d", resizeX, resizeY); } - } else if(key == KEY_WARP_TEST_CYCLE) { + } else if(key == keyWarpTestCycle) { resizeFactor = 0; // Disable resizing initially warpIdx = (warpIdx + 1) % warpList.size(); - printf("Warp 4-point transform, %s", warpList[warpIdx].description); + printf("Warp 4-point transform: %s", warpList[warpIdx].description); testFourPt = true; } else if(key == 'h') { printControls(); @@ -145,15 +147,14 @@ int main() { dai::RotatedRect rr = {{320, 240}, // center {640, 480}, //{400, 400}, // size angleDeg}; - bool normalized = false; - cfg.setCropRotatedRect(rr, normalized); + cfg.setCropRotatedRect(rr, false); } if(resizeFactor > 0) { cfg.setResize(resizeX, resizeY); } // cfg.setWarpBorderFillColor(255, 0, 0); // cfg.setWarpBorderReplicatePixels(); - manipInQueue->send(cfg); + qManipCfg->send(cfg); } for(const auto& q : frameQueues) { @@ -163,4 +164,5 @@ int main() { } key = cv::waitKey(1); } + return 0; } diff --git a/examples/src/rgb_video.cpp b/examples/src/rgb_video.cpp new file mode 100644 index 0000000000..ecb349e4d0 --- /dev/null +++ b/examples/src/rgb_video.cpp @@ -0,0 +1,45 @@ +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + // Create pipeline + dai::Pipeline pipeline; + + // Define source and output + auto camRgb = pipeline.create(); + auto xoutVideo = pipeline.create(); + + xoutVideo->setStreamName("video"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setVideoSize(1920, 1080); + + xoutVideo->input.setBlocking(false); + xoutVideo->input.setQueueSize(1); + + // Linking + camRgb->video.link(xoutVideo->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + auto video = device.getOutputQueue("video"); + + while(true) { + auto videoIn = 
video->get(); + + // Get BGR frame from NV12 encoded video frame to show with opencv + // Visualizing the frame on slower hosts might have overhead + cv::imshow("video", videoIn->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/spatial_location_calculator_example.cpp b/examples/src/spatial_location_calculator.cpp similarity index 79% rename from examples/src/spatial_location_calculator_example.cpp rename to examples/src/spatial_location_calculator.cpp index dc0ba33ac5..05137bfef8 100644 --- a/examples/src/spatial_location_calculator_example.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -1,6 +1,3 @@ - - -#include #include #include "utility.hpp" @@ -10,26 +7,29 @@ static constexpr float stepSize = 0.05; +static std::atomic newConfig{false}; + int main() { using namespace std; - dai::Pipeline p; + // Create pipeline + dai::Pipeline pipeline; - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); - auto spatialDataCalculator = p.create(); + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto spatialDataCalculator = pipeline.create(); - auto xoutDepth = p.create(); - auto xoutSpatialData = p.create(); - auto xinSpatialCalcConfig = p.create(); + auto xoutDepth = pipeline.create(); + auto xoutSpatialData = pipeline.create(); + auto xinSpatialCalcConfig = pipeline.create(); - // XLinkOut xoutDepth->setStreamName("depth"); xoutSpatialData->setStreamName("spatialData"); xinSpatialCalcConfig->setStreamName("spatialCalcConfig"); - // MonoCamera + // Properties monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); @@ -38,47 +38,48 @@ int main() { bool lrcheck = false; bool subpixel = 
false; - // StereoDepth stereo->setConfidenceThreshold(255); - - // stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); stereo->setLeftRightCheck(lrcheck); stereo->setSubpixel(subpixel); - // Link plugins CAM -> STEREO -> XLINK - monoLeft->out.link(stereo->left); - monoRight->out.link(stereo->right); - - spatialDataCalculator->passthroughDepth.link(xoutDepth->input); - stereo->depth.link(spatialDataCalculator->inputDepth); - + // Config dai::Point2f topLeft(0.4f, 0.4f); dai::Point2f bottomRight(0.6f, 0.6f); - spatialDataCalculator->setWaitForConfigInput(false); dai::SpatialLocationCalculatorConfigData config; config.depthThresholds.lowerThreshold = 100; config.depthThresholds.upperThreshold = 10000; config.roi = dai::Rect(topLeft, bottomRight); + + spatialDataCalculator->setWaitForConfigInput(false); spatialDataCalculator->initialConfig.addROI(config); + + // Linking + monoLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); + + spatialDataCalculator->passthroughDepth.link(xoutDepth->input); + stereo->depth.link(spatialDataCalculator->inputDepth); + spatialDataCalculator->out.link(xoutSpatialData->input); xinSpatialCalcConfig->out.link(spatialDataCalculator->inputConfig); - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto depthQueue = d.getOutputQueue("depth", 8, false); - auto spatialCalcQueue = d.getOutputQueue("spatialData", 8, false); - auto spatialCalcConfigInQueue = d.getInputQueue("spatialCalcConfig"); + // Output queue will be used to get the depth frames from the outputs defined above + auto depthQueue = device.getOutputQueue("depth", 8, false); + auto spatialCalcQueue = device.getOutputQueue("spatialData", 8, false); + auto spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig"); - cv::Mat depthFrame; auto color = cv::Scalar(255, 255, 255); + std::cout << "Use WASD keys to move ROI!" 
<< std::endl; - while(1) { - auto depth = depthQueue->get(); + while(true) { + auto inDepth = depthQueue->get(); - cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrame = inDepth->getFrame(); cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); @@ -104,14 +105,12 @@ int main() { depthZ << "Z: " << (int)depthData.spatialCoordinates.z << " mm"; cv::putText(depthFrameColor, depthZ.str(), cv::Point(xmin + 10, ymin + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); } - + // Show the frame cv::imshow("depth", depthFrameColor); - bool newConfig = false; int key = cv::waitKey(1); switch(key) { case 'q': - return 0; break; case 'w': if(topLeft.y - stepSize >= 0) { @@ -144,11 +143,14 @@ int main() { default: break; } + if(newConfig) { config.roi = dai::Rect(topLeft, bottomRight); dai::SpatialLocationCalculatorConfig cfg; cfg.addROI(config); spatialCalcConfigInQueue->send(cfg); + newConfig = false; } } + return 0; } diff --git a/examples/src/spatial_mobilenet_example.cpp b/examples/src/spatial_mobilenet.cpp similarity index 61% rename from examples/src/spatial_mobilenet_example.cpp rename to examples/src/spatial_mobilenet.cpp index fd88e3b229..ba72f1ad20 100644 --- a/examples/src/spatial_mobilenet_example.cpp +++ b/examples/src/spatial_mobilenet.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -11,40 +8,53 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic syncNN{true}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + 
if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; + // Create pipeline + dai::Pipeline pipeline; - // create nodes - auto colorCam = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); - // create xlink connections - auto xoutRgb = p.create(); - auto xoutNN = p.create(); - auto xoutBoundingBoxDepthMapping = p.create(); - auto xoutDepth = p.create(); + auto xoutRgb = pipeline.create(); + auto xoutNN = pipeline.create(); + auto xoutBoundingBoxDepthMapping = pipeline.create(); + auto xoutDepth = pipeline.create(); - xoutRgb->setStreamName("preview"); + xoutRgb->setStreamName("rgb"); xoutNN->setStreamName("detections"); xoutBoundingBoxDepthMapping->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - /// setting node configs + // 
Setting node configs stereo->setConfidenceThreshold(255); spatialDetectionNetwork->setBlobPath(nnPath); @@ -54,16 +64,15 @@ dai::Pipeline createNNPipeline(std::string nnPath) { spatialDetectionNetwork->setDepthLowerThreshold(100); spatialDetectionNetwork->setDepthUpperThreshold(5000); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); + camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) { spatialDetectionNetwork->passthrough.link(xoutRgb->input); } else { - colorCam->preview.link(xoutRgb->input); + camRgb->preview.link(xoutRgb->input); } spatialDetectionNetwork->out.link(xoutNN->input); @@ -72,53 +81,44 @@ dai::Pipeline createNNPipeline(std::string nnPath) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); + // Connect to device and start pipeline + dai::Device device(pipeline); - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto xoutBoundingBoxDepthMapping = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + auto previewQueue = device.getOutputQueue("rgb", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto xoutBoundingBoxDepthMappingQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, 
false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + while(true) { + auto inPreview = previewQueue->get(); + auto inDet = detectionNNQueue->get(); auto depth = depthQueue->get(); - auto dets = det->detections; + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + cv::Mat frame = inPreview->getCvFrame(); cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = xoutBoundingBoxDepthMapping->get(); + auto detections = inDet->detections; + if(!detections.empty()) { + auto boundingBoxMapping = xoutBoundingBoxDepthMappingQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -134,57 +134,47 @@ int main(int argc, char** argv) { cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - cv::Mat frame = imgFrame->getCvFrame(); + for(const auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; 
- int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } 
std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inPreview->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("depth", depthFrameColor); cv::imshow("preview", frame); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } diff --git a/examples/src/spatial_mobilenet_mono_example.cpp b/examples/src/spatial_mobilenet_mono.cpp similarity index 60% rename from examples/src/spatial_mobilenet_mono_example.cpp rename to examples/src/spatial_mobilenet_mono.cpp index 3eaa53581e..960d142075 100644 --- a/examples/src/spatial_mobilenet_mono_example.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -11,32 +8,47 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; -static bool flipRectified = true; +static std::atomic syncNN{true}; +static std::atomic flipRectified{true}; -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); - auto xlinkOut = p.create(); - auto spatialDetectionNetwork = p.create(); - auto imageManip = p.create(); + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); - auto nnOut = p.create(); - auto 
depthRoiMap = p.create(); - auto xoutDepth = p.create(); + // Create pipeline + dai::Pipeline pipeline; - xlinkOut->setStreamName("preview"); + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto imageManip = pipeline.create(); + + auto xoutManip = pipeline.create(); + auto nnOut = pipeline.create(); + auto depthRoiMap = pipeline.create(); + auto xoutDepth = pipeline.create(); + + xoutManip->setStreamName("right"); nnOut->setStreamName("detections"); depthRoiMap->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); + // Properties imageManip->initialConfig.setResize(300, 300); // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) - imageManip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + imageManip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); @@ -45,70 +57,50 @@ dai::Pipeline createNNPipeline(std::string nnPath) { // StereoDepth stereo->setConfidenceThreshold(255); - // Link plugins CAM -> STEREO -> XLINK - monoLeft->out.link(stereo->left); - monoRight->out.link(stereo->right); - - stereo->rectifiedRight.link(imageManip->inputImage); - - // testing MobileNet DetectionNetwork + // Define a neural network that will make predictions based on the source frames spatialDetectionNetwork->setConfidenceThreshold(0.5f); - spatialDetectionNetwork->setBoundingBoxScaleFactor(0.7); + spatialDetectionNetwork->setBlobPath(nnPath); + spatialDetectionNetwork->input.setBlocking(false); + 
spatialDetectionNetwork->setBoundingBoxScaleFactor(0.5); spatialDetectionNetwork->setDepthLowerThreshold(100); spatialDetectionNetwork->setDepthUpperThreshold(5000); - spatialDetectionNetwork->setBlobPath(nnPath); + // Linking + monoLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK imageManip->out.link(spatialDetectionNetwork->input); if(syncNN) { - spatialDetectionNetwork->passthrough.link(xlinkOut->input); + spatialDetectionNetwork->passthrough.link(xoutManip->input); } else { - imageManip->out.link(xlinkOut->input); + imageManip->out.link(xoutManip->input); } spatialDetectionNetwork->out.link(nnOut->input); spatialDetectionNetwork->boundingBoxMapping.link(depthRoiMap->input); + stereo->rectifiedRight.link(imageManip->inputImage); stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto depthRoiMap = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + // Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto previewQueue = device.getOutputQueue("right", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto depthRoiMapQueue = 
device.getOutputQueue("boundingBoxDepthMapping", 4, false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto inRectifiedRight = preview->get(); - auto det = detections->get(); - auto depth = depthQueue->get(); + while(true) { + auto inRectified = previewQueue->get(); + auto inDet = detectionNNQueue->get(); + auto inDepth = depthQueue->get(); counter++; auto currentTime = steady_clock::now(); @@ -119,16 +111,18 @@ int main(int argc, char** argv) { startTime = currentTime; } - auto dets = det->detections; + cv::Mat rectifiedRight = inRectified->getCvFrame(); + if(flipRectified) cv::flip(rectifiedRight, rectifiedRight, 1); - cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrame = inDepth->getFrame(); cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = depthRoiMap->get(); + auto detections = inDet->detections; + if(!detections.empty()) { + auto boundingBoxMapping = depthRoiMapQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -140,45 +134,40 @@ int main(int argc, char** argv) { auto ymin = (int)topLeft.y; auto xmax = (int)bottomRight.x; auto ymax = (int)bottomRight.y; - cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } - cv::Mat rectifiedRight = inRectifiedRight->getCvFrame(); - - if(flipRectified) cv::flip(rectifiedRight, rectifiedRight, 1); - - for(auto& d : dets) { + for(auto& detection : detections) { if(flipRectified) { - auto swap = d.xmin; - d.xmin = 1 - d.xmax; - d.xmax = 1 - swap; + auto swap = detection.xmin; + detection.xmin = 1 - detection.xmax; + detection.xmax = 1 - 
swap; } - int x1 = d.xmin * rectifiedRight.cols; - int y1 = d.ymin * rectifiedRight.rows; - int x2 = d.xmax * rectifiedRight.cols; - int y2 = d.ymax * rectifiedRight.rows; + int x1 = detection.xmin * rectifiedRight.cols; + int y1 = detection.ymin * rectifiedRight.rows; + int x2 = detection.xmax * rectifiedRight.cols; + int y2 = detection.ymax * rectifiedRight.rows; - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(rectifiedRight, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(rectifiedRight, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(rectifiedRight, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; - cv::putText(rectifiedRight, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; + cv::putText(rectifiedRight, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; - cv::putText(rectifiedRight, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; + cv::putText(rectifiedRight, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; - 
cv::putText(rectifiedRight, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; + cv::putText(rectifiedRight, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(rectifiedRight, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } @@ -189,11 +178,11 @@ int main(int argc, char** argv) { cv::imshow("depth", depthFrameColor); cv::imshow("rectified right", rectifiedRight); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } diff --git a/examples/src/spatial_object_tracker_example.cpp b/examples/src/spatial_object_tracker.cpp similarity index 67% rename from examples/src/spatial_object_tracker_example.cpp rename to examples/src/spatial_object_tracker.cpp index b7195495f5..b54ceca87a 100644 --- a/examples/src/spatial_object_tracker_example.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include "utility.hpp" @@ -11,30 +9,43 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic fullFrameTracking{false}; -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } - // create nodes - auto colorCam = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); - auto objectTracker = p.create(); + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); - // create 
xlink connections - auto xoutRgb = p.create(); - auto trackerOut = p.create(); + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto objectTracker = pipeline.create(); + + auto xoutRgb = pipeline.create(); + auto trackerOut = pipeline.create(); xoutRgb->setStreamName("preview"); trackerOut->setStreamName("tracklets"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); @@ -51,60 +62,46 @@ dai::Pipeline createNNPipeline(std::string nnPath) { spatialDetectionNetwork->setDepthLowerThreshold(100); spatialDetectionNetwork->setDepthUpperThreshold(5000); - // Link plugins CAM -> STEREO -> XLINK + objectTracker->setDetectionLabelsToTrack({15}); // track only person + // possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS + objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + // take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID + objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); + + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); - if(syncNN) { - 
objectTracker->passthroughTrackerFrame.link(xoutRgb->input); + camRgb->preview.link(spatialDetectionNetwork->input); + objectTracker->passthroughTrackerFrame.link(xoutRgb->input); + objectTracker->out.link(trackerOut->input); + + if(fullFrameTracking) { + camRgb->setPreviewKeepAspectRatio(false); + camRgb->video.link(objectTracker->inputTrackerFrame); + objectTracker->inputTrackerFrame.setBlocking(false); + // do not block the pipeline if it's too slow on full frame + objectTracker->inputTrackerFrame.setQueueSize(2); } else { - colorCam->preview.link(xoutRgb->input); + spatialDetectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); } - objectTracker->setDetectionLabelsToTrack({15}); // track only person - objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); - objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); - objectTracker->out.link(trackerOut->input); - - spatialDetectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); spatialDetectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); spatialDetectionNetwork->out.link(objectTracker->inputDetections); - stereo->depth.link(spatialDetectionNetwork->inputDepth); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); + // Connect to device and start pipeline + dai::Device device(pipeline); - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - auto preview = d.getOutputQueue("preview", 4, false); - auto tracklets = d.getOutputQueue("tracklets", 4, false); + auto preview = device.getOutputQueue("preview", 4, false); + auto tracklets = device.getOutputQueue("tracklets", 4, false); 
auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - auto color = cv::Scalar(255, 0, 0); + auto color = cv::Scalar(255, 255, 255); - while(1) { + while(true) { auto imgFrame = preview->get(); auto track = tracklets->get(); @@ -131,38 +128,38 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream idStr; idStr << "ID: " << t.id; - cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream statusStr; statusStr << "Status: " << t.status; - cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; depthX << "X: " << (int)t.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; depthY << "Y: " << (int)t.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; depthZ << "Z: " << (int)t.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 95), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 95), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, 
y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("tracker", frame); + int key = cv::waitKey(1); if(key == 'q') { return 0; } } - return 0; } diff --git a/examples/src/spatial_tiny_yolo_example.cpp b/examples/src/spatial_tiny_yolo.cpp similarity index 68% rename from examples/src/spatial_tiny_yolo_example.cpp rename to examples/src/spatial_tiny_yolo.cpp index d320662dd7..5371c5442e 100644 --- a/examples/src/spatial_tiny_yolo_example.cpp +++ b/examples/src/spatial_tiny_yolo.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include "utility.hpp" @@ -18,33 +16,46 @@ static const std::vector labelMap = { "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; -static bool syncNN = true; +static std::atomic syncNN{true}; -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; - // create nodes - auto colorCam = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); - // 
create xlink connections - auto xoutRgb = p.create(); - auto xoutNN = p.create(); - auto xoutBoundingBoxDepthMapping = p.create(); - auto xoutDepth = p.create(); + auto xoutRgb = pipeline.create(); + auto xoutNN = pipeline.create(); + auto xoutBoundingBoxDepthMapping = pipeline.create(); + auto xoutDepth = pipeline.create(); - xoutRgb->setStreamName("preview"); + xoutRgb->setStreamName("rgb"); xoutNN->setStreamName("detections"); xoutBoundingBoxDepthMapping->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); - colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); @@ -65,19 +76,18 @@ dai::Pipeline createNNPipeline(std::string nnPath) { spatialDetectionNetwork->setNumClasses(80); spatialDetectionNetwork->setCoordinateSize(4); spatialDetectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - spatialDetectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + spatialDetectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); spatialDetectionNetwork->setIouThreshold(0.5f); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); + camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) { spatialDetectionNetwork->passthrough.link(xoutRgb->input); } else { - 
colorCam->preview.link(xoutRgb->input); + camRgb->preview.link(xoutRgb->input); } spatialDetectionNetwork->out.link(xoutNN->input); @@ -86,53 +96,44 @@ dai::Pipeline createNNPipeline(std::string nnPath) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto xoutBoundingBoxDepthMapping = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + // Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto previewQueue = device.getOutputQueue("rgb", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto xoutBoundingBoxDepthMappingQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + while(true) { + auto imgFrame = previewQueue->get(); + auto inDet = detectionNNQueue->get(); auto depth = depthQueue->get(); - auto dets = det->detections; - + cv::Mat frame = imgFrame->getCvFrame(); cv::Mat depthFrame = depth->getFrame(); cv::Mat depthFrameColor; 
cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = xoutBoundingBoxDepthMapping->get(); + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + + auto detections = inDet->detections; + if(!detections.empty()) { + auto boundingBoxMapping = xoutBoundingBoxDepthMappingQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -148,42 +149,32 @@ int main(int argc, char** argv) { cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - cv::Mat frame = imgFrame->getCvFrame(); - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; + for(const auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << 
d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } @@ -193,12 +184,12 @@ int main(int argc, char** argv) { cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("depth", depthFrameColor); - cv::imshow("preview", frame); + cv::imshow("rgb", frame); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } diff --git a/examples/src/stereo_example.cpp b/examples/src/stereo_depth_video.cpp similarity index 70% rename from 
examples/src/stereo_example.cpp rename to examples/src/stereo_depth_video.cpp index 10a4ed60bf..712a18b0a0 100644 --- a/examples/src/stereo_example.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -1,30 +1,27 @@ - - -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" +static std::atomic withDepth{true}; + int main() { using namespace std; - using namespace std::chrono; - // TODO - split this example into two separate examples - bool withDepth = true; - - dai::Pipeline p; - - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto xoutLeft = p.create(); - auto xoutRight = p.create(); - auto stereo = withDepth ? p.create() : nullptr; - auto xoutDisp = p.create(); - auto xoutDepth = p.create(); - auto xoutRectifL = p.create(); - auto xoutRectifR = p.create(); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = withDepth ? 
pipeline.create() : nullptr; + + auto xoutLeft = pipeline.create(); + auto xoutRight = pipeline.create(); + auto xoutDisp = pipeline.create(); + auto xoutDepth = pipeline.create(); + auto xoutRectifL = pipeline.create(); + auto xoutRectifR = pipeline.create(); // XLinkOut xoutLeft->setStreamName("left"); @@ -44,11 +41,11 @@ int main() { monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // monoRight->setFps(5.0); - bool outputDepth = false; - bool outputRectified = true; - bool lrcheck = true; - bool extended = false; - bool subpixel = true; + std::atomic outputDepth{false}; + std::atomic outputRectified{true}; + std::atomic lrcheck{true}; + std::atomic extended{false}; + std::atomic subpixel{false}; if(withDepth) { // StereoDepth @@ -57,12 +54,12 @@ int main() { // stereo->loadCalibrationFile("../../../../depthai/resources/depthai.calib"); // stereo->setInputResolution(1280, 720); // TODO: median filtering is disabled on device with (lrcheck || extended || subpixel) - // stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); + stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); stereo->setLeftRightCheck(lrcheck); stereo->setExtendedDisparity(extended); stereo->setSubpixel(subpixel); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); @@ -83,17 +80,17 @@ int main() { monoRight->out.link(xoutRight->input); } - // Connect and start the pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); - auto leftQueue = d.getOutputQueue("left", 8, false); - auto rightQueue = d.getOutputQueue("right", 8, false); - auto dispQueue = withDepth ? d.getOutputQueue("disparity", 8, false) : nullptr; - auto depthQueue = withDepth ? d.getOutputQueue("depth", 8, false) : nullptr; - auto rectifLeftQueue = withDepth ? d.getOutputQueue("rectified_left", 8, false) : nullptr; - auto rectifRightQueue = withDepth ? 
d.getOutputQueue("rectified_right", 8, false) : nullptr; + auto leftQueue = device.getOutputQueue("left", 8, false); + auto rightQueue = device.getOutputQueue("right", 8, false); + auto dispQueue = withDepth ? device.getOutputQueue("disparity", 8, false) : nullptr; + auto depthQueue = withDepth ? device.getOutputQueue("depth", 8, false) : nullptr; + auto rectifLeftQueue = withDepth ? device.getOutputQueue("rectified_left", 8, false) : nullptr; + auto rectifRightQueue = withDepth ? device.getOutputQueue("rectified_right", 8, false) : nullptr; - while(1) { + while(true) { auto left = leftQueue->get(); cv::imshow("left", cv::Mat(left->getHeight(), left->getWidth(), CV_8UC1, left->getData().data())); auto right = rightQueue->get(); @@ -129,8 +126,9 @@ int main() { } int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } + return 0; } diff --git a/examples/src/system_information_example.cpp b/examples/src/system_information.cpp similarity index 65% rename from examples/src/system_information_example.cpp rename to examples/src/system_information.cpp index 5a4e67524a..be4a117137 100644 --- a/examples/src/system_information_example.cpp +++ b/examples/src/system_information.cpp @@ -1,62 +1,47 @@ - - -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" - -void printSystemInformation(dai::SystemInformation); +void printSystemInformation(dai::SystemInformation info) { + printf("Ddr used / total - %.2f / %.2f MiB\n", info.ddrMemoryUsage.used / (1024.0f * 1024.0f), info.ddrMemoryUsage.total / (1024.0f * 1024.0f)); + printf("Cmx used / total - %.2f / %.2f MiB\n", info.cmxMemoryUsage.used / (1024.0f * 1024.0f), info.cmxMemoryUsage.total / (1024.0f * 1024.0f)); + printf("LeonCss heap used / total - %.2f / %.2f MiB\n", + info.leonCssMemoryUsage.used / (1024.0f * 1024.0f), + info.leonCssMemoryUsage.total / (1024.0f * 1024.0f)); + printf("LeonMss heap 
used / total - %.2f / %.2f MiB\n", + info.leonMssMemoryUsage.used / (1024.0f * 1024.0f), + info.leonMssMemoryUsage.total / (1024.0f * 1024.0f)); + const auto& t = info.chipTemperature; + printf("Chip temperature - average: %.2f, css: %.2f, mss: %.2f, upa0: %.2f, upa1: %.2f\n", t.average, t.css, t.mss, t.upa, t.dss); + printf("Cpu usage - Leon OS: %.2f %%, Leon RT: %.2f %%\n", info.leonCssCpuUsage.average * 100, info.leonMssCpuUsage.average * 100); + printf("----------------------------------------\n"); +} int main() { - using namespace std; - + // Create pipeline dai::Pipeline pipeline; + + // Define source and output auto sysLog = pipeline.create(); auto xout = pipeline.create(); - // properties - sysLog->setRate(1.0f); // 1 hz updates xout->setStreamName("sysinfo"); - // links - sysLog->out.link(xout->input); - - // Connect to device - dai::Device device; - - // Query device (before pipeline starts) - dai::MemoryInfo ddr = device.getDdrMemoryUsage(); - printf("Ddr used / total - %.2f / %.2f MiB\n", ddr.used / (1024.0f * 1024.0f), ddr.total / (1024.0f * 1024.0f)); + // Properties + sysLog->setRate(1.0f); // 1 hz updates - dai::MemoryInfo cmx = device.getCmxMemoryUsage(); - printf("Cmx used / total - %.2f / %.2f MiB\n", cmx.used / (1024.0f * 1024.0f), cmx.total / (1024.0f * 1024.0f)); + // Linking + sysLog->out.link(xout->input); - // Start pipeline - device.startPipeline(pipeline); + // Connect to device and start pipeline + dai::Device device(pipeline); - // Create 'sysinfo' queue - auto queue = device.getOutputQueue("sysinfo"); + // Output queue will be used to get the system info + auto qSysInfo = device.getOutputQueue("sysinfo", 4, false); - while(1) { - auto sysInfo = queue->get(); + while(true) { + auto sysInfo = qSysInfo->get(); printSystemInformation(*sysInfo); } + return 0; } - -void printSystemInformation(dai::SystemInformation info) { - printf("Ddr used / total - %.2f / %.2f MiB\n", info.ddrMemoryUsage.used / (1024.0f * 1024.0f), 
info.ddrMemoryUsage.total / (1024.0f * 1024.0f)); - printf("Cmx used / total - %.2f / %.2f MiB\n", info.cmxMemoryUsage.used / (1024.0f * 1024.0f), info.cmxMemoryUsage.total / (1024.0f * 1024.0f)); - - printf("LeonCss heap used / total - %.2f / %.2f MiB\n", - info.leonCssMemoryUsage.used / (1024.0f * 1024.0f), - info.leonCssMemoryUsage.total / (1024.0f * 1024.0f)); - printf("LeonMss heap used / total - %.2f / %.2f MiB\n", - info.leonMssMemoryUsage.used / (1024.0f * 1024.0f), - info.leonMssMemoryUsage.total / (1024.0f * 1024.0f)); - const auto& t = info.chipTemperature; - printf("Chip temperature - average: %.2f, css: %.2f, mss: %.2f, upa0: %.2f, upa1: %.2f\n", t.average, t.css, t.mss, t.upa, t.dss); - printf("Cpu usage - Leon OS: %.2f %%, Leon RT: %.2f %%\n", info.leonCssCpuUsage.average * 100, info.leonMssCpuUsage.average * 100); -} \ No newline at end of file diff --git a/examples/src/tiny_yolo_v4_device_side_decoding_example.cpp b/examples/src/tiny_yolo_v3_device_side_decoding.cpp similarity index 51% rename from examples/src/tiny_yolo_v4_device_side_decoding_example.cpp rename to examples/src/tiny_yolo_v3_device_side_decoding.cpp index 9fcb2ea319..2968fcacf8 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding_example.cpp +++ b/examples/src/tiny_yolo_v3_device_side_decoding.cpp @@ -1,17 +1,9 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -/* -The code is the same as for Tiny-yolo-V3, the only difference is the blob file. 
-The blob was compiled following this tutorial: https://github.com/TNTWEN/OpenVINO-YOLOV4 -*/ - static const std::vector labelMap = { "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", @@ -23,76 +15,110 @@ static const std::vector labelMap = { "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; -static bool syncNN = true; +static std::atomic syncNN{true}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto nnOut = p.create(); + // Create pipeline + dai::Pipeline pipeline; - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nnOut = pipeline.create(); - colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); - auto detectionNetwork = p.create(); + // Properties + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + 
camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); - // network specific!! + // Network specific settings detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setNumClasses(80); detectionNetwork->setCoordinateSize(4); detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - detectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); detectionNetwork->setIouThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setNumInferenceThreads(2); + detectionNetwork->input.setBlocking(false); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); + // Linking + camRgb->preview.link(detectionNetwork->input); if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); + detectionNetwork->passthrough.link(xoutRgb->input); } else { - colorCam->preview.link(xlinkOut->input); + camRgb->preview.link(xoutRgb->input); } detectionNetwork->out.link(nnOut->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } + // Connect to device and start pipeline + dai::Device device(pipeline); - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + cv::Mat frame; + std::vector detections; auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - while(1) { - auto imgFrame = 
preview->get(); - auto det = detections->get(); + auto color2 = cv::Scalar(255, 255, 255); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + while(true) { + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = qDet->tryGet(); + } counter++; auto currentTime = steady_clock::now(); @@ -103,40 +129,25 @@ int main(int argc, char** argv) { startTime = currentTime; } - cv::Mat frame = imgFrame->getCvFrame(); - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; - } 
- cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + if(inDet) { + detections = inDet->detections; } - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } - cv::imshow("preview", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } \ No newline at end of file diff --git a/examples/src/tiny_yolo_v3_device_side_decoding_example.cpp b/examples/src/tiny_yolo_v3_device_side_decoding_example.cpp deleted file mode 100644 index 18b2fd1db6..0000000000 --- a/examples/src/tiny_yolo_v3_device_side_decoding_example.cpp +++ /dev/null @@ -1,137 +0,0 @@ -#include -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -static const std::vector labelMap = { - "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", - "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", - "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", - "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", 
"kite", "baseball bat", "baseball glove", - "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", - "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", - "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", - "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; - -static bool syncNN = true; - -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto nnOut = p.create(); - - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); - - colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); - - auto detectionNetwork = p.create(); - - // network specific!! 
- detectionNetwork->setConfidenceThreshold(0.5f); - detectionNetwork->setNumClasses(80); - detectionNetwork->setCoordinateSize(4); - detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - detectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); - detectionNetwork->setIouThreshold(0.5f); - detectionNetwork->setBlobPath(nnPath); - - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); - if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); - } else { - colorCam->preview.link(xlinkOut->input); - } - - detectionNetwork->out.link(nnOut->input); - - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); - - // Connect and start the pipeline - dai::Device d(p); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); - - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - cv::Mat frame = imgFrame->getCvFrame(); - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { 
- labelStr = labelMap[labelIndex]; - } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); - } - - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); - - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } - - return 0; -} \ No newline at end of file diff --git a/examples/src/tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp new file mode 100644 index 0000000000..0221f1c7d6 --- /dev/null +++ b/examples/src/tiny_yolo_v4_device_side_decoding.cpp @@ -0,0 +1,159 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +/* +The code is the same as for Tiny-yolo-V3, the only difference is the blob file. 
+The blob was compiled following this tutorial: https://github.com/TNTWEN/OpenVINO-YOLOV4 +*/ + +static const std::vector labelMap = { + "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", + "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", + "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", + "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", + "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", + "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", + "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", + "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; + +static std::atomic syncNN{true}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("detections"); + + // Properties + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); + + // Network specific settings + 
detectionNetwork->setConfidenceThreshold(0.5f); + detectionNetwork->setNumClasses(80); + detectionNetwork->setCoordinateSize(4); + detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); + detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); + detectionNetwork->setIouThreshold(0.5f); + detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setNumInferenceThreads(2); + detectionNetwork->input.setBlocking(false); + + // Linking + camRgb->preview.link(detectionNetwork->input); + if(syncNN) { + detectionNetwork->passthrough.link(xoutRgb->input); + } else { + camRgb->preview.link(xoutRgb->input); + } + + detectionNetwork->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("detections", 4, false); + + cv::Mat frame; + std::vector detections; + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + auto color2 = cv::Scalar(255, 255, 255); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); + std::stringstream confStr; + confStr << std::fixed << 
std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + while(true) { + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = qDet->tryGet(); + } + + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/utility.cpp b/examples/src/utility.cpp index 64cfe80df1..16fd91d741 100644 --- a/examples/src/utility.cpp +++ b/examples/src/utility.cpp @@ -3,8 +3,30 @@ // libraries #include "fp16/fp16.h" +#include "errno.h" + +#if (defined(_WIN32) || defined(_WIN64)) +#include +#include +#else +#include +#endif + +int createDirectory(std::string directory) +{ + int ret = 0; +#if (defined(_WIN32) || defined(_WIN64)) + ret = _mkdir(directory.c_str()); +#else + ret = mkdir(directory.c_str(), 0777); +#endif + if (ret == EEXIST) ret = 0; + return ret; +} + + cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, int bpp){ - + cv::Mat frame; if(numPlanes == 3){ @@ -15,15 +37,15 @@ cv::Mat toMat(const std::vector& data, int w, int h , int 
numPlanes, in uint8_t b = data.data()[i + w*h * 0]; frame.data[i*3+0] = b; } - for(int i = 0; i < w*h; i++) { - uint8_t g = data.data()[i + w*h * 1]; + for(int i = 0; i < w*h; i++) { + uint8_t g = data.data()[i + w*h * 1]; frame.data[i*3+1] = g; } for(int i = 0; i < w*h; i++) { uint8_t r = data.data()[i + w*h * 2]; frame.data[i*3+2] = r; } - + } else { if(bpp == 3){ frame = cv::Mat(h, w, CV_8UC3); @@ -31,26 +53,26 @@ cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, in uint8_t b,g,r; b = data.data()[i + 2]; g = data.data()[i + 1]; - r = data.data()[i + 0]; + r = data.data()[i + 0]; frame.at( (i/bpp) / w, (i/bpp) % w) = cv::Vec3b(b,g,r); } } else if(bpp == 6) { //first denormalize //dump - + frame = cv::Mat(h, w, CV_8UC3); for(int y = 0; y < h; y++){ for(int x = 0; x < w; x++){ - const uint16_t* fp16 = (const uint16_t*) (data.data() + (y*w+x)*bpp); + const uint16_t* fp16 = (const uint16_t*) (data.data() + (y*w+x)*bpp); uint8_t r = (uint8_t) (fp16_ieee_to_fp32_value(fp16[0]) * 255.0f); uint8_t g = (uint8_t) (fp16_ieee_to_fp32_value(fp16[1]) * 255.0f); uint8_t b = (uint8_t) (fp16_ieee_to_fp32_value(fp16[2]) * 255.0f); frame.at(y, x) = cv::Vec3b(b,g,r); } } - + } } diff --git a/examples/src/utility.hpp b/examples/src/utility.hpp index 9f0d758070..10f6facdec 100644 --- a/examples/src/utility.hpp +++ b/examples/src/utility.hpp @@ -5,3 +5,4 @@ cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, int bpp); void toPlanar(cv::Mat& bgr, std::vector& data); cv::Mat resizeKeepAspectRatio(const cv::Mat &input, const cv::Size &dstSize, const cv::Scalar &bgcolor); +int createDirectory(std::string directory); diff --git a/examples/src/video_mobilenet.cpp b/examples/src/video_mobilenet.cpp new file mode 100644 index 0000000000..a4ce91420b --- /dev/null +++ b/examples/src/video_mobilenet.cpp @@ -0,0 +1,112 @@ +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include 
"depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + std::string videoPath(VIDEO_PATH); + + // If path to blob specified, use that + if(argc > 2) { + nnPath = std::string(argv[1]); + videoPath = std::string(argv[2]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + printf("Using video at path: %s\n", videoPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source and outputs + auto nn = pipeline.create(); + + auto xinFrame = pipeline.create(); + auto nnOut = pipeline.create(); + + xinFrame->setStreamName("inFrame"); + nnOut->setStreamName("nn"); + + // Properties + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + // Linking + xinFrame->out.link(nn->input); + nn->out.link(nnOut->input); + + // Connect to device and start pipeline + dai::Device device(pipeline); + + // Input queue will be used to send video frames to the device. + auto qIn = device.getInputQueue("inFrame"); + // Output queue will be used to get nn data from the video frames. 
+ auto qDet = device.getOutputQueue("nn", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + cv::Mat frame; + cv::VideoCapture cap(videoPath); + + cv::namedWindow("inFrame", cv::WINDOW_NORMAL); + cv::resizeWindow("inFrame", 1280, 720); + std::cout << "Resize video window with mouse drag!" 
<< std::endl; + + while(cap.isOpened()) { + // Read frame from video + cap >> frame; + auto tensor = std::make_shared(); + + frame = resizeKeepAspectRatio(frame, cv::Size(300, 300), cv::Scalar(0)); + + toPlanar(frame, tensor->data); + + qIn->send(tensor); + + auto inDet = qDet->get(); + auto detections = inDet->detections; + + displayFrame("inFrame", frame, detections); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') return 0; + } + return 0; +} diff --git a/examples/src/webcam_mobilenet_example.cpp b/examples/src/webcam_mobilenet_example.cpp index 3a517ce7ad..a32dd4c1fa 100644 --- a/examples/src/webcam_mobilenet_example.cpp +++ b/examples/src/webcam_mobilenet_example.cpp @@ -1,5 +1,4 @@ -#include #include #include "utility.hpp" @@ -25,37 +24,38 @@ int main(int argc, char** argv) { using namespace std; - // CREATE PIPELINE - dai::Pipeline p; + // Create pipeline + dai::Pipeline pipeline; - auto xin = p.create(); - auto nn = p.create(); - auto xout = p.create(); + // Define sources and outputs + auto nn = pipeline.create(); + auto xin = pipeline.create(); + auto xout = pipeline.create(); + + xin->setStreamName("nn_in"); + xout->setStreamName("nn_out"); // Properties nn->setBlobPath(nnPath); - xin->setStreamName("nn_in"); xin->setMaxDataSize(300 * 300 * 3); xin->setNumFrames(4); - xout->setStreamName("nn_out"); - - // Link plugins XLINK -> NN -> XLINK + // Linking xin->out.link(nn->input); nn->out.link(xout->input); // Open Webcam cv::VideoCapture webcam(camId); - // Connect to device with above created pipeline - dai::Device d(p); + // Connect to device and start pipeline + dai::Device device(pipeline); cv::Mat frame; - auto in = d.getInputQueue("nn_in"); - auto detections = d.getOutputQueue("nn_out"); + auto in = device.getInputQueue("nn_in"); + auto detections = device.getOutputQueue("nn_out"); - while(1) { + while(true) { // data to send further auto tensor = std::make_shared(); @@ -120,11 +120,11 @@ int main(int argc, char** argv) { } 
cv::imshow("preview", frame); + int key = cv::waitKey(1); if(key == 'q') { return 0; } } - return 0; } diff --git a/include/depthai/common/CameraBoardSocket.hpp b/include/depthai/common/CameraBoardSocket.hpp new file mode 100644 index 0000000000..b27bedd0b4 --- /dev/null +++ b/include/depthai/common/CameraBoardSocket.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include "depthai-shared/common/CameraBoardSocket.hpp" + +namespace dai { + +inline std::ostream& operator<<(std::ostream& out, const CameraBoardSocket& socket) { + switch(socket) { + case CameraBoardSocket::AUTO: + out << "AUTO"; + break; + case CameraBoardSocket::RGB: + out << "RGB"; + break; + case CameraBoardSocket::LEFT: + out << "LEFT"; + break; + case CameraBoardSocket::RIGHT: + out << "RIGHT"; + break; + } + return out; +} + +} // namespace dai \ No newline at end of file diff --git a/include/depthai/common/UsbSpeed.hpp b/include/depthai/common/UsbSpeed.hpp new file mode 100644 index 0000000000..59ccdcb5ef --- /dev/null +++ b/include/depthai/common/UsbSpeed.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "depthai-shared/common/UsbSpeed.hpp" + +namespace dai { + +inline std::ostream& operator<<(std::ostream& out, const UsbSpeed& speed) { + switch(speed) { + case UsbSpeed::UNKNOWN: + out << "UNKNOWN"; + break; + case UsbSpeed::LOW: + out << "LOW"; + break; + case UsbSpeed::FULL: + out << "FULL"; + break; + case UsbSpeed::HIGH: + out << "HIGH"; + break; + case UsbSpeed::SUPER: + out << "SUPER"; + break; + case UsbSpeed::SUPER_PLUS: + out << "SUPER_PLUS"; + break; + } + return out; +} + +} // namespace dai diff --git a/include/depthai/device/Device.hpp b/include/depthai/device/Device.hpp index aa79d241d6..e63a95f592 100644 --- a/include/depthai/device/Device.hpp +++ b/include/depthai/device/Device.hpp @@ -10,13 +10,14 @@ // project #include "CallbackHandler.hpp" #include "DataQueue.hpp" +#include "depthai/common/CameraBoardSocket.hpp" +#include "depthai/common/UsbSpeed.hpp" #include 
"depthai/pipeline/Pipeline.hpp" #include "depthai/utility/Pimpl.hpp" #include "depthai/xlink/XLinkConnection.hpp" #include "depthai/xlink/XLinkStream.hpp" // shared -#include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/ChipTemperature.hpp" #include "depthai-shared/common/CpuUsage.hpp" #include "depthai-shared/common/MemoryInfo.hpp" diff --git a/include/depthai/device/DeviceBootloader.hpp b/include/depthai/device/DeviceBootloader.hpp index c9f5827c05..f4e5d04b42 100644 --- a/include/depthai/device/DeviceBootloader.hpp +++ b/include/depthai/device/DeviceBootloader.hpp @@ -1,7 +1,6 @@ #pragma once // std -#include #include #include #include @@ -9,7 +8,6 @@ // project #include "CallbackHandler.hpp" #include "DataQueue.hpp" -#include "Device.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/xlink/XLinkConnection.hpp" #include "depthai/xlink/XLinkStream.hpp" @@ -43,11 +41,6 @@ class DeviceBootloader { unsigned versionMajor, versionMinor, versionPatch; }; - struct Config { - Config() : timeout(3) {} - std::chrono::milliseconds timeout; - }; - // Static API /** * Searches for connected devices in either UNBOOTED or BOOTLOADER states and returns first available. @@ -67,7 +60,7 @@ class DeviceBootloader { * @param pathToCmd Optional path to custom device firmware * @returns Depthai application package */ - static std::vector createDepthaiApplicationPackage(Pipeline& pipeline, UsbSpeed maxUsbSpeed = Device::DEFAULT_USB_SPEED); + static std::vector createDepthaiApplicationPackage(Pipeline& pipeline, std::string pathToCmd = ""); /** * Saves application package to a file which can be flashed to depthai device. 
@@ -75,7 +68,7 @@ class DeviceBootloader { * @param pipeline Pipeline from which to create the application package * @param pathToCmd Optional path to custom device firmware */ - static void saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, UsbSpeed maxUsbSpeed = Device::DEFAULT_USB_SPEED); + static void saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, std::string pathToCmd = ""); /** * @returns Embedded bootloader version @@ -85,7 +78,7 @@ class DeviceBootloader { /** * @returns Embedded bootloader binary */ - static std::vector getEmbeddedBootloaderBinary(Config config); + static std::vector getEmbeddedBootloaderBinary(); // DeviceBootloader() = delete; @@ -128,7 +121,7 @@ class DeviceBootloader { * @param progressCallback Callback that sends back a value between 0..1 which signifies current flashing progress * @param path Optional parameter to custom bootloader to flash */ - std::tuple flashBootloader(std::function progressCallback, Config config = {}); + std::tuple flashBootloader(std::function progressCallback, std::string path = ""); /** * @returns Version of current running bootloader diff --git a/include/depthai/pipeline/node/MonoCamera.hpp b/include/depthai/pipeline/node/MonoCamera.hpp index 0978e55bfe..fe3b813146 100644 --- a/include/depthai/pipeline/node/MonoCamera.hpp +++ b/include/depthai/pipeline/node/MonoCamera.hpp @@ -2,10 +2,10 @@ #include +#include "depthai/common/CameraBoardSocket.hpp" #include "depthai/pipeline/Node.hpp" // shared -#include #include namespace dai { @@ -46,10 +46,17 @@ class MonoCamera : public Node { /** * Outputs ImgFrame message that carries RAW8 encoded (grayscale) frame data. * - * Suitable for use StereoDepth node + * Suitable for use StereoDepth node. Processed by ISP */ Output out{*this, "out", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}}; + /** + * Outputs ImgFrame message that carries RAW10-packed (MIPI CSI-2 format) frame data. 
+ * + * Captured directly from the camera sensor + */ + Output raw{*this, "raw", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}}; + /** * Specify which board socket to use * @param boardSocket Board socket to use diff --git a/include/depthai/pipeline/node/StereoDepth.hpp b/include/depthai/pipeline/node/StereoDepth.hpp index 407226092c..926139131b 100644 --- a/include/depthai/pipeline/node/StereoDepth.hpp +++ b/include/depthai/pipeline/node/StereoDepth.hpp @@ -155,8 +155,9 @@ class StereoDepth : public Node { * The mirroring is required to have a normal non-mirrored disparity/depth output. * * A side effect of this option is disparity alignment to the perspective of left or right input: - * - LR-check disabled: `false`: mapped to left and mirrored, `true`: mapped to right; - * - LR-check enabled: `false`: mapped to right, `true`: mapped to left, never mirrored. + * `false`: mapped to left and mirrored, `true`: mapped to right. + * With LR-check enabled, this option is ignored, none of the outputs are mirrored, + * and disparity is mapped to right. 
* * @param enable True for normal disparity/depth, otherwise mirrored */ diff --git a/shared/depthai-bootloader-shared b/shared/depthai-bootloader-shared index 8e9965a34a..c8779ca581 160000 --- a/shared/depthai-bootloader-shared +++ b/shared/depthai-bootloader-shared @@ -1 +1 @@ -Subproject commit 8e9965a34a2c8de02a151f111e2397657e2ddf52 +Subproject commit c8779ca581659803b6f3e465cc2bde7b22203be0 diff --git a/shared/depthai-shared b/shared/depthai-shared index e23302538d..d9c6390769 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit e23302538d867042e1ec6c2eb6ff723e7c4d538a +Subproject commit d9c6390769efa691ae9707f493fccc3247287428 diff --git a/shared/depthai-shared.cmake b/shared/depthai-shared.cmake index 3e2f95285e..39722c9b1b 100644 --- a/shared/depthai-shared.cmake +++ b/shared/depthai-shared.cmake @@ -4,6 +4,7 @@ set(DEPTHAI_SHARED_3RDPARTY_HEADERS_PATH "depthai-shared/3rdparty") set(DEPTHAI_SHARED_SOURCES ${DEPTHAI_SHARED_FOLDER}/src/datatype/DatatypeEnum.cpp + ${DEPTHAI_SHARED_FOLDER}/src/utility/Checksum.cpp ) set(DEPTHAI_SHARED_PUBLIC_INCLUDE @@ -21,7 +22,7 @@ set(DEPTHAI_SHARED_INCLUDE # Try retriving depthai-shared commit hash (if cloned and not sources only) find_package(Git) if(GIT_FOUND AND NOT DEPTHAI_DOWNLOADED_SOURCES) - + # Check that submodule is initialized and updated execute_process( COMMAND ${GIT_EXECUTABLE} submodule status ${DEPTHAI_SHARED_FOLDER} @@ -32,7 +33,7 @@ if(GIT_FOUND AND NOT DEPTHAI_DOWNLOADED_SOURCES) string(SUBSTRING ${statusCommit} 0 1 status) if(${status} STREQUAL "-") message(FATAL_ERROR "Submodule 'depthai-shared' not initialized/updated. 
Run 'git submodule update --init --recursive' first") - endif() + endif() # Get depthai-shared current commit execute_process( diff --git a/src/device/CallbackHandler.cpp b/src/device/CallbackHandler.cpp index cf2176929f..1e4fe30036 100644 --- a/src/device/CallbackHandler.cpp +++ b/src/device/CallbackHandler.cpp @@ -18,7 +18,7 @@ CallbackHandler::CallbackHandler(std::shared_ptr conn, t = std::thread([this, streamName]() { try { // open stream with 1B write size (no writing will happen here) - XLinkStream stream(*connection, streamName, XLINK_USB_BUFFER_MAX_SIZE); + XLinkStream stream(*connection, streamName, device::XLINK_USB_BUFFER_MAX_SIZE); while(running) { // read packet diff --git a/src/device/DataQueue.cpp b/src/device/DataQueue.cpp index eb648c1161..caa80dde5e 100644 --- a/src/device/DataQueue.cpp +++ b/src/device/DataQueue.cpp @@ -162,7 +162,7 @@ bool DataOutputQueue::removeCallback(int callbackId) { DataInputQueue::DataInputQueue(const std::shared_ptr& conn, const std::string& streamName, unsigned int maxSize, bool blocking) : queue(maxSize, blocking), name(streamName) { // open stream with default XLINK_USB_BUFFER_MAX_SIZE write size - XLinkStream stream(*conn, name, dai::XLINK_USB_BUFFER_MAX_SIZE); + XLinkStream stream(*conn, name, device::XLINK_USB_BUFFER_MAX_SIZE); writingThread = std::thread(std::bind( [this, conn](XLinkStream& stream) { diff --git a/src/device/Device.cpp b/src/device/Device.cpp index 641618b31a..74e80eba24 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -518,7 +518,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm deviceInfo.state = X_LINK_BOOTED; // prepare rpc for both attached and host controlled mode - rpcStream = std::unique_ptr(new XLinkStream(*connection, dai::XLINK_CHANNEL_MAIN_RPC, dai::XLINK_USB_BUFFER_MAX_SIZE)); + rpcStream = std::unique_ptr(new XLinkStream(*connection, device::XLINK_CHANNEL_MAIN_RPC, device::XLINK_USB_BUFFER_MAX_SIZE)); client = std::unique_ptr>( new 
nanorpc::core::client([this](nanorpc::core::type::buffer request) { @@ -548,7 +548,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm break; } // Ping with a period half of that of the watchdog timeout - std::this_thread::sleep_for(XLINK_WATCHDOG_TIMEOUT / 2); + std::this_thread::sleep_for(device::XLINK_WATCHDOG_TIMEOUT / 2); } // Watchdog ended. Useful for checking disconnects @@ -560,7 +560,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm using namespace std::chrono; try { - XLinkStream stream(*this->connection, XLINK_CHANNEL_TIMESYNC, 128); + XLinkStream stream(*this->connection, device::XLINK_CHANNEL_TIMESYNC, 128); Timestamp timestamp = {}; while(timesyncRunning) { // Block @@ -587,7 +587,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm using namespace std::chrono; std::vector messages; try { - XLinkStream stream(*this->connection, XLINK_CHANNEL_LOG, 128); + XLinkStream stream(*this->connection, device::XLINK_CHANNEL_LOG, 128); while(loggingRunning) { // Block auto log = stream.read(); @@ -1059,10 +1059,10 @@ bool Device::startPipeline(const Pipeline& pipeline) { // Transfer the whole assetStorage in a separate thread const std::string streamAssetStorage = "__stream_asset_storage"; std::thread t1([this, &streamAssetStorage, &assetStorage]() { - XLinkStream stream(*connection, streamAssetStorage, XLINK_USB_BUFFER_MAX_SIZE); + XLinkStream stream(*connection, streamAssetStorage, device::XLINK_USB_BUFFER_MAX_SIZE); int64_t offset = 0; do { - int64_t toTransfer = std::min(static_cast(XLINK_USB_BUFFER_MAX_SIZE), static_cast(assetStorage.size() - offset)); + int64_t toTransfer = std::min(static_cast(device::XLINK_USB_BUFFER_MAX_SIZE), static_cast(assetStorage.size() - offset)); stream.write(&assetStorage[offset], toTransfer); offset += toTransfer; } while(offset < static_cast(assetStorage.size())); diff --git a/src/device/DeviceBootloader.cpp 
b/src/device/DeviceBootloader.cpp index c9872fc160..bdd44f1399 100644 --- a/src/device/DeviceBootloader.cpp +++ b/src/device/DeviceBootloader.cpp @@ -51,21 +51,26 @@ std::vector DeviceBootloader::getAllAvailableDevices() { return availableDevices; } -std::vector DeviceBootloader::createDepthaiApplicationPackage(Pipeline& pipeline, UsbSpeed maxUsbSpeed) { +std::vector DeviceBootloader::createDepthaiApplicationPackage(Pipeline& pipeline, std::string pathToCmd) { // Serialize the pipeline PipelineSchema schema; Assets assets; std::vector assetStorage; pipeline.serialize(schema, assets, assetStorage); - // Prepare Device::Config - Device::Config cfg; - cfg.version = pipeline.getOpenVINOVersion(); - cfg.preboot = pipeline.getDevicePrebootConfig(); - cfg.preboot.maxUsbSpeed = maxUsbSpeed; + // Get openvino version + OpenVINO::Version version = pipeline.getOpenVINOVersion(); // Prepare device firmware - std::vector deviceFirmware = Resources::getInstance().getDeviceFirmware(cfg); + std::vector deviceFirmware; + if(pathToCmd != "") { + std::ifstream fwStream(pathToCmd, std::ios::binary); + if(!fwStream.is_open()) throw std::runtime_error("Cannot create application package, device firmware at path: " + pathToCmd + " doesn't exist"); + deviceFirmware = std::vector(std::istreambuf_iterator(fwStream), {}); + } else { + // TODO(themarpe) - specify OpenVINO version + deviceFirmware = Resources::getInstance().getDeviceFirmware(false, version); + } // Create msgpacks std::vector pipelineBinary, assetsBinary; @@ -152,11 +157,7 @@ void DeviceBootloader::init(bool embeddedMvcmd, const std::string& pathToMvcmd) if(deviceInfo.state == X_LINK_UNBOOTED) { // Unbooted device found, boot to BOOTLOADER and connect with XLinkConnection constructor if(embeddedMvcmd) { - Config cfg; - // Never timeout - cfg.timeout = std::chrono::milliseconds(-1); - - connection = std::make_shared(deviceInfo, getEmbeddedBootloaderBinary(cfg), X_LINK_BOOTLOADER); + connection = std::make_shared(deviceInfo, 
getEmbeddedBootloaderBinary(), X_LINK_BOOTLOADER); } else { connection = std::make_shared(deviceInfo, pathToMvcmd, X_LINK_BOOTLOADER); } @@ -273,8 +274,8 @@ std::tuple DeviceBootloader::flash(std::function return flashDepthaiApplicationPackage(progressCb, createDepthaiApplicationPackage(pipeline)); } -void DeviceBootloader::saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, UsbSpeed maxUsbSpeed) { - auto dap = createDepthaiApplicationPackage(pipeline, maxUsbSpeed); +void DeviceBootloader::saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, std::string pathToCmd) { + auto dap = createDepthaiApplicationPackage(pipeline, pathToCmd); std::ofstream outfile(path, std::ios::binary); outfile.write(reinterpret_cast(dap.data()), dap.size()); } @@ -318,8 +319,15 @@ std::tuple DeviceBootloader::flashDepthaiApplicationPackage(s return {result.success, result.errorMsg}; } -std::tuple DeviceBootloader::flashBootloader(std::function progressCb, DeviceBootloader::Config config) { - std::vector package = getEmbeddedBootloaderBinary(config); +std::tuple DeviceBootloader::flashBootloader(std::function progressCb, std::string path) { + std::vector package; + if(path != "") { + std::ifstream fwStream(path, std::ios::binary); + if(!fwStream.is_open()) throw std::runtime_error("Cannot flash bootloader, binary at path: " + path + " doesn't exist"); + package = std::vector(std::istreambuf_iterator(fwStream), {}); + } else { + package = getEmbeddedBootloaderBinary(); + } // get streamId streamId_t streamId = stream->getStreamId(); @@ -365,8 +373,8 @@ bool DeviceBootloader::isEmbeddedVersion() { return isEmbedded; } -std::vector DeviceBootloader::getEmbeddedBootloaderBinary(Config config) { - return Resources::getInstance().getBootloaderFirmware(config); +std::vector DeviceBootloader::getEmbeddedBootloaderBinary() { + return Resources::getInstance().getBootloaderFirmware(); } DeviceBootloader::Version::Version(const std::string& v) : versionMajor(0), 
versionMinor(0), versionPatch(0) { diff --git a/src/opencv/ImgFrame.cpp b/src/opencv/ImgFrame.cpp old mode 100755 new mode 100644 index 658f8aa309..0dfe6d5627 --- a/src/opencv/ImgFrame.cpp +++ b/src/opencv/ImgFrame.cpp @@ -2,6 +2,8 @@ #include +// #include "spdlog/spdlog.h" + namespace dai { void ImgFrame::setFrame(cv::Mat frame) { @@ -64,8 +66,13 @@ cv::Mat ImgFrame::getFrame(bool deepCopy) { // Check if enough data long requiredSize = CV_ELEM_SIZE(type) * size.area(); - if(static_cast(img.data.size()) < requiredSize) { - throw std::runtime_error("ImgFrame doesn't have enough data to encode specified frame. Maybe metadataOnly transfer was made?"); + long actualSize = static_cast(img.data.size()); + if(actualSize < requiredSize) { + throw std::runtime_error("ImgFrame doesn't have enough data to encode specified frame, required " + std::to_string(requiredSize) + ", actual " + + std::to_string(actualSize) + ". Maybe metadataOnly transfer was made?"); + } else if(actualSize > requiredSize) { + // FIXME doesn't build on Windows (multiple definitions during link) + // spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); } if(getWidth() <= 0 || getHeight() <= 0) { throw std::runtime_error("ImgFrame metadata not valid (width or height = 0)"); diff --git a/src/pipeline/node/MonoCamera.cpp b/src/pipeline/node/MonoCamera.cpp index 9191e117c4..32ee64483e 100644 --- a/src/pipeline/node/MonoCamera.cpp +++ b/src/pipeline/node/MonoCamera.cpp @@ -17,7 +17,7 @@ std::string MonoCamera::getName() const { } std::vector MonoCamera::getOutputs() { - return {out}; + return {out, raw}; } std::vector MonoCamera::getInputs() { diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index 071daaa537..be12cdf576 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -17,7 +17,8 @@ #include "spdlog/spdlog.h" // shared -#include "depthai-bootloader-shared/PrebootConfig.hpp" +#include "depthai-shared/device/PrebootConfig.hpp" 
+#include "depthai-shared/utility/Checksum.hpp" extern "C" { #include "bspatch/bspatch.h" @@ -31,7 +32,7 @@ CMRC_DECLARE(depthai); namespace dai { -static std::vector createPrebootHeader(const std::vector& payload); +static std::vector createPrebootHeader(const std::vector& payload, uint32_t magic1, uint32_t magic2); #ifdef DEPTHAI_RESOURCES_TAR_XZ @@ -150,7 +151,7 @@ std::vector Resources::getDeviceBinary(Device::Config config) { } // Prepend preboot config - auto prebootHeader = createPrebootHeader(nlohmann::json::to_msgpack(config.preboot)); + auto prebootHeader = createPrebootHeader(nlohmann::json::to_msgpack(config.preboot), PREBOOT_CONFIG_MAGIC1, PREBOOT_CONFIG_MAGIC2); depthaiBinary.insert(depthaiBinary.begin(), prebootHeader.begin(), prebootHeader.end()); // Return created firmware @@ -372,11 +373,7 @@ std::vector Resources::getDeviceFirmware(bool usb2Mode, OpenVINO:: return getDeviceFirmware(cfg); } -std::vector Resources::getBootloaderFirmware(DeviceBootloader::Config config) { - // Convert Config to PrebootConfig - bootloader::PrebootConfig prebootCfg; - prebootCfg.timeoutMs = config.timeout.count(); - +std::vector Resources::getBootloaderFirmware() { // Binaries are resource compiled #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES @@ -389,12 +386,6 @@ std::vector Resources::getBootloaderFirmware(DeviceBootloader::Con auto bootloaderBinary = fs.open(CMRC_DEPTHAI_BOOTLOADER_PATH); std::vector bootloaderFw{bootloaderBinary.begin(), bootloaderBinary.end()}; - // Create prebootCfg header - const uint8_t* pCfg = reinterpret_cast(&prebootCfg); - auto header = createPrebootHeader({pCfg, pCfg + sizeof(prebootCfg)}); - bootloaderFw.insert(bootloaderFw.begin(), header.begin(), header.end()); - - // Return final bootloader fw return bootloaderFw; #else @@ -403,8 +394,16 @@ std::vector Resources::getBootloaderFirmware(DeviceBootloader::Con #endif } -std::vector createPrebootHeader(const std::vector& payload) { - constexpr const std::uint8_t HEADER[] = {77, 65, 50, 120, 
0x8A, 0x00, 0x00, 0x00, 0x70}; +std::vector createPrebootHeader(const std::vector& payload, uint32_t magic1, uint32_t magic2) { + const std::uint8_t HEADER[] = {77, + 65, + 50, + 120, + 0x8A, + static_cast((magic1 >> 0) & 0xFF), + static_cast((magic1 >> 8) & 0xFF), + static_cast((magic1 >> 16) & 0xFF), + static_cast((magic1 >> 24) & 0xFF)}; // Store the constructed preboot information std::vector prebootHeader; @@ -413,8 +412,11 @@ std::vector createPrebootHeader(const std::vector& payloa prebootHeader.insert(prebootHeader.begin(), std::begin(HEADER), std::end(HEADER)); // Calculate size - std::size_t totalPayloadSize = payload.size() + sizeof(std::uint32_t); - std::size_t toAddBytes = 4 - (totalPayloadSize % 4); + std::size_t totalPayloadSize = payload.size() + sizeof(magic2) + sizeof(uint32_t) + sizeof(uint32_t); + std::size_t toAddBytes = 0; + if(totalPayloadSize % 4 != 0) { + toAddBytes = 4 - (totalPayloadSize % 4); + } std::size_t totalSize = totalPayloadSize + toAddBytes; std::size_t totalSizeWord = totalSize / 4; @@ -422,9 +424,14 @@ std::vector createPrebootHeader(const std::vector& payloa prebootHeader.push_back((totalSizeWord >> 0) & 0xFF); prebootHeader.push_back((totalSizeWord >> 8) & 0xFF); - // Write payload size (uint32_t LE) - for(int i = 0; i < 4; i++) { - prebootHeader.push_back((payload.size() >> (i * 8)) & 0xFF); + // Compute payload checksum + auto checksum = utility::checksum(payload.data(), payload.size()); + + // Write checksum & payload size as uint32_t LE + for(const auto& field : {magic2, checksum, static_cast(payload.size())}) { + for(int i = 0; i < 4; i++) { + prebootHeader.push_back((field >> (i * 8)) & 0xFF); + } } // Copy payload diff --git a/src/utility/Resources.hpp b/src/utility/Resources.hpp index b1e1e3756e..0270283469 100644 --- a/src/utility/Resources.hpp +++ b/src/utility/Resources.hpp @@ -33,7 +33,7 @@ class Resources { // Available resources std::vector getDeviceFirmware(bool usb2Mode, OpenVINO::Version version = 
OpenVINO::VERSION_2020_1); std::vector getDeviceFirmware(Device::Config config); - std::vector getBootloaderFirmware(DeviceBootloader::Config config); + std::vector getBootloaderFirmware(); }; diff --git a/tests/src/image_manip_node_test.cpp b/tests/src/image_manip_node_test.cpp index e5a303e3f3..2665ff332c 100644 --- a/tests/src/image_manip_node_test.cpp +++ b/tests/src/image_manip_node_test.cpp @@ -52,7 +52,7 @@ int main() { inFrame.getData().resize(originalWidth * originalHeight * 3); inFrame.setWidth(originalWidth); inFrame.setHeight(originalHeight); - inFrame.setType(dai::RawImgFrame::Type::RGB888p); + inFrame.setType(dai::ImgFrame::Type::RGB888p); // Send the frame in->send(inFrame); diff --git a/tests/src/neural_network_test.cpp b/tests/src/neural_network_test.cpp index 2f4bfbec1a..8a3879f66c 100644 --- a/tests/src/neural_network_test.cpp +++ b/tests/src/neural_network_test.cpp @@ -72,7 +72,7 @@ TEST_CASE("Neural network node data checks") { dai::ImgFrame frame; frame.setWidth(MOBILENET_WIDTH); frame.setHeight(MOBILENET_HEIGHT); - frame.setType(dai::RawImgFrame::Type::BGR888p); + frame.setType(dai::ImgFrame::Type::BGR888p); frame.setData(std::vector(MOBILENET_DATA_SIZE + i * 1024 * 10)); int msgIndex = 0; From 7dd586a30d337bf0041bb30d0add37d31e550961 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 28 May 2021 14:48:02 +0200 Subject: [PATCH 03/10] Updated example --- examples/src/rgb_preview.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/src/rgb_preview.cpp b/examples/src/rgb_preview.cpp index 911f7d1d95..eba6edca35 100644 --- a/examples/src/rgb_preview.cpp +++ b/examples/src/rgb_preview.cpp @@ -25,11 +25,10 @@ int main() { camRgb->preview.link(xoutRgb->input); // Connect to device and start pipeline - dai::Device device(pipeline); + dai::Device device(pipeline, dai::UsbSpeed::SUPER); cout << "Connected cameras: "; for(const auto& cam : device.getConnectedCameras()) { - cout << static_cast(cam) << " "; cout << cam << " "; 
} cout << endl; From 0df668c7cfc04059e8997f5c9afa786ca0b57354 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 31 May 2021 22:06:06 +0200 Subject: [PATCH 04/10] Removed deprecated OpenVINO versions --- CMakeLists.txt | 2 - cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- src/utility/Resources.cpp | 113 +++----------------- src/utility/Resources.hpp | 2 +- 4 files changed, 14 insertions(+), 105 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 135f2c6b86..b39c8df8f3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -358,8 +358,6 @@ target_compile_definitions(${TARGET_CORE_NAME} __PC__ # Add depthai-device version DEPTHAI_DEVICE_VERSION="${DEPTHAI_DEVICE_SIDE_COMMIT}" - # Add if using depthai device FWP - DEPTHAI_RESOURCES_TAR_XZ # Add depthai-bootloader version DEPTHAI_BOOTLOADER_VERSION="${DEPTHAI_BOOTLOADER_VERSION}" ) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index ef84e670f4..d288ac9a6e 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "67acb9bdda92ca767d26ce5a264740bb72524372") +set(DEPTHAI_DEVICE_SIDE_COMMIT "f5e364878a118dcd3b9e8a08878a96923a29ee18") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index be12cdf576..a1e9d2a33b 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -34,17 +34,13 @@ namespace dai { static std::vector createPrebootHeader(const std::vector& payload, uint32_t magic1, uint32_t magic2); -#ifdef DEPTHAI_RESOURCES_TAR_XZ - constexpr static auto CMRC_DEPTHAI_DEVICE_TAR_XZ = "depthai-device-fwp-" DEPTHAI_DEVICE_VERSION ".tar.xz"; // Main FW constexpr static auto DEPTHAI_CMD_OPENVINO_2021_3_PATH = "depthai-device-openvino-2021.3-" DEPTHAI_DEVICE_VERSION ".cmd"; // Patches 
from Main FW -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH = "depthai-device-openvino-2020.1-" DEPTHAI_DEVICE_VERSION ".patch"; constexpr static auto DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH = "depthai-device-openvino-2020.3-" DEPTHAI_DEVICE_VERSION ".patch"; -constexpr static auto DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH = DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH; constexpr static auto DEPTHAI_CMD_OPENVINO_2020_4_PATCH_PATH = "depthai-device-openvino-2020.4-" DEPTHAI_DEVICE_VERSION ".patch"; constexpr static auto DEPTHAI_CMD_OPENVINO_2021_1_PATCH_PATH = "depthai-device-openvino-2021.1-" DEPTHAI_DEVICE_VERSION ".patch"; constexpr static auto DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH = "depthai-device-openvino-2021.2-" DEPTHAI_DEVICE_VERSION ".patch"; @@ -56,14 +52,16 @@ static constexpr auto array_of(T&&... t) -> std::array { } constexpr static auto resourcesListTarXz = array_of(DEPTHAI_CMD_OPENVINO_2021_3_PATH, - DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH, DEPTHAI_CMD_OPENVINO_2020_3_PATCH_PATH, - DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH, DEPTHAI_CMD_OPENVINO_2020_4_PATCH_PATH, DEPTHAI_CMD_OPENVINO_2021_1_PATCH_PATH, DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH); std::vector Resources::getDeviceBinary(Device::Config config) { + // Acquire mutex (this mutex signifies that lazy load is complete) + // It is necessary when accessing resourceMap variable + std::unique_lock lock(mtx); + std::vector depthaiBinary; // Get OpenVINO version @@ -82,25 +80,21 @@ std::vector Resources::getDeviceBinary(Device::Config config) { depthaiBinary = std::vector(std::istreambuf_iterator(stream), {}); } else { - // Binaries are resource compiled - #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES +// Binaries are resource compiled +#ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES // Temporary binary std::vector tmpDepthaiBinary; // Main FW depthaiBinary = resourceMap[DEPTHAI_CMD_OPENVINO_2021_3_PATH]; // Patch from main to specified - std::vector& depthaiPatch = 
resourceMap[DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH]; + std::vector depthaiPatch; switch(version) { case OpenVINO::VERSION_2020_1: - spdlog::warn("OpenVino version 2020.1 is deprecated and will be removed in the next release!"); - depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_1_PATCH_PATH]; - break; - case OpenVINO::VERSION_2020_2: - spdlog::warn("OpenVino version 2020.2 is deprecated and will be removed in the next release!"); - depthaiPatch = resourceMap[DEPTHAI_CMD_OPENVINO_2020_2_PATCH_PATH]; + // Deprecated + throw std::invalid_argument("Selected OpenVINO version is deprecated"); break; case OpenVINO::VERSION_2020_3: @@ -144,10 +138,10 @@ std::vector Resources::getDeviceBinary(Device::Config config) { depthaiBinary = tmpDepthaiBinary; } - #else - // Binaries from default path (TODO) +#else + // Binaries from default path (TODO) - #endif +#endif } // Prepend preboot config @@ -158,80 +152,12 @@ std::vector Resources::getDeviceBinary(Device::Config config) { return depthaiBinary; } -#else -// TODO - DEPRECATE - -constexpr static auto CMRC_DEPTHAI_CMD_PATH = "depthai-" DEPTHAI_DEVICE_VERSION ".cmd"; - #ifdef DEPTHAI_PATCH_ONLY_MODE -constexpr static auto CMRC_DEPTHAI_USB2_PATCH_PATH = "depthai-usb2-patch-" DEPTHAI_DEVICE_VERSION ".patch"; - #else -constexpr static auto CMRC_DEPTHAI_USB2_CMD_PATH = "depthai-usb2-" DEPTHAI_DEVICE_VERSION ".cmd"; - #endif - -static std::vector getEmbeddedDeviceBinary(bool usb2Mode) { - std::vector finalCmd; - - // Binaries are resource compiled - #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES - - // Get binaries from internal sources - auto fs = cmrc::depthai::get_filesystem(); - - if(usb2Mode) { - #ifdef DEPTHAI_PATCH_ONLY_MODE - - // Get size of original - auto depthaiBinary = fs.open(CMRC_DEPTHAI_CMD_PATH); - - // Open patch - auto depthaiUsb2Patch = fs.open(CMRC_DEPTHAI_USB2_PATCH_PATH); - - // Get new size - int64_t patchedSize = bspatch_mem_get_newsize(reinterpret_cast(depthaiUsb2Patch.begin()), depthaiUsb2Patch.size()); - - 
// Reserve space for patched binary - finalCmd.resize(patchedSize); - - // Patch - int error = bspatch_mem(reinterpret_cast(depthaiBinary.begin()), - depthaiBinary.size(), - reinterpret_cast(depthaiUsb2Patch.begin()), - depthaiUsb2Patch.size(), - finalCmd.data()); - - // if patch not successful - if(error > 0) throw std::runtime_error("Error while patching cmd for usb2 mode"); - - #else - - auto depthaiUsb2Binary = fs.open(CMRC_DEPTHAI_USB2_CMD_PATH); - finalCmd = std::vector(depthaiUsb2Binary.begin(), depthaiUsb2Binary.end()); - - #endif - - } else { - auto depthaiBinary = fs.open(CMRC_DEPTHAI_CMD_PATH); - finalCmd = std::vector(depthaiBinary.begin(), depthaiBinary.end()); - } - - #else - // Binaries from default path (TODO) - - #endif - - return finalCmd; -} - -#endif - Resources& Resources::getInstance() { static Resources instance; // Guaranteed to be destroyed, instantiated on first use. return instance; } Resources::Resources() { -#ifdef DEPTHAI_RESOURCES_TAR_XZ - // condition variable to let this thread know when the mutex was acquired std::mutex mtxCv; std::condition_variable cv; @@ -333,8 +259,6 @@ Resources::Resources() { // Wait for 'cv' to signal std::unique_lock l(mtxCv); cv.wait(l, [&mutexAcquired]() { return mutexAcquired; }); - -#endif } Resources::~Resources() { @@ -343,21 +267,8 @@ Resources::~Resources() { } std::vector Resources::getDeviceFirmware(Device::Config config) { - // Acquire mutex (this mutex signifies that lazy load is complete) - // It is necessary when accessing resourceMap variable - std::unique_lock lock(mtx); - -#ifdef DEPTHAI_RESOURCES_TAR_XZ - // Return device firmware return getDeviceBinary(config); - -#else - - // Return device firmware - return getEmbeddedDeviceBinary(config); - -#endif } // Get device firmware diff --git a/src/utility/Resources.hpp b/src/utility/Resources.hpp index 0270283469..fc5d705b4a 100644 --- a/src/utility/Resources.hpp +++ b/src/utility/Resources.hpp @@ -31,7 +31,7 @@ class Resources { void 
operator=(Resources const&) = delete; // Available resources - std::vector getDeviceFirmware(bool usb2Mode, OpenVINO::Version version = OpenVINO::VERSION_2020_1); + std::vector getDeviceFirmware(bool usb2Mode, OpenVINO::Version version = Pipeline::DEFAULT_OPENVINO_VERSION); std::vector getDeviceFirmware(Device::Config config); std::vector getBootloaderFirmware(); From e37fd321bc617b4d48629f7525db5c76bd115efc Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 22 Jul 2021 12:25:48 +0200 Subject: [PATCH 05/10] Updated preboot and added watchdog configuration --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/device/Device.hpp | 10 +- shared/depthai-shared | 2 +- src/device/Device.cpp | 113 ++++++++++---------- src/utility/Resources.cpp | 33 +++--- src/utility/Resources.hpp | 2 +- 6 files changed, 86 insertions(+), 76 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index ca6442a075..abe061b719 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "9cee56147e74ce7f0e070f938be91063c219786a") +set(DEPTHAI_DEVICE_SIDE_COMMIT "f7fedcfc1f8af380c6181f6daca5b569688bc508") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/device/Device.hpp b/include/depthai/device/Device.hpp index 4189adad68..f482bbe518 100644 --- a/include/depthai/device/Device.hpp +++ b/include/depthai/device/Device.hpp @@ -524,11 +524,11 @@ class Device { private: // private - void init(OpenVINO::Version version, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd); - void init(const Pipeline& pipeline, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd); - void init(OpenVINO::Version version, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); 
- void init(const Pipeline& pipeline, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); - void init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcmd, tl::optional pipeline); + void init(OpenVINO::Version version, bool usb2Mode, const std::string& pathToMvcmd); + void init(const Pipeline& pipeline, bool usb2Mode, const std::string& pathToMvcmd); + void init(OpenVINO::Version version, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); + void init(const Pipeline& pipeline, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd); + void init2(Config cfg, const std::string& pathToMvcmd, tl::optional pipeline); void checkClosed() const; std::shared_ptr connection; diff --git a/shared/depthai-shared b/shared/depthai-shared index f3b2c82f24..47be3e59e6 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit f3b2c82f24908931de1e403ae754c2238c35dfdf +Subproject commit 47be3e59e62e3c37ea2421892dc02c0a9349fdea diff --git a/src/device/Device.cpp b/src/device/Device.cpp index a6ea80129f..0c7f4d2a91 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -27,6 +27,7 @@ // libraries #include "nanorpc/core/client.h" #include "nanorpc/packer/nlohmann_msgpack.h" +#include "spdlog/details/os.h" #include "spdlog/fmt/chrono.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" @@ -221,19 +222,19 @@ LogLevel Device::Impl::getLogLevel() { /////////////////////////////////////////////// Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, bool usb2Mode) : deviceInfo(devInfo) { - init(pipeline, true, usb2Mode, ""); + init(pipeline, usb2Mode, ""); } Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, UsbSpeed maxUsbSpeed) : deviceInfo(devInfo) { - init(pipeline, true, maxUsbSpeed, ""); + init(pipeline, maxUsbSpeed, ""); } Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const char* pathToCmd) : deviceInfo(devInfo) { - init(pipeline, 
false, false, std::string(pathToCmd)); + init(pipeline, false, std::string(pathToCmd)); } Device::Device(const Pipeline& pipeline, const DeviceInfo& devInfo, const std::string& pathToCmd) : deviceInfo(devInfo) { - init(pipeline, false, false, pathToCmd); + init(pipeline, false, pathToCmd); } Device::Device(const Pipeline& pipeline) { @@ -244,7 +245,7 @@ Device::Device(const Pipeline& pipeline) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(pipeline, true, false, ""); + init(pipeline, false, ""); } Device::Device(const Pipeline& pipeline, const char* pathToCmd) { @@ -255,7 +256,7 @@ Device::Device(const Pipeline& pipeline, const char* pathToCmd) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(pipeline, false, false, std::string(pathToCmd)); + init(pipeline, false, std::string(pathToCmd)); } Device::Device(const Pipeline& pipeline, const std::string& pathToCmd) { @@ -265,7 +266,7 @@ Device::Device(const Pipeline& pipeline, const std::string& pathToCmd) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(pipeline, false, false, pathToCmd); + init(pipeline, false, pathToCmd); } Device::Device(const Pipeline& pipeline, UsbSpeed maxUsbSpeed) { @@ -275,7 +276,7 @@ Device::Device(const Pipeline& pipeline, UsbSpeed maxUsbSpeed) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(pipeline, true, maxUsbSpeed, ""); + init(pipeline, maxUsbSpeed, ""); } Device::Device(const Pipeline& pipeline, bool usb2Mode) { @@ -285,23 +286,23 @@ Device::Device(const Pipeline& pipeline, bool usb2Mode) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(pipeline, true, usb2Mode, ""); + init(pipeline, usb2Mode, ""); } Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, bool usb2Mode) : deviceInfo(devInfo) { - init(version, true, usb2Mode, 
""); + init(version, usb2Mode, ""); } Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, UsbSpeed maxUsbSpeed) : deviceInfo(devInfo) { - init(version, true, maxUsbSpeed, ""); + init(version, maxUsbSpeed, ""); } Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, const char* pathToCmd) : deviceInfo(devInfo) { - init(version, false, false, std::string(pathToCmd)); + init(version, false, std::string(pathToCmd)); } Device::Device(OpenVINO::Version version, const DeviceInfo& devInfo, const std::string& pathToCmd) : deviceInfo(devInfo) { - init(version, false, false, pathToCmd); + init(version, false, pathToCmd); } Device::Device(OpenVINO::Version version) { @@ -312,7 +313,7 @@ Device::Device(OpenVINO::Version version) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(version, true, false, ""); + init(version, false, ""); } Device::Device(OpenVINO::Version version, const char* pathToCmd) { @@ -323,7 +324,7 @@ Device::Device(OpenVINO::Version version, const char* pathToCmd) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(version, false, false, std::string(pathToCmd)); + init(version, false, std::string(pathToCmd)); } Device::Device(OpenVINO::Version version, const std::string& pathToCmd) { @@ -333,7 +334,7 @@ Device::Device(OpenVINO::Version version, const std::string& pathToCmd) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(version, false, false, pathToCmd); + init(version, false, pathToCmd); } Device::Device(OpenVINO::Version version, bool usb2Mode) { @@ -343,7 +344,7 @@ Device::Device(OpenVINO::Version version, bool usb2Mode) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(version, true, usb2Mode, ""); + init(version, usb2Mode, ""); } Device::Device(OpenVINO::Version version, UsbSpeed maxUsbSpeed) { @@ -353,11 +354,12 @@ 
Device::Device(OpenVINO::Version version, UsbSpeed maxUsbSpeed) { // If no device found, throw if(!found) throw std::runtime_error("No available devices"); - init(version, true, maxUsbSpeed, ""); + init(version, maxUsbSpeed, ""); } Device::Device(const DeviceInfo& devInfo, Config config) { - init2(config, false, {}, {}); + deviceInfo = devInfo; + init2(config, {}, {}); } void Device::close() { @@ -415,40 +417,40 @@ Device::~Device() { close(); } -void Device::init(OpenVINO::Version version, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd) { +void Device::init(OpenVINO::Version version, bool usb2Mode, const std::string& pathToMvcmd) { Config cfg; // Specify usb speed - cfg.preboot.maxUsbSpeed = usb2Mode ? UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; + cfg.preboot.usb.maxSpeed = usb2Mode ? UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; // Specify the OpenVINO version cfg.version = version; - init2(cfg, embeddedMvcmd, pathToMvcmd, {}); + init2(cfg, pathToMvcmd, {}); } -void Device::init(const Pipeline& pipeline, bool embeddedMvcmd, bool usb2Mode, const std::string& pathToMvcmd) { +void Device::init(const Pipeline& pipeline, bool usb2Mode, const std::string& pathToMvcmd) { Config cfg; // Specify usb speed - cfg.preboot.maxUsbSpeed = usb2Mode ? UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; + cfg.preboot.usb.maxSpeed = usb2Mode ? 
UsbSpeed::HIGH : Device::DEFAULT_USB_SPEED; // Specify the OpenVINO version cfg.version = pipeline.getOpenVINOVersion(); - init2(cfg, embeddedMvcmd, pathToMvcmd, pipeline); + init2(cfg, pathToMvcmd, pipeline); } -void Device::init(OpenVINO::Version version, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { +void Device::init(OpenVINO::Version version, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { Config cfg; // Specify usb speed - cfg.preboot.maxUsbSpeed = maxUsbSpeed; + cfg.preboot.usb.maxSpeed = maxUsbSpeed; // Specify the OpenVINO version cfg.version = version; - init2(cfg, embeddedMvcmd, pathToMvcmd, {}); + init2(cfg, pathToMvcmd, {}); } -void Device::init(const Pipeline& pipeline, bool embeddedMvcmd, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { +void Device::init(const Pipeline& pipeline, UsbSpeed maxUsbSpeed, const std::string& pathToMvcmd) { Config cfg; // Specify usb speed - cfg.preboot.maxUsbSpeed = maxUsbSpeed; + cfg.preboot.usb.maxSpeed = maxUsbSpeed; // Specify the OpenVINO version cfg.version = pipeline.getOpenVINOVersion(); - init2(cfg, embeddedMvcmd, pathToMvcmd, pipeline); + init2(cfg, pathToMvcmd, pipeline); } -void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcmd, tl::optional pipeline) { +void Device::init2(Config cfg, const std::string& pathToMvcmd, tl::optional pipeline) { // Initalize depthai library if not already initialize(); @@ -464,18 +466,27 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm // Set logging pattern of device (device id + shared pattern) pimpl->setPattern(fmt::format("[{}] {}", deviceInfo.getMxId(), LOG_DEFAULT_PATTERN)); - // Get embedded mvcmd - std::vector embeddedFw = Resources::getInstance().getDeviceFirmware(config); + // Check if WD env var is set + std::chrono::milliseconds watchdogTimeout = device::XLINK_WATCHDOG_TIMEOUT; + auto watchdogMsStr = spdlog::details::os::getenv("DEPTHAI_WATCHDOG"); + 
if(!watchdogMsStr.empty()) { + // Try parsing the string as a number + try { + std::chrono::milliseconds watchdog{std::stoi(watchdogMsStr)}; + config.preboot.watchdogTimeoutMs = watchdog.count(); + watchdogTimeout = watchdog; + } catch(const std::invalid_argument& e) { + spdlog::warn("DEPTHAI_WATCHDOG value invalid: {}", e.what()); + } + } + + // Get embedded mvcmd or external with applied config + std::vector fwWithConfig = Resources::getInstance().getDeviceFirmware(config, pathToMvcmd); // Init device (if bootloader, handle correctly - issue USB boot command) if(deviceInfo.state == X_LINK_UNBOOTED) { // Unbooted device found, boot and connect with XLinkConnection constructor - if(embeddedMvcmd) { - connection = std::make_shared(deviceInfo, embeddedFw); - } else { - connection = std::make_shared(deviceInfo, pathToMvcmd); - } - + connection = std::make_shared(deviceInfo, fwWithConfig); } else if(deviceInfo.state == X_LINK_BOOTLOADER) { // Scope so bootloaderConnection is desctructed and XLink cleans its state { @@ -518,8 +529,8 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm if(version >= DeviceBootloader::Version(0, 0, 12)) { // Send request to boot firmware directly from bootloader dai::bootloader::request::BootMemory bootMemory; - bootMemory.totalSize = static_cast(embeddedFw.size()); - bootMemory.numPackets = ((static_cast(embeddedFw.size()) - 1) / bootloader::XLINK_STREAM_MAX_SIZE) + 1; + bootMemory.totalSize = static_cast(fwWithConfig.size()); + bootMemory.numPackets = ((static_cast(fwWithConfig.size()) - 1) / bootloader::XLINK_STREAM_MAX_SIZE) + 1; if(!sendBootloaderRequest(stream.getStreamId(), bootMemory)) { throw std::runtime_error("Error trying to connect to device"); } @@ -527,7 +538,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm using namespace std::chrono; auto t1 = steady_clock::now(); // After that send numPackets of data - stream.writeSplit(embeddedFw.data(), 
embeddedFw.size(), bootloader::XLINK_STREAM_MAX_SIZE); + stream.writeSplit(fwWithConfig.data(), fwWithConfig.size(), bootloader::XLINK_STREAM_MAX_SIZE); spdlog::debug( "Booting FW with Bootloader. Version {}, Time taken: {}", version.toString(), duration_cast(steady_clock::now() - t1)); @@ -559,19 +570,11 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm } // Boot and connect with XLinkConnection constructor - if(embeddedMvcmd) { - connection = std::make_shared(deviceInfo, embeddedFw); - } else { - connection = std::make_shared(deviceInfo, pathToMvcmd); - } + connection = std::make_shared(deviceInfo, fwWithConfig); } else if(deviceInfo.state == X_LINK_BOOTED) { // Connect without booting - if(embeddedMvcmd) { - connection = std::make_shared(deviceInfo, embeddedFw); - } else { - connection = std::make_shared(deviceInfo, pathToMvcmd); - } + connection = std::make_shared(deviceInfo, fwWithConfig); } else { throw std::runtime_error("Cannot find any device with given deviceInfo"); } @@ -600,7 +603,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm }); // prepare watchdog thread, which will keep device alive - watchdogThread = std::thread([this]() { + watchdogThread = std::thread([this, watchdogTimeout]() { std::shared_ptr conn = this->connection; while(watchdogRunning) { try { @@ -609,7 +612,7 @@ void Device::init2(Config cfg, bool embeddedMvcmd, const std::string& pathToMvcm break; } // Ping with a period half of that of the watchdog timeout - std::this_thread::sleep_for(device::XLINK_WATCHDOG_TIMEOUT / 2); + std::this_thread::sleep_for(watchdogTimeout / 2); } // Watchdog ended. 
Useful for checking disconnects diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index 42502d2368..94e39c7926 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -61,7 +61,7 @@ constexpr static auto RESOURCE_LIST_DEVICE = array_of(DEPTHAI_CMD_O DEPTHAI_CMD_OPENVINO_2021_2_PATCH_PATH, DEPTHAI_CMD_OPENVINO_2021_3_PATCH_PATH); -std::vector Resources::getDeviceBinary(Device::Config config) { +std::vector Resources::getDeviceFirmware(Device::Config config, std::string pathToMvcmd) { // Acquire mutex (this mutex signifies that lazy load is complete) // It is necessary when accessing resourceMap variable std::unique_lock lock(mtxDevice); @@ -71,11 +71,20 @@ std::vector Resources::getDeviceBinary(Device::Config config) { // Get OpenVINO version auto& version = config.version; - // Check if env variable DEPTHAI_DEVICE_BINARY is set + // Check if pathToMvcmd variable is set + std::string finalFwBinaryPath = ""; + if(!pathToMvcmd.empty()) { + finalFwBinaryPath = pathToMvcmd; + } + // Override if env variable DEPTHAI_DEVICE_BINARY is set auto fwBinaryPath = spdlog::details::os::getenv("DEPTHAI_DEVICE_BINARY"); if(!fwBinaryPath.empty()) { + finalFwBinaryPath = fwBinaryPath; + } + // Return binary from file if any of above paths are present + if(!finalFwBinaryPath.empty()) { // Load binary file at path - std::ifstream stream(fwBinaryPath, std::ios::binary); + std::ifstream stream(finalFwBinaryPath, std::ios::binary); if(!stream.is_open()) { // Throw an error // TODO(themarpe) - Unify exceptions into meaningful groups @@ -88,9 +97,9 @@ std::vector Resources::getDeviceBinary(Device::Config config) { #ifdef DEPTHAI_RESOURCE_COMPILED_BINARIES // Main FW - std::vector depthaiBinary{}; + std::vector depthaiBinary; // Patch from main to specified - std::vector depthaiPatch{}; + std::vector depthaiPatch; switch(version) { case OpenVINO::VERSION_2020_3: @@ -134,7 +143,10 @@ std::vector Resources::getDeviceBinary(Device::Config config) { int error = 
bspatch_mem(depthaiBinary.data(), depthaiBinary.size(), depthaiPatch.data(), depthaiPatch.size(), tmpDepthaiBinary.data()); // if patch not successful - if(error > 0) throw std::runtime_error("Error while patching cmd for usb2 mode"); + if(error > 0) { + throw std::runtime_error(fmt::format( + "Error while patching OpenVINO FW version from {} to {}", OpenVINO::getVersionName(MAIN_FW_VERSION), OpenVINO::getVersionName(version))); + } // Change depthaiBinary to tmpDepthaiBinary depthaiBinary = std::move(tmpDepthaiBinary); @@ -326,18 +338,13 @@ Resources::~Resources() { if(lazyThreadBootloader.joinable()) lazyThreadBootloader.join(); } -std::vector Resources::getDeviceFirmware(Device::Config config) { - // Return device firmware - return getDeviceBinary(config); -} - // Get device firmware std::vector Resources::getDeviceFirmware(bool usb2Mode, OpenVINO::Version version) { Device::Config cfg; if(usb2Mode) { - cfg.preboot.maxUsbSpeed = UsbSpeed::HIGH; + cfg.preboot.usb.maxSpeed = UsbSpeed::HIGH; } else { - cfg.preboot.maxUsbSpeed = Device::DEFAULT_USB_SPEED; + cfg.preboot.usb.maxSpeed = Device::DEFAULT_USB_SPEED; } cfg.version = version; diff --git a/src/utility/Resources.hpp b/src/utility/Resources.hpp index a19c104317..77e8d888b9 100644 --- a/src/utility/Resources.hpp +++ b/src/utility/Resources.hpp @@ -36,7 +36,7 @@ class Resources { // Available resources std::vector getDeviceFirmware(bool usb2Mode, OpenVINO::Version version = Pipeline::DEFAULT_OPENVINO_VERSION); - std::vector getDeviceFirmware(Device::Config config); + std::vector getDeviceFirmware(Device::Config config, std::string pathToMvcmd = ""); std::vector getBootloaderFirmware(DeviceBootloader::Type type = DeviceBootloader::Type::USB); }; From 64f1aba871829c1cb49ae78e887f793c360b417a Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 22 Jul 2021 13:14:36 +0200 Subject: [PATCH 06/10] Modified watchdog to use a separate stream --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- shared/depthai-shared | 2 
+- src/device/Device.cpp | 19 +++++++++++-------- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index abe061b719..85a6c69c6d 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "f7fedcfc1f8af380c6181f6daca5b569688bc508") +set(DEPTHAI_DEVICE_SIDE_COMMIT "08bcc9966ca8df97aa4b0ccb45d03951fd4f1fbd") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/shared/depthai-shared b/shared/depthai-shared index 47be3e59e6..07ecb24a7f 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit 47be3e59e62e3c37ea2421892dc02c0a9349fdea +Subproject commit 07ecb24a7f57c64f82e43ed295712d7113d06eea diff --git a/src/device/Device.cpp b/src/device/Device.cpp index 0c7f4d2a91..7ad83b6846 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -603,16 +603,19 @@ void Device::init2(Config cfg, const std::string& pathToMvcmd, tl::optional conn = this->connection; - while(watchdogRunning) { - try { - pimpl->rpcClient->call("watchdogKeepalive"); - } catch(const std::exception&) { - break; + try { + XLinkStream stream(*this->connection, device::XLINK_CHANNEL_WATCHDOG, 128); + std::vector watchdogKeepalive = {0, 0, 0, 0}; + while(watchdogRunning) { + stream.write(watchdogKeepalive); + // Ping with a period half of that of the watchdog timeout + std::this_thread::sleep_for(watchdogTimeout / 2); } - // Ping with a period half of that of the watchdog timeout - std::this_thread::sleep_for(watchdogTimeout / 2); + } catch(const std::exception& ex) { + // ignore + spdlog::debug("Watchdog thread exception caught: {}", ex.what()); } // Watchdog ended. 
Useful for checking disconnects From d6d95e25d0188c1029be97cc59725fdcd455d021 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 22 Jul 2021 13:43:49 +0200 Subject: [PATCH 07/10] Fixed patching --- src/utility/Resources.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index 94e39c7926..912e1295ed 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -123,15 +123,17 @@ std::vector Resources::getDeviceFirmware(Device::Config config, st break; case MAIN_FW_VERSION: - spdlog::debug("resourceMapDevice[{}] size: {}", MAIN_FW_PATH, resourceMapDevice[MAIN_FW_PATH].size()); depthaiBinary = resourceMapDevice[MAIN_FW_PATH]; break; } // is patching required? - if(version != MAIN_FW_VERSION) { + if(!depthaiPatch.empty()) { spdlog::debug("Patching OpenVINO FW version from {} to {}", OpenVINO::getVersionName(MAIN_FW_VERSION), OpenVINO::getVersionName(version)); + // Load full binary for patch + depthaiBinary = resourceMapDevice[MAIN_FW_PATH]; + // Get new size int64_t patchedSize = bspatch_mem_get_newsize(depthaiPatch.data(), depthaiPatch.size()); From 23319c3a25cd7d5cc43541b81d41b0c1922d429d Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sat, 24 Jul 2021 22:35:27 +0200 Subject: [PATCH 08/10] Updated FW and a catch clause --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/pipeline/Pipeline.hpp | 2 +- src/device/Device.cpp | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 98420a121d..c9b2679cc6 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "ba0f81ce8fa357040a6bac4435468ec14f31c358") +set(DEPTHAI_DEVICE_SIDE_COMMIT 
"b5d2239124f9b66d056222676e9e845824329928") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 5e055012e7..153db114a0 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -250,7 +250,7 @@ class Pipeline { return impl()->isOpenVINOVersionCompatible(version); } - /// Checks whether a given OpenVINO version is compatible with the pipeline + /// Get device configuration needed for this pipeline Device::Config getDeviceConfig() const { return impl()->getDeviceConfig(); } diff --git a/src/device/Device.cpp b/src/device/Device.cpp index a1fc064ed8..ee5060472d 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -481,7 +481,8 @@ void Device::init2(Config cfg, const std::string& pathToMvcmd, tl::optional Date: Mon, 23 Aug 2021 20:37:54 +0200 Subject: [PATCH 09/10] Updated FW for UsbSpeed handling --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index c9b2679cc6..74430323ad 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "b5d2239124f9b66d056222676e9e845824329928") +set(DEPTHAI_DEVICE_SIDE_COMMIT "02668d20837db9d90cd66a9f47b5a4a8f359f9e7") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") From 8071ddb6c4b2228c48c6eca11d0c400c3b3081d4 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 23 Aug 2021 22:36:06 +0200 Subject: [PATCH 10/10] Fixed Super Speed mode and added a test --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- tests/CMakeLists.txt | 4 ++-- tests/src/device_usbspeed_test.cpp | 24 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 3 
deletions(-) create mode 100644 tests/src/device_usbspeed_test.cpp diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 74430323ad..708a9b0227 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "02668d20837db9d90cd66a9f47b5a4a8f359f9e7") +set(DEPTHAI_DEVICE_SIDE_COMMIT "3f22c4cf67ca3ec8dcf5e01fffdf9137f49aefbf") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 6b258c53bb..0dfd3c6278 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -99,5 +99,5 @@ target_compile_definitions(openvino_blob PRIVATE OPENVINO_2021_4_BLOB_PATH="${openvino_2021_4_blob}" ) - - +# Device USB Speed test +dai_add_test(device_usbspeed_test src/device_usbspeed_test.cpp) diff --git a/tests/src/device_usbspeed_test.cpp b/tests/src/device_usbspeed_test.cpp new file mode 100644 index 0000000000..7df9e61e21 --- /dev/null +++ b/tests/src/device_usbspeed_test.cpp @@ -0,0 +1,24 @@ +#define CATCH_CONFIG_MAIN +#include + +// std +#include +#include + +// Include depthai library +#include + +TEST_CASE("UsbSpeed::HIGH") { + dai::Pipeline p; + dai::Device d(p, dai::UsbSpeed::HIGH); +} + +TEST_CASE("UsbSpeed::SUPER") { + dai::Pipeline p; + dai::Device d(p, dai::UsbSpeed::SUPER); +} + +TEST_CASE("UsbSpeed::SUPER_PLUS") { + dai::Pipeline p; + dai::Device d(p, dai::UsbSpeed::SUPER_PLUS); +}