From 58dd163bd185b654914dbf05a18e38e8181ee04a Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 11 Oct 2017 14:18:54 +0200 Subject: [PATCH 01/49] Add .gitattributes for line-ending handling --- .gitattributes | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..a9280e7 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,15 @@ +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto + +# Explicitly declare text files you want to always be normalized and converted +# to native line endings on checkout. +*.c text +*.cpp text +*.h text + +# Declare files that will always have CRLF line endings on checkout. +*.sln text eol=crlf + +# Denote all files that are truly binary and should not be modified. +*.png binary +*.jpg binary From 34e7171ea79b7d073c7364c1f7d9513874949748 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 11 Oct 2017 14:19:25 +0200 Subject: [PATCH 02/49] Add backticks for core count --- install_track4k.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/install_track4k.sh b/install_track4k.sh index 1b9dc16..39066e4 100755 --- a/install_track4k.sh +++ b/install_track4k.sh @@ -2,10 +2,12 @@ echo "Installing TRACK4K" jFlag="-j" -numCores= cat /proc/cpuinfo | grep processor | wc -l +numCores=`cat /proc/cpuinfo | grep processor | wc -l` + +echo Cores: $numCores echo "STAGE 1/4: Removing previous builds..." -rm -r build +rm -rf build mkdir build cd build From 34475a7ff292cef390c55e8b22d04f311bf24917 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 11 Oct 2017 14:21:03 +0200 Subject: [PATCH 03/49] Normalize all the line endings --- .gitignore | 38 +- ECL2 LICENSE.txt | 28 +- source/CMakeLists.txt | 46 +- source/FileReader.cpp | 262 +-- source/FileReader.h | 174 +- source/MetaFrame.cpp | 152 +- source/MetaFrame.h | 100 +- source/PersistentData.cpp | 68 +- source/PersistentData.h | 128 +- source/mainDriver.cpp | 228 +- source/panning/PanLogic.cpp | 714 +++--- source/panning/PanLogic.h | 164 +- source/panning/PresenterMotion.cpp | 944 ++++---- source/panning/PresenterMotion.h | 154 +- source/panning/VirtualCinematographer.cpp | 214 +- source/panning/VirtualCinematographer.h | 70 +- source/segmentation/BoardDetection.cpp | 1086 ++++----- source/segmentation/BoardDetection.h | 336 +-- .../segmentation/IlluminationCorrection.cpp | 146 +- source/segmentation/IlluminationCorrection.h | 90 +- source/segmentation/MotionDetection.cpp | 292 +-- source/segmentation/MotionDetection.h | 120 +- source/segmentation/Track4KPreProcess.cpp | 152 +- source/segmentation/Track4KPreProcess.h | 82 +- source/tracking/Ghost.cpp | 298 +-- source/tracking/Ghost.h | 162 +- source/tracking/ImageRecognition.cpp | 152 +- source/tracking/ImageRecognition.h | 68 +- source/tracking/MovementDetection.cpp | 2022 ++++++++--------- source/tracking/MovementDetection.h | 366 +-- source/tracking/RecognitionDriver.cpp | 62 +- source/tracking/RecognitionDriver.h | 64 +- 32 files changed, 4491 insertions(+), 4491 deletions(-) diff --git a/.gitignore b/.gitignore index 3381182..34174f9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,19 @@ -Videos/ -images/ -.idea/ -build/ -*.flv -*.mp4 -*.mkv -*.avi -*.cbp -*.cmake -*.exe -cmake-build-debug/ -cmake/ -build/ -CMakeFiles/ -CMakeCache.txt -Makefile -boardCropCoordinates.txt -Track4K +Videos/ +images/ +.idea/ +build/ +*.flv +*.mp4 +*.mkv +*.avi +*.cbp +*.cmake +*.exe +cmake-build-debug/ +cmake/ 
+build/ +CMakeFiles/ +CMakeCache.txt +Makefile +boardCropCoordinates.txt +Track4K diff --git a/ECL2 LICENSE.txt b/ECL2 LICENSE.txt index 57f81ac..ddd7d4c 100644 --- a/ECL2 LICENSE.txt +++ b/ECL2 LICENSE.txt @@ -1,15 +1,15 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * */ \ No newline at end of file diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index 308ff13..5b00b47 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -1,23 +1,23 @@ -cmake_minimum_required(VERSION 3.5) -project(track4k) - -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") -#set(CMAKE_CXX_STANDARD 11) - -set(OpenCV_DIR "C:\\opencv\\build\\install") - -find_package(OpenCV REQUIRED) -include_directories(${OpenCV_INCLUDE_DIRS}) - - -set(SOURCE_FILES FileReader.cpp FileReader.h MetaFrame.cpp MetaFrame.h "segmentation/Track4KPreProcess.cpp" "segmentation/IlluminationCorrection.cpp" "segmentation/IlluminationCorrection.h" "segmentation/MotionDetection.cpp" "segmentation/MotionDetection.h" panning/VirtualCinematographer.cpp panning/VirtualCinematographer.h "segmentation/Track4KPreProcess.h" "segmentation/BoardDetection.cpp" "segmentation/BoardDetection.h" panning/PresenterMotion.cpp panning/PresenterMotion.h panning/PanLogic.cpp panning/PanLogic.h mainDriver.cpp PersistentData.h PersistentData.cpp tracking/RecognitionDriver.h tracking/RecognitionDriver.cpp - tracking/MovementDetection.h tracking/MovementDetection.cpp tracking/Ghost.cpp tracking/Ghost.h) -add_executable(track4k ${SOURCE_FILES}) - - -set(OpenCV_LIBS opencv_calib3d opencv_core opencv_features2d opencv_flann opencv_highgui opencv_imgcodecs opencv_imgproc opencv_ml opencv_objdetect opencv_photo opencv_shape opencv_stitching opencv_superres opencv_video opencv_videoio opencv_videostab opencv_xfeatures2d) - -target_link_libraries(track4k ${OpenCV_LIBS}) - - -install(TARGETS track4k DESTINATION bin) +cmake_minimum_required(VERSION 3.5) +project(track4k) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") +#set(CMAKE_CXX_STANDARD 11) + +set(OpenCV_DIR "C:\\opencv\\build\\install") + +find_package(OpenCV REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) + + +set(SOURCE_FILES FileReader.cpp FileReader.h MetaFrame.cpp MetaFrame.h "segmentation/Track4KPreProcess.cpp" 
"segmentation/IlluminationCorrection.cpp" "segmentation/IlluminationCorrection.h" "segmentation/MotionDetection.cpp" "segmentation/MotionDetection.h" panning/VirtualCinematographer.cpp panning/VirtualCinematographer.h "segmentation/Track4KPreProcess.h" "segmentation/BoardDetection.cpp" "segmentation/BoardDetection.h" panning/PresenterMotion.cpp panning/PresenterMotion.h panning/PanLogic.cpp panning/PanLogic.h mainDriver.cpp PersistentData.h PersistentData.cpp tracking/RecognitionDriver.h tracking/RecognitionDriver.cpp + tracking/MovementDetection.h tracking/MovementDetection.cpp tracking/Ghost.cpp tracking/Ghost.h) +add_executable(track4k ${SOURCE_FILES}) + + +set(OpenCV_LIBS opencv_calib3d opencv_core opencv_features2d opencv_flann opencv_highgui opencv_imgcodecs opencv_imgproc opencv_ml opencv_objdetect opencv_photo opencv_shape opencv_stitching opencv_superres opencv_video opencv_videoio opencv_videostab opencv_xfeatures2d) + +target_link_libraries(track4k ${OpenCV_LIBS}) + + +install(TARGETS track4k DESTINATION bin) diff --git a/source/FileReader.cpp b/source/FileReader.cpp index 497a202..8bb6f92 100644 --- a/source/FileReader.cpp +++ b/source/FileReader.cpp @@ -1,131 +1,131 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/14. 
-// - -#include "FileReader.h" -#include "opencv2/opencv.hpp" - -using namespace std; -using namespace cv; - -bool FileReader::readFile(std::string filename, PersistentData &pD) -{ - //read in video file - inputVideo = VideoCapture(filename); - if (!inputVideo.isOpened()) - { - cout << "Could not open the input video: " << filename << endl; - return -1; - } - - fps = inputVideo.get(CV_CAP_PROP_FPS); //Frame Rate - numFrames = inputVideo.get(CV_CAP_PROP_FRAME_COUNT); //Number of frames - - videoDuration = numFrames / fps; //Duration of video file in seconds - - ex = static_cast(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form - - // Transform from int to char via Bitwise operators - char EXT[] = {(char) (ex & 0XFF), (char) ((ex & 0XFF00) >> 8), (char) ((ex & 0XFF0000) >> 16), - (char) ((ex & 0XFF000000) >> 24), 0}; - - videoDimension = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size - (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); - - //Print out progress info - - cout << "Input frame resolution: Width=" << videoDimension.width << " Height=" << videoDimension.height - << " of nr#: " << numFrames << endl; - cout << "Input codec type: " << EXT << endl; - cout << "Video Duration (Seconds): " << videoDuration << endl; - cout << "FPS: " << fps << endl; - - //Set video file info - pD.setVideoInfo(fps, numFrames, videoDimension, ex); - - return 0; - -} - -//This method returns the next section (where @segSize is in seconds) -void FileReader::getNextSegment(int segSize, std::vector &frameVec) -{ - //Clear the vector - frameVec.clear(); - - //Calculate how many frames to read that will amount to segSize - int numFrames = fps * segSize; //This is the number of frames in segSize seconds - - cout << "Section is " << segSize << " seconds(s) long, which amounts to " << numFrames << " frames." << endl; - for (int i = 0; i < numFrames; i++) - { - - //read the current frame - if (!inputVideo.read(frame)) - { - cerr << "End of video file" << endl; - endOfFile = true; - break; //If end of video file - } - - MetaFrame tmp = MetaFrame(frame.clone()); - - //add frame to vector - frameVec.push_back(tmp); - - } -} - -void FileReader::getNextSegment(int segSize, std::vector &frameVec) -{ - //Clear the vector - frameVec.clear(); - - //cout << "Section is " << segSize << " frames." << endl; - for (int i = 0; i < segSize; i++) - { - //read the current frame - if (!inputVideo.read(frame)) - { - //cerr << "End of reading video file" << endl; - endOfFile = true; - break; //If end of video file - } - //add frame to vector - frameVec.push_back(move(frame)); - } -} - -void FileReader::getNextFrame(cv::Mat &frame) -{ - //read the current frame - if (!inputVideo.read(frame)) - { - cerr << "End of video file" << endl; - endOfFile = true; - } - -} - - -bool FileReader::isEndOfFile() -{ - return endOfFile; -} - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +// +// Created by Charles Fitzhenry on 2016/07/14. +// + +#include "FileReader.h" +#include "opencv2/opencv.hpp" + +using namespace std; +using namespace cv; + +bool FileReader::readFile(std::string filename, PersistentData &pD) +{ + //read in video file + inputVideo = VideoCapture(filename); + if (!inputVideo.isOpened()) + { + cout << "Could not open the input video: " << filename << endl; + return -1; + } + + fps = inputVideo.get(CV_CAP_PROP_FPS); //Frame Rate + numFrames = inputVideo.get(CV_CAP_PROP_FRAME_COUNT); //Number of frames + + videoDuration = numFrames / fps; //Duration of video file in seconds + + ex = static_cast(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form + + // Transform from int to char via Bitwise operators + char EXT[] = {(char) (ex & 0XFF), (char) ((ex & 0XFF00) >> 8), (char) ((ex & 0XFF0000) >> 16), + (char) ((ex & 0XFF000000) >> 24), 0}; + + videoDimension = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size + (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); + + //Print out progress info + + cout << "Input frame resolution: Width=" << videoDimension.width << " Height=" << videoDimension.height + << " of nr#: " << numFrames << endl; + cout << "Input codec type: " << EXT << endl; + cout << "Video Duration (Seconds): " << videoDuration << endl; + cout << "FPS: " << fps << endl; + + //Set video file info + pD.setVideoInfo(fps, numFrames, videoDimension, ex); + + return 0; + +} + +//This method returns the next section (where @segSize is in seconds) +void FileReader::getNextSegment(int segSize, std::vector &frameVec) +{ + //Clear the vector + frameVec.clear(); + + //Calculate how many frames to read that will amount to segSize + int numFrames = fps * segSize; //This is the number of frames in segSize seconds + + cout << "Section is " << segSize << " seconds(s) long, which amounts to " << numFrames << " frames." << endl; + for (int i = 0; i < numFrames; i++) + { + + //read the current frame + if (!inputVideo.read(frame)) + { + cerr << "End of video file" << endl; + endOfFile = true; + break; //If end of video file + } + + MetaFrame tmp = MetaFrame(frame.clone()); + + //add frame to vector + frameVec.push_back(tmp); + + } +} + +void FileReader::getNextSegment(int segSize, std::vector &frameVec) +{ + //Clear the vector + frameVec.clear(); + + //cout << "Section is " << segSize << " frames." << endl; + for (int i = 0; i < segSize; i++) + { + //read the current frame + if (!inputVideo.read(frame)) + { + //cerr << "End of reading video file" << endl; + endOfFile = true; + break; //If end of video file + } + //add frame to vector + frameVec.push_back(move(frame)); + } +} + +void FileReader::getNextFrame(cv::Mat &frame) +{ + //read the current frame + if (!inputVideo.read(frame)) + { + cerr << "End of video file" << endl; + endOfFile = true; + } + +} + + +bool FileReader::isEndOfFile() +{ + return endOfFile; +} + diff --git a/source/FileReader.h b/source/FileReader.h index 96b7e02..fcaffed 100644 --- a/source/FileReader.h +++ b/source/FileReader.h @@ -1,87 +1,87 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/15. -// - -#ifndef TRACK4K_PREPROCESS_H -#define TRACK4K_PREPROCESS_H - -#include "MetaFrame.h" -#include "opencv2/opencv.hpp" -#include "PersistentData.h" - -class FileReader -{ -private: - cv::VideoCapture inputVideo; - cv::Mat frame; //current frame - float fps; //Frame Rate - int numFrames; //Number of frames - int videoDuration; - cv::Size videoDimension; - int ex; - bool endOfFile = false; - -public: - bool isEndOfFile(); - - bool readFile(std::string filename, PersistentData &pD); - - void getNextSegment(int segSize, std::vector &frameVec); - - void getNextSegment(int segSize, std::vector &frameVec); - - void getNextFrame(cv::Mat &frame); - - cv::VideoCapture &getInputVideo() - { - return inputVideo; - } - - int getFps() const - { - return fps; - } - - int getNumFrames() const - { - return numFrames; - } - - int getVideoDuration() const - { - return videoDuration; - } - - cv::Mat &getFrame() - { - return frame; - } - - const cv::Size &getVideoDimension() const - { - return videoDimension; - } - - int getEx() const - { - return ex; - } -}; - -#endif //TRACK4K_PREPROCESS_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/15. 
+// + +#ifndef TRACK4K_PREPROCESS_H +#define TRACK4K_PREPROCESS_H + +#include "MetaFrame.h" +#include "opencv2/opencv.hpp" +#include "PersistentData.h" + +class FileReader +{ +private: + cv::VideoCapture inputVideo; + cv::Mat frame; //current frame + float fps; //Frame Rate + int numFrames; //Number of frames + int videoDuration; + cv::Size videoDimension; + int ex; + bool endOfFile = false; + +public: + bool isEndOfFile(); + + bool readFile(std::string filename, PersistentData &pD); + + void getNextSegment(int segSize, std::vector &frameVec); + + void getNextSegment(int segSize, std::vector &frameVec); + + void getNextFrame(cv::Mat &frame); + + cv::VideoCapture &getInputVideo() + { + return inputVideo; + } + + int getFps() const + { + return fps; + } + + int getNumFrames() const + { + return numFrames; + } + + int getVideoDuration() const + { + return videoDuration; + } + + cv::Mat &getFrame() + { + return frame; + } + + const cv::Size &getVideoDimension() const + { + return videoDimension; + } + + int getEx() const + { + return ex; + } +}; + +#endif //TRACK4K_PREPROCESS_H diff --git a/source/MetaFrame.cpp b/source/MetaFrame.cpp index aeb6511..4860211 100644 --- a/source/MetaFrame.cpp +++ b/source/MetaFrame.cpp @@ -1,76 +1,76 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/15. -// - -#include "MetaFrame.h" - -/* - * This file will contain all data that needs to pass through to the following stages. - * This data includes: - * 1) Video information such as: - * - Length (float) - * - Frame Rate (int) - * - Processing/Detection rate (e.g Every 60 frames) and this will be the interval of the metadata - * 2) Areas of motion - * - Stored as an array of rectangles - * - [rect(sx,sy,ex,ey);...] - * - * 3) Blackboard columns - * - Stored as a vector of rectangles - * - * 4) Blackboard column usage - * - Detect writing on blackboard - * - Used boards stored as index from 3 above - * - E.g [0,1,3] Index in rectangle vector - * - * 5) Grayscale Video - * - */ -MetaFrame::MetaFrame(cv::Mat frame) : colourFrame(cv::Mat(frame)) -{ - -} - -void MetaFrame::setColourFrame(cv::Mat cFrame) -{ - - colourFrame = cv::Mat(cFrame); -} - -void MetaFrame::setBoardUsage(bool hasBoardData, bool lB, bool rB, bool lP, bool rP) -{ - - hasBoardUsageData = hasBoardData; - leftBoard = lB; - rightBoard = rB; - leftProjector = lP; - rightProjector = rP; -} - -MetaFrame::MetaFrame(bool hasBoardData, bool lB, bool rB, bool lP, bool rP) -{ - hasBoardUsageData = hasBoardData; - leftBoard = lB; - rightBoard = rB; - leftProjector = lP; - rightProjector = rP; -} - - - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/15. +// + +#include "MetaFrame.h" + +/* + * This file will contain all data that needs to pass through to the following stages. + * This data includes: + * 1) Video information such as: + * - Length (float) + * - Frame Rate (int) + * - Processing/Detection rate (e.g Every 60 frames) and this will be the interval of the metadata + * 2) Areas of motion + * - Stored as an array of rectangles + * - [rect(sx,sy,ex,ey);...] + * + * 3) Blackboard columns + * - Stored as a vector of rectangles + * + * 4) Blackboard column usage + * - Detect writing on blackboard + * - Used boards stored as index from 3 above + * - E.g [0,1,3] Index in rectangle vector + * + * 5) Grayscale Video + * + */ +MetaFrame::MetaFrame(cv::Mat frame) : colourFrame(cv::Mat(frame)) +{ + +} + +void MetaFrame::setColourFrame(cv::Mat cFrame) +{ + + colourFrame = cv::Mat(cFrame); +} + +void MetaFrame::setBoardUsage(bool hasBoardData, bool lB, bool rB, bool lP, bool rP) +{ + + hasBoardUsageData = hasBoardData; + leftBoard = lB; + rightBoard = rB; + leftProjector = lP; + rightProjector = rP; +} + +MetaFrame::MetaFrame(bool hasBoardData, bool lB, bool rB, bool lP, bool rP) +{ + hasBoardUsageData = hasBoardData; + leftBoard = lB; + rightBoard = rB; + leftProjector = lP; + rightProjector = rP; +} + + + diff --git a/source/MetaFrame.h b/source/MetaFrame.h index bc800e8..e4d98de 100644 --- a/source/MetaFrame.h +++ b/source/MetaFrame.h @@ -1,50 +1,50 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/15. -// - -#ifndef TRACK4K_METADATA_H -#define TRACK4K_METADATA_H - -#include -#include "opencv2/opencv.hpp" - -class MetaFrame -{ - public: - cv::Mat colourFrame; - - //Board used - bool hasBoardUsageData = false; - bool leftBoard = false; - bool rightBoard = false; - bool rightProjector = false; - bool leftProjector = false; - - void setBoardUsage(bool hasBoardData, bool lB, bool rB, bool lP, bool rP); - - //Contructors - MetaFrame(cv::Mat frame); - - MetaFrame(bool hasBoardData, bool lB, bool rB, bool lP, bool rP); - - void setColourFrame(cv::Mat cFrame); - -}; - -#endif //TRACK4K_METADATA_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/15. +// + +#ifndef TRACK4K_METADATA_H +#define TRACK4K_METADATA_H + +#include +#include "opencv2/opencv.hpp" + +class MetaFrame +{ + public: + cv::Mat colourFrame; + + //Board used + bool hasBoardUsageData = false; + bool leftBoard = false; + bool rightBoard = false; + bool rightProjector = false; + bool leftProjector = false; + + void setBoardUsage(bool hasBoardData, bool lB, bool rB, bool lP, bool rP); + + //Contructors + MetaFrame(cv::Mat frame); + + MetaFrame(bool hasBoardData, bool lB, bool rB, bool lP, bool rP); + + void setColourFrame(cv::Mat cFrame); + +}; + +#endif //TRACK4K_METADATA_H diff --git a/source/PersistentData.cpp b/source/PersistentData.cpp index c9063b6..bb674af 100644 --- a/source/PersistentData.cpp +++ b/source/PersistentData.cpp @@ -1,34 +1,34 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/15. -// - -#include "PersistentData.h" - - -void PersistentData::setVideoInfo(float f, int t, cv::Size s, int ext) -{ - if(!videoInfoSet){ - fps = f; - totalFrames = t; - videoDimension = s; - ext_int = ext; - videoInfoSet = true; - } - -} +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/15. 
+// + +#include "PersistentData.h" + + +void PersistentData::setVideoInfo(float f, int t, cv::Size s, int ext) +{ + if(!videoInfoSet){ + fps = f; + totalFrames = t; + videoDimension = s; + ext_int = ext; + videoInfoSet = true; + } + +} diff --git a/source/PersistentData.h b/source/PersistentData.h index ba81b3b..4521dcc 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -1,64 +1,64 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/15. -// - -#ifndef TRACK4K_PERSISTENTDATA_H -#define TRACK4K_PERSISTENTDATA_H - -#include -#include "opencv2/opencv.hpp" -#include "MetaFrame.h" -#include "PersistentData.h" -#include "tracking/Ghost.h" - -class PersistentData -{ -private: - bool videoInfoSet = false; -public: - //Store the areas of motion. Each vector stores x frames worth of motion - int segmentationNumFramesToProcessPerIteration = 29; // number of frames that will be read into memory - std::vector areasOfMotion; - std::vector metaFrameVector; - - float fps; //Frame Rate - int totalFrames; //Number of frames - cv::Size videoDimension; - - void setVideoInfo(float f, int t, cv::Size s, int ext); - - std::string saveFileExtension = "mp4"; //Default save extension - std::string inputFileName = ""; - int ext_int; //The int version of the file extension - int codec; //Default codec for mp4 - cv::Size panOutputVideoSize = cv::Size(1280, 720); - std::string outputVideoFilenameSuffix = ""; - - vector lecturerTrackedLocationRectangles; - int skipFrameMovementDetection; - - int boardDetectionSkipFrames = 28*2; - - bool boardsFound = false; - cv::Rect boardCropRegion; - -}; - - -#endif //TRACK4K_PERSISTENTDATA_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/15. +// + +#ifndef TRACK4K_PERSISTENTDATA_H +#define TRACK4K_PERSISTENTDATA_H + +#include +#include "opencv2/opencv.hpp" +#include "MetaFrame.h" +#include "PersistentData.h" +#include "tracking/Ghost.h" + +class PersistentData +{ +private: + bool videoInfoSet = false; +public: + //Store the areas of motion. 
Each vector stores x frames worth of motion + int segmentationNumFramesToProcessPerIteration = 29; // number of frames that will be read into memory + std::vector areasOfMotion; + std::vector metaFrameVector; + + float fps; //Frame Rate + int totalFrames; //Number of frames + cv::Size videoDimension; + + void setVideoInfo(float f, int t, cv::Size s, int ext); + + std::string saveFileExtension = "mp4"; //Default save extension + std::string inputFileName = ""; + int ext_int; //The int version of the file extension + int codec; //Default codec for mp4 + cv::Size panOutputVideoSize = cv::Size(1280, 720); + std::string outputVideoFilenameSuffix = ""; + + vector lecturerTrackedLocationRectangles; + int skipFrameMovementDetection; + + int boardDetectionSkipFrames = 28*2; + + bool boardsFound = false; + cv::Rect boardCropRegion; + +}; + + +#endif //TRACK4K_PERSISTENTDATA_H diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index 58e70dd..681bbdf 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -1,114 +1,114 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/09/22. -// - -#include -#include "segmentation/Track4KPreProcess.h" -#include "panning/VirtualCinematographer.h" -#include "tracking/MovementDetection.h" - -using namespace std; - -int main(int argc, char *argv[]) { - - //Create object of persistent data to share between sections - PersistentData persistentData; - - //Store input parameters from the input command - string inputFilename = ""; - string outputFilename = ""; - string inputFileExtension = ""; - string outputFileExtension = ""; - int cropWidth = 0; - int cropHeight = 0; - cv::Size saveDimensions; - - //Check if input of command line parameters are valid - if (argc == 6) { - string codecInput = argv[5]; - persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); - - } else if (argc == 5) { - //Use default codec - persistentData.codec = CV_FOURCC('X', '2', '6', '4'); - - } else { - cerr - << "The number of parameters entered were incorrect. Expected track4k.exe [FOURCC Codec] \n See http://www.fourcc.org/codecs.php for available codecs. The default codec of X264 for mp4 will be used, if none is specified!" 
- << endl; - return -1; - } - - //Get filenames from the command line and store them - inputFilename = argv[1]; - outputFilename = argv[2]; - - //Extract the extensions from the filenames - inputFileExtension = inputFilename.substr(inputFilename.find_first_of('.') + 1); - outputFileExtension = outputFilename.substr(outputFilename.find_first_of('.') + 1); - - - //Extract the crop dimensions from the parameters - cropWidth = stoi(argv[3]); - cropHeight = stoi(argv[4]); - saveDimensions = cv::Size(cropWidth, cropHeight); - - - //Update this information in PersistantData - persistentData.inputFileName = inputFilename; - persistentData.outputVideoFilenameSuffix = outputFilename.substr(0,outputFilename.find_first_of('.')); - persistentData.saveFileExtension = outputFileExtension; - persistentData.panOutputVideoSize = saveDimensions; - - cout << "\n----------------------------------------" << endl; - cout << "Stage [1 of 3] - Board Segmentation" << endl; - cout << "----------------------------------------\n" << endl; - Track4KPreProcess pre; - pre.preProcessDriver(persistentData); - cout << "\nStage 1 Complete" << endl; - cout << "----------------------------------------\n" << endl; - - vector r; - cout << "\n----------------------------------------" << endl; - cout << "Stage [2 of 3] - Lecturer Tracking" << endl; - cout << "----------------------------------------\n" << endl; - MovementDetection move(persistentData.inputFileName, &r); - vector *rR = new vector(); - move.getLecturer(rR); - cout << "\nStage 2 Complete" << endl; - cout << "----------------------------------------\n" << endl; - - - for (int i = 0; i < rR->size(); i++) { - persistentData.lecturerTrackedLocationRectangles.push_back(std::move(rR->at(i))); - } - - persistentData.skipFrameMovementDetection = move.getFrameSkipReset(); - - - cout << "\n----------------------------------------" << endl; - cout << "Stage [3 of 3] - Virtual Cinematographer" << endl; - cout << "----------------------------------------\n" << endl; - VirtualCinematographer vc; - vc.cinematographerDriver(persistentData); - cout << "\nStage 3 Complete" << endl; - cout << "----------------------------------------\n" << endl; - - -} +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/09/22. 
+// + +#include +#include "segmentation/Track4KPreProcess.h" +#include "panning/VirtualCinematographer.h" +#include "tracking/MovementDetection.h" + +using namespace std; + +int main(int argc, char *argv[]) { + + //Create object of persistent data to share between sections + PersistentData persistentData; + + //Store input parameters from the input command + string inputFilename = ""; + string outputFilename = ""; + string inputFileExtension = ""; + string outputFileExtension = ""; + int cropWidth = 0; + int cropHeight = 0; + cv::Size saveDimensions; + + //Check if input of command line parameters are valid + if (argc == 6) { + string codecInput = argv[5]; + persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); + + } else if (argc == 5) { + //Use default codec + persistentData.codec = CV_FOURCC('X', '2', '6', '4'); + + } else { + cerr + << "The number of parameters entered were incorrect. Expected track4k.exe [FOURCC Codec] \n See http://www.fourcc.org/codecs.php for available codecs. The default codec of X264 for mp4 will be used, if none is specified!" + << endl; + return -1; + } + + //Get filenames from the command line and store them + inputFilename = argv[1]; + outputFilename = argv[2]; + + //Extract the extensions from the filenames + inputFileExtension = inputFilename.substr(inputFilename.find_first_of('.') + 1); + outputFileExtension = outputFilename.substr(outputFilename.find_first_of('.') + 1); + + + //Extract the crop dimensions from the parameters + cropWidth = stoi(argv[3]); + cropHeight = stoi(argv[4]); + saveDimensions = cv::Size(cropWidth, cropHeight); + + + //Update this information in PersistantData + persistentData.inputFileName = inputFilename; + persistentData.outputVideoFilenameSuffix = outputFilename.substr(0,outputFilename.find_first_of('.')); + persistentData.saveFileExtension = outputFileExtension; + persistentData.panOutputVideoSize = saveDimensions; + + cout << "\n----------------------------------------" << endl; + cout << "Stage [1 of 3] - Board Segmentation" << endl; + cout << "----------------------------------------\n" << endl; + Track4KPreProcess pre; + pre.preProcessDriver(persistentData); + cout << "\nStage 1 Complete" << endl; + cout << "----------------------------------------\n" << endl; + + vector r; + cout << "\n----------------------------------------" << endl; + cout << "Stage [2 of 3] - Lecturer Tracking" << endl; + cout << "----------------------------------------\n" << endl; + MovementDetection move(persistentData.inputFileName, &r); + vector *rR = new vector(); + move.getLecturer(rR); + cout << "\nStage 2 Complete" << endl; + cout << "----------------------------------------\n" << endl; + + + for (int i = 0; i < rR->size(); i++) { + persistentData.lecturerTrackedLocationRectangles.push_back(std::move(rR->at(i))); + } + + persistentData.skipFrameMovementDetection = move.getFrameSkipReset(); + + + cout << "\n----------------------------------------" << endl; + cout << "Stage [3 of 3] - Virtual Cinematographer" << endl; + cout << "----------------------------------------\n" << endl; + VirtualCinematographer vc; + vc.cinematographerDriver(persistentData); + cout << "\nStage 3 Complete" << endl; + cout << "----------------------------------------\n" << endl; + + +} diff --git a/source/panning/PanLogic.cpp b/source/panning/PanLogic.cpp index 916cd3c..db2d5b1 100644 --- a/source/panning/PanLogic.cpp +++ b/source/panning/PanLogic.cpp @@ -1,357 +1,357 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer 
Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/09/19. -// - -#include "PanLogic.h" - -using namespace std; -using namespace cv; - -bool initialCrop = true; - -/** - * Cos function over with which the pan operation is divided - * @param x - * @return - */ -long double PanLogic::smooth(double x) -{ - return (cos(x - (135)) + 1); -} - -/** - * This method, given a start and end point, creates a vector of rectangles that will pan over that line. - * @param start - * @param end - * @param numFrames - * @param right - * @param panOffsetType - * @param croppingRectangles - */ -void PanLogic::smoothMove(int start, int end, int numFrames, bool right, Position panOffsetType, std::vector &croppingRectangles) -{ - - if (initialCrop) - { - setPan(start, currentPan); - initialCrop = false; - } - - if (!checkIfPanRequired(start, end) && numFrames > 59) - { - - switch (panOffsetType) - { - case LEFT: - end -= (cropFrameWidth / 2) - boardOffSet; - break; - case RIGHT: - end += (cropFrameWidth / 2) - boardOffSet; - break; - case CENTER: - break; - } - - - int currentPos = start; - - int pixels = (int) abs(end - start); - - double sampleSize = 2 * CV_PI / numFrames; - double normal = pixels / numFrames; - - for (int i = 0; i < numFrames; i++) - { - - long double outP = smooth(i * sampleSize) * normal; - - //Needed to move rectangle left/right depending on the direction the lecturer is moving in - if (right) - { - currentPos += outP; - } - else - { - currentPos -= outP; - } - - //Creates a cropping rectangle - setPan(currentPos, currentPan); - - - croppingRectangles.push_back(currentPan); - - } - } - else - { - - //Don't pan. just push back current crop - for (int i = 0; i < numFrames; i++) - { - croppingRectangles.push_back(currentPan); - } - } -} - -//Essentially the main method for this class -/** - * Makes the crop window move over the pan operations. 
- * @param motionLines - * @param croppingRectangles - */ -void PanLogic::doPan(std::vector &motionLines, std::vector &croppingRectangles) -{ - int startingPosition; - - //Loop over all motion lines - for (int i = 0; i < motionLines.size(); i++) - { - PresenterMotion::Movement movement = motionLines.at(i); - if (!movement.isDropData) - { - //This is an actual motion line - if (i > 0) - { - startingPosition = getPan(currentPan); - } - else - { - startingPosition = movement.start.x; - } - - //If boards used in this motion line - if (motionLines.at(i).boardUsed) - { - //True should be board used - if (!motionLines.at(i).rightBoardUsed) - { - smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, LEFT, croppingRectangles); - - } - else - { - smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, RIGHT, croppingRectangles); - } - - } - else - { - smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, CENTER, croppingRectangles); - } - - } - else - { - //This is dropped frames (noise) - //Where noise is reduced (jittery motions dropped), fill up vacant frames with last known data - if (movement.numFrames > 50) - { - int startPosition = getPan(currentPan); - int endPosition = int (movement.start.x + abs(movement.end.x - movement.start.x)); - bool right = false; - if (startPosition < endPosition) - { - right = true; - } - - smoothMove(startPosition, endPosition, movement.numFrames, right, CENTER, croppingRectangles); - } - else - { - int speed = motionLines.at(i).numFrames; //Number of frames to reposition over - - /* - - if (motionLines.at(i).boardUsed) - { - //True should be board used - if (!motionLines.at(i).rightBoardUsed) - { - cout << "Reposition LEFT" << endl; - rePosition(LEFT, speed, m, croppingRectangles); - - } else - { - cout << "Reposition RIGHT" << endl; - rePosition(RIGHT, speed, m, croppingRectangles); - } - - } else - { - cout << "Reposition CENTER" << endl; - rePosition(CENTER, speed, m, croppingRectangles); - } - */ - - int remain = speed; - for (int j = 0; j < remain; j++) - { - croppingRectangles.push_back(currentPan); - } - } - - } - } -} - -/** - * Adjusts the crop window position. 
- * @param moveToPosition - * @param numFrames - * @param movement - * @param croppingRectangles - */ -void PanLogic::rePosition(Position moveToPosition, int numFrames, PresenterMotion::Movement &movement, std::vector &croppingRectangles) -{ - - int startPos = getPan(currentPan); - int endPos; - bool right; - - switch (moveToPosition) - { - case LEFT: - if (movement.start.x > movement.end.x) - { - endPos = movement.start.x; - } - else - { - endPos = movement.end.x; - } - break; - - case RIGHT: - if (movement.start.x < movement.end.x) - { - endPos = movement.start.x; - } - else - { - endPos = movement.end.x; - } - break; - case CENTER: - if (movement.start.x < movement.end.x) - { - endPos = movement.start.x + movement.length() / 2; - } - else - { - endPos = movement.start.x - movement.length() / 2; - } - break; - } - - right = startPos > endPos; - - smoothMove(startPos, endPos, numFrames, right, moveToPosition, croppingRectangles); - -} - -//Method to initialise the dimensions of the panning class -/** - * Initialise the dimensions of the frame and cropping window - * @param inputFrameSize - * @param cropSize - * @param yPanLevel - */ -void PanLogic::initialise(cv::Size inputFrameSize, cv::Size cropSize, int yPanLevel) -{ - - cropFrameWidth = cropSize.width; - cropFrameHeight = cropSize.height; - inputFrameWidth = inputFrameSize.width; - inputFrameHeight = inputFrameSize.height; - - yLevelOfPanWindow = yPanLevel; - - boardOffSet = (int) (0.1 * cropFrameWidth); - -} - -/** - * Checks if a pan is necessary - * @param start - * @param end - * @return - */ -bool PanLogic::checkIfPanRequired(int start, int end) -{ - - //Add some buffer padding - return start >= currentPan.tl().x + 1.5 * boardOffSet && end >= currentPan.tl().x + 1.5 * boardOffSet && - start <= currentPan.br().x - 1.5 * boardOffSet && end <= currentPan.br().x - 1.5 * boardOffSet; - -} - -/** - * Checks edge cases of the pan operation and adjusts coordinates accordingly. Leaves pan operation in an orderly state - * @param currentPosX - * @param crop - */ -void PanLogic::setPan(int currentPosX, Rect &crop) -{ - Rect tempRect = Rect(currentPosX - (cropFrameWidth / 2), yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); - if (!inBounds(tempRect)) - { - if (initialCrop) - { - if (currentPosX > cropFrameWidth) - { - tempRect = Rect(inputFrameWidth - cropFrameWidth, yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); - } - else - { - tempRect = Rect(0, yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); - } - - initialCrop = false; - currentPan = tempRect; - } - crop = currentPan; - } - else - { - crop = tempRect; - } -} - -/** - * Checks that a crop window falls within the bounds of the original frame - * @param crop - * @return - */ -bool PanLogic::inBounds(cv::Rect &crop) -{ - return crop.tl().x >= 0 && crop.tl().x + cropFrameWidth <= inputFrameWidth; -} - -/** - * returns crop window coordinates (central x-value of rectangle) - * @param crop - * @return - */ -int PanLogic::getPan(cv::Rect &crop) -{ - return crop.x + (cropFrameWidth / 2); -} - - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/09/19. +// + +#include "PanLogic.h" + +using namespace std; +using namespace cv; + +bool initialCrop = true; + +/** + * Cos function over with which the pan operation is divided + * @param x + * @return + */ +long double PanLogic::smooth(double x) +{ + return (cos(x - (135)) + 1); +} + +/** + * This method, given a start and end point, creates a vector of rectangles that will pan over that line. + * @param start + * @param end + * @param numFrames + * @param right + * @param panOffsetType + * @param croppingRectangles + */ +void PanLogic::smoothMove(int start, int end, int numFrames, bool right, Position panOffsetType, std::vector &croppingRectangles) +{ + + if (initialCrop) + { + setPan(start, currentPan); + initialCrop = false; + } + + if (!checkIfPanRequired(start, end) && numFrames > 59) + { + + switch (panOffsetType) + { + case LEFT: + end -= (cropFrameWidth / 2) - boardOffSet; + break; + case RIGHT: + end += (cropFrameWidth / 2) - boardOffSet; + break; + case CENTER: + break; + } + + + int currentPos = start; + + int pixels = (int) abs(end - start); + + double sampleSize = 2 * CV_PI / numFrames; + double normal = pixels / numFrames; + + for (int i = 0; i < numFrames; i++) + { + + long double outP = smooth(i * sampleSize) * normal; + + //Needed to move rectangle left/right depending on the direction the lecturer is moving in + if (right) + { + currentPos += outP; + } + else + { + currentPos -= outP; + } + + //Creates a cropping rectangle + setPan(currentPos, currentPan); + + + croppingRectangles.push_back(currentPan); + + } + } + else + { + + //Don't pan. just push back current crop + for (int i = 0; i < numFrames; i++) + { + croppingRectangles.push_back(currentPan); + } + } +} + +//Essentially the main method for this class +/** + * Makes the crop window move over the pan operations. 
+ * @param motionLines + * @param croppingRectangles + */ +void PanLogic::doPan(std::vector &motionLines, std::vector &croppingRectangles) +{ + int startingPosition; + + //Loop over all motion lines + for (int i = 0; i < motionLines.size(); i++) + { + PresenterMotion::Movement movement = motionLines.at(i); + if (!movement.isDropData) + { + //This is an actual motion line + if (i > 0) + { + startingPosition = getPan(currentPan); + } + else + { + startingPosition = movement.start.x; + } + + //If boards used in this motion line + if (motionLines.at(i).boardUsed) + { + //True should be board used + if (!motionLines.at(i).rightBoardUsed) + { + smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, LEFT, croppingRectangles); + + } + else + { + smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, RIGHT, croppingRectangles); + } + + } + else + { + smoothMove(startingPosition, movement.end.x, movement.numFrames, movement.right, CENTER, croppingRectangles); + } + + } + else + { + //This is dropped frames (noise) + //Where noise is reduced (jittery motions dropped), fill up vacant frames with last known data + if (movement.numFrames > 50) + { + int startPosition = getPan(currentPan); + int endPosition = int (movement.start.x + abs(movement.end.x - movement.start.x)); + bool right = false; + if (startPosition < endPosition) + { + right = true; + } + + smoothMove(startPosition, endPosition, movement.numFrames, right, CENTER, croppingRectangles); + } + else + { + int speed = motionLines.at(i).numFrames; //Number of frames to reposition over + + /* + + if (motionLines.at(i).boardUsed) + { + //True should be board used + if (!motionLines.at(i).rightBoardUsed) + { + cout << "Reposition LEFT" << endl; + rePosition(LEFT, speed, m, croppingRectangles); + + } else + { + cout << "Reposition RIGHT" << endl; + rePosition(RIGHT, speed, m, croppingRectangles); + } + + } else + { + cout << "Reposition CENTER" << endl; + rePosition(CENTER, speed, m, croppingRectangles); + } + */ + + int remain = speed; + for (int j = 0; j < remain; j++) + { + croppingRectangles.push_back(currentPan); + } + } + + } + } +} + +/** + * Adjusts the crop window position. 
+ * @param moveToPosition + * @param numFrames + * @param movement + * @param croppingRectangles + */ +void PanLogic::rePosition(Position moveToPosition, int numFrames, PresenterMotion::Movement &movement, std::vector &croppingRectangles) +{ + + int startPos = getPan(currentPan); + int endPos; + bool right; + + switch (moveToPosition) + { + case LEFT: + if (movement.start.x > movement.end.x) + { + endPos = movement.start.x; + } + else + { + endPos = movement.end.x; + } + break; + + case RIGHT: + if (movement.start.x < movement.end.x) + { + endPos = movement.start.x; + } + else + { + endPos = movement.end.x; + } + break; + case CENTER: + if (movement.start.x < movement.end.x) + { + endPos = movement.start.x + movement.length() / 2; + } + else + { + endPos = movement.start.x - movement.length() / 2; + } + break; + } + + right = startPos > endPos; + + smoothMove(startPos, endPos, numFrames, right, moveToPosition, croppingRectangles); + +} + +//Method to initialise the dimensions of the panning class +/** + * Initialise the dimensions of the frame and cropping window + * @param inputFrameSize + * @param cropSize + * @param yPanLevel + */ +void PanLogic::initialise(cv::Size inputFrameSize, cv::Size cropSize, int yPanLevel) +{ + + cropFrameWidth = cropSize.width; + cropFrameHeight = cropSize.height; + inputFrameWidth = inputFrameSize.width; + inputFrameHeight = inputFrameSize.height; + + yLevelOfPanWindow = yPanLevel; + + boardOffSet = (int) (0.1 * cropFrameWidth); + +} + +/** + * Checks if a pan is necessary + * @param start + * @param end + * @return + */ +bool PanLogic::checkIfPanRequired(int start, int end) +{ + + //Add some buffer padding + return start >= currentPan.tl().x + 1.5 * boardOffSet && end >= currentPan.tl().x + 1.5 * boardOffSet && + start <= currentPan.br().x - 1.5 * boardOffSet && end <= currentPan.br().x - 1.5 * boardOffSet; + +} + +/** + * Checks edge cases of the pan operation and adjusts coordinates accordingly. Leaves pan operation in an orderly state + * @param currentPosX + * @param crop + */ +void PanLogic::setPan(int currentPosX, Rect &crop) +{ + Rect tempRect = Rect(currentPosX - (cropFrameWidth / 2), yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); + if (!inBounds(tempRect)) + { + if (initialCrop) + { + if (currentPosX > cropFrameWidth) + { + tempRect = Rect(inputFrameWidth - cropFrameWidth, yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); + } + else + { + tempRect = Rect(0, yLevelOfPanWindow, cropFrameWidth, cropFrameHeight); + } + + initialCrop = false; + currentPan = tempRect; + } + crop = currentPan; + } + else + { + crop = tempRect; + } +} + +/** + * Checks that a crop window falls within the bounds of the original frame + * @param crop + * @return + */ +bool PanLogic::inBounds(cv::Rect &crop) +{ + return crop.tl().x >= 0 && crop.tl().x + cropFrameWidth <= inputFrameWidth; +} + +/** + * returns crop window coordinates (central x-value of rectangle) + * @param crop + * @return + */ +int PanLogic::getPan(cv::Rect &crop) +{ + return crop.x + (cropFrameWidth / 2); +} + + diff --git a/source/panning/PanLogic.h b/source/panning/PanLogic.h index 56ec198..af4a18d 100644 --- a/source/panning/PanLogic.h +++ b/source/panning/PanLogic.h @@ -1,82 +1,82 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/09/19. -// - -#ifndef TRACK4K_PANLOGIC_H -#define TRACK4K_PANLOGIC_H - -#include -#include "opencv2/opencv.hpp" -#include "PresenterMotion.h" - -class PanLogic -{ - //Take in a vector of line segments - //Decide if pan size contains line segments - //Else pan from start to end of line segment - //Need to consider how long the lecturer is allowed to be in a margin - - public: - //Variables - enum Position - { - LEFT, RIGHT, CENTER - }; - - - //Methods - long double smooth(double x); - - void smoothMove(int start, int end, int numFrames, bool right, Position panOffsetType, std::vector &croppingRectangles); - - void doPan(std::vector &motionLines, std::vector &croppingRectangles); - - void rePosition(Position moveToPosition, int numFrames, PresenterMotion::Movement &movement, std::vector &croppingRectangles); - - void - initialise(cv::Size inputFrameSize, cv::Size cropSize, int yPanLevel); - - bool checkIfPanRequired(int start, int end); - - void setPan(int currentPosX, cv::Rect &crop); - - int getPan(cv::Rect &crop); - - bool inBounds(int x); - - bool inBounds(cv::Rect &crop); - - - //Size of the crop frame and input frame - int cropFrameWidth; - int cropFrameHeight; - int inputFrameWidth; - int inputFrameHeight; - - int yLevelOfPanWindow; - - - int boardOffSet; - - cv::Rect currentPan; - -}; - - -#endif //TRACK4K_PANLOGIC_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/09/19. 
+// + +#ifndef TRACK4K_PANLOGIC_H +#define TRACK4K_PANLOGIC_H + +#include +#include "opencv2/opencv.hpp" +#include "PresenterMotion.h" + +class PanLogic +{ + //Take in a vector of line segments + //Decide if pan size contains line segments + //Else pan from start to end of line segment + //Need to consider how long the lecturer is allowed to be in a margin + + public: + //Variables + enum Position + { + LEFT, RIGHT, CENTER + }; + + + //Methods + long double smooth(double x); + + void smoothMove(int start, int end, int numFrames, bool right, Position panOffsetType, std::vector &croppingRectangles); + + void doPan(std::vector &motionLines, std::vector &croppingRectangles); + + void rePosition(Position moveToPosition, int numFrames, PresenterMotion::Movement &movement, std::vector &croppingRectangles); + + void + initialise(cv::Size inputFrameSize, cv::Size cropSize, int yPanLevel); + + bool checkIfPanRequired(int start, int end); + + void setPan(int currentPosX, cv::Rect &crop); + + int getPan(cv::Rect &crop); + + bool inBounds(int x); + + bool inBounds(cv::Rect &crop); + + + //Size of the crop frame and input frame + int cropFrameWidth; + int cropFrameHeight; + int inputFrameWidth; + int inputFrameHeight; + + int yLevelOfPanWindow; + + + int boardOffSet; + + cv::Rect currentPan; + +}; + + +#endif //TRACK4K_PANLOGIC_H diff --git a/source/panning/PresenterMotion.cpp b/source/panning/PresenterMotion.cpp index bdd564d..bee9ec3 100644 --- a/source/panning/PresenterMotion.cpp +++ b/source/panning/PresenterMotion.cpp @@ -1,473 +1,473 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/09/16. -// - -#include "PresenterMotion.h" - -using namespace std; -using namespace cv; - - -/** - * Given a vector of points (Lecturer positions), create line segments each time the direction of motion changes. 
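
A compact, stand-alone illustration of the segmentation described above: walk the sampled x positions and start a new segment whenever the direction of travel flips. The small Segment struct and the sample positions are invented for the example; the real code records the same information in PresenterMotion::Movement.

    #include <cstdio>
    #include <vector>

    struct Segment { int startX, endX, frames; bool right; };

    // Split x positions (one per sampled frame) into runs of constant direction.
    std::vector<Segment> segmentByDirection(const std::vector<int> &xs, int framesPerSample)
    {
        std::vector<Segment> out;
        if (xs.size() < 2) return out;
        Segment cur{xs[0], xs[1], framesPerSample, xs[0] < xs[1]};
        for (size_t i = 2; i < xs.size(); ++i)
        {
            bool right = xs[i - 1] < xs[i];
            if (right == cur.right) { cur.endX = xs[i]; cur.frames += framesPerSample; }
            else { out.push_back(cur); cur = {xs[i - 1], xs[i], framesPerSample, right}; }
        }
        out.push_back(cur);
        return out;
    }

    int main()
    {
        for (const Segment &s : segmentByDirection({100, 200, 350, 300, 250, 400}, 8))
            std::printf("%d -> %d over %d frames (%s)\n", s.startX, s.endX, s.frames, s.right ? "right" : "left");
    }
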
- * @param lecturerPositions - * @param skipFrames - */ -void PresenterMotion::generateMotionLines(std::vector lecturerPositions, int skipFrames) -{ - bool right; - right = lecturerPositions.at(0).x < lecturerPositions.at(1).x; - int counter = skipFrames; - Point endPoint; - Point startPoint = lecturerPositions.at(0); - - Movement movement; - Point prev; - Point current; -/* - * @counter stores the number of frames processed - */ - for (int i = 1; i < lecturerPositions.size(); i++) - { - prev = lecturerPositions.at(i - 1); - current = lecturerPositions.at(i); - - - if (prev.x < current.x && right) - { - counter += skipFrames; - } - else if (prev.x > current.x && !right) - { - counter += skipFrames; - } - else - { - endPoint = prev; - movement.start = startPoint; - movement.end = endPoint; - movement.numFrames = counter; - movement.right = right; - motion.push_back(movement); - right = !right; - startPoint = endPoint; - counter = skipFrames; - } - } - endPoint = prev; - movement.start = startPoint; - movement.end = endPoint; - movement.numFrames = counter; - movement.right = right; - motion.push_back(movement); -} - -/** - * Reduces jittery movements by fusing short lines - * @param ignoreThresh - * @param yFix - */ -void PresenterMotion::cullMotion(int ignoreThresh, int yFix) -{ - - vector tempMovements; - int dropCounter = 0; - - int minX = INT_MAX; - int maxX = INT_MIN; - - for (Movement movement:motion) - { - - if (movement.length() > ignoreThresh && (movement.length() / movement.numFrames) < 25) - { - - if (dropCounter > 0) - { - Movement movement1; - movement1.numFrames = dropCounter; - movement1.isDropData = true; - movement1.start = Point(minX, yFix); - movement1.end = Point(maxX, yFix); - tempMovements.push_back(movement1); - dropCounter = 0; - minX = INT_MAX; - maxX = INT_MIN; - } - tempMovements.push_back(movement); - } - else - { - - dropCounter += movement.numFrames; - - if (movement.start.x < minX) - { - minX = movement.start.x; - } - - if (movement.end.x > maxX) - { - maxX = movement.end.x; - } - - if (movement.end.x < minX) - { - minX = movement.end.x; - } - - if (movement.start.x > maxX) - { - maxX = movement.start.x; - } - } - } - - if (dropCounter > 0) - { - Movement movement; - movement.numFrames = dropCounter; - movement.isDropData = true; - movement.start = Point(minX, yFix); - movement.end = Point(maxX, yFix); - tempMovements.push_back(movement); - } - - motion.clear(); - motion = tempMovements; -} - -/*void PresenterMotion::repairCulling(int ignoreThresh) -{ - - std::vector temp; - Movement m_new; - for (int i = 0; i < motion.size(); i++) - { - if (i > motion.size() - 1) - { - break; - } - - //For motion going right - if (!motion[i].isDropData && motion[i].right) - { - if (motion[i + 1].isDropData && - (!motion[i + 2].isDropData && motion[i + 2].right && motion[i + 1].numFrames <= ignoreThresh)) - { - m_new.start = motion[i].start; - m_new.end = motion[i + 2].end; - m_new.numFrames = motion[i].numFrames + motion[i + 1].numFrames + motion[i + 2].numFrames; - m_new.isDropData = false; - m_new.right = true; //TODO - temp.push_back(m_new); - i += 2; - //Case 2 - } else if (motion[i + 1].isDropData && - (!motion[i + 2].isDropData && !motion[i + 2].right)) - { - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - temp.push_back(motion[i + 2]); - i += 2; - - } else if (!motion[i + 1].isDropData && !motion[i + 1].right) - { - - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - i++; - - } else if (motion[i + 1].isDropData && motion[i + 1].numFrames > 
ignoreThresh) - { - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - temp.push_back(motion[i + 2]); - i += 2; - } - //For motion going left - } else if (!motion[i].isDropData && !motion[i].right) - { - if (motion[i + 1].isDropData && - (!motion[i + 2].isDropData && !motion[i + 2].right && motion[i + 1].numFrames < ignoreThresh)) - { - m_new.start = motion[i].start; - m_new.end = motion[i + 2].end; - m_new.numFrames = motion[i].numFrames + motion[i + 1].numFrames + motion[i + 2].numFrames; - m_new.isDropData = false; - m_new.right = false; //TODO - temp.push_back(m_new); - i += 2; - } else if (motion[i + 1].isDropData && - (!motion[i + 2].isDropData && motion[i + 2].right)) - { - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - temp.push_back(motion[i + 2]); - i += 2; - - } else if (!motion[i + 1].isDropData && motion[i + 1].right) - { - - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - i++; - - } else if (motion[i + 1].isDropData && motion[i + 1].numFrames > ignoreThresh) - { - temp.push_back(motion[i]); - temp.push_back(motion[i + 1]); - temp.push_back(motion[i + 2]); - i += 2; - } - - } - //For non-motion (Noise) - else - { - temp.push_back(motion[i]); - } - } - motion.clear(); - motion = temp; -}*/ - - -/** - * Stitches together smaller pan operations going in the same direction (interrupted by noise). - * @param ignoreThresh - */ -void PresenterMotion::repairCulling(int ignoreThresh) -{ - - std::vector temp; - int dropFramesCount = 0; - Movement m_new; - m_new.numFrames = 0; - Movement current; - Movement next; - bool first = true; - bool right = false; - - for (int i = 0; i < motion.size(); i++) - { - current = motion.at(i); - - if (i > motion.size() - 1) - { - break; - } - - - if (current.isDropData) - { - if (current.numFrames < ignoreThresh) - { - dropFramesCount += current.numFrames; - } - else - { - if (first) - { - temp.push_back(current); - } - else - { - m_new.numFrames += dropFramesCount; - dropFramesCount = 0; - temp.push_back(m_new); - temp.push_back(current); - first = true; - } - } - } - else - { - if (first) - { - m_new.isDropData = false; - m_new.start = current.start; - m_new.end = current.end; - m_new.right = current.right; - m_new.numFrames += current.numFrames; - first = false; - } - else if (!first && (current.right == m_new.right)) - { - m_new.isDropData = false; - m_new.end = current.end; - m_new.right = current.right; - m_new.numFrames += current.numFrames; - first = false; - } - else if (!first && (current.right != m_new.right)) - { - temp.push_back(m_new); - m_new.isDropData = false; - m_new.start = current.start; - m_new.end = current.end; - m_new.right = current.right; - m_new.numFrames = 0; - m_new.numFrames += dropFramesCount; - dropFramesCount = 0; - m_new.numFrames += current.numFrames; - } - } - } - - if (temp.at(temp.size() - 1).start != m_new.start && temp.at(temp.size() - 1).end != m_new.end) - { - if (dropFramesCount > 0) - { - m_new.numFrames += dropFramesCount; - temp.push_back(m_new); - } - else - { - temp.push_back(m_new); - } - } - motion.clear(); - motion = temp; -} - -/** - * Creates a motion graph where each line represents a pan operation. Blue is left, red is right, white is noise and the length is the distance on the x-plane. 
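
repairCulling above stitches consecutive same-direction movements back together when they are separated only by short runs of noise, folding the dropped frames into the merged movement. A deliberately simplified, stand-alone variant of that rule; Move and mergeRuns are invented names, and unlike the real code this version does not defer the decision until it has seen what follows the noise:

    #include <vector>

    struct Move { int startX, endX, frames; bool right, noise; };

    // Merge same-direction moves separated only by short noise runs.
    // shortNoise plays the role of the ignoreThresh parameter above.
    std::vector<Move> mergeRuns(const std::vector<Move> &in, int shortNoise)
    {
        std::vector<Move> out;
        for (const Move &m : in)
        {
            if (m.noise && m.frames < shortNoise && !out.empty() && !out.back().noise)
            {
                out.back().frames += m.frames;                      // absorb the short pause
            }
            else if (!m.noise && !out.empty() && !out.back().noise && out.back().right == m.right)
            {
                out.back().endX = m.endX;                           // extend the same-direction run
                out.back().frames += m.frames;
            }
            else
            {
                out.push_back(m);                                   // direction change or long pause: keep
            }
        }
        return out;
    }
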
- * @param img - */ -void PresenterMotion::generateMotionImage(Mat &img) -{ - - int yOff = 20; - int addOn = 0; - for (Movement m:motion) - { - Scalar s; - if (m.isDropData) - { - s = Scalar(255, 255, 255); - DrawLine(img, Point(m.start.x, yOff + addOn), Point(m.end.x, yOff + addOn), s); - addOn += yOff; - } - else - { - if (m.right) - { - s = Scalar(0, 0, 255); - } - else - { - s = Scalar(255, 170, 0); - } - DrawLine(img, Point(m.start.x, yOff + addOn), Point(m.end.x, yOff + addOn), s); - addOn += yOff; - } - } -} - -/** - * Aligns jagged edges between pans (i.e The start point of the next pan is the end of the current pan). - */ -void PresenterMotion::relinkMotion() -{ - int continueFrom = 0; - bool last_x = false; - for (int i = 0; i < motion.size(); i++) - { - Movement m = motion.at(i); - if (!m.isDropData) - { - if (!last_x) - { - continueFrom = m.end.x; - } - if (i > 0 && last_x) - { - motion.at(i).start.x = continueFrom; - last_x = false; - } - } - else - { - last_x = true; - continueFrom = m.end.x; - } - } -} - -/** - * Returns by reference through parameter given - * @param outMotionVec - */ -void PresenterMotion::getMotionLines(vector &outMotionVec) -{ - outMotionVec = motion; -} - -/** - * Helper method used by the generateMotionImage() method to draw the actual line segments onto an image - * @param img - * @param start - * @param end - * @param s - */ -void PresenterMotion::DrawLine(Mat &img, Point start, Point end, Scalar s) -{ - - int thickness = 10; - int lineType = 8; - line(img, start, end, s, thickness, lineType); -} - -/** - * Labels a motion line if (and where) a board was used. - * @param persistentData - */ -void PresenterMotion::attatchBoardUsage(PersistentData &persistentData) -{ - int rangeStart = 0; - int rangeEnd = 0; - - int checkpoint = 0; - - for (int i = 0; i < motion.size(); i++) - { - rangeEnd = rangeStart + motion.at(i).numFrames; - - for (int j = checkpoint; j < persistentData.metaFrameVector.size(); j++) - { - if (j * persistentData.boardDetectionSkipFrames >= rangeStart && j * persistentData.boardDetectionSkipFrames < rangeEnd) - { - if (persistentData.metaFrameVector.at(j).leftProjector || persistentData.metaFrameVector.at(j).leftBoard) - { - motion.at(i).boardUsed = true; - motion.at(i).rightBoardUsed = false; - } - else if (persistentData.metaFrameVector.at(j).rightProjector || persistentData.metaFrameVector.at(j).rightBoard) - { - motion.at(i).boardUsed = true; - motion.at(i).rightBoardUsed = true; - } - - checkpoint = j; - } - } - rangeStart = rangeEnd; - } +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/09/16. +// + +#include "PresenterMotion.h" + +using namespace std; +using namespace cv; + + +/** + * Given a vector of points (Lecturer positions), create line segments each time the direction of motion changes. 
+ * @param lecturerPositions + * @param skipFrames + */ +void PresenterMotion::generateMotionLines(std::vector lecturerPositions, int skipFrames) +{ + bool right; + right = lecturerPositions.at(0).x < lecturerPositions.at(1).x; + int counter = skipFrames; + Point endPoint; + Point startPoint = lecturerPositions.at(0); + + Movement movement; + Point prev; + Point current; +/* + * @counter stores the number of frames processed + */ + for (int i = 1; i < lecturerPositions.size(); i++) + { + prev = lecturerPositions.at(i - 1); + current = lecturerPositions.at(i); + + + if (prev.x < current.x && right) + { + counter += skipFrames; + } + else if (prev.x > current.x && !right) + { + counter += skipFrames; + } + else + { + endPoint = prev; + movement.start = startPoint; + movement.end = endPoint; + movement.numFrames = counter; + movement.right = right; + motion.push_back(movement); + right = !right; + startPoint = endPoint; + counter = skipFrames; + } + } + endPoint = prev; + movement.start = startPoint; + movement.end = endPoint; + movement.numFrames = counter; + movement.right = right; + motion.push_back(movement); +} + +/** + * Reduces jittery movements by fusing short lines + * @param ignoreThresh + * @param yFix + */ +void PresenterMotion::cullMotion(int ignoreThresh, int yFix) +{ + + vector tempMovements; + int dropCounter = 0; + + int minX = INT_MAX; + int maxX = INT_MIN; + + for (Movement movement:motion) + { + + if (movement.length() > ignoreThresh && (movement.length() / movement.numFrames) < 25) + { + + if (dropCounter > 0) + { + Movement movement1; + movement1.numFrames = dropCounter; + movement1.isDropData = true; + movement1.start = Point(minX, yFix); + movement1.end = Point(maxX, yFix); + tempMovements.push_back(movement1); + dropCounter = 0; + minX = INT_MAX; + maxX = INT_MIN; + } + tempMovements.push_back(movement); + } + else + { + + dropCounter += movement.numFrames; + + if (movement.start.x < minX) + { + minX = movement.start.x; + } + + if (movement.end.x > maxX) + { + maxX = movement.end.x; + } + + if (movement.end.x < minX) + { + minX = movement.end.x; + } + + if (movement.start.x > maxX) + { + maxX = movement.start.x; + } + } + } + + if (dropCounter > 0) + { + Movement movement; + movement.numFrames = dropCounter; + movement.isDropData = true; + movement.start = Point(minX, yFix); + movement.end = Point(maxX, yFix); + tempMovements.push_back(movement); + } + + motion.clear(); + motion = tempMovements; +} + +/*void PresenterMotion::repairCulling(int ignoreThresh) +{ + + std::vector temp; + Movement m_new; + for (int i = 0; i < motion.size(); i++) + { + if (i > motion.size() - 1) + { + break; + } + + //For motion going right + if (!motion[i].isDropData && motion[i].right) + { + if (motion[i + 1].isDropData && + (!motion[i + 2].isDropData && motion[i + 2].right && motion[i + 1].numFrames <= ignoreThresh)) + { + m_new.start = motion[i].start; + m_new.end = motion[i + 2].end; + m_new.numFrames = motion[i].numFrames + motion[i + 1].numFrames + motion[i + 2].numFrames; + m_new.isDropData = false; + m_new.right = true; //TODO + temp.push_back(m_new); + i += 2; + //Case 2 + } else if (motion[i + 1].isDropData && + (!motion[i + 2].isDropData && !motion[i + 2].right)) + { + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + temp.push_back(motion[i + 2]); + i += 2; + + } else if (!motion[i + 1].isDropData && !motion[i + 1].right) + { + + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + i++; + + } else if (motion[i + 1].isDropData && motion[i + 1].numFrames > 
ignoreThresh) + { + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + temp.push_back(motion[i + 2]); + i += 2; + } + //For motion going left + } else if (!motion[i].isDropData && !motion[i].right) + { + if (motion[i + 1].isDropData && + (!motion[i + 2].isDropData && !motion[i + 2].right && motion[i + 1].numFrames < ignoreThresh)) + { + m_new.start = motion[i].start; + m_new.end = motion[i + 2].end; + m_new.numFrames = motion[i].numFrames + motion[i + 1].numFrames + motion[i + 2].numFrames; + m_new.isDropData = false; + m_new.right = false; //TODO + temp.push_back(m_new); + i += 2; + } else if (motion[i + 1].isDropData && + (!motion[i + 2].isDropData && motion[i + 2].right)) + { + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + temp.push_back(motion[i + 2]); + i += 2; + + } else if (!motion[i + 1].isDropData && motion[i + 1].right) + { + + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + i++; + + } else if (motion[i + 1].isDropData && motion[i + 1].numFrames > ignoreThresh) + { + temp.push_back(motion[i]); + temp.push_back(motion[i + 1]); + temp.push_back(motion[i + 2]); + i += 2; + } + + } + //For non-motion (Noise) + else + { + temp.push_back(motion[i]); + } + } + motion.clear(); + motion = temp; +}*/ + + +/** + * Stitches together smaller pan operations going in the same direction (interrupted by noise). + * @param ignoreThresh + */ +void PresenterMotion::repairCulling(int ignoreThresh) +{ + + std::vector temp; + int dropFramesCount = 0; + Movement m_new; + m_new.numFrames = 0; + Movement current; + Movement next; + bool first = true; + bool right = false; + + for (int i = 0; i < motion.size(); i++) + { + current = motion.at(i); + + if (i > motion.size() - 1) + { + break; + } + + + if (current.isDropData) + { + if (current.numFrames < ignoreThresh) + { + dropFramesCount += current.numFrames; + } + else + { + if (first) + { + temp.push_back(current); + } + else + { + m_new.numFrames += dropFramesCount; + dropFramesCount = 0; + temp.push_back(m_new); + temp.push_back(current); + first = true; + } + } + } + else + { + if (first) + { + m_new.isDropData = false; + m_new.start = current.start; + m_new.end = current.end; + m_new.right = current.right; + m_new.numFrames += current.numFrames; + first = false; + } + else if (!first && (current.right == m_new.right)) + { + m_new.isDropData = false; + m_new.end = current.end; + m_new.right = current.right; + m_new.numFrames += current.numFrames; + first = false; + } + else if (!first && (current.right != m_new.right)) + { + temp.push_back(m_new); + m_new.isDropData = false; + m_new.start = current.start; + m_new.end = current.end; + m_new.right = current.right; + m_new.numFrames = 0; + m_new.numFrames += dropFramesCount; + dropFramesCount = 0; + m_new.numFrames += current.numFrames; + } + } + } + + if (temp.at(temp.size() - 1).start != m_new.start && temp.at(temp.size() - 1).end != m_new.end) + { + if (dropFramesCount > 0) + { + m_new.numFrames += dropFramesCount; + temp.push_back(m_new); + } + else + { + temp.push_back(m_new); + } + } + motion.clear(); + motion = temp; +} + +/** + * Creates a motion graph where each line represents a pan operation. Blue is left, red is right, white is noise and the length is the distance on the x-plane. 
+ * @param img + */ +void PresenterMotion::generateMotionImage(Mat &img) +{ + + int yOff = 20; + int addOn = 0; + for (Movement m:motion) + { + Scalar s; + if (m.isDropData) + { + s = Scalar(255, 255, 255); + DrawLine(img, Point(m.start.x, yOff + addOn), Point(m.end.x, yOff + addOn), s); + addOn += yOff; + } + else + { + if (m.right) + { + s = Scalar(0, 0, 255); + } + else + { + s = Scalar(255, 170, 0); + } + DrawLine(img, Point(m.start.x, yOff + addOn), Point(m.end.x, yOff + addOn), s); + addOn += yOff; + } + } +} + +/** + * Aligns jagged edges between pans (i.e The start point of the next pan is the end of the current pan). + */ +void PresenterMotion::relinkMotion() +{ + int continueFrom = 0; + bool last_x = false; + for (int i = 0; i < motion.size(); i++) + { + Movement m = motion.at(i); + if (!m.isDropData) + { + if (!last_x) + { + continueFrom = m.end.x; + } + if (i > 0 && last_x) + { + motion.at(i).start.x = continueFrom; + last_x = false; + } + } + else + { + last_x = true; + continueFrom = m.end.x; + } + } +} + +/** + * Returns by reference through parameter given + * @param outMotionVec + */ +void PresenterMotion::getMotionLines(vector &outMotionVec) +{ + outMotionVec = motion; +} + +/** + * Helper method used by the generateMotionImage() method to draw the actual line segments onto an image + * @param img + * @param start + * @param end + * @param s + */ +void PresenterMotion::DrawLine(Mat &img, Point start, Point end, Scalar s) +{ + + int thickness = 10; + int lineType = 8; + line(img, start, end, s, thickness, lineType); +} + +/** + * Labels a motion line if (and where) a board was used. + * @param persistentData + */ +void PresenterMotion::attatchBoardUsage(PersistentData &persistentData) +{ + int rangeStart = 0; + int rangeEnd = 0; + + int checkpoint = 0; + + for (int i = 0; i < motion.size(); i++) + { + rangeEnd = rangeStart + motion.at(i).numFrames; + + for (int j = checkpoint; j < persistentData.metaFrameVector.size(); j++) + { + if (j * persistentData.boardDetectionSkipFrames >= rangeStart && j * persistentData.boardDetectionSkipFrames < rangeEnd) + { + if (persistentData.metaFrameVector.at(j).leftProjector || persistentData.metaFrameVector.at(j).leftBoard) + { + motion.at(i).boardUsed = true; + motion.at(i).rightBoardUsed = false; + } + else if (persistentData.metaFrameVector.at(j).rightProjector || persistentData.metaFrameVector.at(j).rightBoard) + { + motion.at(i).boardUsed = true; + motion.at(i).rightBoardUsed = true; + } + + checkpoint = j; + } + } + rangeStart = rangeEnd; + } } \ No newline at end of file diff --git a/source/panning/PresenterMotion.h b/source/panning/PresenterMotion.h index 910973d..facb100 100644 --- a/source/panning/PresenterMotion.h +++ b/source/panning/PresenterMotion.h @@ -1,77 +1,77 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/09/16. 
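
attatchBoardUsage above lines the two sampled streams up by index arithmetic: metaFrame j describes source frame j * boardDetectionSkipFrames, and it is assigned to whichever motion segment's frame range [rangeStart, rangeEnd) contains that source frame. A tiny sketch of that range test, with invented numbers:

    #include <cstdio>

    // metaFrame j describes source frame j * skip; test whether it falls in [rangeStart, rangeEnd).
    bool metaFrameInSegment(int j, int skip, int rangeStart, int rangeEnd)
    {
        int sourceFrame = j * skip;
        return sourceFrame >= rangeStart && sourceFrame < rangeEnd;
    }

    int main()
    {
        // A motion segment covering source frames [120, 240), board detection sampled every 30 frames:
        for (int j = 0; j < 10; ++j)
            if (metaFrameInSegment(j, 30, 120, 240))
                std::printf("metaFrame %d (source frame %d) belongs to this segment\n", j, j * 30);
    }
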
-// - -#ifndef TRACK4K_PRESENTERMOTION_H -#define TRACK4K_PRESENTERMOTION_H - -#include "opencv2/opencv.hpp" -#include "../MetaFrame.h" -#include "../PersistentData.h" -#include -#include - - -class PresenterMotion -{ - -public: - - /** - * Struct stores data of each line segment. It also can be used to store noise by enabling the @isDropData boolean - */ - struct Movement - { - bool isDropData = false; - cv::Point start; - cv::Point end; - int numFrames; - bool right; - bool boardUsed = false; - bool rightBoardUsed = false; - - int length() - { - return (int) abs(start.x - end.x); // This line was complaining about returning a double into an int returning function. I convert it to int before returning now. - // Was this a possible cause for the co-ordinate mix-up? it said double may be too big to fit into an int - } - }; - - std::vector motion; - - void generateMotionLines(std::vector lecturerPositions, int skipFrames); - - void DrawLine(cv::Mat &img, cv::Point start, cv::Point end, cv::Scalar s); - - void cullMotion(int ignoreThresh, int yFix); - - void repairCulling(int ignoreThresh); - - void generateMotionImage(cv::Mat &img); - - void getMotionLines(std::vector &outMotionVec); - - void relinkMotion(); - - void attatchBoardUsage(PersistentData &persistentData); - -}; - - -#endif //TRACK4K_PRESENTERMOTION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/09/16. +// + +#ifndef TRACK4K_PRESENTERMOTION_H +#define TRACK4K_PRESENTERMOTION_H + +#include "opencv2/opencv.hpp" +#include "../MetaFrame.h" +#include "../PersistentData.h" +#include +#include + + +class PresenterMotion +{ + +public: + + /** + * Struct stores data of each line segment. It also can be used to store noise by enabling the @isDropData boolean + */ + struct Movement + { + bool isDropData = false; + cv::Point start; + cv::Point end; + int numFrames; + bool right; + bool boardUsed = false; + bool rightBoardUsed = false; + + int length() + { + return (int) abs(start.x - end.x); // This line was complaining about returning a double into an int returning function. I convert it to int before returning now. + // Was this a possible cause for the co-ordinate mix-up? 
it said double may be too big to fit into an int + } + }; + + std::vector motion; + + void generateMotionLines(std::vector lecturerPositions, int skipFrames); + + void DrawLine(cv::Mat &img, cv::Point start, cv::Point end, cv::Scalar s); + + void cullMotion(int ignoreThresh, int yFix); + + void repairCulling(int ignoreThresh); + + void generateMotionImage(cv::Mat &img); + + void getMotionLines(std::vector &outMotionVec); + + void relinkMotion(); + + void attatchBoardUsage(PersistentData &persistentData); + +}; + + +#endif //TRACK4K_PRESENTERMOTION_H diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index d1552b3..12d87a5 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -1,108 +1,108 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/07/21. -// -#include "../segmentation/Track4KPreProcess.h" -#include "VirtualCinematographer.h" -#include - -using namespace cv; -using namespace std; - -int skipLecturePosition = 2; //Only process every second point of lecturers position (Helps filter noisy movement/jitter out) - -/** - * Main method for the Virtual Cinematographer module - * @param persistentData - * @return - */ -int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData) { - //Vector of points representing the lecturers positions - vector lectPoints; - - //Set a fixed y-value for the crop window - long int y_value = 0; - - //Generate this fixed y-value from average y-value of all lecture positions - for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i++) { - y_value += ((persistentData.lecturerTrackedLocationRectangles.at(i).tl().y + - (persistentData.lecturerTrackedLocationRectangles.at(i).height / 2))); - } - - y_value = y_value / persistentData.lecturerTrackedLocationRectangles.size(); - - //Add an offset to the y-value - int y = y_value - 500; - - //Remove every second point as we dont need that accuracy, only general direction of lecturer - for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i += skipLecturePosition) { - if (i > persistentData.lecturerTrackedLocationRectangles.size()) { - break; - } - int x = ((persistentData.lecturerTrackedLocationRectangles.at(i).tl().x + - (persistentData.lecturerTrackedLocationRectangles.at(i).width / 2))); - int y = persistentData.lecturerTrackedLocationRectangles.at(i).y; - - lectPoints.push_back(Point(x, y)); - } - - PresenterMotion presenterMotion; - presenterMotion.generateMotionLines(lectPoints, (persistentData.skipFrameMovementDetection + 1) * skipLecturePosition); - //4*2 because tracking section evaluates every 4th frame - // and here we evaluate every 2nd one of those points, so essentially - // we evaluating every 8th frame from the original video file - - presenterMotion.cullMotion(150, y); - - vector 
movementLines; - presenterMotion.getMotionLines(movementLines); - - PanLogic panLogic; - panLogic.initialise(persistentData.videoDimension, persistentData.panOutputVideoSize, y); - vector cropRectangles; - panLogic.doPan(movementLines, cropRectangles); - - //Create video writer object for writing the cropped output video - VideoWriter outputVideo; - outputVideo.open(persistentData.outputVideoFilenameSuffix + "." + persistentData.saveFileExtension, - persistentData.codec, persistentData.fps, persistentData.panOutputVideoSize, 1); - - - //Open original input video file - FileReader fileReader; - fileReader.readFile(persistentData.inputFileName, persistentData); - - Mat drawing; - - //Loop over all frames in the input video and save the cropped frames to a stream as well as the board segment - for (int i = 0; i < cropRectangles.size(); i++) { - - fileReader.getNextFrame(drawing); - - if (!fileReader.isEndOfFile()) { - outputVideo.write(drawing(cropRectangles[i])); - } - - drawing.release(); - - } - - //Close all file writers - outputVideo.release(); - fileReader.getInputVideo().release(); +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/07/21. 
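
cinematographerDriver above derives a fixed vertical position for the crop window by averaging the centre y of every tracked lecturer rectangle and then subtracting the constant offset of 500. A minimal sketch of that computation; the sample rectangles are invented:

    #include <opencv2/core.hpp>
    #include <cstdio>
    #include <vector>

    // Average centre y of the tracked rectangles, then apply the fixed offset used above (-500).
    int cropWindowY(const std::vector<cv::Rect> &tracked, int offset)
    {
        long long sum = 0;
        for (const cv::Rect &r : tracked)
            sum += r.tl().y + r.height / 2;                 // centre y of one detection
        return static_cast<int>(sum / static_cast<long long>(tracked.size())) + offset;
    }

    int main()
    {
        std::vector<cv::Rect> tracked = { cv::Rect(900, 1200, 300, 600), cv::Rect(950, 1180, 300, 620) };
        std::printf("%d\n", cropWindowY(tracked, -500));    // (1500 + 1490) / 2 - 500 = 995
    }
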
+// +#include "../segmentation/Track4KPreProcess.h" +#include "VirtualCinematographer.h" +#include + +using namespace cv; +using namespace std; + +int skipLecturePosition = 2; //Only process every second point of lecturers position (Helps filter noisy movement/jitter out) + +/** + * Main method for the Virtual Cinematographer module + * @param persistentData + * @return + */ +int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData) { + //Vector of points representing the lecturers positions + vector lectPoints; + + //Set a fixed y-value for the crop window + long int y_value = 0; + + //Generate this fixed y-value from average y-value of all lecture positions + for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i++) { + y_value += ((persistentData.lecturerTrackedLocationRectangles.at(i).tl().y + + (persistentData.lecturerTrackedLocationRectangles.at(i).height / 2))); + } + + y_value = y_value / persistentData.lecturerTrackedLocationRectangles.size(); + + //Add an offset to the y-value + int y = y_value - 500; + + //Remove every second point as we dont need that accuracy, only general direction of lecturer + for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i += skipLecturePosition) { + if (i > persistentData.lecturerTrackedLocationRectangles.size()) { + break; + } + int x = ((persistentData.lecturerTrackedLocationRectangles.at(i).tl().x + + (persistentData.lecturerTrackedLocationRectangles.at(i).width / 2))); + int y = persistentData.lecturerTrackedLocationRectangles.at(i).y; + + lectPoints.push_back(Point(x, y)); + } + + PresenterMotion presenterMotion; + presenterMotion.generateMotionLines(lectPoints, (persistentData.skipFrameMovementDetection + 1) * skipLecturePosition); + //4*2 because tracking section evaluates every 4th frame + // and here we evaluate every 2nd one of those points, so essentially + // we evaluating every 8th frame from the original video file + + presenterMotion.cullMotion(150, y); + + vector movementLines; + presenterMotion.getMotionLines(movementLines); + + PanLogic panLogic; + panLogic.initialise(persistentData.videoDimension, persistentData.panOutputVideoSize, y); + vector cropRectangles; + panLogic.doPan(movementLines, cropRectangles); + + //Create video writer object for writing the cropped output video + VideoWriter outputVideo; + outputVideo.open(persistentData.outputVideoFilenameSuffix + "." 
+ persistentData.saveFileExtension, + persistentData.codec, persistentData.fps, persistentData.panOutputVideoSize, 1); + + + //Open original input video file + FileReader fileReader; + fileReader.readFile(persistentData.inputFileName, persistentData); + + Mat drawing; + + //Loop over all frames in the input video and save the cropped frames to a stream as well as the board segment + for (int i = 0; i < cropRectangles.size(); i++) { + + fileReader.getNextFrame(drawing); + + if (!fileReader.isEndOfFile()) { + outputVideo.write(drawing(cropRectangles[i])); + } + + drawing.release(); + + } + + //Close all file writers + outputVideo.release(); + fileReader.getInputVideo().release(); } \ No newline at end of file diff --git a/source/panning/VirtualCinematographer.h b/source/panning/VirtualCinematographer.h index c91d674..38d3bee 100644 --- a/source/panning/VirtualCinematographer.h +++ b/source/panning/VirtualCinematographer.h @@ -1,35 +1,35 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Mohamed Tanweer Khatieb on 2016/07/21. -// - -#ifndef TRACK4K_VIRTUALCINEMATOGRAPHER_H -#define TRACK4K_VIRTUALCINEMATOGRAPHER_H - -#include "opencv2/opencv.hpp" -#include "PresenterMotion.h" -#include "PanLogic.h" - -class VirtualCinematographer -{ - public: - int cinematographerDriver(PersistentData &persistentData); -}; - - -#endif //TRACK4K_VIRTUALCINEMATOGRAPHER_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Mohamed Tanweer Khatieb on 2016/07/21. +// + +#ifndef TRACK4K_VIRTUALCINEMATOGRAPHER_H +#define TRACK4K_VIRTUALCINEMATOGRAPHER_H + +#include "opencv2/opencv.hpp" +#include "PresenterMotion.h" +#include "PanLogic.h" + +class VirtualCinematographer +{ + public: + int cinematographerDriver(PersistentData &persistentData); +}; + + +#endif //TRACK4K_VIRTUALCINEMATOGRAPHER_H diff --git a/source/segmentation/BoardDetection.cpp b/source/segmentation/BoardDetection.cpp index d63406b..60be423 100644 --- a/source/segmentation/BoardDetection.cpp +++ b/source/segmentation/BoardDetection.cpp @@ -1,543 +1,543 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. 
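
The output stage above re-reads the source video and writes one cropped frame per pre-computed rectangle. A stand-alone sketch of that loop using cv::VideoCapture directly instead of the project's FileReader; the MJPG fourcc, file names and sizes are placeholders, and every rectangle must match the writer's frame size:

    #include <opencv2/opencv.hpp>
    #include <string>
    #include <vector>

    // Write one cropped frame per pre-computed rectangle; each crop's size must equal cropSize.
    void writeCroppedVideo(const std::string &inFile, const std::string &outFile,
                           const std::vector<cv::Rect> &crops, double fps, cv::Size cropSize)
    {
        cv::VideoCapture in(inFile);
        cv::VideoWriter out(outFile, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, cropSize, true);
        cv::Mat frame;
        for (const cv::Rect &crop : crops)
        {
            if (!in.read(frame))
                break;                          // stop early if the source runs out of frames
            out.write(frame(crop));             // Mat::operator()(Rect) crops without copying pixels
        }
    }
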
You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/09/05. -// - -#include "BoardDetection.h" - -using namespace cv; -using namespace std; - -Mat sourceFrame; -Mat sourceFrameGray; -Mat thresholdFrame; -int cannyThreshold = 100; -RNG rng(12345); - -int keyPointChangeThresh = 150; -int leftKeyPointInitial = 0; -int rightKeyPointInitial = 0; -Rect rightHalf; -Rect leftHalf; -bool initialLoop = true; - -double boardFoundThresh = 0.4; - -IlluminationCorrection illuminationCorrection; - -BoardDetection::BoardDetection() -{ - - -} - -///----------------------------// -/// Main method to find boards // -///----------------------------// -void BoardDetection::extractBoards(std::vector &frames, PersistentData &pD) -{ - //Initialise parameters needed - Size sizeOfFrames = pD.videoDimension; - leftHalf = Rect(Point(0, 0), Point(sizeOfFrames.width / 2, sizeOfFrames.height)); - rightHalf = Rect(Point(sizeOfFrames.width / 2, 0), Point(sizeOfFrames.width, sizeOfFrames.height)); - - vector boardRectangles; //Store all board rectangles found - vector boardColumnRectangles; - - Rect cropRegion; - Rect finalCrop; - - - //Find board crop region - for (int i = 0; i < frames.size(); i += pD.boardDetectionSkipFrames) - { - if (i >= frames.size()) - { - break; - } - Mat frame = frames[i]; - illuminationCorrection.applyCLAHE(frame); - - //Find the cropping area - boardRectangles.clear(); - boardColumnRectangles.clear(); - findEdgesInImage(frame, cropRegion, boardRectangles, boardColumnRectangles); - - int leftK = 0; - int rightK = 0; - - bool projectorUsedLeft = false; - bool projectorUsedRight = false; - - bool leftBoardUsed = false; - bool rightBoardUsed = false; - - Scalar color; - - for (int a = 0; a < boardRectangles.size(); a++) - { - BoardDetection::BoardRectangleStruct b = boardRectangles.at(a); - - switch (b.boardType) - { - case BLACKBOARD: - if (isContained(b.boundingRectangle, rightHalf)) - { - //Board is on the right - rightK += b.numFeatures; - } else if (isContained(b.boundingRectangle, leftHalf)) - { - //Board is on the left - leftK += b.numFeatures; - } - - break; - case PROJECTORSCREEN: - if (isContained(b.boundingRectangle, rightHalf)) - { - projectorUsedRight = true; - } else if (isContained(b.boundingRectangle, leftHalf)) - { - projectorUsedLeft = true; - } - - } - } - - if (initialLoop) - { - leftKeyPointInitial = leftK; - rightKeyPointInitial = rightK; - initialLoop = false; - - } else - { - //Now check if change in number of keypoints is significant - if (leftK > leftKeyPointInitial + keyPointChangeThresh) - { - //Left board used - leftBoardUsed = true; - } - - if (rightK > rightKeyPointInitial + keyPointChangeThresh) - { - //right board used - rightBoardUsed = true; - } - - //Now apply this update back to the metaframe - pD.metaFrameVector.push_back( - MetaFrame(true, leftBoardUsed, rightBoardUsed, projectorUsedLeft, projectorUsedRight)); - } - - - //If we find an area big enough we assume the boards are contained in the crop region - if (cropRegion.area() > int(boardFoundThresh * sizeOfFrames.area()) && !pD.boardsFound) - { - finalCrop = cropRegion; - pD.boardsFound = true; - 
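
The block above marks a board half as "in use" once the keypoint count in that half rises clearly above the count captured on the first processed frame. A stand-alone sketch of that baseline-plus-threshold test; the 150 threshold mirrors keyPointChangeThresh above, and the sample counts are invented:

    #include <cstdio>

    struct BoardUsage
    {
        int baseline = -1;                      // keypoint count seen on the first processed frame

        bool used(int count, int thresh)
        {
            if (baseline < 0) { baseline = count; return false; }   // first frame only sets the baseline
            return count > baseline + thresh;                       // clear increase => writing appeared
        }
    };

    int main()
    {
        BoardUsage left;
        const int counts[] = {420, 430, 480, 610};                  // keypoints in the left half over time
        for (int c : counts)
            std::printf("%d keypoints -> %s\n", c, left.used(c, 150) ? "board used" : "idle");
    }
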
pD.boardCropRegion = cropRegion; - } else - { - //If the new area found is bigger than current crop area --> Update current crop area to new - if (cropRegion.area() > pD.boardCropRegion.area()) - { - // pD.boardCropRegion = cropRegion; - } - } - - } -} - -///-------------------------------------------// -/// Main method to find rectangles in a frame // -///-------------------------------------------// -void BoardDetection::findEdgesInImage(cv::Mat &frame, Rect &cropArea, - vector &boardRects, - vector &boardColumnRects) -{ - - - sourceFrame = frame; - // Convert image to gray and blur it - cvtColor(sourceFrame, sourceFrameGray, CV_BGR2GRAY); - blur(sourceFrameGray, sourceFrameGray, Size(3, 3)); - - - Mat canny_output; - vector > contours; - vector hierarchy; - - //cv::Mat grayOverlay = sourceFrameGray.clone(); - - - //Threshold the image to detect blackboards easier - threshold(sourceFrameGray, thresholdFrame, 200, 255, THRESH_BINARY); - - - // Detect edges using canny - Canny(sourceFrameGray, canny_output, cannyThreshold, cannyThreshold * 2, 3); - - - // Find contours - findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); - - // Draw contours - Mat drawing = Mat::zeros(canny_output.size(), CV_8UC3); - for (int i = 0; i < contours.size(); i++) - { - Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); - drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point()); - } - - - //Bound contours with rectangle - //Vector to store bounding rectangles - vector rects; - BoardDetection::boundContoursWithRectangles(contours, rects); - - //Rect cropArea; - vector possibleBoardColumnsVec; - - - BoardDetection::findBoards(rects, possibleBoardColumnsVec, boardRects); - BoardDetection::findBoardColumns(possibleBoardColumnsVec, boardColumnRects); - BoardDetection::findOverallCropOfBoards(boardColumnRects, cropArea, boardRects); - - BoardDetection::removeOverlappingRectangles(boardRects); - BoardDetection::removeOverlappingRectangles(boardColumnRects); - -} - -void BoardDetection::boundContoursWithRectangles(vector > contours, vector &allBoundingRectangles) -{ - - // Approximate contours to polygons + get bounding rectangles - vector> contours_poly(contours.size()); - vector boundRect(contours.size()); - - - for (int i = 0; i < contours.size(); i++) - { - approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true); - boundRect[i] = boundingRect(Mat(contours_poly[i])); - } - - allBoundingRectangles = boundRect; -} - -//Simple method to check if a value is contained in a vector of values -bool BoardDetection::checkContainment(vector &v, int val) -{ - - bool found = false; - for (int i = 0; i < v.size(); i++) - { - if (v.at(i) == val) - { - found = true; - } - } - - return found; -} - -void BoardDetection::findBoards(std::vector &allRectangles, std::vector &possibleBoardColumns, - std::vector &boards) -{ - - int boardIdCounter = 0; - int projectorIdCounter = 0; - - vector rectanglesToKeep; - //Loop over all rectangles - for (int i = 0; i < allRectangles.size(); i++) - { - Rect r = allRectangles.at(i); - //Ignore really small rectangles - if (r.width > 400 && r.height > 300 && r.height <= 720) - { - - //Based on aspect ratio, classify as Projector screen or blackboard - if (calculateAspectRatio(r) >= 165 && calculateAspectRatio(r) <= 200 && isDark(r)) - { - //Close to a 16:9 ratio and classified as being a dark area... 
So assume blackboard - rectanglesToKeep.push_back(r); - - BoardRectangleStruct b(boardIdCounter, r, countFeatures(sourceFrameGray(r)), BLACKBOARD); - boards.push_back(b); - boardIdCounter++; - - } else if (calculateAspectRatio(r) >= 100 && calculateAspectRatio(r) <= 145 && !isDark(r)) - { - //Close to a 4:3 ratio and classified as being a light area... So assume projector screen - - BoardRectangleStruct p(projectorIdCounter, r, countFeatures(sourceFrameGray(r)), PROJECTORSCREEN); - boards.push_back(p); - - projectorIdCounter++; - } - } else if (r.width > 400 && r.height > 300 && r.height > 1000) - { - //Possibly the board column - possibleBoardColumns.push_back(r); - } - } -} - -void BoardDetection::removeOverlappingRectangles(vector &boards) -{ -///-------------------------------------------/// - /// STAGE 1 - Remove all contained rectangles /// - ///-------------------------------------------/// - vector toRemove; - //Remove rectangles contained in another - for (int i = 0; i < boards.size(); i++) - { - - Rect r1 = boards.at(i).boundingRectangle; - for (int j = i + 1; j < boards.size(); j++) - { - - //cout << "Comparing rect " << i << " with " << j << endl; - - Rect r2 = boards.at(j).boundingRectangle; - Rect r3 = r1 & r2; - - if (r3.area() > 0) - { - if (r3.area() == r2.area()) - { - //cout << "r2 is inside r1" << endl; - toRemove.push_back(boards.at(i).ItemID); - } else if (r3.area() == r1.area()) - { - //cout << "r1 is inside r2" << endl; - toRemove.push_back(boards.at(j).ItemID); - } else - { - //cout << "Overlapping Rectangles" << endl; - if (r1.area() > r2.area()) - { - toRemove.push_back(boards.at(i).ItemID); //Remove smaller rectangle R2 - } else - { - toRemove.push_back(boards.at(j).ItemID);//Remove smaller rectangle R1 - } - } - } else - { - //cout << "Non-overlapping Rectangles" << endl; - - } - } - } - - vector toKeep; - for (int i = 0; i < boards.size(); i++) - { - if (!checkContainment(toRemove, boards.at(i).ItemID)) - { - toKeep.push_back(boards.at(i)); - } - } - - boards.clear(); - boards = move(toKeep); - -} - -void -BoardDetection::findBoardColumns(std::vector &allRectangles, std::vector &boardColumnsR) -{ - - ///-------------------------------------------/// - /// STAGE 1 - Remove all contained rectangles /// - ///-------------------------------------------/// - vector toRemove; - //Remove rectangles contained in another - for (int i = 0; i < allRectangles.size(); i++) - { - - Rect r1 = allRectangles[i]; - for (int j = i + 1; j < allRectangles.size(); j++) - { - - //cout << "Comparing rect " << i << " with " << j << endl; - - Rect r2 = allRectangles[j]; - Rect r3 = r1 & r2; - - if (r3.area() > 0) - { - if (r3.area() == r2.area()) - { - //cout << "r2 is inside r1" << endl; - toRemove.push_back(j); - } else if (r3.area() == r1.area()) - { - //cout << "r1 is inside r2" << endl; - toRemove.push_back(i); - } else - { - //cout << "Overlapping Rectangles" << endl; - if (r1.area() > r2.area()) - { - toRemove.push_back(j); //Remove smaller rectangle R2 - } else - { - toRemove.push_back(i); //Remove smaller rectangle R1 - } - } - } else - { - //cout << "Non-overlapping Rectangles" << endl; - } - } - } - - ///---------------------------------------------------/// - /// STAGE 2 - Keep only possible candidate rectangles /// - ///---------------------------------------------------/// - //Evaluate all rectangles and keep only candidates that pass a certain criteria - //vector filtered; - int boardColumnId = 0; - for (int i = 0; i < allRectangles.size(); i++) - { - - if 
(!checkContainment(toRemove, i)) - { - Rect t = allRectangles.at(i); - - - if (t.width > 400 && t.width < 2000 && t.height > 500) - { - //Check to see if the rectangle is not unreasonably small - //filtered.push_back(t); - BoardRectangleStruct bc(boardColumnId, t, countFeatures(sourceFrameGray(t)), BOUNDING); - boardColumnsR.push_back(bc); - boardColumnId++; - } - } - } - - //allRectangles.clear(); - //allRectangles = move(filtered); -} - -void BoardDetection::findOverallCropOfBoards(std::vector &boardColumns, Rect &finalCrop, - std::vector &Boards) -{ - ///-------------------------------------/// - /// STAGE 1 - Decide on area to segment /// - ///-------------------------------------/// - //Bound overall crop region - int minX = INT_MAX, minY = INT_MAX; - int maxX = INT_MIN, maxY = INT_MIN; - - for (BoardRectangleStruct boardRect:boardColumns) - { - - if (boardRect.boundingRectangle.tl().x < minX) - { - minX = boardRect.boundingRectangle.tl().x; - } - if (boardRect.boundingRectangle.tl().y < minY) - { - minY = boardRect.boundingRectangle.tl().y; - } - if (boardRect.boundingRectangle.br().x > maxX) - { - maxX = boardRect.boundingRectangle.br().x; - } - if (boardRect.boundingRectangle.br().y > maxY) - { - maxY = boardRect.boundingRectangle.br().y; - } - } - - //Extend further incase boards do extend - for (BoardRectangleStruct boardRect:Boards) - { - - if (boardRect.boundingRectangle.tl().x < minX) - { - minX = boardRect.boundingRectangle.tl().x; - } - if (boardRect.boundingRectangle.tl().y < minY) - { - minY = boardRect.boundingRectangle.tl().y; - } - if (boardRect.boundingRectangle.br().x > maxX) - { - maxX = boardRect.boundingRectangle.br().x; - } - if (boardRect.boundingRectangle.br().y > maxY) - { - maxY = boardRect.boundingRectangle.br().y; - } - } - - ///-----------------------------------/// - /// STAGE 2 - Set overall crop region /// - ///-----------------------------------/// - finalCrop = Rect(Point(minX, minY), Point(maxX, maxY)); -} - -int BoardDetection::calculateAspectRatio(cv::Rect &r) -{ - //Returns 177 for 16:9 and 133 for 4:3 - // cout << "Width = " << boundingRectangle.width << " Height = " << boundingRectangle.height << " Aspect Ratio = " << (double) (boundingRectangle.width / boundingRectangle.height)<< endl; - return (int) ((r.width / r.height) * 100); -} - -bool BoardDetection::isDark(cv::Rect &r) -{ - - Mat tmpImg = (thresholdFrame(r)); - int TotalNumberOfPixels = tmpImg.rows * tmpImg.cols; - int ZeroPixels = TotalNumberOfPixels - countNonZero(tmpImg); - float percent = ((float) ZeroPixels / (float) TotalNumberOfPixels) * 100; - ///cout << "Percentage black: " << percent << endl; - return percent > 75; -} - -std::vector BoardDetection::countFeatures(Mat cropImg) -{ - - //Perform sift function on image to find key featurePoints such as writing on the boards - - //-- Step 1: Detect the keypoints using SURF Detector - int minHessian = 400; - - Ptr detector = xfeatures2d::SURF::create(minHessian); - - std::vector keypoints_1; - - detector->detect(cropImg, keypoints_1); - - return keypoints_1; -} - -bool BoardDetection::isContained(cv::Rect r1, cv::Rect r2) -{ - - Rect r3 = r1 & r2; - //Is r1 contained in r2? - return r3.area() == r1.area(); -} +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
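
calculateAspectRatio above is documented as returning 177 for 16:9 and 133 for 4:3, but with int operands r.width / r.height truncates before the * 100 scaling, so a 1920x1080 rectangle actually yields 100. A small sketch that does the arithmetic in floating point so the documented values come out; this is for comparison, not a drop-in replacement:

    #include <opencv2/core.hpp>
    #include <cstdio>

    // Width-to-height ratio scaled by 100, computed in floating point: 16:9 -> 177, 4:3 -> 133.
    int aspectRatio100(const cv::Rect &r)
    {
        return static_cast<int>(100.0 * r.width / r.height);
    }

    int main()
    {
        std::printf("%d\n", aspectRatio100(cv::Rect(0, 0, 1920, 1080)));   // 177
        std::printf("%d\n", aspectRatio100(cv::Rect(0, 0, 1600, 1200)));   // 133
    }
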
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/09/05. +// + +#include "BoardDetection.h" + +using namespace cv; +using namespace std; + +Mat sourceFrame; +Mat sourceFrameGray; +Mat thresholdFrame; +int cannyThreshold = 100; +RNG rng(12345); + +int keyPointChangeThresh = 150; +int leftKeyPointInitial = 0; +int rightKeyPointInitial = 0; +Rect rightHalf; +Rect leftHalf; +bool initialLoop = true; + +double boardFoundThresh = 0.4; + +IlluminationCorrection illuminationCorrection; + +BoardDetection::BoardDetection() +{ + + +} + +///----------------------------// +/// Main method to find boards // +///----------------------------// +void BoardDetection::extractBoards(std::vector &frames, PersistentData &pD) +{ + //Initialise parameters needed + Size sizeOfFrames = pD.videoDimension; + leftHalf = Rect(Point(0, 0), Point(sizeOfFrames.width / 2, sizeOfFrames.height)); + rightHalf = Rect(Point(sizeOfFrames.width / 2, 0), Point(sizeOfFrames.width, sizeOfFrames.height)); + + vector boardRectangles; //Store all board rectangles found + vector boardColumnRectangles; + + Rect cropRegion; + Rect finalCrop; + + + //Find board crop region + for (int i = 0; i < frames.size(); i += pD.boardDetectionSkipFrames) + { + if (i >= frames.size()) + { + break; + } + Mat frame = frames[i]; + illuminationCorrection.applyCLAHE(frame); + + //Find the cropping area + boardRectangles.clear(); + boardColumnRectangles.clear(); + findEdgesInImage(frame, cropRegion, boardRectangles, boardColumnRectangles); + + int leftK = 0; + int rightK = 0; + + bool projectorUsedLeft = false; + bool projectorUsedRight = false; + + bool leftBoardUsed = false; + bool rightBoardUsed = false; + + Scalar color; + + for (int a = 0; a < boardRectangles.size(); a++) + { + BoardDetection::BoardRectangleStruct b = boardRectangles.at(a); + + switch (b.boardType) + { + case BLACKBOARD: + if (isContained(b.boundingRectangle, rightHalf)) + { + //Board is on the right + rightK += b.numFeatures; + } else if (isContained(b.boundingRectangle, leftHalf)) + { + //Board is on the left + leftK += b.numFeatures; + } + + break; + case PROJECTORSCREEN: + if (isContained(b.boundingRectangle, rightHalf)) + { + projectorUsedRight = true; + } else if (isContained(b.boundingRectangle, leftHalf)) + { + projectorUsedLeft = true; + } + + } + } + + if (initialLoop) + { + leftKeyPointInitial = leftK; + rightKeyPointInitial = rightK; + initialLoop = false; + + } else + { + //Now check if change in number of keypoints is significant + if (leftK > leftKeyPointInitial + keyPointChangeThresh) + { + //Left board used + leftBoardUsed = true; + } + + if (rightK > rightKeyPointInitial + keyPointChangeThresh) + { + //right board used + rightBoardUsed = true; + } + + //Now apply this update back to the metaframe + pD.metaFrameVector.push_back( + MetaFrame(true, leftBoardUsed, rightBoardUsed, projectorUsedLeft, projectorUsedRight)); + } + + + //If we find an area big enough we assume the boards are contained in the crop region + if (cropRegion.area() > int(boardFoundThresh * sizeOfFrames.area()) && !pD.boardsFound) + { + finalCrop = cropRegion; + pD.boardsFound = true; + 
pD.boardCropRegion = cropRegion; + } else + { + //If the new area found is bigger than current crop area --> Update current crop area to new + if (cropRegion.area() > pD.boardCropRegion.area()) + { + // pD.boardCropRegion = cropRegion; + } + } + + } +} + +///-------------------------------------------// +/// Main method to find rectangles in a frame // +///-------------------------------------------// +void BoardDetection::findEdgesInImage(cv::Mat &frame, Rect &cropArea, + vector &boardRects, + vector &boardColumnRects) +{ + + + sourceFrame = frame; + // Convert image to gray and blur it + cvtColor(sourceFrame, sourceFrameGray, CV_BGR2GRAY); + blur(sourceFrameGray, sourceFrameGray, Size(3, 3)); + + + Mat canny_output; + vector > contours; + vector hierarchy; + + //cv::Mat grayOverlay = sourceFrameGray.clone(); + + + //Threshold the image to detect blackboards easier + threshold(sourceFrameGray, thresholdFrame, 200, 255, THRESH_BINARY); + + + // Detect edges using canny + Canny(sourceFrameGray, canny_output, cannyThreshold, cannyThreshold * 2, 3); + + + // Find contours + findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); + + // Draw contours + Mat drawing = Mat::zeros(canny_output.size(), CV_8UC3); + for (int i = 0; i < contours.size(); i++) + { + Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); + drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point()); + } + + + //Bound contours with rectangle + //Vector to store bounding rectangles + vector rects; + BoardDetection::boundContoursWithRectangles(contours, rects); + + //Rect cropArea; + vector possibleBoardColumnsVec; + + + BoardDetection::findBoards(rects, possibleBoardColumnsVec, boardRects); + BoardDetection::findBoardColumns(possibleBoardColumnsVec, boardColumnRects); + BoardDetection::findOverallCropOfBoards(boardColumnRects, cropArea, boardRects); + + BoardDetection::removeOverlappingRectangles(boardRects); + BoardDetection::removeOverlappingRectangles(boardColumnRects); + +} + +void BoardDetection::boundContoursWithRectangles(vector > contours, vector &allBoundingRectangles) +{ + + // Approximate contours to polygons + get bounding rectangles + vector> contours_poly(contours.size()); + vector boundRect(contours.size()); + + + for (int i = 0; i < contours.size(); i++) + { + approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true); + boundRect[i] = boundingRect(Mat(contours_poly[i])); + } + + allBoundingRectangles = boundRect; +} + +//Simple method to check if a value is contained in a vector of values +bool BoardDetection::checkContainment(vector &v, int val) +{ + + bool found = false; + for (int i = 0; i < v.size(); i++) + { + if (v.at(i) == val) + { + found = true; + } + } + + return found; +} + +void BoardDetection::findBoards(std::vector &allRectangles, std::vector &possibleBoardColumns, + std::vector &boards) +{ + + int boardIdCounter = 0; + int projectorIdCounter = 0; + + vector rectanglesToKeep; + //Loop over all rectangles + for (int i = 0; i < allRectangles.size(); i++) + { + Rect r = allRectangles.at(i); + //Ignore really small rectangles + if (r.width > 400 && r.height > 300 && r.height <= 720) + { + + //Based on aspect ratio, classify as Projector screen or blackboard + if (calculateAspectRatio(r) >= 165 && calculateAspectRatio(r) <= 200 && isDark(r)) + { + //Close to a 16:9 ratio and classified as being a dark area... 
So assume blackboard + rectanglesToKeep.push_back(r); + + BoardRectangleStruct b(boardIdCounter, r, countFeatures(sourceFrameGray(r)), BLACKBOARD); + boards.push_back(b); + boardIdCounter++; + + } else if (calculateAspectRatio(r) >= 100 && calculateAspectRatio(r) <= 145 && !isDark(r)) + { + //Close to a 4:3 ratio and classified as being a light area... So assume projector screen + + BoardRectangleStruct p(projectorIdCounter, r, countFeatures(sourceFrameGray(r)), PROJECTORSCREEN); + boards.push_back(p); + + projectorIdCounter++; + } + } else if (r.width > 400 && r.height > 300 && r.height > 1000) + { + //Possibly the board column + possibleBoardColumns.push_back(r); + } + } +} + +void BoardDetection::removeOverlappingRectangles(vector &boards) +{ +///-------------------------------------------/// + /// STAGE 1 - Remove all contained rectangles /// + ///-------------------------------------------/// + vector toRemove; + //Remove rectangles contained in another + for (int i = 0; i < boards.size(); i++) + { + + Rect r1 = boards.at(i).boundingRectangle; + for (int j = i + 1; j < boards.size(); j++) + { + + //cout << "Comparing rect " << i << " with " << j << endl; + + Rect r2 = boards.at(j).boundingRectangle; + Rect r3 = r1 & r2; + + if (r3.area() > 0) + { + if (r3.area() == r2.area()) + { + //cout << "r2 is inside r1" << endl; + toRemove.push_back(boards.at(i).ItemID); + } else if (r3.area() == r1.area()) + { + //cout << "r1 is inside r2" << endl; + toRemove.push_back(boards.at(j).ItemID); + } else + { + //cout << "Overlapping Rectangles" << endl; + if (r1.area() > r2.area()) + { + toRemove.push_back(boards.at(i).ItemID); //Remove smaller rectangle R2 + } else + { + toRemove.push_back(boards.at(j).ItemID);//Remove smaller rectangle R1 + } + } + } else + { + //cout << "Non-overlapping Rectangles" << endl; + + } + } + } + + vector toKeep; + for (int i = 0; i < boards.size(); i++) + { + if (!checkContainment(toRemove, boards.at(i).ItemID)) + { + toKeep.push_back(boards.at(i)); + } + } + + boards.clear(); + boards = move(toKeep); + +} + +void +BoardDetection::findBoardColumns(std::vector &allRectangles, std::vector &boardColumnsR) +{ + + ///-------------------------------------------/// + /// STAGE 1 - Remove all contained rectangles /// + ///-------------------------------------------/// + vector toRemove; + //Remove rectangles contained in another + for (int i = 0; i < allRectangles.size(); i++) + { + + Rect r1 = allRectangles[i]; + for (int j = i + 1; j < allRectangles.size(); j++) + { + + //cout << "Comparing rect " << i << " with " << j << endl; + + Rect r2 = allRectangles[j]; + Rect r3 = r1 & r2; + + if (r3.area() > 0) + { + if (r3.area() == r2.area()) + { + //cout << "r2 is inside r1" << endl; + toRemove.push_back(j); + } else if (r3.area() == r1.area()) + { + //cout << "r1 is inside r2" << endl; + toRemove.push_back(i); + } else + { + //cout << "Overlapping Rectangles" << endl; + if (r1.area() > r2.area()) + { + toRemove.push_back(j); //Remove smaller rectangle R2 + } else + { + toRemove.push_back(i); //Remove smaller rectangle R1 + } + } + } else + { + //cout << "Non-overlapping Rectangles" << endl; + } + } + } + + ///---------------------------------------------------/// + /// STAGE 2 - Keep only possible candidate rectangles /// + ///---------------------------------------------------/// + //Evaluate all rectangles and keep only candidates that pass a certain criteria + //vector filtered; + int boardColumnId = 0; + for (int i = 0; i < allRectangles.size(); i++) + { + + if 
(!checkContainment(toRemove, i)) + { + Rect t = allRectangles.at(i); + + + if (t.width > 400 && t.width < 2000 && t.height > 500) + { + //Check to see if the rectangle is not unreasonably small + //filtered.push_back(t); + BoardRectangleStruct bc(boardColumnId, t, countFeatures(sourceFrameGray(t)), BOUNDING); + boardColumnsR.push_back(bc); + boardColumnId++; + } + } + } + + //allRectangles.clear(); + //allRectangles = move(filtered); +} + +void BoardDetection::findOverallCropOfBoards(std::vector &boardColumns, Rect &finalCrop, + std::vector &Boards) +{ + ///-------------------------------------/// + /// STAGE 1 - Decide on area to segment /// + ///-------------------------------------/// + //Bound overall crop region + int minX = INT_MAX, minY = INT_MAX; + int maxX = INT_MIN, maxY = INT_MIN; + + for (BoardRectangleStruct boardRect:boardColumns) + { + + if (boardRect.boundingRectangle.tl().x < minX) + { + minX = boardRect.boundingRectangle.tl().x; + } + if (boardRect.boundingRectangle.tl().y < minY) + { + minY = boardRect.boundingRectangle.tl().y; + } + if (boardRect.boundingRectangle.br().x > maxX) + { + maxX = boardRect.boundingRectangle.br().x; + } + if (boardRect.boundingRectangle.br().y > maxY) + { + maxY = boardRect.boundingRectangle.br().y; + } + } + + //Extend further incase boards do extend + for (BoardRectangleStruct boardRect:Boards) + { + + if (boardRect.boundingRectangle.tl().x < minX) + { + minX = boardRect.boundingRectangle.tl().x; + } + if (boardRect.boundingRectangle.tl().y < minY) + { + minY = boardRect.boundingRectangle.tl().y; + } + if (boardRect.boundingRectangle.br().x > maxX) + { + maxX = boardRect.boundingRectangle.br().x; + } + if (boardRect.boundingRectangle.br().y > maxY) + { + maxY = boardRect.boundingRectangle.br().y; + } + } + + ///-----------------------------------/// + /// STAGE 2 - Set overall crop region /// + ///-----------------------------------/// + finalCrop = Rect(Point(minX, minY), Point(maxX, maxY)); +} + +int BoardDetection::calculateAspectRatio(cv::Rect &r) +{ + //Returns 177 for 16:9 and 133 for 4:3 + // cout << "Width = " << boundingRectangle.width << " Height = " << boundingRectangle.height << " Aspect Ratio = " << (double) (boundingRectangle.width / boundingRectangle.height)<< endl; + return (int) ((r.width / r.height) * 100); +} + +bool BoardDetection::isDark(cv::Rect &r) +{ + + Mat tmpImg = (thresholdFrame(r)); + int TotalNumberOfPixels = tmpImg.rows * tmpImg.cols; + int ZeroPixels = TotalNumberOfPixels - countNonZero(tmpImg); + float percent = ((float) ZeroPixels / (float) TotalNumberOfPixels) * 100; + ///cout << "Percentage black: " << percent << endl; + return percent > 75; +} + +std::vector BoardDetection::countFeatures(Mat cropImg) +{ + + //Perform sift function on image to find key featurePoints such as writing on the boards + + //-- Step 1: Detect the keypoints using SURF Detector + int minHessian = 400; + + Ptr detector = xfeatures2d::SURF::create(minHessian); + + std::vector keypoints_1; + + detector->detect(cropImg, keypoints_1); + + return keypoints_1; +} + +bool BoardDetection::isContained(cv::Rect r1, cv::Rect r2) +{ + + Rect r3 = r1 & r2; + //Is r1 contained in r2? 
+ return r3.area() == r1.area(); +} diff --git a/source/segmentation/BoardDetection.h b/source/segmentation/BoardDetection.h index 71c9df3..3567ee1 100644 --- a/source/segmentation/BoardDetection.h +++ b/source/segmentation/BoardDetection.h @@ -1,168 +1,168 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - - -// -// Created by Charles Fitzhenry on 2016/09/05. -// - -#ifndef TRACK4K_BOARDDETECTION_H -#define TRACK4K_BOARDDETECTION_H - -#include "../MetaFrame.h" -#include "../FileReader.h" -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/videoio.hpp" -#include -#include -#include -#include "opencv2/core.hpp" -#include "opencv2/features2d.hpp" -#include "opencv2/xfeatures2d.hpp" -#include "IlluminationCorrection.h" - - -class BoardDetection -{ - -public: - BoardDetection(); - - enum type - { - BLACKBOARD, PROJECTORSCREEN, BOUNDING - }; - - ///Rectangle struct for storing boards - struct BoardRectangleStruct - { - int ItemID; - cv::Rect boundingRectangle; - int numFeatures; - type boardType; - std::vector featurePoints; - - BoardRectangleStruct(int id, cv::Rect rect, std::vector f, BoardDetection::type bT) - { - - ItemID = id; - boundingRectangle = rect; - featurePoints = f; - numFeatures = featurePoints.size(); - boardType = bT; - } - }; - - /** - * - * @param frames - * @param pD - */ - void extractBoards(std::vector &frames, PersistentData &pD); - - /** - * This method acts as the main driver for detecting the boards in the input frames - * @param frame is the input frame - * @param cropArea is the overall area that includes all the boards - * @param boardRects is the areas in the frame that contain boards - * @param boardColumnRects is the areas in the frame tht contain the board columns - */ - void findEdgesInImage(cv::Mat &frame, cv::Rect &cropArea, std::vector &boardRects, - std::vector &boardColumnRects); - - /** - * This method uses the contours provided by the findRectangles method - * @param contours - * @param allBoundingRectangles is the output vector containing all the rectangles found - */ - void boundContoursWithRectangles(std::vector> contours, - std::vector &allBoundingRectangles); - - /** - * This is a helper method to check if a vector contains a number - * @param v is the vector of number - * @param val is the value to check for in the vecctor - * @return true if contained - */ - bool checkContainment(std::vector &v, int val); - - /** - * - * @param allRectangles is a vector containing all unclassified rectangles that has been detected in the image - * @param possibleBoardColumns contains all rectangles that are classified as not being boards - * @param boards is a vector to which all rectngle are added that are classified as being black boards. 
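
The calculateAspectRatio comment above promises 177 for 16:9 and 133 for 4:3, but with cv::Rect's integer width and height the division in (r.width / r.height) * 100 truncates before the multiply. A minimal sketch of the floating-point form those values, and the 165-200 / 100-145 ranges in findBoards, appear to assume (the helper name is illustrative):

// Sketch: width-to-height percentage as described in the comment above.
// cv::Rect stores int dimensions, so casting before dividing is what yields
// 177 for 16:9 and 133 for 4:3 rather than a truncated 100.
#include <opencv2/core.hpp>

static int aspectRatioPercent(const cv::Rect &r)
{
    if (r.height == 0) return 0;                          // guard against empty rects
    return static_cast<int>(100.0 * r.width / r.height);
}

// aspectRatioPercent(cv::Rect(0, 0, 1920, 1080)) == 177   (16:9 blackboard candidate)
// aspectRatioPercent(cv::Rect(0, 0, 1024, 768))  == 133   (4:3 projector candidate)
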
- */ - void findBoards(std::vector &allRectangles, std::vector &possibleBoardColumns, - std::vector &boards); - - /** - * This method culls all the smaller rectangles that are contained and only keeps the largest rectangles that are - * not contained - * @param boards is a vector containing rectangles. - */ - void removeOverlappingRectangles(std::vector &boards); - - /** - * This method finds the area containing the board columns and places these into a vector. - * @param allRectangles is all the rectangles that is found in the image - * @param boardColumnsR is all the rectangles that meet the criteria of a board column's size - */ - void findBoardColumns(std::vector &allRectangles, std::vector &boardColumnsR); - - /** - * This method takes in vectors of board columns or boards (rectangles enclosing them) and then finds the overall - * cropping rectangle that includes all the boards. - * @param boardColumns - * @param finalCrop is the rectangle that is achieved in the end representing the overall crop region - * @param Boards - */ - void findOverallCropOfBoards(std::vector &boardColumns, cv::Rect &finalCrop, - std::vector &Boards); - - /** - * This function simply divides the width by the height of the rectangle - * @param r is the input rectangle - * @return returns width divided by height - */ - int calculateAspectRatio(cv::Rect &r); - - /** - * This method takes the following parameters - * @param cropImg is the image that will be passed through the Speeded Up Robust Features(SURF) function - * @return the features as a vector - */ - std::vector countFeatures(cv::Mat cropImg); - - /** - * Returns a ratio of dark pixels (0) to light (255) of a region in a binary image - * @param r is a rectangle depicting the region to consider in the image - * @return - */ - bool isDark(cv::Rect &r); - - /** - * This method checks if - * @param r1 is contained in - * @param r2 and then - * @return true if so. - */ - bool isContained(cv::Rect r1, cv::Rect r2); - -}; - - -#endif //TRACK4K_BOARDDETECTION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + + +// +// Created by Charles Fitzhenry on 2016/09/05. 
+// + +#ifndef TRACK4K_BOARDDETECTION_H +#define TRACK4K_BOARDDETECTION_H + +#include "../MetaFrame.h" +#include "../FileReader.h" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" +#include +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/xfeatures2d.hpp" +#include "IlluminationCorrection.h" + + +class BoardDetection +{ + +public: + BoardDetection(); + + enum type + { + BLACKBOARD, PROJECTORSCREEN, BOUNDING + }; + + ///Rectangle struct for storing boards + struct BoardRectangleStruct + { + int ItemID; + cv::Rect boundingRectangle; + int numFeatures; + type boardType; + std::vector featurePoints; + + BoardRectangleStruct(int id, cv::Rect rect, std::vector f, BoardDetection::type bT) + { + + ItemID = id; + boundingRectangle = rect; + featurePoints = f; + numFeatures = featurePoints.size(); + boardType = bT; + } + }; + + /** + * + * @param frames + * @param pD + */ + void extractBoards(std::vector &frames, PersistentData &pD); + + /** + * This method acts as the main driver for detecting the boards in the input frames + * @param frame is the input frame + * @param cropArea is the overall area that includes all the boards + * @param boardRects is the areas in the frame that contain boards + * @param boardColumnRects is the areas in the frame tht contain the board columns + */ + void findEdgesInImage(cv::Mat &frame, cv::Rect &cropArea, std::vector &boardRects, + std::vector &boardColumnRects); + + /** + * This method uses the contours provided by the findRectangles method + * @param contours + * @param allBoundingRectangles is the output vector containing all the rectangles found + */ + void boundContoursWithRectangles(std::vector> contours, + std::vector &allBoundingRectangles); + + /** + * This is a helper method to check if a vector contains a number + * @param v is the vector of number + * @param val is the value to check for in the vecctor + * @return true if contained + */ + bool checkContainment(std::vector &v, int val); + + /** + * + * @param allRectangles is a vector containing all unclassified rectangles that has been detected in the image + * @param possibleBoardColumns contains all rectangles that are classified as not being boards + * @param boards is a vector to which all rectngle are added that are classified as being black boards. + */ + void findBoards(std::vector &allRectangles, std::vector &possibleBoardColumns, + std::vector &boards); + + /** + * This method culls all the smaller rectangles that are contained and only keeps the largest rectangles that are + * not contained + * @param boards is a vector containing rectangles. + */ + void removeOverlappingRectangles(std::vector &boards); + + /** + * This method finds the area containing the board columns and places these into a vector. + * @param allRectangles is all the rectangles that is found in the image + * @param boardColumnsR is all the rectangles that meet the criteria of a board column's size + */ + void findBoardColumns(std::vector &allRectangles, std::vector &boardColumnsR); + + /** + * This method takes in vectors of board columns or boards (rectangles enclosing them) and then finds the overall + * cropping rectangle that includes all the boards. 
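
findOverallCropOfBoards above tracks the min/max corners of every rectangle by hand; cv::Rect's union operator gives the same enclosing crop more compactly. A sketch under that reading (the helper name and the plain cv::Rect inputs are illustrative):

#include <vector>
#include <opencv2/core.hpp>

// Sketch: smallest rectangle enclosing every detected board / board column,
// equivalent to the min/max corner bookkeeping in findOverallCropOfBoards.
static cv::Rect enclosingCrop(const std::vector<cv::Rect> &regions)
{
    cv::Rect crop;                                   // default rect has zero area
    for (const cv::Rect &r : regions)
        crop = crop.empty() ? r : (crop | r);        // | = minimum enclosing rectangle
    return crop;
}
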
+ * @param boardColumns + * @param finalCrop is the rectangle that is achieved in the end representing the overall crop region + * @param Boards + */ + void findOverallCropOfBoards(std::vector &boardColumns, cv::Rect &finalCrop, + std::vector &Boards); + + /** + * This function simply divides the width by the height of the rectangle + * @param r is the input rectangle + * @return returns width divided by height + */ + int calculateAspectRatio(cv::Rect &r); + + /** + * This method takes the following parameters + * @param cropImg is the image that will be passed through the Speeded Up Robust Features(SURF) function + * @return the features as a vector + */ + std::vector countFeatures(cv::Mat cropImg); + + /** + * Returns a ratio of dark pixels (0) to light (255) of a region in a binary image + * @param r is a rectangle depicting the region to consider in the image + * @return + */ + bool isDark(cv::Rect &r); + + /** + * This method checks if + * @param r1 is contained in + * @param r2 and then + * @return true if so. + */ + bool isContained(cv::Rect r1, cv::Rect r2); + +}; + + +#endif //TRACK4K_BOARDDETECTION_H diff --git a/source/segmentation/IlluminationCorrection.cpp b/source/segmentation/IlluminationCorrection.cpp index b41f164..94c8162 100644 --- a/source/segmentation/IlluminationCorrection.cpp +++ b/source/segmentation/IlluminationCorrection.cpp @@ -1,73 +1,73 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/20. 
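
countFeatures builds a SURF detector with minHessian = 400 and uses the keypoint count as a proxy for how much writing a board region holds. A self-contained sketch of that call, assuming an OpenCV 3.x build with the opencv_contrib xfeatures2d module (the wrapper name and default value are illustrative):

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/xfeatures2d.hpp>

// Sketch of the SURF keypoint detection used to judge whether a board carries writing.
static std::vector<cv::KeyPoint> surfKeypoints(const cv::Mat &cropImg, int minHessian = 400)
{
    cv::Ptr<cv::xfeatures2d::SURF> detector = cv::xfeatures2d::SURF::create(minHessian);
    std::vector<cv::KeyPoint> keypoints;
    detector->detect(cropImg, keypoints);            // more keypoints ~ more board content
    return keypoints;
}
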
-// - -#include "IlluminationCorrection.h" - -#include "opencv2/opencv.hpp" -#include - -using namespace cv; -using namespace std; - -void IlluminationCorrection::correctLight(std::vector &mFrames) -{ - - ///CLAHE (Contrast Limited Adaptive Histogram Equalization) - std::vector temp; - for (int i = 0; i < mFrames.size(); i++) - { - //Apply CLAHE algorithm to each frame - applyCLAHE(mFrames[i]); - } -} - -//Method to apply light correction on a single frame -void IlluminationCorrection::applyCLAHE(Mat &frame) -{ - - ///CLAHE (Contrast Limited Adaptive Histogram Equalization) - // READ RGB color image and convert it to Lab - cv::Mat bgr_image = frame; - cv::Mat lab_image; - cv::cvtColor(bgr_image, lab_image, CV_BGR2Lab); - - // Extract the L channel - std::vector lab_planes(3); - cv::split(lab_image, lab_planes); // now we have the L image in lab_planes[0] - - // apply the CLAHE algorithm to the L channel - cv::Ptr clahe = cv::createCLAHE(); - clahe->setClipLimit(3); - cv::Mat dst; - clahe->apply(lab_planes[0], dst); - - // Merge the the color planes back into an Lab image - dst.copyTo(lab_planes[0]); - cv::merge(lab_planes, lab_image); - - // convert back to RGB - cv::Mat image_clahe; - cv::cvtColor(lab_image, image_clahe, CV_Lab2BGR); - - //Update video - frame = image_clahe; -} - - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/20. 
+// + +#include "IlluminationCorrection.h" + +#include "opencv2/opencv.hpp" +#include + +using namespace cv; +using namespace std; + +void IlluminationCorrection::correctLight(std::vector &mFrames) +{ + + ///CLAHE (Contrast Limited Adaptive Histogram Equalization) + std::vector temp; + for (int i = 0; i < mFrames.size(); i++) + { + //Apply CLAHE algorithm to each frame + applyCLAHE(mFrames[i]); + } +} + +//Method to apply light correction on a single frame +void IlluminationCorrection::applyCLAHE(Mat &frame) +{ + + ///CLAHE (Contrast Limited Adaptive Histogram Equalization) + // READ RGB color image and convert it to Lab + cv::Mat bgr_image = frame; + cv::Mat lab_image; + cv::cvtColor(bgr_image, lab_image, CV_BGR2Lab); + + // Extract the L channel + std::vector lab_planes(3); + cv::split(lab_image, lab_planes); // now we have the L image in lab_planes[0] + + // apply the CLAHE algorithm to the L channel + cv::Ptr clahe = cv::createCLAHE(); + clahe->setClipLimit(3); + cv::Mat dst; + clahe->apply(lab_planes[0], dst); + + // Merge the the color planes back into an Lab image + dst.copyTo(lab_planes[0]); + cv::merge(lab_planes, lab_image); + + // convert back to RGB + cv::Mat image_clahe; + cv::cvtColor(lab_image, image_clahe, CV_Lab2BGR); + + //Update video + frame = image_clahe; +} + + diff --git a/source/segmentation/IlluminationCorrection.h b/source/segmentation/IlluminationCorrection.h index c2b3c8e..1c0186d 100644 --- a/source/segmentation/IlluminationCorrection.h +++ b/source/segmentation/IlluminationCorrection.h @@ -1,45 +1,45 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/20. -// - -#ifndef TRACK4K_ILLUMINATIONCORRECTION_H -#define TRACK4K_ILLUMINATIONCORRECTION_H - -#include -#include -#include "../MetaFrame.h" - -class IlluminationCorrection -{ -public: - /** - * This method takes a vecotr of frames and applies the Contrast Limited Adaptive Histogram Equalization method to each frame. - * @param mFrames is the vector containing frames of type cv::Mat - */ - void correctLight(std::vector &mFrames); - - /** - * This method takes a single frame and applies the Contrast Limited Adaptive Histogram Equalization method to the frame. - * @param frame - */ - void applyCLAHE(cv::Mat &frame); -}; - - -#endif //TRACK4K_ILLUMINATIONCORRECTION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
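
applyCLAHE above equalizes only the L channel of a Lab conversion so colours are left untouched. A condensed sketch of the same steps with the template arguments spelled out, assuming OpenCV 3.x (the function name and clip-limit default are illustrative):

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch: CLAHE on the lightness channel only, then convert back to BGR.
static void claheCorrect(cv::Mat &frameBGR, double clipLimit = 3.0)
{
    cv::Mat lab;
    cv::cvtColor(frameBGR, lab, cv::COLOR_BGR2Lab);

    std::vector<cv::Mat> planes(3);
    cv::split(lab, planes);                          // planes[0] is the L (lightness) channel

    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
    clahe->setClipLimit(clipLimit);
    clahe->apply(planes[0], planes[0]);

    cv::merge(planes, lab);
    cv::cvtColor(lab, frameBGR, cv::COLOR_Lab2BGR);
}
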
See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/20. +// + +#ifndef TRACK4K_ILLUMINATIONCORRECTION_H +#define TRACK4K_ILLUMINATIONCORRECTION_H + +#include +#include +#include "../MetaFrame.h" + +class IlluminationCorrection +{ +public: + /** + * This method takes a vecotr of frames and applies the Contrast Limited Adaptive Histogram Equalization method to each frame. + * @param mFrames is the vector containing frames of type cv::Mat + */ + void correctLight(std::vector &mFrames); + + /** + * This method takes a single frame and applies the Contrast Limited Adaptive Histogram Equalization method to the frame. + * @param frame + */ + void applyCLAHE(cv::Mat &frame); +}; + + +#endif //TRACK4K_ILLUMINATIONCORRECTION_H diff --git a/source/segmentation/MotionDetection.cpp b/source/segmentation/MotionDetection.cpp index 7f95ed4..5bbfa0a 100644 --- a/source/segmentation/MotionDetection.cpp +++ b/source/segmentation/MotionDetection.cpp @@ -1,147 +1,147 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/20. 
-// - -#include "MotionDetection.h" - -#include -#include -#include -#include - -using namespace cv; -using namespace std; - -int thresholdVal = 35; - -void MotionDetection::subtract(std::vector &frames, PersistentData &persistentData) -{ - - //Two adjacent frames that would store grayscale images - cv::Mat grayFrame1; - cv::Mat grayFrame2; - - //Difference between the two frames - cv::Mat differenceImage; - cv::Mat thresholdImage; - - //A frame containing the accumulates superimposed difference frames - cv::Mat thresholdAccumulation; - - // Loop over all frames and perform background subtraction - for (int i = 0; i < frames.size() - 1; i++) - { - if (i > frames.size() - 1) - { - break; - } - - //Convert frame to grayscale - cv::cvtColor(frames[i], grayFrame1, COLOR_BGRA2GRAY); - cv::cvtColor(frames[i + 1], grayFrame2, COLOR_BGRA2GRAY); - - - //Perform background subtraction - cv::absdiff(grayFrame1, grayFrame2, differenceImage); - - - cv::threshold(differenceImage, thresholdImage, thresholdVal, 255, THRESH_BINARY); - cv::blur(thresholdImage, thresholdImage, cv::Size(10, 10)); - cv::threshold(thresholdImage, thresholdImage, thresholdVal, 255, THRESH_BINARY); - - if (i == 0) - { - thresholdAccumulation = thresholdImage.clone(); - } - - bitwise_or(thresholdAccumulation, thresholdImage, thresholdAccumulation); - - } - - - cv::Mat kernel = Mat::ones(10, 10, CV_32F); - cv::morphologyEx(thresholdAccumulation, thresholdAccumulation, MORPH_OPEN, kernel); - - - cv::blur(thresholdAccumulation, thresholdAccumulation, cv::Size(10, 10)); - cv::threshold(thresholdAccumulation, thresholdAccumulation, thresholdVal, 255, THRESH_BINARY); - - - persistentData.areasOfMotion.push_back(boundMotion(thresholdAccumulation)); - -} - -//Bound motion with rectangles -Rect MotionDetection::boundMotion(Mat threshold_output) -{ - - vector > contours; - vector hierarchy; - - cv::blur(threshold_output, threshold_output, cv::Size(100, 100)); - - // Find contours - findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); - - // Approximate contours to polygons + get bounding rectangles - vector > contours_poly(contours.size()); - vector boundRect(contours.size()); - - //Bound all motion with rectangles - for (int i = 0; i < contours.size(); i++) - { - approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true); - boundRect[i] = boundingRect(Mat(contours_poly[i])); - } - - //Find larges enclosing rectangle from all small rectangles - int top_x = INT_MAX; - int top_y = INT_MAX; - int bottom_x = INT_MIN; - int bottom_y = INT_MIN; - - for (Rect r : boundRect) - { - int r_top_x = r.tl().x; - int r_top_y = r.tl().y; - int r_bottom_x = r.br().x; - int r_bottom_y = r.br().y; - - if (r_top_x < top_x) - { - top_x = r_top_x; - } - if (r_top_y < top_y) - { - top_y = r_top_y; - } - if (r_bottom_x > bottom_x) - { - bottom_x = r_bottom_x; - } - if (r_bottom_y > bottom_y) - { - bottom_y = r_bottom_y; - } - } - - Rect overallMotionCrop(Point(top_x, top_y), Point(bottom_x, bottom_y)); - - return overallMotionCrop; +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/20. +// + +#include "MotionDetection.h" + +#include +#include +#include +#include + +using namespace cv; +using namespace std; + +int thresholdVal = 35; + +void MotionDetection::subtract(std::vector &frames, PersistentData &persistentData) +{ + + //Two adjacent frames that would store grayscale images + cv::Mat grayFrame1; + cv::Mat grayFrame2; + + //Difference between the two frames + cv::Mat differenceImage; + cv::Mat thresholdImage; + + //A frame containing the accumulates superimposed difference frames + cv::Mat thresholdAccumulation; + + // Loop over all frames and perform background subtraction + for (int i = 0; i < frames.size() - 1; i++) + { + if (i > frames.size() - 1) + { + break; + } + + //Convert frame to grayscale + cv::cvtColor(frames[i], grayFrame1, COLOR_BGRA2GRAY); + cv::cvtColor(frames[i + 1], grayFrame2, COLOR_BGRA2GRAY); + + + //Perform background subtraction + cv::absdiff(grayFrame1, grayFrame2, differenceImage); + + + cv::threshold(differenceImage, thresholdImage, thresholdVal, 255, THRESH_BINARY); + cv::blur(thresholdImage, thresholdImage, cv::Size(10, 10)); + cv::threshold(thresholdImage, thresholdImage, thresholdVal, 255, THRESH_BINARY); + + if (i == 0) + { + thresholdAccumulation = thresholdImage.clone(); + } + + bitwise_or(thresholdAccumulation, thresholdImage, thresholdAccumulation); + + } + + + cv::Mat kernel = Mat::ones(10, 10, CV_32F); + cv::morphologyEx(thresholdAccumulation, thresholdAccumulation, MORPH_OPEN, kernel); + + + cv::blur(thresholdAccumulation, thresholdAccumulation, cv::Size(10, 10)); + cv::threshold(thresholdAccumulation, thresholdAccumulation, thresholdVal, 255, THRESH_BINARY); + + + persistentData.areasOfMotion.push_back(boundMotion(thresholdAccumulation)); + +} + +//Bound motion with rectangles +Rect MotionDetection::boundMotion(Mat threshold_output) +{ + + vector > contours; + vector hierarchy; + + cv::blur(threshold_output, threshold_output, cv::Size(100, 100)); + + // Find contours + findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); + + // Approximate contours to polygons + get bounding rectangles + vector > contours_poly(contours.size()); + vector boundRect(contours.size()); + + //Bound all motion with rectangles + for (int i = 0; i < contours.size(); i++) + { + approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true); + boundRect[i] = boundingRect(Mat(contours_poly[i])); + } + + //Find larges enclosing rectangle from all small rectangles + int top_x = INT_MAX; + int top_y = INT_MAX; + int bottom_x = INT_MIN; + int bottom_y = INT_MIN; + + for (Rect r : boundRect) + { + int r_top_x = r.tl().x; + int r_top_y = r.tl().y; + int r_bottom_x = r.br().x; + int r_bottom_y = r.br().y; + + if (r_top_x < top_x) + { + top_x = r_top_x; + } + if (r_top_y < top_y) + { + top_y = r_top_y; + } + if (r_bottom_x > bottom_x) + { + bottom_x = r_bottom_x; + } + if (r_bottom_y > bottom_y) + { + bottom_y = r_bottom_y; + } + } + + Rect overallMotionCrop(Point(top_x, top_y), Point(bottom_x, bottom_y)); + + return overallMotionCrop; } \ No 
newline at end of file diff --git a/source/segmentation/MotionDetection.h b/source/segmentation/MotionDetection.h index 4907743..14c386f 100644 --- a/source/segmentation/MotionDetection.h +++ b/source/segmentation/MotionDetection.h @@ -1,60 +1,60 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/20. -// - -#ifndef TRACK4K_MOTIONDETECTION_H -#define TRACK4K_MOTIONDETECTION_H - -//opencv -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/videoio.hpp" -#include -#include -#include "../FileReader.h" - -#include "../MetaFrame.h" - -//C -#include -//C++ -#include -#include - -class MotionDetection -{ -public: -/** - * This method bounds the merged difference image with rectangles - * @param threshold_output is the image containing the merged motion from the subtract method. - * @return a rectangle that bounds all motion - */ - Rect boundMotion(cv::Mat threshold_output); - - /** - * This method loops over all frames and takes two consecutive frames and differences them. This difference is then - * accumulated in a final frame containing all motion for the frames in the vector. - * - * @param frames is the vector containing the input frames - * @param persistentData is the link to the central class sharing all data between the different modules. - */ - void subtract(std::vector &frames, PersistentData &persistentData); -}; - -#endif //TRACK4K_MOTIONDETECTION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/20. +// + +#ifndef TRACK4K_MOTIONDETECTION_H +#define TRACK4K_MOTIONDETECTION_H + +//opencv +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" +#include +#include +#include "../FileReader.h" + +#include "../MetaFrame.h" + +//C +#include +//C++ +#include +#include + +class MotionDetection +{ +public: +/** + * This method bounds the merged difference image with rectangles + * @param threshold_output is the image containing the merged motion from the subtract method. + * @return a rectangle that bounds all motion + */ + Rect boundMotion(cv::Mat threshold_output); + + /** + * This method loops over all frames and takes two consecutive frames and differences them. 
This difference is then + * accumulated in a final frame containing all motion for the frames in the vector. + * + * @param frames is the vector containing the input frames + * @param persistentData is the link to the central class sharing all data between the different modules. + */ + void subtract(std::vector &frames, PersistentData &persistentData); +}; + +#endif //TRACK4K_MOTIONDETECTION_H diff --git a/source/segmentation/Track4KPreProcess.cpp b/source/segmentation/Track4KPreProcess.cpp index eb86502..5915dce 100644 --- a/source/segmentation/Track4KPreProcess.cpp +++ b/source/segmentation/Track4KPreProcess.cpp @@ -1,76 +1,76 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/07/19. -// - -#include "IlluminationCorrection.h" - -#include "MotionDetection.h" -#include "Track4KPreProcess.h" -#include -#include - -using namespace std; -using namespace cv; - -void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) -{ - - vector frameVector; - - //Read in video file - FileReader fileReader; - fileReader.readFile(persistentData.inputFileName, persistentData); - - //Create objects - MotionDetection motionDetection; //Detects and segments overall merged motion over a given number of frames - BoardDetection boardDetection; //Detects boards - - //Keep reading in frames from the video file until the end is reached. - //Number of frames to read on each iteration is defined in the PersistentData class - while (!fileReader.isEndOfFile()) - { - //Read in frames - fileReader.getNextSegment(persistentData.segmentationNumFramesToProcessPerIteration, frameVector); - - ///Unused feature - intended to speed up tracking section search space in future - //Detect areas of motion - //motionDetection.subtract(frameVector, persistentData); - - //Detect the boards - boardDetection.extractBoards(frameVector, persistentData); - - } - - //If board crop was found, write this coordinates to text file - if(persistentData.boardsFound){ - int out_w = persistentData.boardCropRegion.width; - int out_h = persistentData.boardCropRegion.height; - int out_x = persistentData.boardCropRegion.tl().x; - int out_y = persistentData.boardCropRegion.tl().y; - - - ofstream outTextFile; - outTextFile.open ("boardCropCoordinates.txt"); - outTextFile << out_w << ":" << out_h << ":" << out_x << ":" << out_y; - outTextFile.close(); - - } - - fileReader.getInputVideo().release(); -} +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
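
MotionDetection::subtract differences consecutive frames, thresholds at 35, and ORs each result into one accumulated motion mask before the morphology pass. A condensed sketch of that accumulation, assuming 3-channel BGR frames (the helper name is illustrative); the size_t loop guard also sidesteps the frames.size() - 1 underflow on empty input:

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch: OR together the thresholded differences of consecutive frames
// into a single motion mask, as in MotionDetection::subtract.
static cv::Mat accumulateMotion(const std::vector<cv::Mat> &frames, int thresh = 35)
{
    cv::Mat gray1, gray2, diff, mask, accum;
    for (size_t i = 0; i + 1 < frames.size(); ++i)   // safe when frames is empty
    {
        cv::cvtColor(frames[i],     gray1, cv::COLOR_BGR2GRAY);
        cv::cvtColor(frames[i + 1], gray2, cv::COLOR_BGR2GRAY);
        cv::absdiff(gray1, gray2, diff);
        cv::threshold(diff, mask, thresh, 255, cv::THRESH_BINARY);
        if (accum.empty()) accum = mask.clone();
        cv::bitwise_or(accum, mask, accum);
    }
    return accum;                                    // empty Mat if fewer than two frames
}
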
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/07/19. +// + +#include "IlluminationCorrection.h" + +#include "MotionDetection.h" +#include "Track4KPreProcess.h" +#include +#include + +using namespace std; +using namespace cv; + +void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) +{ + + vector frameVector; + + //Read in video file + FileReader fileReader; + fileReader.readFile(persistentData.inputFileName, persistentData); + + //Create objects + MotionDetection motionDetection; //Detects and segments overall merged motion over a given number of frames + BoardDetection boardDetection; //Detects boards + + //Keep reading in frames from the video file until the end is reached. + //Number of frames to read on each iteration is defined in the PersistentData class + while (!fileReader.isEndOfFile()) + { + //Read in frames + fileReader.getNextSegment(persistentData.segmentationNumFramesToProcessPerIteration, frameVector); + + ///Unused feature - intended to speed up tracking section search space in future + //Detect areas of motion + //motionDetection.subtract(frameVector, persistentData); + + //Detect the boards + boardDetection.extractBoards(frameVector, persistentData); + + } + + //If board crop was found, write this coordinates to text file + if(persistentData.boardsFound){ + int out_w = persistentData.boardCropRegion.width; + int out_h = persistentData.boardCropRegion.height; + int out_x = persistentData.boardCropRegion.tl().x; + int out_y = persistentData.boardCropRegion.tl().y; + + + ofstream outTextFile; + outTextFile.open ("boardCropCoordinates.txt"); + outTextFile << out_w << ":" << out_h << ":" << out_x << ":" << out_y; + outTextFile.close(); + + } + + fileReader.getInputVideo().release(); +} diff --git a/source/segmentation/Track4KPreProcess.h b/source/segmentation/Track4KPreProcess.h index 7a43ae2..db4f71c 100644 --- a/source/segmentation/Track4KPreProcess.h +++ b/source/segmentation/Track4KPreProcess.h @@ -1,41 +1,41 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Charles Fitzhenry on 2016/08/09. 
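
preProcessDriver writes the detected crop as w:h:x:y to boardCropCoordinates.txt, the same ordering that ffmpeg's crop=w:h:x:y filter takes. A sketch of how a later stage might read it back into a cv::Rect (the reader name is illustrative):

#include <fstream>
#include <string>
#include <opencv2/core.hpp>

// Sketch: parse boardCropCoordinates.txt ("w:h:x:y", as written above).
static bool readBoardCrop(const std::string &path, cv::Rect &crop)
{
    std::ifstream in(path);
    int w, h, x, y;
    char sep;
    if (in >> w >> sep >> h >> sep >> x >> sep >> y)
    {
        crop = cv::Rect(x, y, w, h);
        return true;
    }
    return false;                                    // file missing or malformed
}
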
-// - -#ifndef TRACK4K_TRACK4KPREPROCESS_H -#define TRACK4K_TRACK4KPREPROCESS_H - -#include "../FileReader.h" -#include "../MetaFrame.h" -#include "BoardDetection.h" -#include "../PersistentData.h" - -class Track4KPreProcess -{ - -public: - /** - * This is the main method used by the mainDriver class to run the segmentation section - * @param persistentData is the link to the central class sharing all data between the different modules. - */ - void preProcessDriver(PersistentData &persistentData); -}; - - -#endif //TRACK4K_TRACK4KPREPROCESS_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Charles Fitzhenry on 2016/08/09. +// + +#ifndef TRACK4K_TRACK4KPREPROCESS_H +#define TRACK4K_TRACK4KPREPROCESS_H + +#include "../FileReader.h" +#include "../MetaFrame.h" +#include "BoardDetection.h" +#include "../PersistentData.h" + +class Track4KPreProcess +{ + +public: + /** + * This is the main method used by the mainDriver class to run the segmentation section + * @param persistentData is the link to the central class sharing all data between the different modules. + */ + void preProcessDriver(PersistentData &persistentData); +}; + + +#endif //TRACK4K_TRACK4KPREPROCESS_H diff --git a/source/tracking/Ghost.cpp b/source/tracking/Ghost.cpp index 570acda..9cda6f1 100644 --- a/source/tracking/Ghost.cpp +++ b/source/tracking/Ghost.cpp @@ -1,149 +1,149 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/10/14. 
-// - -#include -#include "opencv/cv.h" -#include "opencv/highgui.h" -#include -#include -#include -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/videoio.hpp" -#include "opencv2/video.hpp" -#include "opencv2/highgui.hpp" -#include -#include "Ghost.h" - -using namespace cv; -using namespace std; - -Ghost::Ghost(Rect g){ - ghost = g; - onScreenTime = 1; -} - -//other ghost constructor -Ghost::Ghost(Rect g, long screenTime){ - ghost = g; - onScreenTime = screenTime; -} - -//reset the variable ghost; -void Ghost::reset(Rect newGhost){ - ghost = newGhost; -} -//change the dimensions of the ghost with intersecting rectangles -//return true if the ghost needs to be deleted -bool Ghost::resize(vector* sizingRects){ - int amountIntersected = 0; - bool intersected = false; //if a rectangle intersects this ghost - for (int i = 0; i < sizingRects->size(); i++) { - - //the intersection of sizingRects[i] and ghost - Rect intersect; - if(sizingRects->at(i).area() > ghost.area()){ - (intersect = sizingRects->at(i) & ghost); - } - else{ - (intersect = ghost & sizingRects->at(i)); - } - - //if an intersection exists - if (intersect.area() > 0) { - amountIntersected += intersect.area(); //calculate how much of the ghost is filled - intersected = true; - } - //else there is no intersection with this ghost just skip - else { - continue; - } - - //in a top left coordinate system - - Rect rect = sizingRects->at(i); //temporary var for readability - - //top left x - if (rect.tl().x < ghost.tl().x) { - double val = ghost.tl().x + ((rect.tl().x - ghost.tl().x) * sizeRatio); - ghost = Rect(Point((int)val, ghost.tl().y), Point(ghost.br().x, ghost.br().y)); - } - //top left y - if (rect.tl().y < ghost.tl().y) { - double val = ghost.tl().y + ((rect.tl().y - ghost.tl().y) * sizeRatio); - ghost = Rect(Point(ghost.tl().x, (int)val), Point(ghost.br().x, ghost.br().y)); - } - //bottom right x - if (rect.br().x > ghost.br().x) { - double val = ghost.br().x + ((rect.br().x - ghost.br().x) * sizeRatio); - ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point((int)val, ghost.br().y)); - } - //bottom right y - if (rect.br().y > ghost.br().y) { - double val = ghost.br().y + ((rect.br().y - ghost.br().y) * sizeRatio); - ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point(ghost.br().x, (int)val)); - } - } - - //increment time alive - onScreenTime++; - - //if no rects intersected the ghost decrease area (shrink) or if cannyThreshold of intersection hasn't been reached - if (!intersected || ((double)amountIntersected / (double) ghost.area()) < shrinkThresh) { - return decreaseArea(); - } - - //if intersections valid then don't delete - return false; -} - -//decreases the size of the ghost, returns true to prompt delete -//top-left origin co-ordinate system -bool Ghost::decreaseArea(){ - //x & y distance between top-left and bottom-right - int x = ghost.br().x - ghost.tl().x; - int y = ghost.br().y - ghost.tl().y; - - //if the rectangle is tiny then return false to delete - if(x < deleteThresh || y < deleteThresh){ - return true; - } - - //shrink the ghost inwards - double val = ghost.tl().x + (x * shrinkRatio); - ghost = Rect(Point((int)val, ghost.tl().y), Point(ghost.br().x, ghost.br().y)); - - val = ghost.tl().y + (y * shrinkRatio); - ghost = Rect(Point(ghost.tl().x, (int)val), Point(ghost.br().x, ghost.br().y)); - - val = ghost.br().x - (x * shrinkRatio); - ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point((int)val, ghost.br().y)); - - val = ghost.br().y - (y * shrinkRatio); - ghost = 
Rect(Point(ghost.tl().x, ghost.tl().y), Point(ghost.br().x, (int)val)); - - return false; -} - - - - - - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/10/14. +// + +#include +#include "opencv/cv.h" +#include "opencv/highgui.h" +#include +#include +#include +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/video.hpp" +#include "opencv2/highgui.hpp" +#include +#include "Ghost.h" + +using namespace cv; +using namespace std; + +Ghost::Ghost(Rect g){ + ghost = g; + onScreenTime = 1; +} + +//other ghost constructor +Ghost::Ghost(Rect g, long screenTime){ + ghost = g; + onScreenTime = screenTime; +} + +//reset the variable ghost; +void Ghost::reset(Rect newGhost){ + ghost = newGhost; +} +//change the dimensions of the ghost with intersecting rectangles +//return true if the ghost needs to be deleted +bool Ghost::resize(vector* sizingRects){ + int amountIntersected = 0; + bool intersected = false; //if a rectangle intersects this ghost + for (int i = 0; i < sizingRects->size(); i++) { + + //the intersection of sizingRects[i] and ghost + Rect intersect; + if(sizingRects->at(i).area() > ghost.area()){ + (intersect = sizingRects->at(i) & ghost); + } + else{ + (intersect = ghost & sizingRects->at(i)); + } + + //if an intersection exists + if (intersect.area() > 0) { + amountIntersected += intersect.area(); //calculate how much of the ghost is filled + intersected = true; + } + //else there is no intersection with this ghost just skip + else { + continue; + } + + //in a top left coordinate system + + Rect rect = sizingRects->at(i); //temporary var for readability + + //top left x + if (rect.tl().x < ghost.tl().x) { + double val = ghost.tl().x + ((rect.tl().x - ghost.tl().x) * sizeRatio); + ghost = Rect(Point((int)val, ghost.tl().y), Point(ghost.br().x, ghost.br().y)); + } + //top left y + if (rect.tl().y < ghost.tl().y) { + double val = ghost.tl().y + ((rect.tl().y - ghost.tl().y) * sizeRatio); + ghost = Rect(Point(ghost.tl().x, (int)val), Point(ghost.br().x, ghost.br().y)); + } + //bottom right x + if (rect.br().x > ghost.br().x) { + double val = ghost.br().x + ((rect.br().x - ghost.br().x) * sizeRatio); + ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point((int)val, ghost.br().y)); + } + //bottom right y + if (rect.br().y > ghost.br().y) { + double val = ghost.br().y + ((rect.br().y - ghost.br().y) * sizeRatio); + ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point(ghost.br().x, (int)val)); + } + } + + //increment time alive + onScreenTime++; + + //if no rects intersected the ghost decrease area (shrink) or if cannyThreshold of intersection hasn't been reached + if (!intersected || ((double)amountIntersected / (double) ghost.area()) < shrinkThresh) { + return decreaseArea(); + } + + //if intersections valid then don't delete + return false; +} + +//decreases the size of the 
ghost, returns true to prompt delete +//top-left origin co-ordinate system +bool Ghost::decreaseArea(){ + //x & y distance between top-left and bottom-right + int x = ghost.br().x - ghost.tl().x; + int y = ghost.br().y - ghost.tl().y; + + //if the rectangle is tiny then return false to delete + if(x < deleteThresh || y < deleteThresh){ + return true; + } + + //shrink the ghost inwards + double val = ghost.tl().x + (x * shrinkRatio); + ghost = Rect(Point((int)val, ghost.tl().y), Point(ghost.br().x, ghost.br().y)); + + val = ghost.tl().y + (y * shrinkRatio); + ghost = Rect(Point(ghost.tl().x, (int)val), Point(ghost.br().x, ghost.br().y)); + + val = ghost.br().x - (x * shrinkRatio); + ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point((int)val, ghost.br().y)); + + val = ghost.br().y - (y * shrinkRatio); + ghost = Rect(Point(ghost.tl().x, ghost.tl().y), Point(ghost.br().x, (int)val)); + + return false; +} + + + + + + diff --git a/source/tracking/Ghost.h b/source/tracking/Ghost.h index 6c99a21..0267741 100644 --- a/source/tracking/Ghost.h +++ b/source/tracking/Ghost.h @@ -1,81 +1,81 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/10/14. 
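
Reading the constants above, shrinkRatio = 0.1 trims both edges of an unmatched ghost each frame, so roughly 80% of each side survives per call until a side falls under deleteThresh = 40 and the ghost is deleted. A rough back-of-the-envelope sketch of that decay for a hypothetical 400 px ghost (integer truncation in the real code makes this approximate):

#include <cstdio>

// Sketch: how many unmatched frames a stationary ghost survives under the
// shrinkRatio = 0.1 / deleteThresh = 40 constants defined above.
int main()
{
    double side = 400.0;               // hypothetical starting side length in pixels
    int frames = 0;
    while (side >= 40.0)
    {
        side *= 0.8;                   // 10% trimmed from each edge per call
        ++frames;
    }
    std::printf("A 400 px ghost fades out after about %d unmatched frames\n", frames);
    return 0;
}
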
-// - - - -#ifndef TRACK4K_GHOST_H -#define TRACK4K_GHOST_H - -#include "opencv/cv.h" -#include "opencv/highgui.h" -#include -#include -#include -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/videoio.hpp" -#include "opencv2/video.hpp" -#include "opencv2/highgui.hpp" -#include - -using namespace cv; -using namespace std; - -class Ghost{ -private: - Rect ghost; //rectangle of the ghost - long onScreenTime; //time existing on screen (number of frames) - double sizeRatio = 0.75; //amount that a resize affects the ghost - double shrinkRatio = 0.1; //the amount by which a rect shrinks each turn - double shrinkThresh = 0.6; //cannyThreshold value (0.1 - 1) of how full rect needs to be to avoid shrinking - int deleteThresh = 40; - -public: - //ghost constructor - Ghost(Rect g); - //ghost constructor including time - Ghost(Rect g, long screenTime); - //return on screen time - long getOnScreenTime(){ return onScreenTime; } - //set on screen time - void setOnScreenTime(long time){ onScreenTime = time; } - //set on screen time - void subOnScreenTime(long time){ - onScreenTime -= time; - - if(onScreenTime < 0){ - onScreenTime = 1; - } - } - //return Rect dimensions - Rect getGhost(){ return ghost; } - //reset the variable ghost; - void reset(Rect newGhost); - //change the dimensions of ghost with intersecting rectangles - bool resize(vector* sizingRects); - //decreases the size of the ghost, returns false to prompt delete - bool decreaseArea(); - //get tl of ghost - Point tl(){ return ghost.tl(); } - //get br of ghost - Point br(){ return ghost.br(); } -}; - -#endif //TRACK4K_GHOST_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/10/14. 
+// + + + +#ifndef TRACK4K_GHOST_H +#define TRACK4K_GHOST_H + +#include "opencv/cv.h" +#include "opencv/highgui.h" +#include +#include +#include +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/video.hpp" +#include "opencv2/highgui.hpp" +#include + +using namespace cv; +using namespace std; + +class Ghost{ +private: + Rect ghost; //rectangle of the ghost + long onScreenTime; //time existing on screen (number of frames) + double sizeRatio = 0.75; //amount that a resize affects the ghost + double shrinkRatio = 0.1; //the amount by which a rect shrinks each turn + double shrinkThresh = 0.6; //cannyThreshold value (0.1 - 1) of how full rect needs to be to avoid shrinking + int deleteThresh = 40; + +public: + //ghost constructor + Ghost(Rect g); + //ghost constructor including time + Ghost(Rect g, long screenTime); + //return on screen time + long getOnScreenTime(){ return onScreenTime; } + //set on screen time + void setOnScreenTime(long time){ onScreenTime = time; } + //set on screen time + void subOnScreenTime(long time){ + onScreenTime -= time; + + if(onScreenTime < 0){ + onScreenTime = 1; + } + } + //return Rect dimensions + Rect getGhost(){ return ghost; } + //reset the variable ghost; + void reset(Rect newGhost); + //change the dimensions of ghost with intersecting rectangles + bool resize(vector* sizingRects); + //decreases the size of the ghost, returns false to prompt delete + bool decreaseArea(); + //get tl of ghost + Point tl(){ return ghost.tl(); } + //get br of ghost + Point br(){ return ghost.br(); } +}; + +#endif //TRACK4K_GHOST_H diff --git a/source/tracking/ImageRecognition.cpp b/source/tracking/ImageRecognition.cpp index 320e720..4a63967 100644 --- a/source/tracking/ImageRecognition.cpp +++ b/source/tracking/ImageRecognition.cpp @@ -1,77 +1,77 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/08/09. -// - -#include "opencv2/objdetect/objdetect.hpp" -#include "opencv2/highgui/highgui.hpp" -#include "opencv2/imgproc/imgproc.hpp" - -#include "ImageRecognition.h" - -#include -#include - - -using namespace std; -using namespace cv; - -bool ImageRecognition::loadImage(string imageName){ - - //load image in colour and display - Mat image; - image = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE); - - //if image failed to load - if(image.empty()){ - std::cout << "\nImage load " << imageName << " failed!" 
<< std::endl; - return false; - } - - //namedWindow("window1", 1); - //imshow("window1", image); - - //load face cascade (.xml file) - CascadeClassifier face_cascade; - face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"); - - //Detect face - vector faces; - // - face_cascade.detectMultiScale(image, - faces, //vector of rectangles on detected objects - 1.1, //how much image size is reduced at each scale - 5, //how many min neighbours to have to retain - 0 | CV_HAAR_SCALE_IMAGE, //search type flag - Size( - 20, //min scale of object - 25 //max scale of object (max = min = unbounded) - )); - - //Draw circles on the detected faces - for(int i = 0; i < faces.size(); i++){ - Point center(faces[i].x + faces[i].width * 0.5, faces[i].y + faces[i].height * 0.5); - ellipse(image, center, Size(faces[i].width * 0.5, faces[i].height * 0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0); - } - - imshow("Detected Face", image); - - waitKey(0); - - return true; +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/08/09. +// + +#include "opencv2/objdetect/objdetect.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" + +#include "ImageRecognition.h" + +#include +#include + + +using namespace std; +using namespace cv; + +bool ImageRecognition::loadImage(string imageName){ + + //load image in colour and display + Mat image; + image = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE); + + //if image failed to load + if(image.empty()){ + std::cout << "\nImage load " << imageName << " failed!" 
<< std::endl; + return false; + } + + //namedWindow("window1", 1); + //imshow("window1", image); + + //load face cascade (.xml file) + CascadeClassifier face_cascade; + face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"); + + //Detect face + vector faces; + // + face_cascade.detectMultiScale(image, + faces, //vector of rectangles on detected objects + 1.1, //how much image size is reduced at each scale + 5, //how many min neighbours to have to retain + 0 | CV_HAAR_SCALE_IMAGE, //search type flag + Size( + 20, //min scale of object + 25 //max scale of object (max = min = unbounded) + )); + + //Draw circles on the detected faces + for(int i = 0; i < faces.size(); i++){ + Point center(faces[i].x + faces[i].width * 0.5, faces[i].y + faces[i].height * 0.5); + ellipse(image, center, Size(faces[i].width * 0.5, faces[i].height * 0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0); + } + + imshow("Detected Face", image); + + waitKey(0); + + return true; } \ No newline at end of file diff --git a/source/tracking/ImageRecognition.h b/source/tracking/ImageRecognition.h index 1f0361f..f7da63a 100644 --- a/source/tracking/ImageRecognition.h +++ b/source/tracking/ImageRecognition.h @@ -1,34 +1,34 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/08/09. -// - -#ifndef TRACK4K_IMAGERECOGNITION_H -#define TRACK4K_IMAGERECOGNITION_H - -#include -#include - -class ImageRecognition { - -public: - bool loadImage(std::string imageName); - -}; - -#endif //TRACK4K_IMAGERECOGNITION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/08/09. 
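Note on the ImageRecognition.cpp hunk above: loadImage reads the image in grayscale, runs OpenCV's Haar-cascade face detector against a hard-coded Windows path to haarcascade_frontalface_default.xml, and draws an ellipse on each detection. As a hedged, standalone illustration only (not part of the patch; it assumes an OpenCV 3/4 build where the old CV_LOAD_IMAGE_GRAYSCALE / CV_HAAR_SCALE_IMAGE constants are spelled cv::IMREAD_GRAYSCALE / cv::CASCADE_SCALE_IMAGE, and it takes the cascade path from the command line instead of hard-coding it), the same detection step can be written as:

    #include <opencv2/objdetect.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <iostream>
    #include <vector>

    int main(int argc, char** argv) {
        if (argc < 3) {
            std::cerr << "usage: detect <image> <cascade.xml>" << std::endl;
            return 1;
        }
        cv::Mat image = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
        cv::CascadeClassifier face_cascade;
        if (image.empty() || !face_cascade.load(argv[2])) {
            std::cerr << "failed to load image or cascade" << std::endl;
            return 1;
        }
        std::vector<cv::Rect> faces;
        // Scale factor 1.1, 5 minimum neighbours, 20x25 minimum object size,
        // matching the detectMultiScale call in the patch.
        face_cascade.detectMultiScale(image, faces, 1.1, 5, cv::CASCADE_SCALE_IMAGE,
                                      cv::Size(20, 25));
        for (const cv::Rect& f : faces) {
            cv::rectangle(image, f, cv::Scalar(255), 2);  // outline each detection
        }
        std::cout << "faces found: " << faces.size() << std::endl;
        return 0;
    }

Passing the cascade path on the command line keeps the example portable; the absolute C:\opencv\... path in the patch only resolves on a machine with that exact OpenCV install layout.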
+// + +#ifndef TRACK4K_IMAGERECOGNITION_H +#define TRACK4K_IMAGERECOGNITION_H + +#include +#include + +class ImageRecognition { + +public: + bool loadImage(std::string imageName); + +}; + +#endif //TRACK4K_IMAGERECOGNITION_H diff --git a/source/tracking/MovementDetection.cpp b/source/tracking/MovementDetection.cpp index 8c08a4b..033a175 100644 --- a/source/tracking/MovementDetection.cpp +++ b/source/tracking/MovementDetection.cpp @@ -1,1012 +1,1012 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/09/05. -// - -#include "MovementDetection.h" -#include -#include - -using namespace cv; -using namespace std; - - -VideoWriter maxS; - - -MovementDetection::MovementDetection(string vidLocation, vector *lect) { - - //set vid directory and window name - setVideoDir(vidLocation); - setShowLabel("MyVideo"); - - if (showFrameStatus) { - maxS.open("maxS.flv", CV_FOURCC('F', 'L', 'V', '1'), 7, cv::Size(3840, 2160), 1); - } - - //create the inputVideo object - VideoCapture inputVideo(videoDir); - if (!inputVideo.isOpened()) { - //error opening the video input - cerr << "Unable to open video file" << endl; - return; - } - - // -------------------- // - - //get input video settings and apply to output video - double fps = inputVideo.get(CV_CAP_PROP_FPS); - Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), - (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); - - if (showFileInfo) { - cout << "File Information" << endl; - cout << "Input file location: " << vidLocation << endl; - cout << "Video FPS: " << fps << endl; - cout << "Video Frame Count: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl; - cout << endl; - } - - //if we get an acceptable fps value - //certain file formats (e.g flv) sometimes have no fps when read with this OpenCV version - if (fps > 5 && fps < 60) { - validFps = true; - } else { - cerr << "Invalid FPS video from file, removing FPS tracking." << endl; - } - - center.x = S.width / 2; //center of the screen X value - center.y = S.height / 2; //center of the screen Y value - - //if create window - if (drawFrameStatus) { - namedWindow(showLabel, CV_WINDOW_NORMAL); - } - - //frame read dependent processing variable - clock_t processTime = clock(); - - int emptyFrameCount = 0; - - //preliminarily read previous frame so we can do absDiff - if (!inputVideo.read(prevFrame)) { - cerr << "Unable to read first frame." << endl; - cerr << "Exiting..." 
<< endl; - return; - } - - //convert initial to grayscale - cvtColor(prevFrame, prevFrame, CV_BGR2GRAY); - - //if true read frame as the next frame, if false read prevFrame as - // the next frame, this is to avoid copying and moving around data - bool swap = true; - - //number of frames to skip on successive reads - int frameSkip = frameSkipReset; - - // -------------------- // - - //frame information - long frameNumber = 0; - - //y rect ignore zones - double lowScreenBar = S.height - (S.height * lowScreenThresh); //actual pixels from the bottom - double highScreenBar = (S.height * highScreenThresh); //actual pixels from the top - - //number of frames before onScreenTime subtract - int ghostFrameCount = ghostResetValue; - - vector ghostRects; //vector of ghost rectangle objects - - // -------------------- // - - //read input data - while (true) { - - onLoopReset(); - - frameNumber++; - - //require 2 frames loaded to do absdiff - if (swap) { - swap = !swap; - //end of video - if (!inputVideo.read(frame)) { - break; - } - //greyscale applied to frame - cvtColor(frame, frame, CV_BGR2GRAY); - } else { - swap = !swap; //make true - //end of video - if (!inputVideo.read(prevFrame)) { - break; - } - //grayscale applied to prevFrame - cvtColor(prevFrame, prevFrame, CV_BGR2GRAY); - } - - // -------------------- // - - //perform frame skips - if (frameSkip == 0) { - frameSkip = frameSkipReset; - } else { - frameSkip--; - continue; - } - - //apply absolute difference background subtraction - absdiff(frame, prevFrame, frameAbsDiff); - - //perform thresholding - threshold(frameAbsDiff, frameThresh, 25, 255, 0); - - //get structuring element for morphological filters - Mat element = getStructuringElement(MORPH_RECT, Size(3, 3), Point(0, 0)); - - //perform morphological dilation - dilate(frameThresh, frameMorph, element); - blur(frameMorph, frameBlur, Size(15, 15), Point(0, 0), BORDER_DEFAULT); - - // -------------------- // - - //find contours - vector> contours; - vector hierarchy; - - findContours(frameBlur, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); - - //random number generator definition - RNG rng(12345); - - //remove small or inconsequential contours (should help with camera refocus and noise) - for (int i = 0; i < contours.size(); i++) { - if (contours[i].size() < contourMinSize) { - contours.erase(contours.begin() + i); - i--; - } - } - - //create bounding rectangles out of contours - vector boundingRects; - for (int i = 0; i < contours.size(); i++) { - boundingRects.push_back(cv::boundingRect(contours[i])); - } - - // -------------------- // - - //remove rectangles below a certain threshold of the screen or above a certain threshold of the screen - //in a top-left origin co-ordinate system - for (int i = (int) boundingRects.size() - 1; i >= 0; i--) { - if (boundingRects[i].tl().y >= lowScreenBar || boundingRects[i].br().y <= highScreenBar) { - boundingRects.erase(boundingRects.begin() + i); - } - } - - //remove rectangles with a very wide aspect ratio aspectR:1 - //this is specifically for boards and projectors - for (int i = 0; i < boundingRects.size(); i++) { - int xDiff = (boundingRects[i].br().x - boundingRects[i].tl().x); - int yDiff = (boundingRects[i].br().y - boundingRects[i].tl().y); - if ((double) xDiff / (double) yDiff > aspectR) { - boundingRects.erase(boundingRects.begin() + i); - i--; - } - } - - // -------------------- // - - //perform overlap and proximity check to ensure nearby or overlapping rectangles are grouped - 
overlapProximityLoop(&boundingRects); - - //if any massive shapes are found (usually on refocusing camera) just purge the frame of rects and create a default rect - if (oversizeCheck(&boundingRects, S.width, S.height)) { - emptyFrameCount++; - boundingRects.clear(); - boundingRects.push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); - if (showFrameStatus) { - //status information - cout << "Massive rects found at: " << frameNumber << endl; - cout << "No usable information!" << endl; - cout << "Generating center rectangle" << endl; - cout << "-------------------------" << endl << endl; - } - } - - //if there are no bounding rects left then generate a center rect - if (boundingRects.size() == 0) { - emptyFrameCount++; - //if no bounding rects, push_back a default rect - boundingRects.push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); - if (showFrameStatus) { - //status information - cout << "Empty Frame: " << frameNumber << endl; - cout << "No rectangles found!" << endl; - cout << "Generating center rectangle" << endl; - cout << "-------------------------" << endl << endl; - } - } - - // -------------------- // - - //add new ghosts (rects that don't intersect anywhere) - addNewGhosts(&ghostRects, &boundingRects); - - //push overlapping ghosts together - ghostOverlapCheck(&ghostRects); - - //perform ghost resize checks - for (int i = 0; i < ghostRects.size(); i++) { - if (ghostRects.at(i).resize(&boundingRects)) { - ghostRects.erase(ghostRects.begin() + i); - i--; - } - } - - //push overlapping ghosts together - ghostOverlapCheck(&ghostRects); - - //check if rectangles within a ghost are a certain distance away, if so split into 2 or more ghosts - ghostSplitCheck(&ghostRects, &boundingRects); - - // -------------------- // - - //after a certain number of frames subtract ghost counts - ghostFrameCount--; - if (ghostFrameCount == 0) { - for (int i = 0; i < ghostRects.size(); i++) { - ghostRects[i].subOnScreenTime(ghostResetAmount); - } - - ghostFrameCount = ghostResetValue; - } - - // -------------------- // - - //add rectangles & ghosts to memory - memoryRects.push_back(boundingRects); - memoryGhosts.push_back(ghostRects); - - // -------------------- // - - //just to visualize colours// - cvtColor(frameBlur, frameBlur, CV_GRAY2RGB); - - //display over the original frame - frameDisplay = frame.clone(); - cvtColor(frameDisplay, frameDisplay, CV_GRAY2RGB); - - if (drawFrameStatus) { - //draw contours - if (drawSettings[0]) { - for (int i = 0; i < contours.size(); i++) { - Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); - drawContours(frameDisplay, contours, i, color, 2, 8, hierarchy, 0, Point()); - } - } - - //draw rectangles around contours - if (drawSettings[1]) { - for (int i = 0; i < boundingRects.size(); i++) { - Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); - rectangle(frameDisplay, boundingRects[i].tl(), boundingRects[i].br(), color, 2, 8, 0); - } - } - - //draw ghosts in frame - if (drawSettings[2]) { - for (int i = 0; i < ghostRects.size(); i++) { - Scalar color = Scalar(0, 0, 0); - rectangle(frameDisplay, ghostRects[i].tl(), ghostRects[i].br(), color, 6, 8, 0); - } - } - - //draw ghost on screen time to ghosts - if (drawSettings[3]) { - for (int i = 0; i < ghostRects.size(); i++) { - Scalar color = Scalar(0, 0, 0); - - ss << ghostRects.at(i).getOnScreenTime(); - frameText = ss.str(); - ss.str(""); - - Rect rect = 
ghostRects.at(i).getGhost(); - Point rectCent(rect.x + (rect.width / 3), rect.y + (rect.height / 2)); - - putText(frameDisplay, frameText, rectCent, FONT_HERSHEY_PLAIN, 3, color, 3, 8, false); - } - } - } - - - if (drawFrameStatus) { - //add values to print to frame - if (drawSettings[4]) { - if (validFps) { - addTextToFrame(&frameDisplay, "", convertFrameToTime(frameNumber, fps)); - } - } - if (drawSettings[5]) { - addTextToFrame(&frameDisplay, "Frame: ", frameNumber); - } - if (drawSettings[6]) { - addTextToFrame(&frameDisplay, "Rectangles: ", boundingRects.size()); - } - - //show processing over grayscale frame - imshow(showLabel, frameDisplay); - maxS.write(frameDisplay); - - - } - - if (showFrameStatus) { - //status information - cout << "Frame: " << frameNumber << endl; - if (validFps) { - cout << convertFrameToTime(frameNumber, fps) << endl; - } - cout << "Number of rectangles: " << boundingRects.size() << endl; - cout << "Number of ghosts: " << ghostRects.size() << endl; - cout << "-------------------------" << endl << endl; - } - - //waitKey(will step through at x milliseconds rate) - //if 0 waitKey will step through when key is pressed - if (waitKey(1)) { - continue; - } - } - - //push final frame on - memoryGhosts.push_back((memoryGhosts.at(memoryGhosts.size() - 1))); - memoryRects.push_back((memoryRects.at(memoryRects.size() - 1))); - - cout << "Final frame count: " << frameNumber << endl; - - inputVideo.release(); - - processTime = clock() - processTime; - - cout << "Processing took: " << (processTime / 1000.0) << "s" << endl; - - cout << "Performing ghost adjustments!" << endl; - - // -------------------- // - - //time the post-processing - clock_t time = clock(); - - //begin the post-processing to find the lecturer from multiple ghosts - - //use the largest number calculated with a weight based on x-position favoring the center of the screen - - findLecturer(&memoryGhosts, &lecturer); - - //once main ghost has been found shift the ghost to match the avg of rectangles coordinates that - //intersect it by a weighted average of intersection amount - adjustLecturer(&lecturer, &memoryRects, lect); - - //decide to draw frame - if (drawFrameStatus) { - //print ghosts to frame and output - Scalar color = Scalar(0, 0, 0); //ghost color - showFramesMemoryRects(&memoryGhosts, color); - } - - time = (clock() - time); - - cout << "Postprocessing took: " << (time) << "ms" << endl; - - cout << "Finished processing!" 
<< endl; - - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_1_NORM_LECT.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_2_LECT_WAVE.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_3_LECT_PACE.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_4_LIGHTS.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_5_MOVE_BOARDS.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_6_MOVE_SCREEN.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_7_MOVE_SCREEN_2.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_8_OFF_ON.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_9_LECT_CROSS.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_10_BOTH_MOVE.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_11_BOTH_MOVE_2.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_12_RUNNING.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_13_THROWING.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_14_MULTI_STUDENT.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_15_MOVE_3.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_16_MOVE_CHAIRS.avi"); - //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_17_NO_MOVE.avi"); -} - -//calculates the distance between the closest two edge points of two rectangles -double MovementDetection::closestDistance(Rect a, Rect b) { - bool left = b.br().x < a.tl().x; //b left of a - bool right = a.br().x < b.tl().x; //b right of a - bool bottom = a.br().y < b.tl().y; //b below a - bool top = b.br().y < a.tl().y; //b above a - if (top and left) - return euclideanDist(a.tl(), b.br()); - else if (left and bottom) { - return euclideanDist(Point(a.tl().x, a.br().y), Point(b.br().x, b.tl().y)); - } else if (bottom and right) { - return euclideanDist(a.br(), b.tl()); - } else if (right and top) { - return euclideanDist(Point(a.br().x, a.tl().y), Point(b.tl().x, b.br().y)); - } else if (left) { - return a.tl().x - b.br().x; - } else if (right) { - return b.tl().x - a.br().x; - } else if (bottom) { - return b.tl().y - a.br().y; - } else { - return a.tl().y - b.br().y; - } -} - -//helper function to calculate distance between two points -double MovementDetection::euclideanDist(Point p, Point q) { - Point diff = p - q; - return cv::sqrt(diff.x * diff.x + diff.y * diff.y); -} - -//check for overlapping rectangles and push them together -//return true if changes made -bool MovementDetection::overlapCheck(vector *boundingRects) { - //push overlapping rectangles together - bool changeDetected; - bool anyChange = false; - - do { - //this array records changes in vector to be implemented - bool deletionMark[boundingRects->size()]; - for (int i = 0; i < boundingRects->size(); i++) { - deletionMark[i] = false; - } - - //if changes happen the rectangles need to be compared again - //this shouldn't be a problem given that number of rectangles is mostly very small (1 - 20) - changeDetected = false; - - /* - * possible efficiency here, checking the area before complete occlusion - * if the square is in another is their area > 0?? 
in which case throw the top - * in as an if inside the other if - */ - for (int i = 0; i < boundingRects->size(); i++) { - for (int k = 0; k < boundingRects->size(); k++) { - if (i != k) { - //the intersection of 2 rectangles - Rect intersect = (boundingRects->at(i)) & (boundingRects->at(k)); - - //if first rectangle is completely inside second - if (intersect == boundingRects->at(i)) { - deletionMark[i] = true; //mark for deletion - changeDetected = true; - continue; - } - //if rectangles overlap then just add them together - if (intersect.area() > 0) { - boundingRects->at(i) = (boundingRects->at(i) | boundingRects->at(k)); //merge - deletionMark[k] = true; - changeDetected = true; - } - - } - } - } - - //delete marked cells - //in reverse makes index easier to maintain - for (int i = (int) boundingRects->size() - 1; i >= 0; i--) { - if (deletionMark[i]) { - boundingRects->erase(boundingRects->begin() + i); - } - } - if (changeDetected) - anyChange = true; - } while (changeDetected); - - return anyChange; -} - -//check for nearby rectangles and push them together -//return true if changes made -bool MovementDetection::proximityCheck(vector *boundingRects) { - //returns if there were no changes at all - bool anyChange = false; - - //check each rectangle against each other to see if distances between rectangles - //are small enough to merge - for (int i = 0; i < boundingRects->size(); i++) { - for (int k = i; k < boundingRects->size(); k++) { - if (i != k) { - //if closer than threshold - if (closestDistance(boundingRects->at(i), boundingRects->at(k)) < clusterThreshold) { - boundingRects->at(i) = (boundingRects->at(i) | boundingRects->at(k)); //merge - //erase the rect and restart search - boundingRects->erase(boundingRects->begin() + k); - i = 0; - k = 0; //reset the loop - anyChange = true; - } - } - } - } - - return anyChange; -} - -//performs overlap and proximity checks until no more changes occur -void MovementDetection::overlapProximityLoop(vector *boundingRects) { - //keep going until no more changes - while (true) { - //if both return no changes - if (!overlapCheck(boundingRects) && !proximityCheck(boundingRects)) { - break; - } - } -} - -//check if any massive rectangles exist in the scene -bool MovementDetection::oversizeCheck(vector *boundingRects, int width, int height) { - for (int i = 0; i < boundingRects->size(); i++) { - if (boundingRects->at(i).area() > (width * height * massiveThreshold)) { - return true; - } - } - return false; -} - -//add text to frame function -void MovementDetection::addTextToFrame(Mat *frame, string str, auto value) { - ss << str << value; - ++textCount; - frameText = ss.str(); - putText(*frame, frameText, Point(30, 80 * textCount), FONT_HERSHEY_PLAIN, 4, Scalar(0, 0, 255), 2, 8, false); - ss.str(""); -} - -//values to reset on loop -void MovementDetection::onLoopReset() { - textCount = 0; -} - -//push overlapping ghosts together -void MovementDetection::ghostOverlapCheck(vector *ghostRects) { - //mark rects that need to be deleted - bool deletionMark[ghostRects->size()]; - for (int i = 0; i < ghostRects->size(); i++) { - deletionMark[i] = false; - } - - /* - * check ghost intersection, if intersected then push together - */ - for (int i = 0; i < ghostRects->size(); i++) { - for (int k = 0; k < ghostRects->size(); k++) { - if (i != k) { - //the intersection of 2 rectangles - Rect intersect = (ghostRects->at(i).getGhost()) & (ghostRects->at(k).getGhost()); - - //if first rectangle is completely inside second - if (intersect == 
ghostRects->at(i).getGhost()) { - deletionMark[i] = true; //mark for deletion - continue; - } - //if rectangles overlap then just add them together - if (intersect.area() > 0) { - ghostRects->at(i).reset((ghostRects->at(i).getGhost() | ghostRects->at(k).getGhost())); //merge - //mark largest time as time - if (ghostRects->at(k).getOnScreenTime() > ghostRects->at(i).getOnScreenTime()) { - ghostRects->at(i).setOnScreenTime(ghostRects->at(k).getOnScreenTime()); - } - deletionMark[k] = true; - } - - } - } - } - - //remove marked to delete ghosts - for (int i = (int) ghostRects->size() - 1; i >= 0; i--) { - if (deletionMark[i]) { - ghostRects->erase(ghostRects->begin() + i); - } - } -} - -//show frames using ghostRectangles -void MovementDetection::showFramesMemoryRects(vector> *memoryGhosts, Scalar color) { - VideoCapture inputVideo(videoDir); - if (!inputVideo.isOpened()) { - //error opening the video input - cerr << "Unable to open video file" << endl; - return; - } - - //the frame that is rendered to file - Mat printFrame; - - if (!inputVideo.read(printFrame)) { - cerr << "Unable to read first frame." << endl; - cerr << "Exiting..." << endl; - return; - } - - int frameSkip = frameSkipReset; - - for (int i = 0; i < memoryGhosts->size() - 1;) { - - if (!inputVideo.read(printFrame)) { - cout << "Moving to next stage!" << endl; - return; - } - - //perform frame skips - if (frameSkip == 0) { - frameSkip = frameSkipReset; - i++; - } else { - frameSkip--; - continue; - } - - //draw decided lecturer to frame from ghosts - rectangle(printFrame, memoryGhosts->at(i).at(0).tl(), memoryGhosts->at(i).at(0).br(), color, 6, 8, 0); - - imshow(showLabel, printFrame); - - if (waitKey(1)) { - continue; - } - } - - inputVideo.release(); -} - -//reposition ghost to centroid of squares -void -MovementDetection::adjustLecturer(vector *lecturer, vector> *memoryRects, vector *newLect) { - - for (int i = 0; i < lecturer->size(); i++) { //for each main ghost - Rect tempGhost = lecturer->at(i); - - //our index array of rectangle that intersect validly - vector intersected; - - //for each rectangle at that frame - for (int r = 0; r < memoryRects->at(i).size(); r++) { - - Rect tempRect = memoryRects->at(i).at(r); - - Rect intersect = tempRect & tempGhost; - - //if there is any intersection - if (intersect.area() > 0) { - //if the rect is much larger than ghost do nothing - if (tempRect.area() / tempGhost.area() > 2) { - continue; - } else { - intersected.push_back(r); - } - } - } - - //perform shifting operations on selected rects - if (intersected.size() == 1) { - //take average 0.9 movement to the rect and make new (tl(), br()) rect - int newTlX = (int) (shiftR * tempGhost.tl().x) + - (int) (shiftRInv * memoryRects->at(i).at(intersected.at(0)).tl().x); - int newBrX = (int) (shiftR * tempGhost.br().x) + - (int) (shiftRInv * memoryRects->at(i).at(intersected.at(0)).br().x); - - Rect newRect(Rect(Point(newTlX, tempGhost.tl().y), Point(newBrX, tempGhost.br().y))); - - newLect->push_back(newRect); - } else if (intersected.size() > 1) { - //average all intersecting rects and the ghost's coords - int newTlX = tempGhost.tl().x; - int newBrX = tempGhost.br().x; - - for (int j = 0; j < intersected.size(); j++) { - newTlX += memoryRects->at(i).at(intersected.at(j)).tl().x; - newBrX += memoryRects->at(i).at(intersected.at(j)).br().x; - } - - newTlX /= (intersected.size() + 1); - newBrX /= (intersected.size() + 1); - - Rect newRect(Rect(Point(newTlX, tempGhost.tl().y), Point(newBrX, tempGhost.br().y))); - - 
newLect->push_back(newRect); - } - } -} - -//add new ghosts (rectangles that don't intersect anywhere -void MovementDetection::addNewGhosts(vector *ghostRects, vector *boundingRects) { - - for (int i = 0; i < boundingRects->size(); i++) { - bool add = true; - for (int g = 0; g < ghostRects->size(); g++) { - //check for overlap - Rect intersect = ((ghostRects->at(g).getGhost()) & (boundingRects->at(i))); - //if none break and move to next rect - if (intersect.area() > 0) { - add = false; - break; - } - } - if (add) { - ghostRects->push_back(Ghost(boundingRects->at(i))); - } - } -} - -//return timeOnScreen * (ratioOfDistFromCenter ^ 2) averaged between x and y -double MovementDetection::positionWeighting(Rect r, long onScreenTime) { - double distX = center.x - abs(r.x - center.x); //distance from center to rect - double distY = center.y - abs(r.y - center.y); //distance from center to top of rect - - distX /= center.x; //get x ratio - distY /= center.y; //get y ratio - - //pair of ratiod lecturerTrackedLocationRectangles - double ratioX; - double ratioY; - ratioX = onScreenTime * (distX * distX); - ratioY = onScreenTime * (distY * distY); - - return (ratioX + ratioY) / 2; -} - -//divide ghosts with multiple rects in it that are far apart -void MovementDetection::ghostSplitCheck(vector *ghostRects, vector *boundingRects) { - - bool ghostDelete[ghostRects->size()]; //assign all as false - for (int i = 0; i < ghostRects->size(); i++) { - ghostDelete[i] = false; - } - - vector newGhosts; - - for (int g = 0; g < ghostRects->size(); g++) { - vector intersected; - Ghost currGhost = ghostRects->at(g); - for (int r = 0; r < boundingRects->size(); r++) { - - //test intersection of rect with ghost - Rect intersect = (currGhost.getGhost() & boundingRects->at(r)); - - //check intersection & record - if (intersect.area() > 0) { - intersected.push_back(r); - } - } - - //if more than one intersection - //see if it needs to be split - if (intersected.size() > 1) { - - bool newGhostCheck[intersected.size()]; //if the index that intersected points to needs to be made new - - for (int m = 0; m < intersected.size(); m++) { - newGhostCheck[m] = false; - } - - //check if minimum distance in ghost is below global allowed - int minDist = currGhost.getGhost().width / 3; - if (minDist < minSplitDist) { - minDist = minSplitDist; - } - - //iterate over all intersection - //use boolean checks to avoid creating duplicates - for (int i = 0; i < intersected.size(); i++) { - for (int j = i + 1; j < intersected.size(); j++) { - Rect a = boundingRects->at(intersected[i]); - Rect b = boundingRects->at(intersected[j]); - - double dist; - - //if there is intersection between the two rects then ignore - if ((a & b).area() > 0) { - continue; - } - //if completely above or below then ignore - if (a.br().y < b.tl().y || b.br().y < a.tl().y) { - continue; - } - - //if a is left of b - if (a.br().x < b.tl().x) { - dist = b.tl().x - a.br().x; - } - //if b is left of a - else if (b.br().x < a.tl().x) { - dist = a.tl().x - b.br().x; - } - - //if x distance apart is large split them up - if (dist > minDist) { - if (!newGhostCheck[i]) { - newGhostCheck[i] = true; - newGhosts.push_back(Ghost(a, currGhost.getOnScreenTime())); - } - - if (!newGhostCheck[j]) { - newGhostCheck[j] = true; - newGhosts.push_back(Ghost(b, currGhost.getOnScreenTime())); - } - ghostDelete[g] = true; - } - } - } - } - intersected.clear(); - } - - //delete all duplicate ghosts - for (int i = ghostRects->size() - 1; i >= 0; i--) { - if (ghostDelete[i]) { - 
ghostRects->erase(ghostRects->begin() + i); - } - } - - //add all new ghosts onto ghostRects - for (int i = 0; i < newGhosts.size(); i++) { - ghostRects->push_back(newGhosts.at(i)); - } -} - -//find lecturer based on onScreenTime and position on the screen -void MovementDetection::findLecturer(vector> *memoryGhosts, vector *lecturer) { - - //cout << "\n--------------------\nMemoryGhosts size: " << memoryGhosts->size() << "\nalc--------------------\n" << endl; - - for (int i = 0; i < memoryGhosts->size(); i++) { - //cout << "Ghost: " << i << " Size: " << memoryGhosts->at(i).size() << endl; - if (memoryGhosts->at(i).size() == 0) { - if (i == 0) { - lecturer->push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); - continue; - } - //if no ghost then set default previous - lecturer->push_back(Rect(lecturer->at(i - 1))); - } else { - double largestVal = positionWeighting(memoryGhosts->at(i).at(0).getGhost(), - memoryGhosts->at(i).at(0).getOnScreenTime()); - - int largestIdx = 0; - for (int k = 0; k < memoryGhosts->at(i).size(); k++) { - if (memoryGhosts->at(i).at(k).getOnScreenTime() > largestVal) { - largestVal = positionWeighting(memoryGhosts->at(i).at(k).getGhost(), - memoryGhosts->at(i).at(k).getOnScreenTime()); - largestIdx = k; - } - } - lecturer->push_back(memoryGhosts->at(i).at(largestIdx).getGhost()); - } - } - - cout << "\n--------------------\nLecturer size: " << lecturer->size() << endl; -} - -//convert frame number to string && hours:minutes:seconds -string MovementDetection::convertFrameToTime(long frameNumber, double fps) { - - double currTime = frameNumber / fps; - - stringstream ss; - int hours = (int) currTime / 3600; - currTime -= (hours * 3600); - int minutes = (int) currTime / 60; - currTime -= (minutes * 60); - int seconds = (int) currTime; - - ss << "Time: "; - if (hours < 10) - ss << "0" << hours; - else - ss << hours; - ss << ":"; - if (minutes < 10) - ss << "0" << minutes; - else - ss << minutes; - ss << ":"; - if (seconds < 10) - ss << "0" << seconds; - else - ss << seconds; - - return ss.str(); -} - -//copy lecturer values across -void MovementDetection::getLecturer(vector *newLect) { - //cout << "start of get lecturer" << endl; - //cout << "lecturer size: " << lecturer.size() << endl; - for (int i = 0; i < lecturer.size(); i++) { - //cout << "loop: " << i << endl; - newLect->push_back(lecturer.at(i)); - } - //cout << "end of get lecturer" << endl; -} - -//write video with lecturer rectangle -void MovementDetection::writeVideo(vector *lecturer, string outName) { - - //create the inputVideo object - VideoCapture inputVideo(videoDir); - if (!inputVideo.isOpened()) { - //error opening the video input - cerr << "Unable to open video file" << endl; - return; - } - - // -------------------- // - - //get input video settings and apply to output video - double fps = inputVideo.get(CV_CAP_PROP_FPS); - Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), - (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); - - VideoWriter outVid(outName, CV_FOURCC('X', 'V', 'I', 'D'), fps, S, true); - - // -------------------- // - - if (!outVid.isOpened()) { - cerr << "Unable to open video write file" << endl; - return; - } - - Mat inFrame; - - Scalar color = Scalar(255, 0, 0); - int count = 0; - int frameReset = frameSkipReset; - int frameNumber = 0; - - while (inputVideo.read(inFrame)) { - - //draw lecturer onto frame - rectangle(inFrame, lecturer->at(count).tl(), lecturer->at(count).br(), color, 2, 8, 0); - - outVid.write(inFrame); - - //write rect 
to each frame skipped as well as main - if (frameReset != 0) { - frameReset--; - } else { - frameReset = frameSkipReset; - if (count != lecturer->size() - 1) { - count++; - } - - } - - frameNumber++; - } - - cout << "Finished writing " << outName << endl; - +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/09/05. +// + +#include "MovementDetection.h" +#include +#include + +using namespace cv; +using namespace std; + + +VideoWriter maxS; + + +MovementDetection::MovementDetection(string vidLocation, vector *lect) { + + //set vid directory and window name + setVideoDir(vidLocation); + setShowLabel("MyVideo"); + + if (showFrameStatus) { + maxS.open("maxS.flv", CV_FOURCC('F', 'L', 'V', '1'), 7, cv::Size(3840, 2160), 1); + } + + //create the inputVideo object + VideoCapture inputVideo(videoDir); + if (!inputVideo.isOpened()) { + //error opening the video input + cerr << "Unable to open video file" << endl; + return; + } + + // -------------------- // + + //get input video settings and apply to output video + double fps = inputVideo.get(CV_CAP_PROP_FPS); + Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), + (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); + + if (showFileInfo) { + cout << "File Information" << endl; + cout << "Input file location: " << vidLocation << endl; + cout << "Video FPS: " << fps << endl; + cout << "Video Frame Count: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl; + cout << endl; + } + + //if we get an acceptable fps value + //certain file formats (e.g flv) sometimes have no fps when read with this OpenCV version + if (fps > 5 && fps < 60) { + validFps = true; + } else { + cerr << "Invalid FPS video from file, removing FPS tracking." << endl; + } + + center.x = S.width / 2; //center of the screen X value + center.y = S.height / 2; //center of the screen Y value + + //if create window + if (drawFrameStatus) { + namedWindow(showLabel, CV_WINDOW_NORMAL); + } + + //frame read dependent processing variable + clock_t processTime = clock(); + + int emptyFrameCount = 0; + + //preliminarily read previous frame so we can do absDiff + if (!inputVideo.read(prevFrame)) { + cerr << "Unable to read first frame." << endl; + cerr << "Exiting..." 
<< endl; + return; + } + + //convert initial to grayscale + cvtColor(prevFrame, prevFrame, CV_BGR2GRAY); + + //if true read frame as the next frame, if false read prevFrame as + // the next frame, this is to avoid copying and moving around data + bool swap = true; + + //number of frames to skip on successive reads + int frameSkip = frameSkipReset; + + // -------------------- // + + //frame information + long frameNumber = 0; + + //y rect ignore zones + double lowScreenBar = S.height - (S.height * lowScreenThresh); //actual pixels from the bottom + double highScreenBar = (S.height * highScreenThresh); //actual pixels from the top + + //number of frames before onScreenTime subtract + int ghostFrameCount = ghostResetValue; + + vector ghostRects; //vector of ghost rectangle objects + + // -------------------- // + + //read input data + while (true) { + + onLoopReset(); + + frameNumber++; + + //require 2 frames loaded to do absdiff + if (swap) { + swap = !swap; + //end of video + if (!inputVideo.read(frame)) { + break; + } + //greyscale applied to frame + cvtColor(frame, frame, CV_BGR2GRAY); + } else { + swap = !swap; //make true + //end of video + if (!inputVideo.read(prevFrame)) { + break; + } + //grayscale applied to prevFrame + cvtColor(prevFrame, prevFrame, CV_BGR2GRAY); + } + + // -------------------- // + + //perform frame skips + if (frameSkip == 0) { + frameSkip = frameSkipReset; + } else { + frameSkip--; + continue; + } + + //apply absolute difference background subtraction + absdiff(frame, prevFrame, frameAbsDiff); + + //perform thresholding + threshold(frameAbsDiff, frameThresh, 25, 255, 0); + + //get structuring element for morphological filters + Mat element = getStructuringElement(MORPH_RECT, Size(3, 3), Point(0, 0)); + + //perform morphological dilation + dilate(frameThresh, frameMorph, element); + blur(frameMorph, frameBlur, Size(15, 15), Point(0, 0), BORDER_DEFAULT); + + // -------------------- // + + //find contours + vector> contours; + vector hierarchy; + + findContours(frameBlur, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); + + //random number generator definition + RNG rng(12345); + + //remove small or inconsequential contours (should help with camera refocus and noise) + for (int i = 0; i < contours.size(); i++) { + if (contours[i].size() < contourMinSize) { + contours.erase(contours.begin() + i); + i--; + } + } + + //create bounding rectangles out of contours + vector boundingRects; + for (int i = 0; i < contours.size(); i++) { + boundingRects.push_back(cv::boundingRect(contours[i])); + } + + // -------------------- // + + //remove rectangles below a certain threshold of the screen or above a certain threshold of the screen + //in a top-left origin co-ordinate system + for (int i = (int) boundingRects.size() - 1; i >= 0; i--) { + if (boundingRects[i].tl().y >= lowScreenBar || boundingRects[i].br().y <= highScreenBar) { + boundingRects.erase(boundingRects.begin() + i); + } + } + + //remove rectangles with a very wide aspect ratio aspectR:1 + //this is specifically for boards and projectors + for (int i = 0; i < boundingRects.size(); i++) { + int xDiff = (boundingRects[i].br().x - boundingRects[i].tl().x); + int yDiff = (boundingRects[i].br().y - boundingRects[i].tl().y); + if ((double) xDiff / (double) yDiff > aspectR) { + boundingRects.erase(boundingRects.begin() + i); + i--; + } + } + + // -------------------- // + + //perform overlap and proximity check to ensure nearby or overlapping rectangles are grouped + 
overlapProximityLoop(&boundingRects); + + //if any massive shapes are found (usually on refocusing camera) just purge the frame of rects and create a default rect + if (oversizeCheck(&boundingRects, S.width, S.height)) { + emptyFrameCount++; + boundingRects.clear(); + boundingRects.push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); + if (showFrameStatus) { + //status information + cout << "Massive rects found at: " << frameNumber << endl; + cout << "No usable information!" << endl; + cout << "Generating center rectangle" << endl; + cout << "-------------------------" << endl << endl; + } + } + + //if there are no bounding rects left then generate a center rect + if (boundingRects.size() == 0) { + emptyFrameCount++; + //if no bounding rects, push_back a default rect + boundingRects.push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); + if (showFrameStatus) { + //status information + cout << "Empty Frame: " << frameNumber << endl; + cout << "No rectangles found!" << endl; + cout << "Generating center rectangle" << endl; + cout << "-------------------------" << endl << endl; + } + } + + // -------------------- // + + //add new ghosts (rects that don't intersect anywhere) + addNewGhosts(&ghostRects, &boundingRects); + + //push overlapping ghosts together + ghostOverlapCheck(&ghostRects); + + //perform ghost resize checks + for (int i = 0; i < ghostRects.size(); i++) { + if (ghostRects.at(i).resize(&boundingRects)) { + ghostRects.erase(ghostRects.begin() + i); + i--; + } + } + + //push overlapping ghosts together + ghostOverlapCheck(&ghostRects); + + //check if rectangles within a ghost are a certain distance away, if so split into 2 or more ghosts + ghostSplitCheck(&ghostRects, &boundingRects); + + // -------------------- // + + //after a certain number of frames subtract ghost counts + ghostFrameCount--; + if (ghostFrameCount == 0) { + for (int i = 0; i < ghostRects.size(); i++) { + ghostRects[i].subOnScreenTime(ghostResetAmount); + } + + ghostFrameCount = ghostResetValue; + } + + // -------------------- // + + //add rectangles & ghosts to memory + memoryRects.push_back(boundingRects); + memoryGhosts.push_back(ghostRects); + + // -------------------- // + + //just to visualize colours// + cvtColor(frameBlur, frameBlur, CV_GRAY2RGB); + + //display over the original frame + frameDisplay = frame.clone(); + cvtColor(frameDisplay, frameDisplay, CV_GRAY2RGB); + + if (drawFrameStatus) { + //draw contours + if (drawSettings[0]) { + for (int i = 0; i < contours.size(); i++) { + Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); + drawContours(frameDisplay, contours, i, color, 2, 8, hierarchy, 0, Point()); + } + } + + //draw rectangles around contours + if (drawSettings[1]) { + for (int i = 0; i < boundingRects.size(); i++) { + Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)); + rectangle(frameDisplay, boundingRects[i].tl(), boundingRects[i].br(), color, 2, 8, 0); + } + } + + //draw ghosts in frame + if (drawSettings[2]) { + for (int i = 0; i < ghostRects.size(); i++) { + Scalar color = Scalar(0, 0, 0); + rectangle(frameDisplay, ghostRects[i].tl(), ghostRects[i].br(), color, 6, 8, 0); + } + } + + //draw ghost on screen time to ghosts + if (drawSettings[3]) { + for (int i = 0; i < ghostRects.size(); i++) { + Scalar color = Scalar(0, 0, 0); + + ss << ghostRects.at(i).getOnScreenTime(); + frameText = ss.str(); + ss.str(""); + + Rect rect = 
ghostRects.at(i).getGhost(); + Point rectCent(rect.x + (rect.width / 3), rect.y + (rect.height / 2)); + + putText(frameDisplay, frameText, rectCent, FONT_HERSHEY_PLAIN, 3, color, 3, 8, false); + } + } + } + + + if (drawFrameStatus) { + //add values to print to frame + if (drawSettings[4]) { + if (validFps) { + addTextToFrame(&frameDisplay, "", convertFrameToTime(frameNumber, fps)); + } + } + if (drawSettings[5]) { + addTextToFrame(&frameDisplay, "Frame: ", frameNumber); + } + if (drawSettings[6]) { + addTextToFrame(&frameDisplay, "Rectangles: ", boundingRects.size()); + } + + //show processing over grayscale frame + imshow(showLabel, frameDisplay); + maxS.write(frameDisplay); + + + } + + if (showFrameStatus) { + //status information + cout << "Frame: " << frameNumber << endl; + if (validFps) { + cout << convertFrameToTime(frameNumber, fps) << endl; + } + cout << "Number of rectangles: " << boundingRects.size() << endl; + cout << "Number of ghosts: " << ghostRects.size() << endl; + cout << "-------------------------" << endl << endl; + } + + //waitKey(will step through at x milliseconds rate) + //if 0 waitKey will step through when key is pressed + if (waitKey(1)) { + continue; + } + } + + //push final frame on + memoryGhosts.push_back((memoryGhosts.at(memoryGhosts.size() - 1))); + memoryRects.push_back((memoryRects.at(memoryRects.size() - 1))); + + cout << "Final frame count: " << frameNumber << endl; + + inputVideo.release(); + + processTime = clock() - processTime; + + cout << "Processing took: " << (processTime / 1000.0) << "s" << endl; + + cout << "Performing ghost adjustments!" << endl; + + // -------------------- // + + //time the post-processing + clock_t time = clock(); + + //begin the post-processing to find the lecturer from multiple ghosts + + //use the largest number calculated with a weight based on x-position favoring the center of the screen + + findLecturer(&memoryGhosts, &lecturer); + + //once main ghost has been found shift the ghost to match the avg of rectangles coordinates that + //intersect it by a weighted average of intersection amount + adjustLecturer(&lecturer, &memoryRects, lect); + + //decide to draw frame + if (drawFrameStatus) { + //print ghosts to frame and output + Scalar color = Scalar(0, 0, 0); //ghost color + showFramesMemoryRects(&memoryGhosts, color); + } + + time = (clock() - time); + + cout << "Postprocessing took: " << (time) << "ms" << endl; + + cout << "Finished processing!" 
<< endl; + + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_1_NORM_LECT.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_2_LECT_WAVE.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_3_LECT_PACE.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_4_LIGHTS.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_5_MOVE_BOARDS.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_6_MOVE_SCREEN.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_7_MOVE_SCREEN_2.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_8_OFF_ON.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_9_LECT_CROSS.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_10_BOTH_MOVE.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_11_BOTH_MOVE_2.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_12_RUNNING.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_13_THROWING.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_14_MULTI_STUDENT.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_15_MOVE_3.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_16_MOVE_CHAIRS.avi"); + //writeVideo(&lecturer, "Videos/Test Cases Output/CLIP_17_NO_MOVE.avi"); +} + +//calculates the distance between the closest two edge points of two rectangles +double MovementDetection::closestDistance(Rect a, Rect b) { + bool left = b.br().x < a.tl().x; //b left of a + bool right = a.br().x < b.tl().x; //b right of a + bool bottom = a.br().y < b.tl().y; //b below a + bool top = b.br().y < a.tl().y; //b above a + if (top and left) + return euclideanDist(a.tl(), b.br()); + else if (left and bottom) { + return euclideanDist(Point(a.tl().x, a.br().y), Point(b.br().x, b.tl().y)); + } else if (bottom and right) { + return euclideanDist(a.br(), b.tl()); + } else if (right and top) { + return euclideanDist(Point(a.br().x, a.tl().y), Point(b.tl().x, b.br().y)); + } else if (left) { + return a.tl().x - b.br().x; + } else if (right) { + return b.tl().x - a.br().x; + } else if (bottom) { + return b.tl().y - a.br().y; + } else { + return a.tl().y - b.br().y; + } +} + +//helper function to calculate distance between two points +double MovementDetection::euclideanDist(Point p, Point q) { + Point diff = p - q; + return cv::sqrt(diff.x * diff.x + diff.y * diff.y); +} + +//check for overlapping rectangles and push them together +//return true if changes made +bool MovementDetection::overlapCheck(vector *boundingRects) { + //push overlapping rectangles together + bool changeDetected; + bool anyChange = false; + + do { + //this array records changes in vector to be implemented + bool deletionMark[boundingRects->size()]; + for (int i = 0; i < boundingRects->size(); i++) { + deletionMark[i] = false; + } + + //if changes happen the rectangles need to be compared again + //this shouldn't be a problem given that number of rectangles is mostly very small (1 - 20) + changeDetected = false; + + /* + * possible efficiency here, checking the area before complete occlusion + * if the square is in another is their area > 0?? 
in which case throw the top + * in as an if inside the other if + */ + for (int i = 0; i < boundingRects->size(); i++) { + for (int k = 0; k < boundingRects->size(); k++) { + if (i != k) { + //the intersection of 2 rectangles + Rect intersect = (boundingRects->at(i)) & (boundingRects->at(k)); + + //if first rectangle is completely inside second + if (intersect == boundingRects->at(i)) { + deletionMark[i] = true; //mark for deletion + changeDetected = true; + continue; + } + //if rectangles overlap then just add them together + if (intersect.area() > 0) { + boundingRects->at(i) = (boundingRects->at(i) | boundingRects->at(k)); //merge + deletionMark[k] = true; + changeDetected = true; + } + + } + } + } + + //delete marked cells + //in reverse makes index easier to maintain + for (int i = (int) boundingRects->size() - 1; i >= 0; i--) { + if (deletionMark[i]) { + boundingRects->erase(boundingRects->begin() + i); + } + } + if (changeDetected) + anyChange = true; + } while (changeDetected); + + return anyChange; +} + +//check for nearby rectangles and push them together +//return true if changes made +bool MovementDetection::proximityCheck(vector *boundingRects) { + //returns if there were no changes at all + bool anyChange = false; + + //check each rectangle against each other to see if distances between rectangles + //are small enough to merge + for (int i = 0; i < boundingRects->size(); i++) { + for (int k = i; k < boundingRects->size(); k++) { + if (i != k) { + //if closer than threshold + if (closestDistance(boundingRects->at(i), boundingRects->at(k)) < clusterThreshold) { + boundingRects->at(i) = (boundingRects->at(i) | boundingRects->at(k)); //merge + //erase the rect and restart search + boundingRects->erase(boundingRects->begin() + k); + i = 0; + k = 0; //reset the loop + anyChange = true; + } + } + } + } + + return anyChange; +} + +//performs overlap and proximity checks until no more changes occur +void MovementDetection::overlapProximityLoop(vector *boundingRects) { + //keep going until no more changes + while (true) { + //if both return no changes + if (!overlapCheck(boundingRects) && !proximityCheck(boundingRects)) { + break; + } + } +} + +//check if any massive rectangles exist in the scene +bool MovementDetection::oversizeCheck(vector *boundingRects, int width, int height) { + for (int i = 0; i < boundingRects->size(); i++) { + if (boundingRects->at(i).area() > (width * height * massiveThreshold)) { + return true; + } + } + return false; +} + +//add text to frame function +void MovementDetection::addTextToFrame(Mat *frame, string str, auto value) { + ss << str << value; + ++textCount; + frameText = ss.str(); + putText(*frame, frameText, Point(30, 80 * textCount), FONT_HERSHEY_PLAIN, 4, Scalar(0, 0, 255), 2, 8, false); + ss.str(""); +} + +//values to reset on loop +void MovementDetection::onLoopReset() { + textCount = 0; +} + +//push overlapping ghosts together +void MovementDetection::ghostOverlapCheck(vector *ghostRects) { + //mark rects that need to be deleted + bool deletionMark[ghostRects->size()]; + for (int i = 0; i < ghostRects->size(); i++) { + deletionMark[i] = false; + } + + /* + * check ghost intersection, if intersected then push together + */ + for (int i = 0; i < ghostRects->size(); i++) { + for (int k = 0; k < ghostRects->size(); k++) { + if (i != k) { + //the intersection of 2 rectangles + Rect intersect = (ghostRects->at(i).getGhost()) & (ghostRects->at(k).getGhost()); + + //if first rectangle is completely inside second + if (intersect == 
ghostRects->at(i).getGhost()) { + deletionMark[i] = true; //mark for deletion + continue; + } + //if rectangles overlap then just add them together + if (intersect.area() > 0) { + ghostRects->at(i).reset((ghostRects->at(i).getGhost() | ghostRects->at(k).getGhost())); //merge + //mark largest time as time + if (ghostRects->at(k).getOnScreenTime() > ghostRects->at(i).getOnScreenTime()) { + ghostRects->at(i).setOnScreenTime(ghostRects->at(k).getOnScreenTime()); + } + deletionMark[k] = true; + } + + } + } + } + + //remove marked to delete ghosts + for (int i = (int) ghostRects->size() - 1; i >= 0; i--) { + if (deletionMark[i]) { + ghostRects->erase(ghostRects->begin() + i); + } + } +} + +//show frames using ghostRectangles +void MovementDetection::showFramesMemoryRects(vector> *memoryGhosts, Scalar color) { + VideoCapture inputVideo(videoDir); + if (!inputVideo.isOpened()) { + //error opening the video input + cerr << "Unable to open video file" << endl; + return; + } + + //the frame that is rendered to file + Mat printFrame; + + if (!inputVideo.read(printFrame)) { + cerr << "Unable to read first frame." << endl; + cerr << "Exiting..." << endl; + return; + } + + int frameSkip = frameSkipReset; + + for (int i = 0; i < memoryGhosts->size() - 1;) { + + if (!inputVideo.read(printFrame)) { + cout << "Moving to next stage!" << endl; + return; + } + + //perform frame skips + if (frameSkip == 0) { + frameSkip = frameSkipReset; + i++; + } else { + frameSkip--; + continue; + } + + //draw decided lecturer to frame from ghosts + rectangle(printFrame, memoryGhosts->at(i).at(0).tl(), memoryGhosts->at(i).at(0).br(), color, 6, 8, 0); + + imshow(showLabel, printFrame); + + if (waitKey(1)) { + continue; + } + } + + inputVideo.release(); +} + +//reposition ghost to centroid of squares +void +MovementDetection::adjustLecturer(vector *lecturer, vector> *memoryRects, vector *newLect) { + + for (int i = 0; i < lecturer->size(); i++) { //for each main ghost + Rect tempGhost = lecturer->at(i); + + //our index array of rectangle that intersect validly + vector intersected; + + //for each rectangle at that frame + for (int r = 0; r < memoryRects->at(i).size(); r++) { + + Rect tempRect = memoryRects->at(i).at(r); + + Rect intersect = tempRect & tempGhost; + + //if there is any intersection + if (intersect.area() > 0) { + //if the rect is much larger than ghost do nothing + if (tempRect.area() / tempGhost.area() > 2) { + continue; + } else { + intersected.push_back(r); + } + } + } + + //perform shifting operations on selected rects + if (intersected.size() == 1) { + //take average 0.9 movement to the rect and make new (tl(), br()) rect + int newTlX = (int) (shiftR * tempGhost.tl().x) + + (int) (shiftRInv * memoryRects->at(i).at(intersected.at(0)).tl().x); + int newBrX = (int) (shiftR * tempGhost.br().x) + + (int) (shiftRInv * memoryRects->at(i).at(intersected.at(0)).br().x); + + Rect newRect(Rect(Point(newTlX, tempGhost.tl().y), Point(newBrX, tempGhost.br().y))); + + newLect->push_back(newRect); + } else if (intersected.size() > 1) { + //average all intersecting rects and the ghost's coords + int newTlX = tempGhost.tl().x; + int newBrX = tempGhost.br().x; + + for (int j = 0; j < intersected.size(); j++) { + newTlX += memoryRects->at(i).at(intersected.at(j)).tl().x; + newBrX += memoryRects->at(i).at(intersected.at(j)).br().x; + } + + newTlX /= (intersected.size() + 1); + newBrX /= (intersected.size() + 1); + + Rect newRect(Rect(Point(newTlX, tempGhost.tl().y), Point(newBrX, tempGhost.br().y))); + + 
newLect->push_back(newRect); + } + } +} + +//add new ghosts (rectangles that don't intersect anywhere +void MovementDetection::addNewGhosts(vector *ghostRects, vector *boundingRects) { + + for (int i = 0; i < boundingRects->size(); i++) { + bool add = true; + for (int g = 0; g < ghostRects->size(); g++) { + //check for overlap + Rect intersect = ((ghostRects->at(g).getGhost()) & (boundingRects->at(i))); + //if none break and move to next rect + if (intersect.area() > 0) { + add = false; + break; + } + } + if (add) { + ghostRects->push_back(Ghost(boundingRects->at(i))); + } + } +} + +//return timeOnScreen * (ratioOfDistFromCenter ^ 2) averaged between x and y +double MovementDetection::positionWeighting(Rect r, long onScreenTime) { + double distX = center.x - abs(r.x - center.x); //distance from center to rect + double distY = center.y - abs(r.y - center.y); //distance from center to top of rect + + distX /= center.x; //get x ratio + distY /= center.y; //get y ratio + + //pair of ratiod lecturerTrackedLocationRectangles + double ratioX; + double ratioY; + ratioX = onScreenTime * (distX * distX); + ratioY = onScreenTime * (distY * distY); + + return (ratioX + ratioY) / 2; +} + +//divide ghosts with multiple rects in it that are far apart +void MovementDetection::ghostSplitCheck(vector *ghostRects, vector *boundingRects) { + + bool ghostDelete[ghostRects->size()]; //assign all as false + for (int i = 0; i < ghostRects->size(); i++) { + ghostDelete[i] = false; + } + + vector newGhosts; + + for (int g = 0; g < ghostRects->size(); g++) { + vector intersected; + Ghost currGhost = ghostRects->at(g); + for (int r = 0; r < boundingRects->size(); r++) { + + //test intersection of rect with ghost + Rect intersect = (currGhost.getGhost() & boundingRects->at(r)); + + //check intersection & record + if (intersect.area() > 0) { + intersected.push_back(r); + } + } + + //if more than one intersection + //see if it needs to be split + if (intersected.size() > 1) { + + bool newGhostCheck[intersected.size()]; //if the index that intersected points to needs to be made new + + for (int m = 0; m < intersected.size(); m++) { + newGhostCheck[m] = false; + } + + //check if minimum distance in ghost is below global allowed + int minDist = currGhost.getGhost().width / 3; + if (minDist < minSplitDist) { + minDist = minSplitDist; + } + + //iterate over all intersection + //use boolean checks to avoid creating duplicates + for (int i = 0; i < intersected.size(); i++) { + for (int j = i + 1; j < intersected.size(); j++) { + Rect a = boundingRects->at(intersected[i]); + Rect b = boundingRects->at(intersected[j]); + + double dist; + + //if there is intersection between the two rects then ignore + if ((a & b).area() > 0) { + continue; + } + //if completely above or below then ignore + if (a.br().y < b.tl().y || b.br().y < a.tl().y) { + continue; + } + + //if a is left of b + if (a.br().x < b.tl().x) { + dist = b.tl().x - a.br().x; + } + //if b is left of a + else if (b.br().x < a.tl().x) { + dist = a.tl().x - b.br().x; + } + + //if x distance apart is large split them up + if (dist > minDist) { + if (!newGhostCheck[i]) { + newGhostCheck[i] = true; + newGhosts.push_back(Ghost(a, currGhost.getOnScreenTime())); + } + + if (!newGhostCheck[j]) { + newGhostCheck[j] = true; + newGhosts.push_back(Ghost(b, currGhost.getOnScreenTime())); + } + ghostDelete[g] = true; + } + } + } + } + intersected.clear(); + } + + //delete all duplicate ghosts + for (int i = ghostRects->size() - 1; i >= 0; i--) { + if (ghostDelete[i]) { + 
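//iterate from the back so erasing does not shift the indices still to be checked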
ghostRects->erase(ghostRects->begin() + i); + } + } + + //add all new ghosts onto ghostRects + for (int i = 0; i < newGhosts.size(); i++) { + ghostRects->push_back(newGhosts.at(i)); + } +} + +//find lecturer based on onScreenTime and position on the screen +void MovementDetection::findLecturer(vector> *memoryGhosts, vector *lecturer) { + + //cout << "\n--------------------\nMemoryGhosts size: " << memoryGhosts->size() << "\nalc--------------------\n" << endl; + + for (int i = 0; i < memoryGhosts->size(); i++) { + //cout << "Ghost: " << i << " Size: " << memoryGhosts->at(i).size() << endl; + if (memoryGhosts->at(i).size() == 0) { + if (i == 0) { + lecturer->push_back(Rect(Point(center.x - 50, center.y - 50), Point(center.x + 50, center.y + 50))); + continue; + } + //if no ghost then set default previous + lecturer->push_back(Rect(lecturer->at(i - 1))); + } else { + double largestVal = positionWeighting(memoryGhosts->at(i).at(0).getGhost(), + memoryGhosts->at(i).at(0).getOnScreenTime()); + + int largestIdx = 0; + for (int k = 0; k < memoryGhosts->at(i).size(); k++) { + if (memoryGhosts->at(i).at(k).getOnScreenTime() > largestVal) { + largestVal = positionWeighting(memoryGhosts->at(i).at(k).getGhost(), + memoryGhosts->at(i).at(k).getOnScreenTime()); + largestIdx = k; + } + } + lecturer->push_back(memoryGhosts->at(i).at(largestIdx).getGhost()); + } + } + + cout << "\n--------------------\nLecturer size: " << lecturer->size() << endl; +} + +//convert frame number to string && hours:minutes:seconds +string MovementDetection::convertFrameToTime(long frameNumber, double fps) { + + double currTime = frameNumber / fps; + + stringstream ss; + int hours = (int) currTime / 3600; + currTime -= (hours * 3600); + int minutes = (int) currTime / 60; + currTime -= (minutes * 60); + int seconds = (int) currTime; + + ss << "Time: "; + if (hours < 10) + ss << "0" << hours; + else + ss << hours; + ss << ":"; + if (minutes < 10) + ss << "0" << minutes; + else + ss << minutes; + ss << ":"; + if (seconds < 10) + ss << "0" << seconds; + else + ss << seconds; + + return ss.str(); +} + +//copy lecturer values across +void MovementDetection::getLecturer(vector *newLect) { + //cout << "start of get lecturer" << endl; + //cout << "lecturer size: " << lecturer.size() << endl; + for (int i = 0; i < lecturer.size(); i++) { + //cout << "loop: " << i << endl; + newLect->push_back(lecturer.at(i)); + } + //cout << "end of get lecturer" << endl; +} + +//write video with lecturer rectangle +void MovementDetection::writeVideo(vector *lecturer, string outName) { + + //create the inputVideo object + VideoCapture inputVideo(videoDir); + if (!inputVideo.isOpened()) { + //error opening the video input + cerr << "Unable to open video file" << endl; + return; + } + + // -------------------- // + + //get input video settings and apply to output video + double fps = inputVideo.get(CV_CAP_PROP_FPS); + Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), + (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); + + VideoWriter outVid(outName, CV_FOURCC('X', 'V', 'I', 'D'), fps, S, true); + + // -------------------- // + + if (!outVid.isOpened()) { + cerr << "Unable to open video write file" << endl; + return; + } + + Mat inFrame; + + Scalar color = Scalar(255, 0, 0); + int count = 0; + int frameReset = frameSkipReset; + int frameNumber = 0; + + while (inputVideo.read(inFrame)) { + + //draw lecturer onto frame + rectangle(inFrame, lecturer->at(count).tl(), lecturer->at(count).br(), color, 2, 8, 0); + + outVid.write(inFrame); + + //write rect 
to each frame skipped as well as main + if (frameReset != 0) { + frameReset--; + } else { + frameReset = frameSkipReset; + if (count != lecturer->size() - 1) { + count++; + } + + } + + frameNumber++; + } + + cout << "Finished writing " << outName << endl; + } \ No newline at end of file diff --git a/source/tracking/MovementDetection.h b/source/tracking/MovementDetection.h index b0c2b77..75e7a7b 100644 --- a/source/tracking/MovementDetection.h +++ b/source/tracking/MovementDetection.h @@ -1,183 +1,183 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/09/05. -// - -#ifndef TRACK4K_MOVEMENTDETECTION_H -#define TRACK4K_MOVEMENTDETECTION_H - -#include "opencv/cv.h" -#include "opencv/highgui.h" -#include -#include -#include -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/videoio.hpp" -#include "opencv2/video.hpp" -#include "opencv2/highgui.hpp" -#include -#include "Ghost.h" - -using namespace cv; -using namespace std; - -class MovementDetection{ - -public: - - //method to check bounding box intersection (replaced by | and & overloaded params on Rect) - bool BoundingBoxIntersect(Rect a, Rect b){ - return (abs(a.x - b.x) * 2 < (a.width + b.width)) && - (abs(a.y - b.y) * 2 < (a.height + b.height)); - } - - //return frameskipreset value - int getFrameSkipReset(){ return frameSkipReset;} - - //get video name - string getVideoDir(){ return videoDir; } - - //set show label - void setShowLabel(string label){ showLabel = label; } - - //set video name - void setVideoDir(string name){ videoDir = name; } - - //default constructor and tracking runner - MovementDetection(string name, vector* lect); - - //calculates the distance between the closest two edge points of two rectangles - double closestDistance(Rect a, Rect b); - - //helper function to calculate distance between two points - double euclideanDist(Point p, Point q); - - //check for overlapping rectangles and push them together - //return true if changes made - bool overlapCheck(vector* boundingRects); - - //check for nearby rectangles and push them together - //return true if changes made - bool proximityCheck(vector* boundingRects); - - //performs overlap and proximity checks until no more changes occur - void overlapProximityLoop(vector* boundingRects); - - //check if any massive rectangles exist in the scene - bool oversizeCheck(vector* boundingRects, int width, int height); - - //add text to frame function - void addTextToFrame(Mat* frame, string str, auto value); - - //values to reset on loop - void onLoopReset(); - - //push overalpping ghosts together - void ghostOverlapCheck(vector* ghostRects); - - //show frames using ghostRectangles - void showFramesMemoryRects(vector>* memoryGhosts, Scalar color); - - //reposition ghost to centroid of squares - void adjustLecturer(vector* lecturer, vector>* memRects, vector* newLect); - - //add new ghosts (rectangles 
that don't intersect anywhere - void addNewGhosts(vector* ghostRects, vector* boundingRects); - - //return timeOnScreen * (1 / (ratioOfDistFromCenter ^ 2)) averaged between x and y - double positionWeighting(Rect r, long onScreenTime); - - //divide ghosts with multiple rects in it that are far apart - void ghostSplitCheck(vector* ghostRects, vector* boundingRect); - - //find lecturer based on onScreenTime and position on the screen - void findLecturer(vector>* memoryGhosts, vector* lecturer); - - //convert frame number to string && hours:minutes:seconds - string convertFrameToTime(long frameNumber, double fps); - - //copy lecturer values across - void getLecturer(vector* newLect); - - //write video - void writeVideo(vector* lecturer, string outName); - -private: - - //frames used during processing - Mat frame; //current frame - Mat prevFrame; //previous frame for diff - Mat frameAbsDiff; //absolute difference (change) frame - Mat frameThresh; //thresholding - Mat frameMorph; //morphological operations - Mat frameBlur; //blur operation - Mat frameDisplay; //the frame that we print to imShow() - - //constant values used to process - int aspectR = 3; //aspect ratio threshold of square (width / height) to delete - int frameSkipReset = 3; //number of frames to skip per interval - double clusterThreshold = 50; //distance between objects before they are considered separate - double massiveThreshold = 0.3; //value from 0.1 to 1 that represents how much area a square can be of the screen before its too large - string frameText = ""; - stringstream ss; - int textCount = 0; - int contourMinSize = 20; //min threshold value for array size of contour - double lowScreenThresh = 0.3; //lowest point that rect tl().y is valid (0 - 1) - double highScreenThresh = 0.3; //highest point that rect br().y is valid (0 - 1) - int ghostResetValue = 100; //number of frames to do before resetting counts - int ghostResetAmount = (int)(ghostResetValue * 0.67); //number to reduce every {ghostResetValue} frames - string videoDir = ""; - string showLabel = "MyVideo"; - bool validFps = false; - double shiftR = 0.7; //value between 0.1 and 0.9 symbolizing how much a single rect will shift the ghost - double shiftRInv = 1 - shiftR; //inverse of ghostShiftRatio - Point center; //center of the screen - int minSplitDist = 150; //minimum distance to bother splitting ghosts - - //debugging - - //draw visuals - bool drawFrameStatus = false; - /* - * 0 - draw contours - * 1 - draw bounding rectangles - * 2 - draw ghosts - * 3 - draw ghost time on screen - * 4 - write time - * 5 - write frame number - * 6 - write number of rectangles - */ - bool drawSettings[7] = {true, true, true, true, true, true, true}; - - //write text to console - bool showFrameStatus = false; - - //write video file information - bool showFileInfo = false; - - //vectors that are saved for final processing step - vector> memoryRects; //list of all rectangles - vector> memoryGhosts; //list of all ghosts - vector lecturer; //positions on lecturer -}; - - - - -#endif //TRACK4K_MOVEMENTDETECTION_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/09/05. +// + +#ifndef TRACK4K_MOVEMENTDETECTION_H +#define TRACK4K_MOVEMENTDETECTION_H + +#include "opencv/cv.h" +#include "opencv/highgui.h" +#include +#include +#include +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/video.hpp" +#include "opencv2/highgui.hpp" +#include +#include "Ghost.h" + +using namespace cv; +using namespace std; + +class MovementDetection{ + +public: + + //method to check bounding box intersection (replaced by | and & overloaded params on Rect) + bool BoundingBoxIntersect(Rect a, Rect b){ + return (abs(a.x - b.x) * 2 < (a.width + b.width)) && + (abs(a.y - b.y) * 2 < (a.height + b.height)); + } + + //return frameskipreset value + int getFrameSkipReset(){ return frameSkipReset;} + + //get video name + string getVideoDir(){ return videoDir; } + + //set show label + void setShowLabel(string label){ showLabel = label; } + + //set video name + void setVideoDir(string name){ videoDir = name; } + + //default constructor and tracking runner + MovementDetection(string name, vector* lect); + + //calculates the distance between the closest two edge points of two rectangles + double closestDistance(Rect a, Rect b); + + //helper function to calculate distance between two points + double euclideanDist(Point p, Point q); + + //check for overlapping rectangles and push them together + //return true if changes made + bool overlapCheck(vector* boundingRects); + + //check for nearby rectangles and push them together + //return true if changes made + bool proximityCheck(vector* boundingRects); + + //performs overlap and proximity checks until no more changes occur + void overlapProximityLoop(vector* boundingRects); + + //check if any massive rectangles exist in the scene + bool oversizeCheck(vector* boundingRects, int width, int height); + + //add text to frame function + void addTextToFrame(Mat* frame, string str, auto value); + + //values to reset on loop + void onLoopReset(); + + //push overalpping ghosts together + void ghostOverlapCheck(vector* ghostRects); + + //show frames using ghostRectangles + void showFramesMemoryRects(vector>* memoryGhosts, Scalar color); + + //reposition ghost to centroid of squares + void adjustLecturer(vector* lecturer, vector>* memRects, vector* newLect); + + //add new ghosts (rectangles that don't intersect anywhere + void addNewGhosts(vector* ghostRects, vector* boundingRects); + + //return timeOnScreen * (1 / (ratioOfDistFromCenter ^ 2)) averaged between x and y + double positionWeighting(Rect r, long onScreenTime); + + //divide ghosts with multiple rects in it that are far apart + void ghostSplitCheck(vector* ghostRects, vector* boundingRect); + + //find lecturer based on onScreenTime and position on the screen + void findLecturer(vector>* memoryGhosts, vector* lecturer); + + //convert frame number to string && hours:minutes:seconds + string convertFrameToTime(long frameNumber, double fps); + + //copy lecturer values across + void getLecturer(vector* newLect); + + //write video + void writeVideo(vector* lecturer, string 
outName); + +private: + + //frames used during processing + Mat frame; //current frame + Mat prevFrame; //previous frame for diff + Mat frameAbsDiff; //absolute difference (change) frame + Mat frameThresh; //thresholding + Mat frameMorph; //morphological operations + Mat frameBlur; //blur operation + Mat frameDisplay; //the frame that we print to imShow() + + //constant values used to process + int aspectR = 3; //aspect ratio threshold of square (width / height) to delete + int frameSkipReset = 3; //number of frames to skip per interval + double clusterThreshold = 50; //distance between objects before they are considered separate + double massiveThreshold = 0.3; //value from 0.1 to 1 that represents how much area a square can be of the screen before its too large + string frameText = ""; + stringstream ss; + int textCount = 0; + int contourMinSize = 20; //min threshold value for array size of contour + double lowScreenThresh = 0.3; //lowest point that rect tl().y is valid (0 - 1) + double highScreenThresh = 0.3; //highest point that rect br().y is valid (0 - 1) + int ghostResetValue = 100; //number of frames to do before resetting counts + int ghostResetAmount = (int)(ghostResetValue * 0.67); //number to reduce every {ghostResetValue} frames + string videoDir = ""; + string showLabel = "MyVideo"; + bool validFps = false; + double shiftR = 0.7; //value between 0.1 and 0.9 symbolizing how much a single rect will shift the ghost + double shiftRInv = 1 - shiftR; //inverse of ghostShiftRatio + Point center; //center of the screen + int minSplitDist = 150; //minimum distance to bother splitting ghosts + + //debugging + + //draw visuals + bool drawFrameStatus = false; + /* + * 0 - draw contours + * 1 - draw bounding rectangles + * 2 - draw ghosts + * 3 - draw ghost time on screen + * 4 - write time + * 5 - write frame number + * 6 - write number of rectangles + */ + bool drawSettings[7] = {true, true, true, true, true, true, true}; + + //write text to console + bool showFrameStatus = false; + + //write video file information + bool showFileInfo = false; + + //vectors that are saved for final processing step + vector> memoryRects; //list of all rectangles + vector> memoryGhosts; //list of all ghosts + vector lecturer; //positions on lecturer +}; + + + + +#endif //TRACK4K_MOVEMENTDETECTION_H diff --git a/source/tracking/RecognitionDriver.cpp b/source/tracking/RecognitionDriver.cpp index 60c4a05..54f974f 100644 --- a/source/tracking/RecognitionDriver.cpp +++ b/source/tracking/RecognitionDriver.cpp @@ -1,31 +1,31 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/09/05. 
-// - -#include "MovementDetection.h" -#include "RecognitionDriver.h" -#include -#include -#include "Ghost.h" - -using namespace std; - -RecognitionDriver::RecognitionDriver(){ - -} +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/09/05. +// + +#include "MovementDetection.h" +#include "RecognitionDriver.h" +#include +#include +#include "Ghost.h" + +using namespace std; + +RecognitionDriver::RecognitionDriver(){ + +} diff --git a/source/tracking/RecognitionDriver.h b/source/tracking/RecognitionDriver.h index d87cfeb..6833e9c 100644 --- a/source/tracking/RecognitionDriver.h +++ b/source/tracking/RecognitionDriver.h @@ -1,32 +1,32 @@ -/** - * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn - * Licensed under the Educational Community License, Version 2.0 - * (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.osedu.org/licenses/ECL-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing - * permissions and limitations under the License. - * - */ - -// -// Created by Maximilian Hahn on 2016/09/05. -// - -#ifndef TRACK4K_RECOGNITIONDRIVER_H -#define TRACK4K_RECOGNITIONDRIVER_H - -class RecognitionDriver{ - -public: - - RecognitionDriver(); - -}; - -#endif //TRACK4K_RECOGNITIONDRIVER_H +/** + * Copyright 2016 Charles Fitzhenry / Mohamed Tanweer Khatieb / Maximilian Hahn + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.osedu.org/licenses/ECL-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +// +// Created by Maximilian Hahn on 2016/09/05. 
+// + +#ifndef TRACK4K_RECOGNITIONDRIVER_H +#define TRACK4K_RECOGNITIONDRIVER_H + +class RecognitionDriver{ + +public: + + RecognitionDriver(); + +}; + +#endif //TRACK4K_RECOGNITIONDRIVER_H From 13555d0e0438c2b91967add2ef92c5318ead04f1 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 11 Oct 2017 14:22:05 +0200 Subject: [PATCH 04/49] Disable board detection --- source/segmentation/Track4KPreProcess.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/segmentation/Track4KPreProcess.cpp b/source/segmentation/Track4KPreProcess.cpp index 5915dce..9c48977 100644 --- a/source/segmentation/Track4KPreProcess.cpp +++ b/source/segmentation/Track4KPreProcess.cpp @@ -53,7 +53,7 @@ void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) //motionDetection.subtract(frameVector, persistentData); //Detect the boards - boardDetection.extractBoards(frameVector, persistentData); + //boardDetection.extractBoards(frameVector, persistentData); } From 86e14ef0a52a292d0158400ebf3189a5e06125cc Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 11 Oct 2017 14:32:14 +0200 Subject: [PATCH 05/49] Typo --- install_track4k.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install_track4k.sh b/install_track4k.sh index 39066e4..13f1761 100755 --- a/install_track4k.sh +++ b/install_track4k.sh @@ -17,7 +17,7 @@ cmake ../source/ echo "STAGE 3/4: Executing make operation..." make $jFlag$numCores -echo "STAGE 4/4: Insalling..." +echo "STAGE 4/4: Installing..." sudo make install echo "Complete!" From e928c8a9e89192f0c07a2a3a010f33f74d0b9c91 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Thu, 12 Oct 2017 14:49:42 +0200 Subject: [PATCH 06/49] OPENCAST-1538 Add frame padding at start to preserve A/V sync --- source/PersistentData.h | 3 ++- source/mainDriver.cpp | 13 ++++++++----- source/panning/VirtualCinematographer.cpp | 9 ++++++++- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/source/PersistentData.h b/source/PersistentData.h index 4521dcc..2e26a70 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -58,7 +58,8 @@ class PersistentData bool boardsFound = false; cv::Rect boardCropRegion; + // Number of frames to repeat at the start of the output video + int outputPadding = 0; }; - #endif //TRACK4K_PERSISTENTDATA_H diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index 681bbdf..e72f95a 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -40,17 +40,17 @@ int main(int argc, char *argv[]) { cv::Size saveDimensions; //Check if input of command line parameters are valid - if (argc == 6) { + if (argc == 7) { string codecInput = argv[5]; persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); - - } else if (argc == 5) { + } else if (argc == 6) { //Use default codec persistentData.codec = CV_FOURCC('X', '2', '6', '4'); - } else { cerr - << "The number of parameters entered were incorrect. Expected track4k.exe [FOURCC Codec] \n See http://www.fourcc.org/codecs.php for available codecs. The default codec of X264 for mp4 will be used, if none is specified!" + << "\ntrack4k build UCT 2017-10-12a\n\n" + << "Parameters:\n track4k [FOURCC Codec]\n\n" + << "See http://www.fourcc.org/codecs.php for available codecs. 
The default codec of X264 for mp4 will be used, if none is specified!\n" << endl; return -1; } @@ -69,12 +69,15 @@ int main(int argc, char *argv[]) { cropHeight = stoi(argv[4]); saveDimensions = cv::Size(cropWidth, cropHeight); + // Padding frames + int padding = stoi(argv[5]); //Update this information in PersistantData persistentData.inputFileName = inputFilename; persistentData.outputVideoFilenameSuffix = outputFilename.substr(0,outputFilename.find_first_of('.')); persistentData.saveFileExtension = outputFileExtension; persistentData.panOutputVideoSize = saveDimensions; + persistentData.outputPadding = padding; cout << "\n----------------------------------------" << endl; cout << "Stage [1 of 3] - Board Segmentation" << endl; diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 12d87a5..6f107d0 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -96,6 +96,13 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData if (!fileReader.isEndOfFile()) { outputVideo.write(drawing(cropRectangles[i])); + + // Pad the start of the video if necessary + if ((i == 0) && (persistentData.outputPadding > 0)) { + for (int j = 0; j < persistentData.outputPadding; j++) { + outputVideo.write(drawing(cropRectangles[i])); + } + } } drawing.release(); @@ -105,4 +112,4 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData //Close all file writers outputVideo.release(); fileReader.getInputVideo().release(); -} \ No newline at end of file +} From c056360955c45e9bd576cbad5e5143bc9af81c2f Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Thu, 12 Oct 2017 16:36:26 +0200 Subject: [PATCH 07/49] Skip reading the file for board segmentation --- source/mainDriver.cpp | 4 ++-- source/segmentation/Track4KPreProcess.cpp | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index e72f95a..7479908 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -48,7 +48,7 @@ int main(int argc, char *argv[]) { persistentData.codec = CV_FOURCC('X', '2', '6', '4'); } else { cerr - << "\ntrack4k build UCT 2017-10-12a\n\n" + << "\ntrack4k build UCT 2017-10-12b\n\n" << "Parameters:\n track4k [FOURCC Codec]\n\n" << "See http://www.fourcc.org/codecs.php for available codecs. The default codec of X264 for mp4 will be used, if none is specified!\n" << endl; @@ -80,7 +80,7 @@ int main(int argc, char *argv[]) { persistentData.outputPadding = padding; cout << "\n----------------------------------------" << endl; - cout << "Stage [1 of 3] - Board Segmentation" << endl; + cout << "Stage [1 of 3] - Board Segmentation (skip)" << endl; cout << "----------------------------------------\n" << endl; Track4KPreProcess pre; pre.preProcessDriver(persistentData); diff --git a/source/segmentation/Track4KPreProcess.cpp b/source/segmentation/Track4KPreProcess.cpp index 9c48977..4b732f9 100644 --- a/source/segmentation/Track4KPreProcess.cpp +++ b/source/segmentation/Track4KPreProcess.cpp @@ -31,6 +31,8 @@ using namespace cv; void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) { + int skip = 1; + vector frameVector; //Read in video file @@ -43,7 +45,7 @@ void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) //Keep reading in frames from the video file until the end is reached. 
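//NB: skip is hard-coded to 1 above, so with this change the loop body never runs and board
//segmentation is bypassed entirely; mainDriver correspondingly labels stage 1 as
//"Board Segmentation (skip)"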
//Number of frames to read on each iteration is defined in the PersistentData class - while (!fileReader.isEndOfFile()) + while (!skip && !fileReader.isEndOfFile()) { //Read in frames fileReader.getNextSegment(persistentData.segmentationNumFramesToProcessPerIteration, frameVector); From b1e11a59ed3d78062906829f6cff37028888f8b3 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Fri, 13 Oct 2017 16:26:04 +0200 Subject: [PATCH 08/49] Double precision for FPS and update some output messages --- source/FileReader.cpp | 17 +++++++++++------ source/FileReader.h | 4 ++-- source/PersistentData.cpp | 2 +- source/PersistentData.h | 4 ++-- source/mainDriver.cpp | 2 +- source/panning/VirtualCinematographer.cpp | 1 - source/tracking/MovementDetection.cpp | 6 +++--- 7 files changed, 20 insertions(+), 16 deletions(-) diff --git a/source/FileReader.cpp b/source/FileReader.cpp index 8bb6f92..4acf414 100644 --- a/source/FileReader.cpp +++ b/source/FileReader.cpp @@ -21,6 +21,9 @@ #include "FileReader.h" #include "opencv2/opencv.hpp" +#include +#include + using namespace std; using namespace cv; @@ -28,12 +31,15 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) { //read in video file inputVideo = VideoCapture(filename); + if (!inputVideo.isOpened()) { cout << "Could not open the input video: " << filename << endl; return -1; } + cout << "Reading video file: " << filename << endl; + fps = inputVideo.get(CV_CAP_PROP_FPS); //Frame Rate numFrames = inputVideo.get(CV_CAP_PROP_FRAME_COUNT); //Number of frames @@ -48,13 +54,12 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) videoDimension = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); - //Print out progress info - + // Print out progress info cout << "Input frame resolution: Width=" << videoDimension.width << " Height=" << videoDimension.height - << " of nr#: " << numFrames << endl; - cout << "Input codec type: " << EXT << endl; - cout << "Video Duration (Seconds): " << videoDuration << endl; - cout << "FPS: " << fps << endl; + << " Frames=" << numFrames << endl; + // cout << "Input codec type: " << EXT << endl; + cout << "Calculated Video Duration (frames / fps): " << std::fixed << std::setprecision(3) << videoDuration << " seconds" << endl; + cout << "FPS: " << std::fixed << std::setprecision(6) << fps << endl; //Set video file info pD.setVideoInfo(fps, numFrames, videoDimension, ex); diff --git a/source/FileReader.h b/source/FileReader.h index fcaffed..b613409 100644 --- a/source/FileReader.h +++ b/source/FileReader.h @@ -30,9 +30,9 @@ class FileReader private: cv::VideoCapture inputVideo; cv::Mat frame; //current frame - float fps; //Frame Rate + double fps; //Frame Rate int numFrames; //Number of frames - int videoDuration; + float videoDuration; cv::Size videoDimension; int ex; bool endOfFile = false; diff --git a/source/PersistentData.cpp b/source/PersistentData.cpp index bb674af..3db02ba 100644 --- a/source/PersistentData.cpp +++ b/source/PersistentData.cpp @@ -21,7 +21,7 @@ #include "PersistentData.h" -void PersistentData::setVideoInfo(float f, int t, cv::Size s, int ext) +void PersistentData::setVideoInfo(double f, int t, cv::Size s, int ext) { if(!videoInfoSet){ fps = f; diff --git a/source/PersistentData.h b/source/PersistentData.h index 2e26a70..2fe4083 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -37,11 +37,11 @@ class PersistentData std::vector areasOfMotion; std::vector metaFrameVector; - float fps; 
//Frame Rate + double fps; //Frame Rate int totalFrames; //Number of frames cv::Size videoDimension; - void setVideoInfo(float f, int t, cv::Size s, int ext); + void setVideoInfo(double f, int t, cv::Size s, int ext); std::string saveFileExtension = "mp4"; //Default save extension std::string inputFileName = ""; diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index 7479908..d9565aa 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -48,7 +48,7 @@ int main(int argc, char *argv[]) { persistentData.codec = CV_FOURCC('X', '2', '6', '4'); } else { cerr - << "\ntrack4k build UCT 2017-10-12b\n\n" + << "\ntrack4k build UCT 2017-10-13a\n\n" << "Parameters:\n track4k [FOURCC Codec]\n\n" << "See http://www.fourcc.org/codecs.php for available codecs. The default codec of X264 for mp4 will be used, if none is specified!\n" << endl; diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 6f107d0..28ccb58 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -82,7 +82,6 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData outputVideo.open(persistentData.outputVideoFilenameSuffix + "." + persistentData.saveFileExtension, persistentData.codec, persistentData.fps, persistentData.panOutputVideoSize, 1); - //Open original input video file FileReader fileReader; fileReader.readFile(persistentData.inputFileName, persistentData); diff --git a/source/tracking/MovementDetection.cpp b/source/tracking/MovementDetection.cpp index 033a175..1cdb475 100644 --- a/source/tracking/MovementDetection.cpp +++ b/source/tracking/MovementDetection.cpp @@ -385,7 +385,7 @@ MovementDetection::MovementDetection(string vidLocation, vector *lect) { processTime = clock() - processTime; - cout << "Processing took: " << (processTime / 1000.0) << "s" << endl; + cout << "Processing took: " << int(processTime / CLOCKS_PER_SEC) << "s" << endl; cout << "Performing ghost adjustments!" << endl; @@ -413,7 +413,7 @@ MovementDetection::MovementDetection(string vidLocation, vector *lect) { time = (clock() - time); - cout << "Postprocessing took: " << (time) << "ms" << endl; + cout << "Postprocessing took: " << int(time / CLOCKS_PER_SEC) << "s" << endl; cout << "Finished processing!" 
<< endl; @@ -1009,4 +1009,4 @@ void MovementDetection::writeVideo(vector *lecturer, string outName) { cout << "Finished writing " << outName << endl; -} \ No newline at end of file +} From 1e1b6d4c65d35c8e218b6261d70d640a3b9babf6 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Fri, 13 Oct 2017 22:00:30 +0200 Subject: [PATCH 09/49] Add a target frame-rate parameter (and some info output) --- source/FileReader.cpp | 4 ++-- source/PersistentData.h | 1 + source/mainDriver.cpp | 20 +++++++++++--------- source/panning/VirtualCinematographer.cpp | 10 +++++++--- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/source/FileReader.cpp b/source/FileReader.cpp index 4acf414..b026157 100644 --- a/source/FileReader.cpp +++ b/source/FileReader.cpp @@ -84,7 +84,7 @@ void FileReader::getNextSegment(int segSize, std::vector &frameVec) //read the current frame if (!inputVideo.read(frame)) { - cerr << "End of video file" << endl; + cerr << "End of video file in getNextSegment()" << endl; endOfFile = true; break; //If end of video file } @@ -122,7 +122,7 @@ void FileReader::getNextFrame(cv::Mat &frame) //read the current frame if (!inputVideo.read(frame)) { - cerr << "End of video file" << endl; + cerr << "End of video file in getNextFrame()" << endl; endOfFile = true; } diff --git a/source/PersistentData.h b/source/PersistentData.h index 2fe4083..727b3df 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -38,6 +38,7 @@ class PersistentData std::vector metaFrameVector; double fps; //Frame Rate + double outputFps; // Output Frame Rate int totalFrames; //Number of frames cv::Size videoDimension; diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index d9565aa..1e7d227 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -40,16 +40,16 @@ int main(int argc, char *argv[]) { cv::Size saveDimensions; //Check if input of command line parameters are valid - if (argc == 7) { - string codecInput = argv[5]; + if (argc == 8) { + string codecInput = argv[7]; persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); - } else if (argc == 6) { + } else if (argc == 7) { //Use default codec persistentData.codec = CV_FOURCC('X', '2', '6', '4'); } else { cerr - << "\ntrack4k build UCT 2017-10-13a\n\n" - << "Parameters:\n track4k [FOURCC Codec]\n\n" + << "\ntrack4k build UCT " << __DATE__ << " " << __TIME__ << "\n\n" + << "Parameters:\n track4k [FOURCC Codec]\n\n" << "See http://www.fourcc.org/codecs.php for available codecs. 
The default codec of X264 for mp4 will be used, if none is specified!\n" << endl; return -1; @@ -63,7 +63,6 @@ int main(int argc, char *argv[]) { inputFileExtension = inputFilename.substr(inputFilename.find_first_of('.') + 1); outputFileExtension = outputFilename.substr(outputFilename.find_first_of('.') + 1); - //Extract the crop dimensions from the parameters cropWidth = stoi(argv[3]); cropHeight = stoi(argv[4]); @@ -72,12 +71,18 @@ int main(int argc, char *argv[]) { // Padding frames int padding = stoi(argv[5]); + // Target framerate + double targetFps = stod(argv[6]); + //Update this information in PersistantData persistentData.inputFileName = inputFilename; persistentData.outputVideoFilenameSuffix = outputFilename.substr(0,outputFilename.find_first_of('.')); persistentData.saveFileExtension = outputFileExtension; persistentData.panOutputVideoSize = saveDimensions; persistentData.outputPadding = padding; + persistentData.outputFps = targetFps; + + cout << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl; cout << "\n----------------------------------------" << endl; cout << "Stage [1 of 3] - Board Segmentation (skip)" << endl; @@ -97,14 +102,12 @@ int main(int argc, char *argv[]) { cout << "\nStage 2 Complete" << endl; cout << "----------------------------------------\n" << endl; - for (int i = 0; i < rR->size(); i++) { persistentData.lecturerTrackedLocationRectangles.push_back(std::move(rR->at(i))); } persistentData.skipFrameMovementDetection = move.getFrameSkipReset(); - cout << "\n----------------------------------------" << endl; cout << "Stage [3 of 3] - Virtual Cinematographer" << endl; cout << "----------------------------------------\n" << endl; @@ -113,5 +116,4 @@ int main(int argc, char *argv[]) { cout << "\nStage 3 Complete" << endl; cout << "----------------------------------------\n" << endl; - } diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 28ccb58..0d9abed 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -77,10 +77,12 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData vector cropRectangles; panLogic.doPan(movementLines, cropRectangles); + cout << "Writing output file " << persistentData.outputVideoFilenameSuffix << "." << persistentData.saveFileExtension << endl; + //Create video writer object for writing the cropped output video VideoWriter outputVideo; outputVideo.open(persistentData.outputVideoFilenameSuffix + "." 
+ persistentData.saveFileExtension, - persistentData.codec, persistentData.fps, persistentData.panOutputVideoSize, 1); + persistentData.codec, persistentData.outputFps, persistentData.panOutputVideoSize, 1); //Open original input video file FileReader fileReader; @@ -89,6 +91,8 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData Mat drawing; //Loop over all frames in the input video and save the cropped frames to a stream as well as the board segment + cout << "Crop rectangles: " << cropRectangles.size() << endl; + for (int i = 0; i < cropRectangles.size(); i++) { fileReader.getNextFrame(drawing); @@ -98,6 +102,7 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData // Pad the start of the video if necessary if ((i == 0) && (persistentData.outputPadding > 0)) { + cout << "Writing " << persistentData.outputPadding << " padding frames" << endl; for (int j = 0; j < persistentData.outputPadding; j++) { outputVideo.write(drawing(cropRectangles[i])); } @@ -105,10 +110,9 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData } drawing.release(); - } - //Close all file writers + // Close all file writers outputVideo.release(); fileReader.getInputVideo().release(); } From 1981aa98a00078932a521e7cb21643e744db1ec0 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Mon, 16 Oct 2017 15:08:12 +0200 Subject: [PATCH 10/49] OPENCAST-1538 Output cropping co-ordinates rather than write out the cropped video So we can crop using another technique that will preserve frame PTS values --- source/FileReader.cpp | 11 +++-- source/PersistentData.cpp | 5 +- source/PersistentData.h | 23 ++++----- source/mainDriver.cpp | 25 ++++------ source/panning/VirtualCinematographer.cpp | 57 ++++++++++++----------- source/segmentation/Track4KPreProcess.cpp | 2 +- source/tracking/MovementDetection.cpp | 6 ++- source/tracking/MovementDetection.h | 4 +- 8 files changed, 66 insertions(+), 67 deletions(-) diff --git a/source/FileReader.cpp b/source/FileReader.cpp index b026157..89eed59 100644 --- a/source/FileReader.cpp +++ b/source/FileReader.cpp @@ -40,8 +40,8 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) cout << "Reading video file: " << filename << endl; - fps = inputVideo.get(CV_CAP_PROP_FPS); //Frame Rate - numFrames = inputVideo.get(CV_CAP_PROP_FRAME_COUNT); //Number of frames + fps = inputVideo.get(CV_CAP_PROP_FPS); // Frame Rate + numFrames = inputVideo.get(CV_CAP_PROP_FRAME_COUNT); // Number of frames (may not be accurate) videoDuration = numFrames / fps; //Duration of video file in seconds @@ -62,7 +62,7 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) cout << "FPS: " << std::fixed << std::setprecision(6) << fps << endl; //Set video file info - pD.setVideoInfo(fps, numFrames, videoDimension, ex); + pD.setVideoInfo(fps, videoDimension, ex); return 0; @@ -124,8 +124,11 @@ void FileReader::getNextFrame(cv::Mat &frame) { cerr << "End of video file in getNextFrame()" << endl; endOfFile = true; + } else { + // Appears to return a calculated position from frame and framerate rather than actual position + // long stamp = inputVideo.get( CV_CAP_PROP_POS_MSEC ); + // std::cout << "Timestamp: " << stamp << std::endl; } - } diff --git a/source/PersistentData.cpp b/source/PersistentData.cpp index 3db02ba..371ee1b 100644 --- a/source/PersistentData.cpp +++ b/source/PersistentData.cpp @@ -20,15 +20,12 @@ #include "PersistentData.h" - -void PersistentData::setVideoInfo(double f, int t, cv::Size s, 
int ext) +void PersistentData::setVideoInfo(double f, cv::Size s, int ext) { if(!videoInfoSet){ fps = f; - totalFrames = t; videoDimension = s; ext_int = ext; videoInfoSet = true; } - } diff --git a/source/PersistentData.h b/source/PersistentData.h index 727b3df..f680a58 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -29,27 +29,31 @@ class PersistentData { -private: + private: bool videoInfoSet = false; -public: + + public: //Store the areas of motion. Each vector stores x frames worth of motion int segmentationNumFramesToProcessPerIteration = 29; // number of frames that will be read into memory std::vector areasOfMotion; std::vector metaFrameVector; double fps; //Frame Rate - double outputFps; // Output Frame Rate - int totalFrames; //Number of frames + int processedFrames; // Number of frames read during analysis cv::Size videoDimension; - void setVideoInfo(double f, int t, cv::Size s, int ext); + void setVideoInfo(double f, cv::Size s, int ext); + + // Input file + std::string inputFile = ""; - std::string saveFileExtension = "mp4"; //Default save extension - std::string inputFileName = ""; int ext_int; //The int version of the file extension int codec; //Default codec for mp4 + cv::Size panOutputVideoSize = cv::Size(1280, 720); - std::string outputVideoFilenameSuffix = ""; + + // Output file (cropping data) + std::string outputFile = ""; vector lecturerTrackedLocationRectangles; int skipFrameMovementDetection; @@ -58,9 +62,6 @@ class PersistentData bool boardsFound = false; cv::Rect boardCropRegion; - - // Number of frames to repeat at the start of the output video - int outputPadding = 0; }; #endif //TRACK4K_PERSISTENTDATA_H diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index 1e7d227..fd644e2 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -40,16 +40,16 @@ int main(int argc, char *argv[]) { cv::Size saveDimensions; //Check if input of command line parameters are valid - if (argc == 8) { - string codecInput = argv[7]; + if (argc == 6) { + string codecInput = argv[5]; persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); - } else if (argc == 7) { + } else if (argc == 5) { //Use default codec persistentData.codec = CV_FOURCC('X', '2', '6', '4'); } else { cerr << "\ntrack4k build UCT " << __DATE__ << " " << __TIME__ << "\n\n" - << "Parameters:\n track4k [FOURCC Codec]\n\n" + << "Parameters:\n track4k [FOURCC Codec]\n\n" << "See http://www.fourcc.org/codecs.php for available codecs. 
The default codec of X264 for mp4 will be used, if none is specified!\n" << endl; return -1; @@ -61,26 +61,17 @@ int main(int argc, char *argv[]) { //Extract the extensions from the filenames inputFileExtension = inputFilename.substr(inputFilename.find_first_of('.') + 1); - outputFileExtension = outputFilename.substr(outputFilename.find_first_of('.') + 1); //Extract the crop dimensions from the parameters cropWidth = stoi(argv[3]); cropHeight = stoi(argv[4]); saveDimensions = cv::Size(cropWidth, cropHeight); - // Padding frames - int padding = stoi(argv[5]); + //Update this information in PersistentData + persistentData.inputFile = inputFilename; + persistentData.outputFile = outputFilename; - // Target framerate - double targetFps = stod(argv[6]); - - //Update this information in PersistantData - persistentData.inputFileName = inputFilename; - persistentData.outputVideoFilenameSuffix = outputFilename.substr(0,outputFilename.find_first_of('.')); - persistentData.saveFileExtension = outputFileExtension; persistentData.panOutputVideoSize = saveDimensions; - persistentData.outputPadding = padding; - persistentData.outputFps = targetFps; cout << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl; @@ -96,7 +87,7 @@ int main(int argc, char *argv[]) { cout << "\n----------------------------------------" << endl; cout << "Stage [2 of 3] - Lecturer Tracking" << endl; cout << "----------------------------------------\n" << endl; - MovementDetection move(persistentData.inputFileName, &r); + MovementDetection move(persistentData, &r); vector *rR = new vector(); move.getLecturer(rR); cout << "\nStage 2 Complete" << endl; diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 0d9abed..1df3469 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -17,8 +17,11 @@ // // Created by Mohamed Tanweer Khatieb on 2016/07/21. // + #include "../segmentation/Track4KPreProcess.h" #include "VirtualCinematographer.h" + +#include #include using namespace cv; @@ -77,42 +80,40 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData vector cropRectangles; panLogic.doPan(movementLines, cropRectangles); - cout << "Writing output file " << persistentData.outputVideoFilenameSuffix << "." << persistentData.saveFileExtension << endl; - - //Create video writer object for writing the cropped output video - VideoWriter outputVideo; - outputVideo.open(persistentData.outputVideoFilenameSuffix + "." 
+ persistentData.saveFileExtension, - persistentData.codec, persistentData.outputFps, persistentData.panOutputVideoSize, 1); + ofstream cropdata; + cropdata.open(persistentData.outputFile); - //Open original input video file - FileReader fileReader; - fileReader.readFile(persistentData.inputFileName, persistentData); - - Mat drawing; + if (cropdata.is_open()) { + cout << "Writing cropping data to output file " << persistentData.outputFile << endl; + } else { + cerr << "Unable to write cropping data to output file " << persistentData.outputFile << endl; + return 1; + } //Loop over all frames in the input video and save the cropped frames to a stream as well as the board segment - cout << "Crop rectangles: " << cropRectangles.size() << endl; + cout << "Crop rectangles : " << cropRectangles.size() << endl; + cout << "Frames processed: " << persistentData.processedFrames << endl; - for (int i = 0; i < cropRectangles.size(); i++) { + cropdata << "# track4k " << persistentData.inputFile << " " << persistentData.processedFrames + << " frames (frame top-left-x top-left-y) output frame size " << persistentData.panOutputVideoSize << endl; - fileReader.getNextFrame(drawing); + int last_x = -1; + int last_y =-1; - if (!fileReader.isEndOfFile()) { - outputVideo.write(drawing(cropRectangles[i])); - - // Pad the start of the video if necessary - if ((i == 0) && (persistentData.outputPadding > 0)) { - cout << "Writing " << persistentData.outputPadding << " padding frames" << endl; - for (int j = 0; j < persistentData.outputPadding; j++) { - outputVideo.write(drawing(cropRectangles[i])); - } - } + for (int i = 0; i < persistentData.processedFrames - 1; i++) { + if ((cropRectangles[i].x != last_x) || (cropRectangles[i].y != last_y)) { + cropdata << i << " " << cropRectangles[i].x << " " << cropRectangles[i].y << endl; + last_x = cropRectangles[i].x; + last_y = cropRectangles[i].y; } - - drawing.release(); } + // Always write out the last frame + int i = persistentData.processedFrames - 1; + cropdata << i << " " << cropRectangles[i].x << " " << cropRectangles[i].y << endl; + // Close all file writers - outputVideo.release(); - fileReader.getInputVideo().release(); + cropdata.close(); + + return 0; } diff --git a/source/segmentation/Track4KPreProcess.cpp b/source/segmentation/Track4KPreProcess.cpp index 4b732f9..8dbbf17 100644 --- a/source/segmentation/Track4KPreProcess.cpp +++ b/source/segmentation/Track4KPreProcess.cpp @@ -37,7 +37,7 @@ void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) //Read in video file FileReader fileReader; - fileReader.readFile(persistentData.inputFileName, persistentData); + fileReader.readFile(persistentData.inputFile, persistentData); //Create objects MotionDetection motionDetection; //Detects and segments overall merged motion over a given number of frames diff --git a/source/tracking/MovementDetection.cpp b/source/tracking/MovementDetection.cpp index 1cdb475..ab9a9dc 100644 --- a/source/tracking/MovementDetection.cpp +++ b/source/tracking/MovementDetection.cpp @@ -29,7 +29,9 @@ using namespace std; VideoWriter maxS; -MovementDetection::MovementDetection(string vidLocation, vector *lect) { +MovementDetection::MovementDetection(PersistentData &persistentData, vector *lect) { + + string vidLocation = persistentData.inputFile; //set vid directory and window name setVideoDir(vidLocation); @@ -383,6 +385,8 @@ MovementDetection::MovementDetection(string vidLocation, vector *lect) { inputVideo.release(); + persistentData.processedFrames = frameNumber; + processTime 
= clock() - processTime; cout << "Processing took: " << int(processTime / CLOCKS_PER_SEC) << "s" << endl; diff --git a/source/tracking/MovementDetection.h b/source/tracking/MovementDetection.h index 75e7a7b..238a03f 100644 --- a/source/tracking/MovementDetection.h +++ b/source/tracking/MovementDetection.h @@ -34,6 +34,8 @@ #include #include "Ghost.h" +#include "../PersistentData.h" + using namespace cv; using namespace std; @@ -60,7 +62,7 @@ class MovementDetection{ void setVideoDir(string name){ videoDir = name; } //default constructor and tracking runner - MovementDetection(string name, vector* lect); + MovementDetection(PersistentData &persistentData, vector* lect); //calculates the distance between the closest two edge points of two rectangles double closestDistance(Rect a, Rect b); From 01b805d223b9e33da2f6c0f90e549e930e21757c Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 14:00:40 +0200 Subject: [PATCH 11/49] OPENCAST-1538 Initial commit - use ffmpeg libs to crop output file Adapted from ffmpeg examples/transcoding.c --- .gitignore | 2 + cropvid/build.sh | 4 + cropvid/cropvid.c | 650 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 656 insertions(+) create mode 100755 cropvid/build.sh create mode 100644 cropvid/cropvid.c diff --git a/.gitignore b/.gitignore index 34174f9..2a7f7a4 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ build/ cmake-build-debug/ cmake/ build/ +cropvid/cropvid +cropvid/*.txt CMakeFiles/ CMakeCache.txt Makefile diff --git a/cropvid/build.sh b/cropvid/build.sh new file mode 100755 index 0000000..288ca85 --- /dev/null +++ b/cropvid/build.sh @@ -0,0 +1,4 @@ +#! /bin/sh + +#gcc -o cropvid cropvid.c `pkg-config --cflags --libs libavformat libavfilter libavcodec libavutil` +gcc -o cropvid cropvid.c -I/usr/local/include -L/usr/local/lib -lavfilter -lxcb -lxcb-shm -lxcb -lx265 -lx264 -lvpx -lm -lvpx -lm -lvpx -lm -lvpx -lm -ldl -llzma -lbz2 -lz -pthread -lswscale -lm -lpostproc -lavformat -lxcb -lxcb-shm -lxcb -lx265 -lx264 -lvpx -lm -lvpx -lm -lvpx -lm -lvpx -lm -ldl -llzma -lbz2 -lz -pthread -lavcodec -lvdpau -lX11 -lva -lva-x11 -lX11 -lva -lva-drm -lva -lxcb -lxcb-shm -lxcb -lx265 -lx264 -lvpx -lm -lvpx -lm -lvpx -lm -lvpx -lm -ldl -llzma -lbz2 -lz -pthread -lswresample -lm -lavutil -lm -lvdpau -lX11 -lva -lva-x11 -lX11 -lva -lva-drm -lva diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c new file mode 100644 index 0000000..2b37879 --- /dev/null +++ b/cropvid/cropvid.c @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2010 Nicolas George + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2014 Andrey Utkin + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * API example for demuxing, decoding, filtering, encoding and muxing + * @example transcoding.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static AVFormatContext *ifmt_ctx; +static AVFormatContext *ofmt_ctx; +typedef struct FilteringContext { + AVFilterContext *buffersink_ctx; + AVFilterContext *buffersrc_ctx; + AVFilterGraph *filter_graph; +} FilteringContext; +static FilteringContext *filter_ctx; + +typedef struct StreamContext { + AVCodecContext *dec_ctx; + AVCodecContext *enc_ctx; +} StreamContext; +static StreamContext *stream_ctx; + +static int open_input_file(const char *filename) +{ + int ret; + unsigned int i; + + ifmt_ctx = NULL; + if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); + return ret; + } + + if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); + return ret; + } + + stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx)); + if (!stream_ctx) + return AVERROR(ENOMEM); + + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + AVStream *stream = ifmt_ctx->streams[i]; + AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id); + AVCodecContext *codec_ctx; + if (!dec) { + av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i); + return AVERROR_DECODER_NOT_FOUND; + } + codec_ctx = avcodec_alloc_context3(dec); + if (!codec_ctx) { + av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i); + return AVERROR(ENOMEM); + } + ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context " + "for stream #%u\n", i); + return ret; + } + /* Reencode video */ + if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL); + /* Open decoder */ + ret = avcodec_open2(codec_ctx, dec, NULL); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i); + return ret; + } + } + stream_ctx[i].dec_ctx = codec_ctx; + } + + av_dump_format(ifmt_ctx, 0, filename, 0); + return 0; +} + +static int open_output_file(const char *filename, int width, int height) +{ + AVStream *out_stream; + AVStream *in_stream; + AVCodecContext *dec_ctx, *enc_ctx; + AVCodec *encoder; + int ret; + unsigned int i; + + ofmt_ctx = NULL; + avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename); + if (!ofmt_ctx) { + av_log(NULL, AV_LOG_ERROR, "Could not create output context\n"); + return AVERROR_UNKNOWN; + } + + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + out_stream = avformat_new_stream(ofmt_ctx, NULL); + if (!out_stream) { + av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n"); + return AVERROR_UNKNOWN; + } + + in_stream = ifmt_ctx->streams[i]; + dec_ctx = stream_ctx[i].dec_ctx; + + if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + + /* in this example, we choose transcoding to same codec */ + encoder = avcodec_find_encoder(dec_ctx->codec_id); + if (!encoder) { + av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n"); + return 
AVERROR_INVALIDDATA; + } + enc_ctx = avcodec_alloc_context3(encoder); + if (!enc_ctx) { + av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n"); + return AVERROR(ENOMEM); + } + + /* In this example, we transcode to same properties (picture size, + * sample rate etc.). These properties can be changed for output + * streams easily using filters */ + if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + enc_ctx->height = height; + enc_ctx->width = width; + enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; + + /* take first format from list of supported formats */ + // if (encoder->pix_fmts) + // enc_ctx->pix_fmt = encoder->pix_fmts[0]; + // else + enc_ctx->pix_fmt = dec_ctx->pix_fmt; + + // Set all the things the same + enc_ctx->time_base = dec_ctx->time_base; + enc_ctx->pkt_timebase = dec_ctx->pkt_timebase; + enc_ctx->framerate = dec_ctx->framerate; + + av_log(NULL, AV_LOG_INFO, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); + } + + /* Third parameter can be used to pass settings to encoder */ + ret = avcodec_open2(enc_ctx, encoder, NULL); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i); + return ret; + } + ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i); + return ret; + } + if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) + enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + + av_log(NULL, AV_LOG_INFO, "SM2 Setting stream params"); + out_stream->time_base = enc_ctx->time_base; + out_stream->r_frame_rate = in_stream->r_frame_rate; + out_stream->avg_frame_rate = in_stream->avg_frame_rate; + out_stream->start_time = in_stream->start_time; + + av_log(NULL, AV_LOG_INFO, "Output stream timebase for stream %i is %i/%i\n", i, out_stream->time_base.num, out_stream->time_base.den); + + stream_ctx[i].enc_ctx = enc_ctx; + + } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) { + av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i); + return AVERROR_INVALIDDATA; + } else { + /* if this stream must be remuxed */ + ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i); + return ret; + } + out_stream->time_base = in_stream->time_base; + } + + } + av_dump_format(ofmt_ctx, 0, filename, 1); + + if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) { + ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename); + return ret; + } + } + + /* init muxer, write output file header */ + ret = avformat_write_header(ofmt_ctx, NULL); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n"); + return ret; + } + + return 0; +} + +static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, + AVCodecContext *enc_ctx, const char *filter_spec) +{ + char args[512]; + int ret = 0; + AVFilter *buffersrc = NULL; + AVFilter *buffersink = NULL; + AVFilterContext *buffersrc_ctx = NULL; + AVFilterContext *buffersink_ctx = NULL; + AVFilterInOut *outputs = avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); + AVFilterGraph *filter_graph = avfilter_graph_alloc(); + + if (!outputs || !inputs || !filter_graph) { + ret = AVERROR(ENOMEM); + goto end; + } + + if (dec_ctx->codec_type == 
AVMEDIA_TYPE_VIDEO) { + buffersrc = avfilter_get_by_name("buffer"); + buffersink = avfilter_get_by_name("buffersink"); + if (!buffersrc || !buffersink) { + av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n"); + ret = AVERROR_UNKNOWN; + goto end; + } + + snprintf(args, sizeof(args), + "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", + dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, + dec_ctx->time_base.num, dec_ctx->time_base.den, + dec_ctx->sample_aspect_ratio.num, + dec_ctx->sample_aspect_ratio.den); + + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", + args, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); + goto end; + } + + ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", + NULL, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); + goto end; + } + + ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", + (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt), + AV_OPT_SEARCH_CHILDREN); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); + goto end; + } + } else { + ret = AVERROR_UNKNOWN; + goto end; + } + + /* Endpoints for the filter graph. */ + outputs->name = av_strdup("in"); + outputs->filter_ctx = buffersrc_ctx; + outputs->pad_idx = 0; + outputs->next = NULL; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = buffersink_ctx; + inputs->pad_idx = 0; + inputs->next = NULL; + + if (!outputs->name || !inputs->name) { + ret = AVERROR(ENOMEM); + goto end; + } + + if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, + &inputs, &outputs, NULL)) < 0) + goto end; + + if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) + goto end; + + /* Fill FilteringContext */ + fctx->buffersrc_ctx = buffersrc_ctx; + fctx->buffersink_ctx = buffersink_ctx; + fctx->filter_graph = filter_graph; + +end: + avfilter_inout_free(&inputs); + avfilter_inout_free(&outputs); + + return ret; +} + +static int init_filters(void) +{ + const char *filter_spec; + unsigned int i; + int ret; + filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx)); + if (!filter_ctx) + return AVERROR(ENOMEM); + + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + filter_ctx[i].buffersrc_ctx = NULL; + filter_ctx[i].buffersink_ctx = NULL; + filter_ctx[i].filter_graph = NULL; + if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) + continue; + + if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) + filter_spec = "null"; /* passthrough (dummy) filter for video */ + + ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx, + stream_ctx[i].enc_ctx, filter_spec); + + if (ret) + return ret; + } + return 0; +} + +static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) { + int ret; + int got_frame_local; + AVPacket enc_pkt; + + if (!got_frame) + got_frame = &got_frame_local; + + if (filt_frame != NULL) { + av_log(NULL, AV_LOG_DEBUG, "Encoding frame pts %li duration %li\n", filt_frame->pts, filt_frame->pkt_duration); + } + + /* encode filtered frame */ + enc_pkt.data = NULL; + enc_pkt.size = 0; + av_init_packet(&enc_pkt); + + ret = avcodec_send_frame(stream_ctx[stream_index].enc_ctx, filt_frame); + if (ret < 0) { + if (filt_frame != NULL) { + av_log(NULL, AV_LOG_INFO, " encoder ret %i error %s sending frame\n", ret, av_err2str(ret)); + return ret; + } + } + + ret = avcodec_receive_packet(stream_ctx[stream_index].enc_ctx, &enc_pkt); + 
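+ // With the decoupled send/receive API used here, avcodec_send_frame() queues a
+ // single frame (a NULL frame puts the encoder into flush mode), while
+ // avcodec_receive_packet() returns AVERROR(EAGAIN) when the encoder still needs
+ // more input before it can emit a packet, and AVERROR_EOF once flushing has
+ // completed; the check below treats both cases as "no packet yet" rather than errors.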
+ if (ret < 0) { + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + *got_frame = 0; + return 0; + } else { + av_log(NULL, AV_LOG_INFO, " encoder ret %i error %s receive packet\n", ret, av_err2str(ret)); + return ret; + } + } + + *got_frame = 1; + + /* prepare packet for muxing */ + enc_pkt.stream_index = stream_index; + + av_log(NULL, AV_LOG_DEBUG, "Muxing packet pts %li duration %li\n", enc_pkt.pts, enc_pkt.duration); + + /* mux encoded frame */ + ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt); + + return ret; +} + +static int flush_encoder(unsigned int stream_index) +{ + int ret; + int got_frame; + + if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities & + AV_CODEC_CAP_DELAY)) + return 0; + + while (1) { + av_log(NULL, AV_LOG_DEBUG, "Flushing stream #%u encoder\n", stream_index); + ret = encode_write_frame(NULL, stream_index, &got_frame); + if (ret < 0) + break; + if (!got_frame) + return 0; + } + return ret; +} + +static AVFilterGraph *init_crop_filter(const AVFrame *in, int left, int top, int width, int height) +{ + AVFilterGraph *filter_graph = avfilter_graph_alloc(); + AVFilterInOut *inputs = NULL, *outputs = NULL; + char args[512]; + int ret; + + snprintf(args, sizeof(args), + "buffer=video_size=%dx%d:pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];" + "[in]crop=%d:%d:%d:%d[out];" + "[out]buffersink", + in->width, in->height, in->format, + width, height, left, top); + + ret = avfilter_graph_parse2(filter_graph, args, &inputs, &outputs); + if (ret < 0) return NULL; + assert(inputs == NULL && outputs == NULL); + + ret = avfilter_graph_config(filter_graph, NULL); + if (ret < 0) return NULL; + + av_log(NULL, AV_LOG_INFO, "Filter:\n%s\n", args); + + return filter_graph; +} + +static AVFrame *crop_frame(const AVFrame *in, AVFilterGraph *filter_graph) +{ + AVFrame *f = av_frame_alloc(); + AVFilterContext *buffersink_ctx; + AVFilterContext *buffersrc_ctx; + int ret; + + assert(filter_graph != NULL); + + // Crop the frame + + buffersrc_ctx = avfilter_graph_get_filter(filter_graph, "Parsed_buffer_0"); + buffersink_ctx = avfilter_graph_get_filter(filter_graph, "Parsed_buffersink_2"); + assert(buffersrc_ctx != NULL); + assert(buffersink_ctx != NULL); + + av_frame_ref(f, in); + ret = av_buffersrc_add_frame(buffersrc_ctx, f); + if (ret < 0) return NULL; + ret = av_buffersink_get_frame(buffersink_ctx, f); + if (ret < 0) return NULL; + + // avfilter_graph_free(&filter_graph); + + return f; +} + +int main(int argc, char **argv) +{ + + int ret; + AVPacket packet = { .data = NULL, .size = 0 }; + AVFrame *frame = NULL; + AVFilterGraph *crop_filter_graph = NULL; + + unsigned int stream_index; + unsigned int i; + int framecount; + int pktcount; + int eof; + int got_frame; + + int out_width, out_height; + + out_width = 1920; + out_height = 1080; + + if (argc != 4) { + av_log(NULL, AV_LOG_ERROR, "Usage: %s \n", argv[0]); + return 1; + } + + av_register_all(); + avfilter_register_all(); + + if ((ret = open_input_file(argv[1])) < 0) + goto end; + if ((ret = open_output_file(argv[2], out_width, out_height)) < 0) + goto end; + if ((ret = init_filters()) < 0) + goto end; + + FILE* cropfile = fopen(argv[3], "r"); /* should check the result */ + if (cropfile == NULL) { + av_log(NULL, AV_LOG_ERROR, "Cannot open cropping data file %s\n", argv[3]); + goto end; + } + + char line[256]; + + while (fgets(line, sizeof(line), cropfile)) { + /* note that fgets don't strip the terminating \n, checking its + presence would allow to handle lines longer that sizeof(line) */ + printf("%s", line); + } + + 
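+ /* At this stage the cropping data file is only echoed to stdout; per-frame
+ * parsing of it is introduced in a later commit in this series. The file is the
+ * text written out by VirtualCinematographer: a "# track4k ..." header followed
+ * by one "frame top-left-x top-left-y" record per change of crop position, for
+ * example (illustrative values only):
+ *
+ * 0 416 650
+ * 57 432 650
+ */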
fclose(cropfile); + + framecount = 0; + pktcount = 0; + eof = 0; + + /* read all packets */ + while (!eof) { + + if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0) { + av_log(NULL, AV_LOG_DEBUG, "Demuxer ret %i error %s for packet %u\n", ret, av_err2str(ret), pktcount); + eof = 1; + } + + stream_index = packet.stream_index; + + av_log(NULL, AV_LOG_DEBUG, "Demuxer gave packet %i of stream_index %u pts %li duration %li\n", + pktcount, stream_index, packet.pts, packet.duration); + + pktcount++; + + if (filter_ctx[stream_index].filter_graph) { + frame = av_frame_alloc(); + if (!frame) { + ret = AVERROR(ENOMEM); + av_log(NULL, AV_LOG_INFO, " not a frame\n"); + break; + } + + if (!eof) { + ret = avcodec_send_packet(stream_ctx[stream_index].dec_ctx, &packet); + if (ret < 0) { + av_frame_free(&frame); + av_log(NULL, AV_LOG_INFO, "Decoder send packet failed with ret err %s\n", av_err2str(ret)); + break; + } + } else { + ret = avcodec_send_packet(stream_ctx[stream_index].dec_ctx, NULL); + } + + ret = avcodec_receive_frame(stream_ctx[stream_index].dec_ctx, frame); + + if (ret < 0) { + av_frame_free(&frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + ret = 0; + av_log(NULL, AV_LOG_DEBUG, " haven't got a frame\n"); + av_frame_free(&frame); + } else { + av_log(NULL, AV_LOG_INFO, "Decoding failed with ret err %s\n", av_err2str(ret)); + break; + } + } else { + av_log(NULL, AV_LOG_INFO, "Frame %i pts is %li duration %li\n", framecount, frame->pts, frame->pkt_duration); + framecount++; + + if (crop_filter_graph == NULL) { + crop_filter_graph = init_crop_filter(frame, 0, 0, out_width, out_height); + } + + AVFrame *cropped = crop_frame(frame, crop_filter_graph); + + ret = encode_write_frame(cropped, stream_index, &got_frame); + + av_frame_free(&frame); + av_frame_free(&cropped); + + if (ret < 0) + goto end; + } + } else { + if (!eof) { + /* remux this packet without reencoding */ + av_log(NULL, AV_LOG_INFO, " remuxing packet without encoding"); + av_packet_rescale_ts(&packet, + ifmt_ctx->streams[stream_index]->time_base, + ofmt_ctx->streams[stream_index]->time_base); + + ret = av_interleaved_write_frame(ofmt_ctx, &packet); + if (ret < 0) + goto end; + } + } + + if (!eof) { + av_packet_unref(&packet); + } + } + + /* flush filters and encoders */ + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + + /* flush encoder */ + ret = flush_encoder(i); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n"); + goto end; + } + } + + av_write_trailer(ofmt_ctx); +end: + av_packet_unref(&packet); + av_frame_free(&frame); + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + avcodec_free_context(&stream_ctx[i].dec_ctx); + if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx) + avcodec_free_context(&stream_ctx[i].enc_ctx); + if (filter_ctx && filter_ctx[i].filter_graph) + avfilter_graph_free(&filter_ctx[i].filter_graph); + } + av_free(filter_ctx); + av_free(stream_ctx); + avformat_close_input(&ifmt_ctx); + if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) + avio_closep(&ofmt_ctx->pb); + avformat_free_context(ofmt_ctx); + + if (ret < 0) + av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret)); + + + return ret ? 
1 : 0; +} From 407822a2155be708cac1380ec3af213fba590bf8 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Tue, 17 Oct 2017 14:48:56 +0200 Subject: [PATCH 12/49] Write out co-ords without []s for easier parsing --- source/panning/VirtualCinematographer.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 1df3469..4a70946 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -95,7 +95,8 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData cout << "Frames processed: " << persistentData.processedFrames << endl; cropdata << "# track4k " << persistentData.inputFile << " " << persistentData.processedFrames - << " frames (frame top-left-x top-left-y) output frame size " << persistentData.panOutputVideoSize << endl; + << " frames (frame top-left-x top-left-y) output frame size " + << persistentData.panOutputVideoSize.width << " " << persistentData.panOutputVideoSize.height << endl; int last_x = -1; int last_y =-1; From 27ed121a3b774a9c199bffbb31afdf155bf9b07f Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 17:08:40 +0200 Subject: [PATCH 13/49] Implement cropping, add dummy frames prior to keyframe --- cropvid/cropvid.c | 120 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 106 insertions(+), 14 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index 2b37879..312b3f6 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -40,19 +40,28 @@ static AVFormatContext *ifmt_ctx; static AVFormatContext *ofmt_ctx; + typedef struct FilteringContext { AVFilterContext *buffersink_ctx; AVFilterContext *buffersrc_ctx; AVFilterGraph *filter_graph; } FilteringContext; + static FilteringContext *filter_ctx; typedef struct StreamContext { AVCodecContext *dec_ctx; AVCodecContext *enc_ctx; } StreamContext; + static StreamContext *stream_ctx; +typedef struct FrameCrop { + int frame; + int x; + int y; +} FrameCrop; + static int open_input_file(const char *filename) { int ret; @@ -168,6 +177,10 @@ static int open_output_file(const char *filename, int width, int height) enc_ctx->pkt_timebase = dec_ctx->pkt_timebase; enc_ctx->framerate = dec_ctx->framerate; + // Quality + enc_ctx->flags |= CODEC_FLAG_QSCALE; + enc_ctx->global_quality = FF_QP2LAMBDA * 23; + av_log(NULL, AV_LOG_INFO, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); } @@ -185,13 +198,17 @@ static int open_output_file(const char *filename, int width, int height) if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - av_log(NULL, AV_LOG_INFO, "SM2 Setting stream params"); + av_log(NULL, AV_LOG_INFO, "Setting stream params\n"); out_stream->time_base = enc_ctx->time_base; out_stream->r_frame_rate = in_stream->r_frame_rate; out_stream->avg_frame_rate = in_stream->avg_frame_rate; out_stream->start_time = in_stream->start_time; - av_log(NULL, AV_LOG_INFO, "Output stream timebase for stream %i is %i/%i\n", i, out_stream->time_base.num, out_stream->time_base.den); + if (in_stream->duration > 0) + out_stream->duration = in_stream->duration; + + av_log(NULL, AV_LOG_INFO, "Output stream timebase for stream %i is %i/%i, duration is %li\n", + i, out_stream->time_base.num, out_stream->time_base.den, out_stream->duration); stream_ctx[i].enc_ctx = enc_ctx; @@ -443,7 +460,7 @@ static AVFilterGraph *init_crop_filter(const AVFrame *in, int left, int 
top, int ret = avfilter_graph_config(filter_graph, NULL); if (ret < 0) return NULL; - av_log(NULL, AV_LOG_INFO, "Filter:\n%s\n", args); + av_log(NULL, AV_LOG_DEBUG, "Filter:\n%s\n", args); return filter_graph; } @@ -475,6 +492,27 @@ static AVFrame *crop_frame(const AVFrame *in, AVFilterGraph *filter_graph) return f; } +// Get next cropping data. Returns -1 for frame number on EOF + +FrameCrop getCropInfo(FILE *cropfile) { + + char line[256]; + FrameCrop crop; + + crop.frame = -1; + crop.x = 0; + crop.y = 0; + + // frame x y + if (fgets(line, sizeof(line), cropfile) != NULL) { + if (3 == sscanf(line, "%i %i %i", &crop.frame, &crop.x, &crop.y)) { + av_log(NULL, AV_LOG_DEBUG, "Frame %i crop %i %i\n", crop.frame, crop.x, crop.y); + } + } + + return crop; +} + int main(int argc, char **argv) { @@ -486,9 +524,16 @@ int main(int argc, char **argv) unsigned int stream_index; unsigned int i; int framecount; + int cropframes; int pktcount; int eof; int got_frame; + int found_keyframe; + int pre_keyframe; + int64_t pre_key_pts[128]; + int first_frame; + int64_t first_pts; + int64_t last_pts; int out_width, out_height; @@ -510,25 +555,31 @@ int main(int argc, char **argv) if ((ret = init_filters()) < 0) goto end; - FILE* cropfile = fopen(argv[3], "r"); /* should check the result */ + FILE *cropfile = fopen(argv[3], "r"); /* should check the result */ if (cropfile == NULL) { av_log(NULL, AV_LOG_ERROR, "Cannot open cropping data file %s\n", argv[3]); goto end; } char line[256]; + fgets(line, sizeof(line), cropfile); - while (fgets(line, sizeof(line), cropfile)) { - /* note that fgets don't strip the terminating \n, checking its - presence would allow to handle lines longer that sizeof(line) */ - printf("%s", line); - } + // # track4k short.mkv 224 frames (frame top-left-x top-left-y) output frame size 1920 1080 + sscanf(line, "%*s %*s %*s %i %*s %*s %*s %*s %*s %*s %*s %i %i", &cropframes, &out_width, &out_height); - fclose(cropfile); + av_log(NULL, AV_LOG_INFO, "Crop data: %i frames width %i height %i\n", cropframes, out_width, out_height); + + FrameCrop crop = getCropInfo(cropfile); + FrameCrop next_crop = getCropInfo(cropfile); framecount = 0; pktcount = 0; eof = 0; + found_keyframe = 0; + pre_keyframe = 0; + first_frame = 1; + first_pts = -1; + last_pts = 0; /* read all packets */ while (!eof) { @@ -540,8 +591,21 @@ int main(int argc, char **argv) stream_index = packet.stream_index; - av_log(NULL, AV_LOG_DEBUG, "Demuxer gave packet %i of stream_index %u pts %li duration %li\n", - pktcount, stream_index, packet.pts, packet.duration); + av_log(NULL, AV_LOG_DEBUG, "Demuxer gave packet %i of stream_index %u pts %li duration %li flags %i\n", + pktcount, stream_index, packet.pts, packet.duration, packet.flags); + + if (first_pts < 0) first_pts = packet.pts; + if (packet.pts > last_pts) last_pts = packet.pts; + + // First keyframe + if (packet.flags & AV_PKT_FLAG_KEY) { + found_keyframe = 1; + } + + // Record the packet PTS value + if (!found_keyframe) { + pre_key_pts[pre_keyframe++] = packet.pts; + } pktcount++; @@ -578,19 +642,41 @@ int main(int argc, char **argv) } } else { av_log(NULL, AV_LOG_INFO, "Frame %i pts is %li duration %li\n", framecount, frame->pts, frame->pkt_duration); - framecount++; if (crop_filter_graph == NULL) { - crop_filter_graph = init_crop_filter(frame, 0, 0, out_width, out_height); + crop_filter_graph = init_crop_filter(frame, crop.x, crop.y, out_width, out_height); + } + + // New cropping data + if ((framecount >= next_crop.frame) && (next_crop.frame > 0)) { + crop = next_crop; + 
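+ // Pre-read the following record so the frame at which the crop position next
+ // changes is known in advance; because the crop x/y offsets are baked into the
+ // filter-graph description string, the graph is rebuilt below whenever they change.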
next_crop = getCropInfo(cropfile); + + // Recreate filter graph + avfilter_graph_free(&crop_filter_graph); + crop_filter_graph = init_crop_filter(frame, crop.x, crop.y, out_width, out_height); } AVFrame *cropped = crop_frame(frame, crop_filter_graph); + // Write out frames for pre-keyframe packets to keep audio timesync for muxing + if (first_frame && pre_keyframe) { + av_log(NULL, AV_LOG_INFO, "Writing %i frames from PTS %li for pre-keyframe buffer\n", pre_keyframe, pre_key_pts[0]); + for (int i=0; i < pre_keyframe; i++) { + cropped->pts = pre_key_pts[i]; + ret = encode_write_frame(cropped, stream_index, &got_frame); + } + first_frame = 0; + cropped->pts = frame->pts; + } + ret = encode_write_frame(cropped, stream_index, &got_frame); av_frame_free(&frame); av_frame_free(&cropped); + framecount++; + if (ret < 0) goto end; } @@ -624,7 +710,12 @@ int main(int argc, char **argv) } } + ofmt_ctx->duration = last_pts - first_pts; + + av_dump_format(ofmt_ctx, 0, argv[2], 1); + av_write_trailer(ofmt_ctx); + end: av_packet_unref(&packet); av_frame_free(&frame); @@ -645,6 +736,7 @@ int main(int argc, char **argv) if (ret < 0) av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret)); + fclose(cropfile); return ret ? 1 : 0; } From 53f28a9aa5cc96121e1dc6b1937bd656b38fbeaa Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 17:46:53 +0200 Subject: [PATCH 14/49] Ignore test files --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 2a7f7a4..f72abb1 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,8 @@ cmake/ build/ cropvid/cropvid cropvid/*.txt +cropvid/*.flac +cropvid/*.crop CMakeFiles/ CMakeCache.txt Makefile From c948af25a64a1835c14d0094b54d5334f012250b Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 18:08:21 +0200 Subject: [PATCH 15/49] Set quality as per this param buried deep in the encoder context options https://lists.ffmpeg.org/pipermail/libav-user/2015-April/008027.html --- cropvid/cropvid.c | 1 + 1 file changed, 1 insertion(+) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index 312b3f6..b6b4c5c 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -180,6 +180,7 @@ static int open_output_file(const char *filename, int width, int height) // Quality enc_ctx->flags |= CODEC_FLAG_QSCALE; enc_ctx->global_quality = FF_QP2LAMBDA * 23; + av_opt_set(enc_ctx->priv_data, "crf", "23", AV_OPT_SEARCH_CHILDREN); av_log(NULL, AV_LOG_INFO, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); } From 19878a135ee29e8f84ab4be5cfcdbd34336da1c0 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 18:21:21 +0200 Subject: [PATCH 16/49] Adjust the output frame PTS to start from 0 --- cropvid/cropvid.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index b6b4c5c..edcf1c6 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -664,13 +664,13 @@ int main(int argc, char **argv) if (first_frame && pre_keyframe) { av_log(NULL, AV_LOG_INFO, "Writing %i frames from PTS %li for pre-keyframe buffer\n", pre_keyframe, pre_key_pts[0]); for (int i=0; i < pre_keyframe; i++) { - cropped->pts = pre_key_pts[i]; + cropped->pts = pre_key_pts[i] - first_pts; ret = encode_write_frame(cropped, stream_index, &got_frame); } first_frame = 0; - cropped->pts = frame->pts; } + cropped->pts = frame->pts - first_pts; ret = encode_write_frame(cropped, stream_index, &got_frame); av_frame_free(&frame); @@ -711,10 +711,6 @@ int main(int argc, char 
**argv) } } - ofmt_ctx->duration = last_pts - first_pts; - - av_dump_format(ofmt_ctx, 0, argv[2], 1); - av_write_trailer(ofmt_ctx); end: From d4052e351f85146c4409b2dc489ba71e261364aa Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Oct 2017 20:10:27 +0200 Subject: [PATCH 17/49] Quality tweaks --- cropvid/cropvid.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index edcf1c6..4d1fc92 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -178,9 +178,17 @@ static int open_output_file(const char *filename, int width, int height) enc_ctx->framerate = dec_ctx->framerate; // Quality - enc_ctx->flags |= CODEC_FLAG_QSCALE; - enc_ctx->global_quality = FF_QP2LAMBDA * 23; - av_opt_set(enc_ctx->priv_data, "crf", "23", AV_OPT_SEARCH_CHILDREN); + + // Ignored by libx264 + // https://lists.ffmpeg.org/pipermail/ffmpeg-cvslog/2014-March/075524.html + // enc_ctx->flags |= CODEC_FLAG_QSCALE; + // enc_ctx->global_quality = FF_QP2LAMBDA * 23; + + // https://lists.ffmpeg.org/pipermail/libav-user/2015-April/008027.html + ret = av_opt_set(enc_ctx->priv_data, "crf", "27", AV_OPT_SEARCH_CHILDREN); + if (ret == AVERROR_OPTION_NOT_FOUND) { + av_log(NULL, AV_LOG_INFO, "Encoding option crf not found"); + } av_log(NULL, AV_LOG_INFO, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); } From b4c4b59ba204edba69e6497908729b9065157c5c Mon Sep 17 00:00:00 2001 From: root Date: Thu, 19 Oct 2017 12:16:47 +0200 Subject: [PATCH 18/49] Simplify logging --- cropvid/cropvid.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index 4d1fc92..d450f8f 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -187,10 +187,10 @@ static int open_output_file(const char *filename, int width, int height) // https://lists.ffmpeg.org/pipermail/libav-user/2015-April/008027.html ret = av_opt_set(enc_ctx->priv_data, "crf", "27", AV_OPT_SEARCH_CHILDREN); if (ret == AVERROR_OPTION_NOT_FOUND) { - av_log(NULL, AV_LOG_INFO, "Encoding option crf not found"); + av_log(NULL, AV_LOG_ERROR, "Encoding option crf not found (not an H264 encoder?)"); } - av_log(NULL, AV_LOG_INFO, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); + av_log(NULL, AV_LOG_DEBUG, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); } /* Third parameter can be used to pass settings to encoder */ @@ -207,7 +207,6 @@ static int open_output_file(const char *filename, int width, int height) if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - av_log(NULL, AV_LOG_INFO, "Setting stream params\n"); out_stream->time_base = enc_ctx->time_base; out_stream->r_frame_rate = in_stream->r_frame_rate; out_stream->avg_frame_rate = in_stream->avg_frame_rate; @@ -216,7 +215,7 @@ static int open_output_file(const char *filename, int width, int height) if (in_stream->duration > 0) out_stream->duration = in_stream->duration; - av_log(NULL, AV_LOG_INFO, "Output stream timebase for stream %i is %i/%i, duration is %li\n", + av_log(NULL, AV_LOG_DEBUG, "Output stream timebase for stream %i is %i/%i, duration is %li\n", i, out_stream->time_base.num, out_stream->time_base.den, out_stream->duration); stream_ctx[i].enc_ctx = enc_ctx; @@ -576,7 +575,7 @@ int main(int argc, char **argv) // # track4k short.mkv 224 frames (frame top-left-x top-left-y) output frame size 1920 1080 
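 // Each "%*s" in the format string consumes one whitespace-delimited token of the
 // header without assigning it, so this sscanf() extracts only the 4th token (the
 // frame count) and the last two tokens (the output width and height).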
sscanf(line, "%*s %*s %*s %i %*s %*s %*s %*s %*s %*s %*s %i %i", &cropframes, &out_width, &out_height); - av_log(NULL, AV_LOG_INFO, "Crop data: %i frames width %i height %i\n", cropframes, out_width, out_height); + av_log(NULL, AV_LOG_INFO, "\nCrop data: %i frames width %i height %i\n\n", cropframes, out_width, out_height); FrameCrop crop = getCropInfo(cropfile); FrameCrop next_crop = getCropInfo(cropfile); @@ -622,7 +621,7 @@ int main(int argc, char **argv) frame = av_frame_alloc(); if (!frame) { ret = AVERROR(ENOMEM); - av_log(NULL, AV_LOG_INFO, " not a frame\n"); + av_log(NULL, AV_LOG_DEBUG, " not a frame\n"); break; } @@ -650,7 +649,8 @@ int main(int argc, char **argv) break; } } else { - av_log(NULL, AV_LOG_INFO, "Frame %i pts is %li duration %li\n", framecount, frame->pts, frame->pkt_duration); + if ((framecount % 300) == 0) + av_log(NULL, AV_LOG_INFO, "Frame %i pts %li\n", framecount, frame->pts); if (crop_filter_graph == NULL) { crop_filter_graph = init_crop_filter(frame, crop.x, crop.y, out_width, out_height); @@ -670,7 +670,7 @@ int main(int argc, char **argv) // Write out frames for pre-keyframe packets to keep audio timesync for muxing if (first_frame && pre_keyframe) { - av_log(NULL, AV_LOG_INFO, "Writing %i frames from PTS %li for pre-keyframe buffer\n", pre_keyframe, pre_key_pts[0]); + av_log(NULL, AV_LOG_INFO, "Writing %i frames from pts %li for pre-keyframe buffer\n", pre_keyframe, pre_key_pts[0]); for (int i=0; i < pre_keyframe; i++) { cropped->pts = pre_key_pts[i] - first_pts; ret = encode_write_frame(cropped, stream_index, &got_frame); From df336248aed75d45deaa9c5bf0e66318f729dff5 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 19 Oct 2017 16:17:44 +0200 Subject: [PATCH 19/49] Update build script and docs for cropvid --- README.md | 36 ++++++++++++++++++++++++++++++++---- install_track4k.sh | 12 ++++++++++++ 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b3c38f5..7363f27 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,11 @@ Track4K is an open source C++ project that takes a High Definition video of a le These instructions will help get the program and all its dependencies set up on your machine. ### Prerequisites -These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). To be able to run this project, you will need to first install the following dependencies: +These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). Track4K has been tested on Ubuntu 16.04 + + To be able to run this project, you will need to first install the following dependencies: + +* ffmpeg (3.4 or newer) * OpenCV 3 (3.2.0 or future releases) * OpenCV Extra Modules (latest version on repository) * C++ Libraries (6.3 or future releases) @@ -76,7 +80,9 @@ Finally, install these modules by running the following command: $ sudo make install ``` #### Building Track4K + ##### Automatic Method + There is a shell script in the trackhd folder called intall_track4k.sh which can be used to install track4k automatically. To use this script run the following command: @@ -87,6 +93,7 @@ sudo ./install_track4k.sh This will run all the steps listed in the manual method mentioned below. ##### Manual Method + This method is for the case where the automatic method does not work. It does everything the shell script does manually. The trackhd directory should have 2 main folders inside it: source and build. 
The source folder comntains all the header and source files while the build file contains all object files and executables. @@ -101,19 +108,40 @@ Now it is possible to run the build instruction: make -j`number_of_processors` ``` You can now install the project to /usr/local/bin/ by running the following command: + ``` sudo make install ``` + +Then build cropvid: + +``` +cd cropvid +./build.sh +cp cropvid /usr/local/bin/ +``` + #### Running Track4K -Run the program as follows: +Track4K runs in two parts: track4k analyzes a video file and produces a cropping data file in text format. cropvid crops the +video file according to the cropping information in the data file, using ffmpeg libraries. + +``` +$ track4k [FOURCC Codec] +$ cropvid +``` + +Example: + ``` -$ ./track4k `inputVideoFileName.extension` `outputVideoFileName.extension` `outputFrameWidth` `outputFrameHeight` [FOURCC Codec code] +track4k presenter.mkv presenter-crop.txt 1920 1080 +cropvid presenter.mkv tracked.mkv presenter-crop.txt ``` + The FOURCC CODEC parameter is optional (default CODEC is X264). To see all available CODECs, visit [FOURCC](https://www.fourcc.org/codecs.php) -###Memory Requirements +### Memory Requirements The program reads a maximum of 29 frames into memory at a time. So a minimum of 4GB RAM should be sufficient. ## Built With diff --git a/install_track4k.sh b/install_track4k.sh index 13f1761..cc0bb79 100755 --- a/install_track4k.sh +++ b/install_track4k.sh @@ -1,6 +1,8 @@ #!/bin/bash +echo "" echo "Installing TRACK4K" + jFlag="-j" numCores=`cat /proc/cpuinfo | grep processor | wc -l` @@ -20,4 +22,14 @@ make $jFlag$numCores echo "STAGE 4/4: Installing..." sudo make install +echo "" +echo "Building and installing cropvid" +echo "" + +cd ../cropvid +./build.sh +cp cropvid /usr/local/bin/ + echo "Complete!" +echo "" + From c670cfce62ed96ea3020986fbe2acd13fa56aacc Mon Sep 17 00:00:00 2001 From: root Date: Thu, 19 Oct 2017 16:28:20 +0200 Subject: [PATCH 20/49] Drop reference to FOURCC codec for now because it's not used by track4k any longer (encoding handled by cropvid) --- README.md | 5 +---- source/mainDriver.cpp | 8 +++----- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 7363f27..8d764d3 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,7 @@ Track4K runs in two parts: track4k analyzes a video file and produces a cropping video file according to the cropping information in the data file, using ffmpeg libraries. ``` -$ track4k [FOURCC Codec] +$ track4k $ cropvid ``` @@ -138,9 +138,6 @@ track4k presenter.mkv presenter-crop.txt 1920 1080 cropvid presenter.mkv tracked.mkv presenter-crop.txt ``` -The FOURCC CODEC parameter is optional (default CODEC is X264). -To see all available CODECs, visit [FOURCC](https://www.fourcc.org/codecs.php) - ### Memory Requirements The program reads a maximum of 29 frames into memory at a time. So a minimum of 4GB RAM should be sufficient. diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index fd644e2..d6e7c84 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -47,11 +47,9 @@ int main(int argc, char *argv[]) { //Use default codec persistentData.codec = CV_FOURCC('X', '2', '6', '4'); } else { - cerr - << "\ntrack4k build UCT " << __DATE__ << " " << __TIME__ << "\n\n" - << "Parameters:\n track4k [FOURCC Codec]\n\n" - << "See http://www.fourcc.org/codecs.php for available codecs. 
The default codec of X264 for mp4 will be used, if none is specified!\n" - << endl; + cerr << endl + << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl << endl + << "Parameters:" << endl << " track4k " << endl << endl; return -1; } From f8d8d9e4a997d4e4995b212b0401644d34ab76af Mon Sep 17 00:00:00 2001 From: root Date: Sat, 21 Oct 2017 17:44:24 +0200 Subject: [PATCH 21/49] Remove unused code --- cropvid/cropvid.c | 157 ++-------------------------------------------- 1 file changed, 6 insertions(+), 151 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index d450f8f..b71cd63 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -36,19 +36,10 @@ #include #include #include -#include static AVFormatContext *ifmt_ctx; static AVFormatContext *ofmt_ctx; -typedef struct FilteringContext { - AVFilterContext *buffersink_ctx; - AVFilterContext *buffersrc_ctx; - AVFilterGraph *filter_graph; -} FilteringContext; - -static FilteringContext *filter_ctx; - typedef struct StreamContext { AVCodecContext *dec_ctx; AVCodecContext *enc_ctx; @@ -158,21 +149,12 @@ static int open_output_file(const char *filename, int width, int height) return AVERROR(ENOMEM); } - /* In this example, we transcode to same properties (picture size, - * sample rate etc.). These properties can be changed for output - * streams easily using filters */ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + // Set output parameters the same as input params enc_ctx->height = height; enc_ctx->width = width; enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; - - /* take first format from list of supported formats */ - // if (encoder->pix_fmts) - // enc_ctx->pix_fmt = encoder->pix_fmts[0]; - // else enc_ctx->pix_fmt = dec_ctx->pix_fmt; - - // Set all the things the same enc_ctx->time_base = dec_ctx->time_base; enc_ctx->pkt_timebase = dec_ctx->pkt_timebase; enc_ctx->framerate = dec_ctx->framerate; @@ -187,7 +169,7 @@ static int open_output_file(const char *filename, int width, int height) // https://lists.ffmpeg.org/pipermail/libav-user/2015-April/008027.html ret = av_opt_set(enc_ctx->priv_data, "crf", "27", AV_OPT_SEARCH_CHILDREN); if (ret == AVERROR_OPTION_NOT_FOUND) { - av_log(NULL, AV_LOG_ERROR, "Encoding option crf not found (not an H264 encoder?)"); + av_log(NULL, AV_LOG_ERROR, "Encoding option crf not found (not an H264 encoder?)\n"); } av_log(NULL, AV_LOG_DEBUG, "Output CTX timebase for stream %i is %i/%i\n", i, enc_ctx->time_base.num, enc_ctx->time_base.den); @@ -254,129 +236,6 @@ static int open_output_file(const char *filename, int width, int height) return 0; } -static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, - AVCodecContext *enc_ctx, const char *filter_spec) -{ - char args[512]; - int ret = 0; - AVFilter *buffersrc = NULL; - AVFilter *buffersink = NULL; - AVFilterContext *buffersrc_ctx = NULL; - AVFilterContext *buffersink_ctx = NULL; - AVFilterInOut *outputs = avfilter_inout_alloc(); - AVFilterInOut *inputs = avfilter_inout_alloc(); - AVFilterGraph *filter_graph = avfilter_graph_alloc(); - - if (!outputs || !inputs || !filter_graph) { - ret = AVERROR(ENOMEM); - goto end; - } - - if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { - buffersrc = avfilter_get_by_name("buffer"); - buffersink = avfilter_get_by_name("buffersink"); - if (!buffersrc || !buffersink) { - av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n"); - ret = AVERROR_UNKNOWN; - goto end; - } - - snprintf(args, sizeof(args), - 
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", - dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, - dec_ctx->time_base.num, dec_ctx->time_base.den, - dec_ctx->sample_aspect_ratio.num, - dec_ctx->sample_aspect_ratio.den); - - ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", - args, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); - goto end; - } - - ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", - NULL, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); - goto end; - } - - ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", - (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt), - AV_OPT_SEARCH_CHILDREN); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); - goto end; - } - } else { - ret = AVERROR_UNKNOWN; - goto end; - } - - /* Endpoints for the filter graph. */ - outputs->name = av_strdup("in"); - outputs->filter_ctx = buffersrc_ctx; - outputs->pad_idx = 0; - outputs->next = NULL; - - inputs->name = av_strdup("out"); - inputs->filter_ctx = buffersink_ctx; - inputs->pad_idx = 0; - inputs->next = NULL; - - if (!outputs->name || !inputs->name) { - ret = AVERROR(ENOMEM); - goto end; - } - - if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, - &inputs, &outputs, NULL)) < 0) - goto end; - - if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) - goto end; - - /* Fill FilteringContext */ - fctx->buffersrc_ctx = buffersrc_ctx; - fctx->buffersink_ctx = buffersink_ctx; - fctx->filter_graph = filter_graph; - -end: - avfilter_inout_free(&inputs); - avfilter_inout_free(&outputs); - - return ret; -} - -static int init_filters(void) -{ - const char *filter_spec; - unsigned int i; - int ret; - filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx)); - if (!filter_ctx) - return AVERROR(ENOMEM); - - for (i = 0; i < ifmt_ctx->nb_streams; i++) { - filter_ctx[i].buffersrc_ctx = NULL; - filter_ctx[i].buffersink_ctx = NULL; - filter_ctx[i].filter_graph = NULL; - if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) - continue; - - if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) - filter_spec = "null"; /* passthrough (dummy) filter for video */ - - ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx, - stream_ctx[i].enc_ctx, filter_spec); - - if (ret) - return ret; - } - return 0; -} - static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) { int ret; int got_frame_local; @@ -490,13 +349,13 @@ static AVFrame *crop_frame(const AVFrame *in, AVFilterGraph *filter_graph) assert(buffersink_ctx != NULL); av_frame_ref(f, in); + ret = av_buffersrc_add_frame(buffersrc_ctx, f); if (ret < 0) return NULL; + ret = av_buffersink_get_frame(buffersink_ctx, f); if (ret < 0) return NULL; - // avfilter_graph_free(&filter_graph); - return f; } @@ -560,8 +419,6 @@ int main(int argc, char **argv) goto end; if ((ret = open_output_file(argv[2], out_width, out_height)) < 0) goto end; - if ((ret = init_filters()) < 0) - goto end; FILE *cropfile = fopen(argv[3], "r"); /* should check the result */ if (cropfile == NULL) { @@ -617,7 +474,8 @@ int main(int argc, char **argv) pktcount++; - if (filter_ctx[stream_index].filter_graph) { + if (stream_ctx[stream_index].dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { + frame = av_frame_alloc(); if (!frame) { ret = AVERROR(ENOMEM); @@ -728,10 +586,7 @@ int main(int argc, char **argv) 
avcodec_free_context(&stream_ctx[i].dec_ctx); if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx) avcodec_free_context(&stream_ctx[i].enc_ctx); - if (filter_ctx && filter_ctx[i].filter_graph) - avfilter_graph_free(&filter_ctx[i].filter_graph); } - av_free(filter_ctx); av_free(stream_ctx); avformat_close_input(&ifmt_ctx); if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) From ccf085fac9f217a7baedc7af06c0bbd88fe9be24 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 21 Oct 2017 17:58:39 +0200 Subject: [PATCH 22/49] Small log changes --- cropvid/cropvid.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index b71cd63..f8f11bb 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -404,28 +404,22 @@ int main(int argc, char **argv) int out_width, out_height; + av_log(NULL, AV_LOG_INFO, "\ncropvid build %s %s\n", __DATE__, __TIME__); + out_width = 1920; out_height = 1080; if (argc != 4) { - av_log(NULL, AV_LOG_ERROR, "Usage: %s \n", argv[0]); + av_log(NULL, AV_LOG_INFO, "\nUsage: %s \n\n", argv[0]); return 1; } - av_register_all(); - avfilter_register_all(); - - if ((ret = open_input_file(argv[1])) < 0) - goto end; - if ((ret = open_output_file(argv[2], out_width, out_height)) < 0) - goto end; - + // Cropping data FILE *cropfile = fopen(argv[3], "r"); /* should check the result */ if (cropfile == NULL) { av_log(NULL, AV_LOG_ERROR, "Cannot open cropping data file %s\n", argv[3]); goto end; } - char line[256]; fgets(line, sizeof(line), cropfile); @@ -437,6 +431,16 @@ int main(int argc, char **argv) FrameCrop crop = getCropInfo(cropfile); FrameCrop next_crop = getCropInfo(cropfile); + // Input and output videos + av_register_all(); + avfilter_register_all(); + + if ((ret = open_input_file(argv[1])) < 0) + goto end; + if ((ret = open_output_file(argv[2], out_width, out_height)) < 0) + goto end; + + // Process framecount = 0; pktcount = 0; eof = 0; From 8396a755d06d7989120054543bce3cde1d7b1477 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Sun, 22 Oct 2017 20:27:15 +0200 Subject: [PATCH 23/49] Add option to set y crop height. 
Resolves #23 --- source/FileReader.cpp | 5 ++- source/PersistentData.h | 3 ++ source/mainDriver.cpp | 43 ++++++++++++++++------- source/panning/VirtualCinematographer.cpp | 20 ++++++----- source/segmentation/Track4KPreProcess.cpp | 9 +++-- source/segmentation/Track4KPreProcess.h | 2 +- 6 files changed, 54 insertions(+), 28 deletions(-) diff --git a/source/FileReader.cpp b/source/FileReader.cpp index 89eed59..ddb36d6 100644 --- a/source/FileReader.cpp +++ b/source/FileReader.cpp @@ -35,7 +35,7 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) if (!inputVideo.isOpened()) { cout << "Could not open the input video: " << filename << endl; - return -1; + return false; } cout << "Reading video file: " << filename << endl; @@ -64,8 +64,7 @@ bool FileReader::readFile(std::string filename, PersistentData &pD) //Set video file info pD.setVideoInfo(fps, videoDimension, ex); - return 0; - + return true; } //This method returns the next section (where @segSize is in seconds) diff --git a/source/PersistentData.h b/source/PersistentData.h index f680a58..06514d5 100644 --- a/source/PersistentData.h +++ b/source/PersistentData.h @@ -52,6 +52,9 @@ class PersistentData cv::Size panOutputVideoSize = cv::Size(1280, 720); + // Height of the top of the cropping rectangle (-1 if unset) + int y_top = -1; + // Output file (cropping data) std::string outputFile = ""; diff --git a/source/mainDriver.cpp b/source/mainDriver.cpp index d6e7c84..9727c48 100644 --- a/source/mainDriver.cpp +++ b/source/mainDriver.cpp @@ -19,6 +19,8 @@ // #include +#include + #include "segmentation/Track4KPreProcess.h" #include "panning/VirtualCinematographer.h" #include "tracking/MovementDetection.h" @@ -35,24 +37,34 @@ int main(int argc, char *argv[]) { string outputFilename = ""; string inputFileExtension = ""; string outputFileExtension = ""; + string crop_y_str = ""; int cropWidth = 0; int cropHeight = 0; cv::Size saveDimensions; - //Check if input of command line parameters are valid - if (argc == 6) { - string codecInput = argv[5]; - persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); - } else if (argc == 5) { - //Use default codec - persistentData.codec = CV_FOURCC('X', '2', '6', '4'); - } else { + if ((argc < 5) || (argc > 6)) { cerr << endl << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl << endl - << "Parameters:" << endl << " track4k " << endl << endl; - return -1; + << "Usage: track4k [crop-y-top]" << endl << endl; + return EXIT_FAILURE; + } + + cout << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl; + + if (argc == 6) { + try { + persistentData.y_top = stoi(argv[5]); + } catch (...) 
{ + cerr << "Invalid value for crop top y position: " << argv[5] << endl; + return EXIT_FAILURE; + } } + // Unused (until cropvid incorporated into track4k) + // string codecInput = argv[5]; + // persistentData.codec = CV_FOURCC(codecInput[0], codecInput[1], codecInput[2], codecInput[3]); + persistentData.codec = CV_FOURCC('X', '2', '6', '4'); + //Get filenames from the command line and store them inputFilename = argv[1]; outputFilename = argv[2]; @@ -71,13 +83,16 @@ int main(int argc, char *argv[]) { persistentData.panOutputVideoSize = saveDimensions; - cout << "track4k build UCT " << __DATE__ << " " << __TIME__ << endl; - cout << "\n----------------------------------------" << endl; cout << "Stage [1 of 3] - Board Segmentation (skip)" << endl; cout << "----------------------------------------\n" << endl; + Track4KPreProcess pre; - pre.preProcessDriver(persistentData); + + if (!pre.preProcessDriver(persistentData)) { + return EXIT_FAILURE; + } + cout << "\nStage 1 Complete" << endl; cout << "----------------------------------------\n" << endl; @@ -85,9 +100,11 @@ int main(int argc, char *argv[]) { cout << "\n----------------------------------------" << endl; cout << "Stage [2 of 3] - Lecturer Tracking" << endl; cout << "----------------------------------------\n" << endl; + MovementDetection move(persistentData, &r); vector *rR = new vector(); move.getLecturer(rR); + cout << "\nStage 2 Complete" << endl; cout << "----------------------------------------\n" << endl; diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 4a70946..101cc0d 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -38,19 +38,23 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData //Vector of points representing the lecturers positions vector lectPoints; - //Set a fixed y-value for the crop window - long int y_value = 0; + int y = persistentData.y_top; - //Generate this fixed y-value from average y-value of all lecture positions - for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i++) { + if (y < 0) { + //Set a fixed y-value for the crop window + long int y_value = 0; + + //Generate this fixed y-value from average y-value of all lecture positions + for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i++) { y_value += ((persistentData.lecturerTrackedLocationRectangles.at(i).tl().y + (persistentData.lecturerTrackedLocationRectangles.at(i).height / 2))); - } + } - y_value = y_value / persistentData.lecturerTrackedLocationRectangles.size(); + y_value = y_value / persistentData.lecturerTrackedLocationRectangles.size(); - //Add an offset to the y-value - int y = y_value - 500; + //Add an offset to the y-value + y = y_value - 500; + } //Remove every second point as we dont need that accuracy, only general direction of lecturer for (int i = 0; i < persistentData.lecturerTrackedLocationRectangles.size(); i += skipLecturePosition) { diff --git a/source/segmentation/Track4KPreProcess.cpp b/source/segmentation/Track4KPreProcess.cpp index 8dbbf17..35787d1 100644 --- a/source/segmentation/Track4KPreProcess.cpp +++ b/source/segmentation/Track4KPreProcess.cpp @@ -28,16 +28,17 @@ using namespace std; using namespace cv; -void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) +bool Track4KPreProcess::preProcessDriver(PersistentData &persistentData) { - int skip = 1; vector frameVector; //Read in video file FileReader fileReader; - 
fileReader.readFile(persistentData.inputFile, persistentData); + if (!fileReader.readFile(persistentData.inputFile, persistentData)) { + return false; + } //Create objects MotionDetection motionDetection; //Detects and segments overall merged motion over a given number of frames @@ -75,4 +76,6 @@ void Track4KPreProcess::preProcessDriver(PersistentData &persistentData) } fileReader.getInputVideo().release(); + + return true; } diff --git a/source/segmentation/Track4KPreProcess.h b/source/segmentation/Track4KPreProcess.h index db4f71c..11de44d 100644 --- a/source/segmentation/Track4KPreProcess.h +++ b/source/segmentation/Track4KPreProcess.h @@ -34,7 +34,7 @@ class Track4KPreProcess * This is the main method used by the mainDriver class to run the segmentation section * @param persistentData is the link to the central class sharing all data between the different modules. */ - void preProcessDriver(PersistentData &persistentData); + bool preProcessDriver(PersistentData &persistentData); }; From 462cf9a79017c58ca69a0d5734ba09bd9b868294 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 23 Oct 2017 12:26:32 +0200 Subject: [PATCH 24/49] Update cropvid with attribution --- cropvid/cropvid.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index f8f11bb..17e9e2e 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -23,9 +23,11 @@ */ /** - * @file - * API example for demuxing, decoding, filtering, encoding and muxing - * @example transcoding.c + * cropvid.c: Transcode an input video to an ouput video, with frame-by-frame + * cropping and preserving frame timestamps. + * + * Adapted by Stephen Marquard from FFmpeg example: + * https://www.ffmpeg.org/doxygen/3.2/transcoding_8c-example.html */ #include From 6a3ce4e065d756c3430ca4976791c9db23a11f24 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Mon, 23 Oct 2017 19:08:07 +0200 Subject: [PATCH 25/49] First crop co-ordinates start at y=0 instead of correct position Resolves #27 --- source/panning/VirtualCinematographer.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/source/panning/VirtualCinematographer.cpp b/source/panning/VirtualCinematographer.cpp index 101cc0d..094b25d 100644 --- a/source/panning/VirtualCinematographer.cpp +++ b/source/panning/VirtualCinematographer.cpp @@ -103,19 +103,18 @@ int VirtualCinematographer::cinematographerDriver(PersistentData &persistentData << persistentData.panOutputVideoSize.width << " " << persistentData.panOutputVideoSize.height << endl; int last_x = -1; - int last_y =-1; + // Write out the pan x position and the fixed y position for (int i = 0; i < persistentData.processedFrames - 1; i++) { - if ((cropRectangles[i].x != last_x) || (cropRectangles[i].y != last_y)) { - cropdata << i << " " << cropRectangles[i].x << " " << cropRectangles[i].y << endl; + if (cropRectangles[i].x != last_x) { + cropdata << i << " " << cropRectangles[i].x << " " << y << endl; last_x = cropRectangles[i].x; - last_y = cropRectangles[i].y; } } // Always write out the last frame int i = persistentData.processedFrames - 1; - cropdata << i << " " << cropRectangles[i].x << " " << cropRectangles[i].y << endl; + cropdata << i << " " << cropRectangles[i].x << " " << y << endl; // Close all file writers cropdata.close(); From 0edd644004f602a68aa2de564e3677960219b01b Mon Sep 17 00:00:00 2001 From: root Date: Wed, 25 Oct 2017 11:32:47 +0200 Subject: [PATCH 26/49] Handle cases where PTS timestamps in source video are not monotonic. 
To preserve the number of frames, create a synthetic PTS. Eventually the correct PTS will catch up. --- cropvid/cropvid.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index 17e9e2e..6115093 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -280,7 +280,7 @@ static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, in /* prepare packet for muxing */ enc_pkt.stream_index = stream_index; - av_log(NULL, AV_LOG_DEBUG, "Muxing packet pts %li duration %li\n", enc_pkt.pts, enc_pkt.duration); + av_log(NULL, AV_LOG_DEBUG, "Muxing packet pts %li dts %li duration %li\n", enc_pkt.pts, enc_pkt.dts, enc_pkt.duration); /* mux encoded frame */ ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt); @@ -401,8 +401,7 @@ int main(int argc, char **argv) int pre_keyframe; int64_t pre_key_pts[128]; int first_frame; - int64_t first_pts; - int64_t last_pts; + int64_t first_pts, last_pts, current_pts, next_pts; int out_width, out_height; @@ -451,6 +450,8 @@ int main(int argc, char **argv) first_frame = 1; first_pts = -1; last_pts = 0; + current_pts = -1; + next_pts = -1; /* read all packets */ while (!eof) { @@ -542,7 +543,20 @@ int main(int argc, char **argv) first_frame = 0; } - cropped->pts = frame->pts - first_pts; + // Ensure that PTS timestamps are monotonic (always increase) + next_pts = frame->pts; + + if (next_pts > current_pts) { + current_pts = next_pts; + } else { + // Frame PTS has not incremented or gone backwards. This should never be the case except for corrupt media. + av_log(NULL, AV_LOG_ERROR, "Non-monotonic frame pts %li earlier than current pts %li : adjusting to %li\n", frame->pts, current_pts, current_pts+1); + current_pts++; + next_pts = current_pts; + } + + cropped->pts = next_pts - first_pts; + ret = encode_write_frame(cropped, stream_index, &got_frame); av_frame_free(&frame); From 03fc7da5e4f2e69e531323cbf34fbbfb520bd66c Mon Sep 17 00:00:00 2001 From: root Date: Mon, 13 Nov 2017 14:36:03 +0200 Subject: [PATCH 27/49] Set the output container time_base to be the same as the input container, not the input codec. 
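A PTS is only meaningful relative to the time_base of the stream it is stored in, so if the output stream advertised the encoder's time_base while cropvid keeps writing packet timestamps in the input stream's units, players can mis-time every frame. The sketch below is illustrative only; it is not code from this patch and the time-base values are made up. Only av_rescale_q() and AVRational are real FFmpeg API.

```
/* build with: gcc pts_rescale.c -lavutil */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/mathematics.h>   /* av_rescale_q(), AVRational */

int main(void)
{
    /* Hypothetical time bases: a 90 kHz input stream, a 1/1000 output stream. */
    AVRational in_tb  = { 1, 90000 };
    AVRational out_tb = { 1, 1000 };

    int64_t pts_in  = 450000;  /* 5 seconds expressed in the input time base */

    /* This conversion is what would be needed if the two time bases differed. */
    int64_t pts_out = av_rescale_q(pts_in, in_tb, out_tb);

    printf("%lld -> %lld\n", (long long) pts_in, (long long) pts_out);  /* 450000 -> 5000 */
    return 0;
}
```

Keeping out_stream->time_base equal to in_stream->time_base sidesteps that conversion entirely for the timestamps cropvid carries over from the input.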
Resolves #34 --- cropvid/cropvid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cropvid/cropvid.c b/cropvid/cropvid.c index 6115093..6074506 100644 --- a/cropvid/cropvid.c +++ b/cropvid/cropvid.c @@ -191,7 +191,7 @@ static int open_output_file(const char *filename, int width, int height) if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - out_stream->time_base = enc_ctx->time_base; + out_stream->time_base = in_stream->time_base; out_stream->r_frame_rate = in_stream->r_frame_rate; out_stream->avg_frame_rate = in_stream->avg_frame_rate; out_stream->start_time = in_stream->start_time; From 99e6cb4b82805ed37484d82dfa785211ddea230b Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Tue, 13 Feb 2018 15:46:27 +0100 Subject: [PATCH 28/49] Add sample wrapper script and config file for use in an Opencast workflow --- examples/opencast/track4k.json | 7 +++ examples/opencast/track4k.pl | 82 ++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 examples/opencast/track4k.json create mode 100755 examples/opencast/track4k.pl diff --git a/examples/opencast/track4k.json b/examples/opencast/track4k.json new file mode 100644 index 0000000..c5b03a9 --- /dev/null +++ b/examples/opencast/track4k.json @@ -0,0 +1,7 @@ +{ + "em4": { "output-size": "1080p" }, + "m209": { "output-size": "1620p" }, + "hoerilt1": { "output-size": "1080p", "y_top" : 650 }, + "hoerilt2": { "output-size": "1080p", "y_top" : 750 }, + "nlt": { "output-size": "720p" } +} diff --git a/examples/opencast/track4k.pl b/examples/opencast/track4k.pl new file mode 100755 index 0000000..1d2be39 --- /dev/null +++ b/examples/opencast/track4k.pl @@ -0,0 +1,82 @@ +#! /usr/bin/perl + +use strict; + +use WWW::Mechanize; +use Time::Local; +use JSON; + +my $track4k_bin = "/usr/local/bin/track4k"; +my $cropvid_bin = "/usr/local/bin/cropvid"; +my $track4k_cfg = "/opt/opencast/wfexec/track4k.json"; + +my $track_in = $ARGV[0]; +my $track_out = $ARGV[1]; +my $location = $ARGV[2]; + +die "\nSyntax:\n $0 input-file output-file [location]\n\n" if (!defined($track_in) || !defined($track_out)); + +system("/usr/bin/logger","track4k in=$track_in out=$track_out location='$location'") == 0 or die "Cannot log params: $?"; + +my $json = "{}"; + +if (-e $track4k_cfg) { + local $/; #Enable 'slurp' mode + open my $fh, "<", $track4k_cfg; + $json = <$fh>; + close $fh; + # print "JSON config: $json\n"; +} + +my $data = decode_json($json); + +# Default resolution +(my $out_x, my $out_y) = (1920, 1080); + +my $y_top; + +# Set parameters from config +if (defined($data->{$location})) { + my $output_size = $data->{$location}->{'output-size'}; + $y_top = $data->{$location}->{'y_top'}; + + if (defined($output_size)) { + if ($output_size eq "720p") { + ($out_x, $out_y) = (1280, 720); + } + if ($output_size eq "1620p") { + ($out_x, $out_y) = (2880, 1620); + } + } + # print "Config for $location output-size $output_size out-x $out_x out-y $out_y\n"; +} + +# Cropping data +my $cropdata = "/tmp/track4k-$$-crop.txt"; + +# Redirect stdout and stderr +my $system_stdout = "/tmp/track4k-$$-stdout.log"; +my $system_stderr = "/tmp/track4k-$$-stderr.log"; + +open(STDOUT, ">$system_stdout"); +open(STDERR, ">$system_stderr"); + +# Run Track4K +if (defined($y_top)) { + my @args = ($track4k_bin, $track_in, $cropdata, $out_x, $out_y, $y_top); + system(@args) == 0 or die "executing @args failed: $?"; +} else { + my @args = ($track4k_bin, $track_in, $cropdata, $out_x, $out_y); + system(@args) == 0 or 
die "executing @args failed: $?"; +} + +# Run cropvid +my @args = ($cropvid_bin, $track_in, $track_out, $cropdata); +system(@args) == 0 or die "executing @args failed: $?"; + +# Clean up +unlink($cropdata); +unlink($system_stdout); +unlink($system_stderr); + +exit 0; From e30a726e3608c3d8bf0d466f95fb720c367a1b17 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 13 Jun 2018 14:58:10 +0200 Subject: [PATCH 29/49] Example configuration for Galicaster --- examples/galicaster/conf.ini | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 examples/galicaster/conf.ini diff --git a/examples/galicaster/conf.ini b/examples/galicaster/conf.ini new file mode 100644 index 0000000..a90a080 --- /dev/null +++ b/examples/galicaster/conf.ini @@ -0,0 +1,32 @@ +# Galicaster configuration for recording 4K video from Axis 1428 IP camera for Track4K +# https://github.com/teltek/Galicaster/issues/459 +# Requires Galicaster 2.1.0 or later + +[basic] +custom_flavors=presenter4k + +[track1] +name = audio +device = pulse +flavor = presenter +location = alsa_input.usb-BurrBrown_from_Texas_Instruments_USB_AUDIO_CODEC-00.analog-stereo +file = audio.flac +vumeter = True +amplification = 1.0 +player = True +audioencoder = deinterleave name=d d.src_0 ! audioconvert ! flacenc +active = True +delay = 0.2 + +[track2] +name = presenter +device = rtp +flavor = presenter4k +location = rtspt://VENUE-cam01.uct.ac.za/axis-media/media.amp +file = presenter.mkv +cameratype = h264 +audio = False +muxer = matroskamux +caps-preview = video/x-raw,framerate=1/1 +active = True + From 4f1ed5a6aa2058cf62018424d605564bf54c1cb3 Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 13 Jun 2018 15:02:01 +0200 Subject: [PATCH 30/49] Sample Opencast workflow operation and execute service config --- examples/opencast/ingest-track4k.xml | 21 +++++++++++++++++++ ...roject.execute.impl.ExecuteServiceImpl.cfg | 10 +++++++++ 2 files changed, 31 insertions(+) create mode 100644 examples/opencast/ingest-track4k.xml create mode 100644 examples/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg diff --git a/examples/opencast/ingest-track4k.xml b/examples/opencast/ingest-track4k.xml new file mode 100644 index 0000000..6347849 --- /dev/null +++ b/examples/opencast/ingest-track4k.xml @@ -0,0 +1,21 @@ + + + + + /opt/opencast/wfexec/track4k.pl + #{in} #{out} ${event_location} + presenter4k/source + true + tracked.mkv + presenter/source + archive + Track + 2.0 + + + diff --git a/examples/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg b/examples/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg new file mode 100644 index 0000000..2e4a3e7 --- /dev/null +++ b/examples/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg @@ -0,0 +1,10 @@ +# Sample configuration for Opencast Execute Service + +# Load factor +job.load.execute = 1.0 + +# The list of commands, separated by spaces, which may be run by the Execute Service. +# A value of * means any command is allowed. 
+# Default: empty (no commands allowed) +commands.allowed = /opt/opencast/wfexec/track4k.pl + From 97c4a03a317ea4eebc0b80b2994129311b7dd83d Mon Sep 17 00:00:00 2001 From: Stephen Marquard Date: Wed, 13 Jun 2018 15:05:23 +0200 Subject: [PATCH 31/49] Add the tag and untag operations for Opencast example --- examples/opencast/ingest-track4k.xml | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/examples/opencast/ingest-track4k.xml b/examples/opencast/ingest-track4k.xml index 6347849..68c8533 100644 --- a/examples/opencast/ingest-track4k.xml +++ b/examples/opencast/ingest-track4k.xml @@ -1,4 +1,5 @@ - + + + + + + */* + +archive + + + + + + + presenter4k/source + -archive + + + From 652ce6394d2e18fd20b2f00c4f1aa75db71a8174 Mon Sep 17 00:00:00 2001 From: maxtrix Date: Mon, 25 Jun 2018 10:31:23 +0200 Subject: [PATCH 32/49] Update of Readme.md Documentation update to include the instructions about how to build and install with all the dependencies in Ubuntu 16.04 --- README.md | 113 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 80 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 8d764d3..931f40c 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,10 @@ Track4K is an open source C++ project that takes a High Definition video of a le ## Getting Started These instructions will help get the program and all its dependencies set up on your machine. +> Please take note that this installation guide It was made for use under Ubuntu 16.04, some changes may apply for other distributions or Ubuntu variations. + ### Prerequisites -These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). Track4K has been tested on Ubuntu 16.04 +These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). **Track4K has been tested on Ubuntu 16.04** To be able to run this project, you will need to first install the following dependencies: @@ -16,74 +18,117 @@ These instructions are written with the assumption that the project will be inst * CMake (3.8.0 or future releases) * git (2.10.2 or future releases) -### Installation -#### Downloading and Installing base dependencies -The first on the install list (and most important) is CMake, followed by git and C++. -The following terminal command will get and install the necessary requirements + +## Installation of the requirements + + +First go to any folder to work with the files that will be downloaded and installed. Next, install the dependencies in the order are written + +### Basic libraries ``` -$ sudo apt-get install cmake git build-essential libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev +$sudo apt update +$sudo apt install git build-essential libgtk2.0-dev pkg-config ``` -#### Downloading and Installing the OpenCV libraries -The next step is to download and install the OpenCV libraries. -The necessary OpenCV library comes in two components. First download the core OpenCV library. Choose any directory as your download destination directory. 
-Clone OpenCV from Git as follows:
+
+### CMAKE
+
```
-$ cd `your_chosen_working_directory`
-$ git clone https://github.com/opencv/opencv
+$ wget https://cmake.org/files/v3.11/cmake-3.11.4-Linux-x86_64.sh
+$ sudo mkdir /opt/cmake
+$ sudo sh cmake-3.11.4-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
+$ sudo update-alternatives --install /usr/bin/cmake cmake /opt/cmake/bin/cmake 1 --force
+```
+
+### C and C++
+The C and C++ compilers in Ubuntu's official repositories are older than required; you need GCC and G++ version 6.3 or newer.
+
+#### Install the repository with the updated versions of C and C++
```
-Next, repeat the process for the Extra modules. Remain in the same working directory and execute the following terminal command:
```
-$ git clone https://github.com/opencv/opencv_contrib
+$ sudo add-apt-repository ppa:ubuntu-toolchain-r/test
+$ sudo apt update
```
+
+#### C Libraries installation
+```
+$ sudo apt install gcc-7
+$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-7 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-7 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-7
+$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-5 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-5 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-5
+```
+
+#### C++ Libraries installation
+```
+sudo apt install g++-7
+sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 60
+```
+
+### Install FFMPEG 3
+
+#### FFMPEG main application
+```
+$ sudo add-apt-repository ppa:jonathonf/ffmpeg-3
+$ sudo apt update
+$ sudo apt install ffmpeg
```
-You should now have two folders in your working directory.
-The next step is to build OpenCV.
-#### Building the OpenCV library
-Your Chosen directory now contains two folders, opencv and opencv_contrib. The opencv folder contains the main OpenCV libraries and opencv_contib contains the extra modules.
+#### FFMPEG Development libraries
+```
+$ sudo apt install libavcodec-dev libavformat-dev libavfilter-dev
+$ sudo apt install libx265-dev libx264-dev libvpx-dev libbz2-dev libvdpau-dev libva-dev liblzma-dev
+```
+## Installation of Track4K
+
+#### Clone the repositories:
```
-$ cd `your_chosen_working_directory`
+$ git clone https://github.com/opencv/opencv
+$ git clone https://github.com/opencv/opencv_contrib
+$ git clone https://github.com/cilt-uct/trackhd.git
```
-Inside the main OpenCV folder, change directory into the build folder (create one if it does not exist) and remove all files, since it will require rebuilding. To rebuild OpenCV run the following command from within the build folder:
+#### Install OpenCV
+
+> **Note: Track4K works with version 3.4 of OpenCV; the 4.0 prerelease causes compilation errors.**
+
+In the OpenCV directory, change to the 3.4 branch and then build it:
```
+$ cd opencv
+$ git checkout --track remotes/origin/3.4
+$ mkdir build
+$ cd build
$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
```
-This step will generate a MakeFile. Once complete perform the following command to run make faster (the number after the j-flag is the number of processors the job will use). If you are not sure how many processors the machine has use the following instruction to find out:
+Once complete perform the following command to run make faster (the number after the j-flag is the number of processors the job will use).
If you are not sure how many processors the machine has, use the following instruction to find out:
```
-cat/proc/cpuinfo | grep processor | wc -l
+$ cat /proc/cpuinfo | grep processor | wc -l
```
+
Use the result from this in the j-flag
```
$ make -j`processor_count`
```
-Remain in the build folder and run the following cmake command to make the extra modules.
-The path decribed below is an example. Fill in the directory path on your machine which points to the OpenCV Extra modules folder.
-```
-cmake -DOPENCV_EXTRA_MODULES_PATH=`OpenCV_Extra_Modules_Folder_Path`/modules ../
-```
-Next step is to make these files:
+Remain in the build folder and run the following cmake command to make the extra modules. The path described below is an example. Fill in the directory path on your machine which points to the OpenCV Extra modules folder.
```
-$ make -j8
+$ cmake -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ../
```
-Finally, install these modules by running the following command:
+After that, compile and install the files:
+
```
+$ make -j`processor_count`
$ sudo make install
```
+
#### Building Track4K
##### Automatic Method
-There is a shell script in the trackhd folder called intall_track4k.sh which can be used to install track4k automatically.
+There is a shell script in the trackhd folder called `install_track4k.sh` which can be used to install track4k automatically.
To use this script run the following command:
```
@@ -102,6 +147,7 @@ The first step is to navigate into the build folder. Once inside run delete all
```
cmake ../source
```
+
Now it is possible to run the build instruction:
```
@@ -131,7 +177,7 @@ $ track4k
$ cropvid
```
-Example:
+**Example:**
```
track4k presenter.mkv presenter-crop.txt 1920 1080
@@ -143,7 +189,8 @@ The program reads a maximum of 29 frames into memory at a time. So a minimum of
## Built With
-* [OpenCV](http://www.opencv.org) - The computer vision library of choice
+[OpenCV](http://www.opencv.org) - The computer vision library of choice
+[FFmpeg](https://www.ffmpeg.org) - A complete, cross-platform solution to record, convert and stream audio and video.
## License

From a083f8b204374b4e3f102e9b9d336d3090f2eac6 Mon Sep 17 00:00:00 2001
From: Maximiliano Lira Del Canto
Date: Tue, 10 Jul 2018 15:38:21 +0200
Subject: [PATCH 33/49] Added ssh_track4k.py script and workflow examples to work with this script.
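Illustrative usage of the new wrapper (the file names below are placeholders, and the {{IP_Address}}, {{Machine_Username}} and {{Machine_Password}} placeholders inside ssh_track4k.py must be filled in before the first run):

```
$ ./ssh_track4k.py presenter.mkv tracked.mkv 1920 1080 txt   # remote track4k writes tracked.mkv.txt, then cropvid produces tracked.mkv
$ ./ssh_track4k.py presenter.mkv tracked 1920 1080 json      # remote track4k only; tracking data is written to tracked.json
```

The included Opencast workflows invoke the script in the same way through the execute-once operation.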
--- utils/assets/track4k-opencast-example.png | Bin 0 -> 30342 bytes utils/opencast_workflows/fast-4k.xml | 157 ++++++++++++++++++++++ utils/opencast_workflows/track4k.xml | 89 ++++++++++++ utils/readme.md | 59 ++++++++ utils/ssh_track4k.py | 73 ++++++++++ 5 files changed, 378 insertions(+) create mode 100644 utils/assets/track4k-opencast-example.png create mode 100644 utils/opencast_workflows/fast-4k.xml create mode 100644 utils/opencast_workflows/track4k.xml create mode 100644 utils/readme.md create mode 100755 utils/ssh_track4k.py diff --git a/utils/assets/track4k-opencast-example.png b/utils/assets/track4k-opencast-example.png new file mode 100644 index 0000000000000000000000000000000000000000..ffe528012f25f7d6a30024d461720b3209a9d078 GIT binary patch literal 30342
[base85-encoded binary PNG data omitted: track4k-opencast-example.png screenshot, 30342 bytes]
diff --git a/utils/opencast_workflows/fast-4k.xml b/utils/opencast_workflows/fast-4k.xml new file mode 100644 --- /dev/null +++ b/utils/opencast_workflows/fast-4k.xml @@ -0,0 +1,157 @@
+ + + fast4k + Fast Test 4k Tracker system + + upload + + Test the Track4K implementation + + +
+ Hint: Media Library is the standard publication channel and is automatically selected
+ +
+ Choose additional publication channels +
    +
  • + + +
  • +
  • + + +
  • +
  • + + + +
    + Choose layout for above selected channels (used if two video streams exist) +
      +
    • + + +
    • +
    • + + +
    • +
    +
    + +
  • +
+
+ +
+ 4K Video options +
    +
  • + + +
  • +
  • + + +
  • +
  • + + +
  • +
+
+ +
+ Do you need to trim?
    +
  • + + +
  • +
+
+ + + + + + + + + + + ]]> +
+ + + + + + + + track4k + + + + + + + + +
diff --git a/utils/opencast_workflows/track4k.xml b/utils/opencast_workflows/track4k.xml new file mode 100644 index 0000000..db6d831 --- /dev/null +++ b/utils/opencast_workflows/track4k.xml @@ -0,0 +1,89 @@ + + + + uzk-track4k + Track the presenter by cropping the 4K Video + + + + + + + + + + presenter/source + presenter4k/source + + + + + + + + + /etc/opencast/ssh_track4k.py + #{in} #{out} 1920 1080 txt + presenter4k/source + true + tracked.mp4 + presenter/source + archive + Track + 2.0 + + + + + + + + /etc/opencast/ssh_track4k.py + #{in} #{out} 1920 1080 json + presenter/source + trackhd.json + presenter/trackhd + engage-download,engage-streaming,archive + Attachment + + + + + + + + */* + +archive + + + + + + + + + diff --git a/utils/readme.md b/utils/readme.md new file mode 100644 index 0000000..296d6e6 --- /dev/null +++ b/utils/readme.md @@ -0,0 +1,59 @@ +# Track4K Utilities + +In this folder you can find different utilities to work with Track4K, the utilities availible at the moment are: + +* **ssh_track4k.py** : Python3 script built with Paramiko to execute remotely Track4K +* **opencast_workflows** : Samples of workflows to work with ssh_track4k.py + + +## ssh_track4k + +The idea behind ssh_track4k is because Track4 and cropvid were built to work in Ubuntu systems and it was need to run this programs over other Linux machines like CentOS or Debian based systems. + +ssh_track4k makes possible to work with any other machine without installing dependencies not supported officially for the distribution or making custom builds that might be very troublesome. + +One of the uses of ssh_track4k is to be part of an Opencast workflow, the next image shows how works: + +![ssh_track4K example](/utils/assets/track4k-opencast-example.png) + +### Installation + +Simply you need to have this dependencies: + +* Python 3 or newer +* Argparse and Paramiko libraries (Can be installed using PiP) + +**Important:** Before the first execution, you need to: + +* Put the *IP*, the *username* and the *password* of the machine that has track4K and cropvid installed inside the script. +* If you use with an NFS share, you need to use the same username, group, uid and gid of the same user that will work with the processed assets. + + + + +### Usage +The options to use ssh_track4k are: + +``` +usage: ssh_track4k.py [-h] + input_file output_file width_out height_out {txt,json} + +Executes track4K and cropvid in a remote machine + +positional arguments: + input_file Input filename + output_file Name of the output file + width_out Output width of the video + height_out Output height of the video + {txt,json} Mode of the tracking, txt mode: Track + Video Crop. json mode: + Only Track in JSON format for use in applications that can use + that info + +optional arguments: + -h, --help show this help message and exit +``` + + +## Opencast Workflows + +This workflows for opencast are a samples they how will work with **ssh_track4K**, you only have to remember to install and allow this script in each admin and worker. node of opencast. diff --git a/utils/ssh_track4k.py b/utils/ssh_track4k.py new file mode 100644 index 0000000..7e3b7b9 --- /dev/null +++ b/utils/ssh_track4k.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. 
+# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + +import argparse +import paramiko +import os +import sys + + +parser = argparse.ArgumentParser(description='Executes track4K and cropvid in a remote machine') + +#Argparsers arguments and description + +parser.add_argument('input_file', type=str, + help ='Input filename') + +parser.add_argument('output_file', type=str, + help='Name of the output file') + +parser.add_argument('width_out', type=str, + help ='Output width of the video') + +parser.add_argument('height_out', type=str, + help ='Output height of the video') + +parser.add_argument('track_mode', type=str, choices=['txt', 'json'], + help='Mode of the tracking, txt mode: Track + Video Crop. json mode: Only Track in JSON format for use in applications that can use that info') + + +args = parser.parse_args() + +ssh = paramiko.SSHClient() +ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + +# Connection to the remote machine +ssh.connect({{IP_Address}}, port=22, username = {{Machine_Username}}, password ={{Machine_Password}}) + + +# Track4K in cropping mode +if args.track_mode == 'txt': + cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.txt' + ' ' + args.width_out + ' ' + args.height_out + print (cmd) + stdin, stdout, stderr = ssh.exec_command(cmd) + for line in stdout: + print('... ' + line.strip('\n')) + for line in stderr: + print('... ' + line.strip('\n')) + + cmd = '/usr/local/bin/cropvid ' + args.input_file + ' ' + args.output_file + ' ' + args.output_file + '.txt' + stdin, stdout, stderr = ssh.exec_command(cmd) + for line in stdout: + print('... ' + line.strip('\n')) + for line in stderr: + print('... ' + line.strip('\n')) + + +# Track4K in virtual cropping mode (Creates a JSON file with the tracking position) +if args.track_mode == 'json': + cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.json' + ' ' + args.width_out + ' ' + args.height_out + stdin, stdout, stderr = ssh.exec_command(cmd) + for line in stdout: + print('... ' + line.strip('\n')) + for line in stderr: + print('... 
' + line.strip('\n')) + + +# Close the SSH pipe after finishing +ssh.close() +print('Cropped video ready') +exit() From ad19c2d2eeecccc9e8f55e6b4ff8cddf6498d392 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto <8040628+mliradelc@users.noreply.github.com> Date: Tue, 10 Jul 2018 15:42:37 +0200 Subject: [PATCH 34/49] cleaned code --- utils/opencast_workflows/track4k.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/opencast_workflows/track4k.xml b/utils/opencast_workflows/track4k.xml index db6d831..c01f567 100644 --- a/utils/opencast_workflows/track4k.xml +++ b/utils/opencast_workflows/track4k.xml @@ -1,7 +1,7 @@ - uzk-track4k + track4k Track the presenter by cropping the 4K Video @@ -27,7 +27,7 @@ if="${crop4k}" fail-on-error="true" retry-strategy="hold" - exception-handler-workflow="uzk-partial-error" + exception-handler-workflow="partial-error" description="Track4K TXT mode"> /etc/opencast/ssh_track4k.py @@ -48,7 +48,7 @@ id="execute-once" if="${track4k}" fail-on-error="true" - exception-handler-workflow="uzk-partial-error" + exception-handler-workflow="partial-error" description="Track4K JSON mode"> /etc/opencast/ssh_track4k.py From 218b5e4340ea2a42fedde76d1df3a548c95818c7 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Tue, 10 Jul 2018 15:52:19 +0200 Subject: [PATCH 35/49] typing mistakes --- utils/opencast_workflows/fast-4k.xml | 2 +- utils/opencast_workflows/track4k.xml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/opencast_workflows/fast-4k.xml b/utils/opencast_workflows/fast-4k.xml index ec9dd34..b6599a5 100644 --- a/utils/opencast_workflows/fast-4k.xml +++ b/utils/opencast_workflows/fast-4k.xml @@ -142,7 +142,7 @@ track4k diff --git a/utils/opencast_workflows/track4k.xml b/utils/opencast_workflows/track4k.xml index db6d831..01fed9b 100644 --- a/utils/opencast_workflows/track4k.xml +++ b/utils/opencast_workflows/track4k.xml @@ -1,7 +1,7 @@ - uzk-track4k + track4k Track the presenter by cropping the 4K Video @@ -27,7 +27,7 @@ if="${crop4k}" fail-on-error="true" retry-strategy="hold" - exception-handler-workflow="uzk-partial-error" + exception-handler-workflow="partial-error" description="Track4K TXT mode"> /etc/opencast/ssh_track4k.py @@ -48,7 +48,7 @@ id="execute-once" if="${track4k}" fail-on-error="true" - exception-handler-workflow="uzk-partial-error" + exception-handler-workflow="partial-error" description="Track4K JSON mode"> /etc/opencast/ssh_track4k.py @@ -74,7 +74,7 @@ - + From e34f4a0bf4741af5f6c87e3947f2259192080914 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Fri, 13 Jul 2018 12:03:48 +0200 Subject: [PATCH 36/49] Created some python scripts for testing features --- utils/shellhandler.py | 68 ++++++++++++++++++++ utils/ssh_interactiveShell.py | 60 ++++++++++++++++++ utils/ssh_track4k_v2.py | 115 ++++++++++++++++++++++++++++++++++ 3 files changed, 243 insertions(+) create mode 100644 utils/shellhandler.py create mode 100644 utils/ssh_interactiveShell.py create mode 100644 utils/ssh_track4k_v2.py diff --git a/utils/shellhandler.py b/utils/shellhandler.py new file mode 100644 index 0000000..f56740a --- /dev/null +++ b/utils/shellhandler.py @@ -0,0 +1,68 @@ +import paramiko +import os +import sys +import re + + +class ShellHandler: + + def __init__(self, host, user, psw): + self.ssh = paramiko.SSHClient() + self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh.connect(host, username=user, password=psw, port=22) + + channel = 
self.ssh.invoke_shell() + self.stdin = channel.makefile('wb') + self.stdout = channel.makefile('r') + + def __del__(self): + self.ssh.close() + + def execute(self, cmd): + """ + + :param cmd: the command to be executed on the remote computer + :examples: execute('ls') + execute('finger') + execute('cd folder_name') + """ + cmd = cmd.strip('\n') + self.stdin.write(cmd + '\n') + finish = 'end of stdOUT buffer. finished with exit status' + echo_cmd = 'echo {} $?'.format(finish) + self.stdin.write(echo_cmd + '\n') + shin = self.stdin + self.stdin.flush() + + shout = [] + sherr = [] + exit_status = 0 + for line in self.stdout: + if str(line).startswith(cmd) or str(line).startswith(echo_cmd): + # up for now filled with shell junk from stdin + shout = [] + elif str(line).startswith(finish): + # our finish command ends with the exit status + exit_status = int(str(line).rsplit(maxsplit=1)[1]) + if exit_status: + # stderr is combined with stdout. + # thus, swap sherr with shout in a case of failure. + sherr = shout + shout = [] + break + else: + # get rid of 'coloring and formatting' special characters + shout.append(re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]').sub('', line). + replace('\b', '').replace('\r', '')) + + # first and last lines of shout/sherr contain a prompt + if shout and echo_cmd in shout[-1]: + shout.pop() + if shout and cmd in shout[0]: + shout.pop(0) + if sherr and echo_cmd in sherr[-1]: + sherr.pop() + if sherr and cmd in sherr[0]: + sherr.pop(0) + + return shin, shout, sherr diff --git a/utils/ssh_interactiveShell.py b/utils/ssh_interactiveShell.py new file mode 100644 index 0000000..41eb386 --- /dev/null +++ b/utils/ssh_interactiveShell.py @@ -0,0 +1,60 @@ +import threading, paramiko + +class ssh: + shell = None + client = None + transport = None + + def __init__(self, address, username, password): + print("Connecting to server on ip", str(address) + ".") + self.client = paramiko.client.SSHClient() + self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy()) + self.client.connect(address, username=username, password=password, look_for_keys=False) + self.transport = paramiko.Transport((address, 22)) + self.transport.connect(username=username, password=password) + + thread = threading.Thread(target=self.process) + thread.daemon = True + thread.start() + + def closeConnection(self): + if(self.client != None): + self.client.close() + self.transport.close() + + def openShell(self): + self.shell = self.client.invoke_shell() + + def sendShell(self, command): + if(self.shell): + self.shell.send(command + "\n") + else: + print("Shell not opened.") + + def process(self): + global connection + while True: + # Print data when available + if self.shell != None and self.shell.recv_ready(): + alldata = self.shell.recv(1024) + while self.shell.recv_ready(): + alldata += self.shell.recv(1024) + strdata = str(alldata, "utf8") + strdata.replace('\r', '') + print(strdata, end = "") + if(strdata.endswith("$ ")): + print("\n$ ", end = "") + + +sshUsername = "opencast" +sshPassword = "opencast" +sshServer = "134.95.68.60" + + +connection = ssh(sshServer, sshUsername, sshPassword) +connection.openShell() +while True: + command = input('$ ') + if command.startswith(" "): + command = command[1:] + connection.sendShell(command) diff --git a/utils/ssh_track4k_v2.py b/utils/ssh_track4k_v2.py new file mode 100644 index 0000000..84e9156 --- /dev/null +++ b/utils/ssh_track4k_v2.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote 
machine. +# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + +import argparse +import paramiko +import os +import sys +import re + +parser = argparse.ArgumentParser(description='Executes track4K and cropvid in a remote machine') + +#Argparsers arguments and description + +parser.add_argument('input_file', type=str, + help ='Input filename') + +parser.add_argument('output_file', type=str, + help='Name of the output file') + +parser.add_argument('width_out', type=str, + help ='Output width of the video') + +parser.add_argument('height_out', type=str, + help ='Output height of the video') + +parser.add_argument('track_mode', type=str, choices=['txt', 'json'], + help='Mode of the tracking, txt mode: Track + Video Crop. json mode: Only Track in JSON format for use in applications that can use that info') + + +args = parser.parse_args() + +class ShellHandler: + + def __init__(self, host, user, psw): + self.ssh = paramiko.SSHClient() + self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh.connect(host, username=user, password=psw, port=22) + + channel = self.ssh.invoke_shell() + self.stdin = channel.makefile('wb') + self.stdout = channel.makefile('r') + + def __del__(self): + self.ssh.close() + + def execute(self, cmd): + """ + + :param cmd: the command to be executed on the remote computer + :examples: execute('ls') + execute('finger') + execute('cd folder_name') + """ + cmd = cmd.strip('\n') + self.stdin.write(cmd + '\n') + finish = 'end of stdOUT buffer. finished with exit status' + echo_cmd = 'echo {} $?'.format(finish) + self.stdin.write(echo_cmd + '\n') + shin = self.stdin + self.stdin.flush() + + shout = [] + sherr = [] + exit_status = 0 + for line in self.stdout: + if str(line).startswith(cmd) or str(line).startswith(echo_cmd): + # up for now filled with shell junk from stdin + shout = [] + elif str(line).startswith(finish): + # our finish command ends with the exit status + exit_status = int(str(line).rsplit(maxsplit=1)[1]) + if exit_status: + # stderr is combined with stdout. + # thus, swap sherr with shout in a case of failure. + sherr = shout + shout = [] + break + else: + # get rid of 'coloring and formatting' special characters + shout.append(re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]').sub('', line). 
+ replace('\b', '').replace('\r', '')) + + # first and last lines of shout/sherr contain a prompt + if shout and echo_cmd in shout[-1]: + shout.pop() + if shout and cmd in shout[0]: + shout.pop(0) + if sherr and echo_cmd in sherr[-1]: + sherr.pop() + if sherr and cmd in sherr[0]: + sherr.pop(0) + + return shin, shout, sherr + +shell = ShellHandler('134.95.68.60', 'opencast', 'opencast') + +if args.track_mode == 'txt': + cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.txt' + ' ' + args.width_out + ' ' + args.height_out + track = shell.execute(cmd) + print(type(track)) + + cmd = '/usr/local/bin/cropvid ' + args.input_file + ' ' + args.output_file + ' ' + args.output_file + '.txt' + track = shell.execute(cmd) + + +# Track4K in virtual cropping mode (Creates a JSON file with the tracking position) +if args.track_mode == 'json': + cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.json' + ' ' + args.width_out + ' ' + args.height_out + track = shell.execute(cmd) + print(type(track)) +# Close the SSH pipe after finishing +print('Cropped video ready') +exit() From 8243715cfe78407e67ddaa5254e36e7530f32c74 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Fri, 13 Jul 2018 12:05:37 +0200 Subject: [PATCH 37/49] Update to gitignore file --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index f72abb1..6e92899 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ CMakeCache.txt Makefile boardCropCoordinates.txt Track4K +*.pyc +__pycache__/ From deb489a5a806c7960c24241c47bfab7e3cc618ed Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Mon, 16 Jul 2018 11:18:45 +0200 Subject: [PATCH 38/49] Solved error in trackhd_server.py --- utils/Pyro/trackhd_server.py | 42 ++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 utils/Pyro/trackhd_server.py diff --git a/utils/Pyro/trackhd_server.py b/utils/Pyro/trackhd_server.py new file mode 100644 index 0000000..fcabe41 --- /dev/null +++ b/utils/Pyro/trackhd_server.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. 
+# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + + + +#import Pyro4 +import subprocess +import shlex + +#@Pyro4.expose +#@Pyro4.behavior(instance_mode = 'single') + + +class trackhd: + + def track4k(self, input_file, output_file, width, height, mode): + if mode == 'txt': + output_file = output_file + '.txt' + else: + output_file = output_file + '.json' + cmd = ['/usr/local/bin/track4k', input_file, output_file, width, height] + while True: + print('En proceso JSON') + app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) + print(app.stdout) + if app.returncode == 0: + break + return [app.returncode, app.stdout] + + def cropvid(self, input_file, output_file, track_file): + cmd = ['/usr/local/bin/cropvid', input_file, output_file, track_file] + while True: + app = subprocess.run(cmd, stdout=subprocess.PIPE ) + if app.returncode == 0: + break + return [app.returncode, app.stdout] + + +app = trackhd() +app.track4k(input_file='/mnt/opencast/4k_sample/presenter.mkv',output_file='/mnt/opencast/4k_sample/tracked.mkv',width='1920', height='1080', mode='json') From 44a5949dd57dc7f7bf128c2e3b0114843e2e7e87 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Thu, 19 Jul 2018 16:53:02 +0200 Subject: [PATCH 39/49] Re-organized folders, uploaded new approach (Pyro method) to not to use SSH. --- utils/Pyro/trackhd_server.py | 42 ------------ utils/Track4KPyro/trackhd_client.py | 44 +++++++++++++ utils/Track4KPyro/trackhd_server.py | 82 ++++++++++++++++++++++++ utils/deprecated/readme.md | 50 +++++++++++++++ utils/{ => deprecated}/ssh_track4k.py | 0 utils/{ => deprecated}/ssh_track4k_v2.py | 4 +- utils/opencast_workflows/fast-4k.xml | 26 ++++++-- utils/opencast_workflows/track4k.xml | 55 ++++++++-------- utils/readme.md | 59 +++++++++++------ utils/shellhandler.py | 68 -------------------- utils/ssh_interactiveShell.py | 60 ----------------- 11 files changed, 270 insertions(+), 220 deletions(-) delete mode 100644 utils/Pyro/trackhd_server.py create mode 100644 utils/Track4KPyro/trackhd_client.py create mode 100644 utils/Track4KPyro/trackhd_server.py create mode 100644 utils/deprecated/readme.md rename utils/{ => deprecated}/ssh_track4k.py (100%) rename utils/{ => deprecated}/ssh_track4k_v2.py (97%) delete mode 100644 utils/shellhandler.py delete mode 100644 utils/ssh_interactiveShell.py diff --git a/utils/Pyro/trackhd_server.py b/utils/Pyro/trackhd_server.py deleted file mode 100644 index fcabe41..0000000 --- a/utils/Pyro/trackhd_server.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python3 -# Track4k Script to execute Track4K and cropvid from a -# remote machine. 
-# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany - - - -#import Pyro4 -import subprocess -import shlex - -#@Pyro4.expose -#@Pyro4.behavior(instance_mode = 'single') - - -class trackhd: - - def track4k(self, input_file, output_file, width, height, mode): - if mode == 'txt': - output_file = output_file + '.txt' - else: - output_file = output_file + '.json' - cmd = ['/usr/local/bin/track4k', input_file, output_file, width, height] - while True: - print('En proceso JSON') - app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) - print(app.stdout) - if app.returncode == 0: - break - return [app.returncode, app.stdout] - - def cropvid(self, input_file, output_file, track_file): - cmd = ['/usr/local/bin/cropvid', input_file, output_file, track_file] - while True: - app = subprocess.run(cmd, stdout=subprocess.PIPE ) - if app.returncode == 0: - break - return [app.returncode, app.stdout] - - -app = trackhd() -app.track4k(input_file='/mnt/opencast/4k_sample/presenter.mkv',output_file='/mnt/opencast/4k_sample/tracked.mkv',width='1920', height='1080', mode='json') diff --git a/utils/Track4KPyro/trackhd_client.py b/utils/Track4KPyro/trackhd_client.py new file mode 100644 index 0000000..31b31bc --- /dev/null +++ b/utils/Track4KPyro/trackhd_client.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. For Opencast execution +# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + + +import sys +import Pyro4 +import Pyro4.util +import argparse + +sys.excepthook = Pyro4.util.excepthook + +parser = argparse.ArgumentParser(description='Executes track4K and cropvid in a remote machine') + +#Argparsers arguments and description + +parser.add_argument('input_file', type=str, + help ='Input filename') + +parser.add_argument('output_file', type=str, + help='Name of the output file') + +parser.add_argument('width_out', type=str, + help ='Output width of the video') + +parser.add_argument('height_out', type=str, + help ='Output height of the video') + +parser.add_argument('track_mode', type=str, choices=['txt', 'json'], + help='Mode of the tracking, txt mode: Track + Video Crop. json mode: Only Track in JSON format for use in applications that can use that info') + + +args = parser.parse_args() + + +# Configure IP and port of the TrackHD serverself. +uri = 'PYRO:trackhd.prototype@:' +trackhd = Pyro4.Proxy(uri) + + +#Run the application +app = trackhd +app.track4k(args.input_file, args.output_file, args.width_out, args.height_out, args.track_mode) diff --git a/utils/Track4KPyro/trackhd_server.py b/utils/Track4KPyro/trackhd_server.py new file mode 100644 index 0000000..3b1d143 --- /dev/null +++ b/utils/Track4KPyro/trackhd_server.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. To run in a standalone server (With Track4K and Cropvid installed). 
+# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + + + +import Pyro4 +import subprocess +import os + +@Pyro4.expose +@Pyro4.behavior(instance_mode = 'single') +class trackhd: + + def cropvid(self, input_file, output_file, track_file): + cmd = ['/usr/local/bin/cropvid', input_file, output_file, track_file] + while True: + app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) + if app.returncode == 0: + break + return [app.returncode, app.stdout] + + def track4k(self, input_file, output_file, width, height, mode): + if mode == 'txt': + output_track = output_file + '.txt' + else: + output_track = 'trackhd.json' + cmd = ['/usr/local/bin/track4k', input_file, output_track, width, height] + while True: + print('Processing video') + app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) + print(app.stdout) + if app.returncode == 0: + break + if mode == 'txt': + print('Executing Cropvid') + self.cropvid(input_file, output_file, output_track) + print('Crop succesfull, output file: ' + output_file) + return [app.returncode, app.stdout] + + + +# def getNS(): +# """ +# Return a Pyro name server proxy. If there is no name server running, +# start one on 0.0.0.0 (all interfaces), as a background process. +# +# """ +# import Pyro4 +# try: +# return Pyro4.locateNS() +# except Pyro4.errors.NamingError: +# print("Pyro name server not found; starting a new one") +# os.system("python3 -m Pyro4.naming -n 0.0.0.0 -p 15236 &") +# # TODO: spawn a proper daemon ala http://code.activestate.com/recipes/278731/ ? +# # like this, if there's an error somewhere, we'll never know... (and the loop +# # below will block). And it probably doesn't work on windows, either. +# while True: +# try: +# return Pyro4.locateNS() +# except: +# pass + +def main(): + +# getNS() + + Pyro4.Daemon.serveSimple( + { + trackhd: "trackhd.prototype" + }, + host = '0.0.0.0', + port = 15236, + ns = False) + +if __name__=="__main__": + main() + + +#app = trackhd() +#app.track4k(input_file='/mnt/opencast/4k_sample/presenter.mkv',output_file='/mnt/opencast/4k_sample/tracked.mkv',width='1920', height='1080', mode='json') diff --git a/utils/deprecated/readme.md b/utils/deprecated/readme.md new file mode 100644 index 0000000..3dc8966 --- /dev/null +++ b/utils/deprecated/readme.md @@ -0,0 +1,50 @@ +# Deprecated Packages + +In this folder are utilities that where deprecated because there is no improvement for the project or there is a new better approach to achieve the same task. + +## ssh_track4k + +The idea behind ssh_track4k is because Track4 and cropvid were built to work in Ubuntu systems and it was need to run this programs over other Linux machines like CentOS or Debian based systems. + +ssh_track4k makes possible to work with any other machine without installing dependencies not supported officially for the distribution or making custom builds that might be very troublesome. + +One of the uses of ssh_track4k is to be part of an Opencast workflow, the next image shows how works: + +![ssh_track4K example](/utils/assets/track4k-opencast-example.png) + +### Installation + +Simply you need to have this dependencies: + +* Python 3 or newer +* Argparse and Paramiko libraries (Can be installed using PiP) + +**Important:** Before the first execution, you need to: + +* Put the *IP*, the *username* and the *password* of the machine that has track4K and cropvid installed inside the script. 
+* If you use with an NFS share, you need to use the same username, group, uid and gid of the same user that will work with the processed assets. + + + + +### Usage +The options to use ssh_track4k are: + +``` +usage: ssh_track4k.py [-h] + input_file output_file width_out height_out {txt,json} + +Executes track4K and cropvid in a remote machine + +positional arguments: + input_file Input filename + output_file Name of the output file + width_out Output width of the video + height_out Output height of the video + {txt,json} Mode of the tracking, txt mode: Track + Video Crop. json mode: + Only Track in JSON format for use in applications that can use + that info + +optional arguments: + -h, --help show this help message and exit +``` diff --git a/utils/ssh_track4k.py b/utils/deprecated/ssh_track4k.py similarity index 100% rename from utils/ssh_track4k.py rename to utils/deprecated/ssh_track4k.py diff --git a/utils/ssh_track4k_v2.py b/utils/deprecated/ssh_track4k_v2.py similarity index 97% rename from utils/ssh_track4k_v2.py rename to utils/deprecated/ssh_track4k_v2.py index 84e9156..dd82797 100644 --- a/utils/ssh_track4k_v2.py +++ b/utils/deprecated/ssh_track4k_v2.py @@ -94,7 +94,9 @@ def execute(self, cmd): return shin, shout, sherr -shell = ShellHandler('134.95.68.60', 'opencast', 'opencast') + +# Set the IP, Username and Password +shell = ShellHandler(, , ) if args.track_mode == 'txt': cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.txt' + ' ' + args.width_out + ' ' + args.height_out diff --git a/utils/opencast_workflows/fast-4k.xml b/utils/opencast_workflows/fast-4k.xml index ec9dd34..90f440c 100644 --- a/utils/opencast_workflows/fast-4k.xml +++ b/utils/opencast_workflows/fast-4k.xml @@ -52,15 +52,15 @@ 4K Video options
@@ -137,12 +137,30 @@ + + + + + + false + false + false + false + 1 + false + false + + + track4k diff --git a/utils/opencast_workflows/track4k.xml b/utils/opencast_workflows/track4k.xml index db6d831..e5ca7e3 100644 --- a/utils/opencast_workflows/track4k.xml +++ b/utils/opencast_workflows/track4k.xml @@ -1,7 +1,7 @@ - uzk-track4k + track4k Track the presenter by cropping the 4K Video @@ -22,45 +22,46 @@ - - /etc/opencast/ssh_track4k.py - #{in} #{out} 1920 1080 txt + /etc/opencast/trackhd_client.py + #{flavor(presenter4k/source)} #{out} 1920 1080 txt presenter4k/source true - tracked.mp4 + tracked.mkv presenter/source archive Track 2.0 - + - - - /etc/opencast/ssh_track4k.py - #{in} #{out} 1920 1080 json - presenter/source - trackhd.json - presenter/trackhd - engage-download,engage-streaming,archive - Attachment - - + + + /etc/opencast/trackhd_client.py + #{flavor(presenter4k/source)} #{out} 1920 1080 json + presenter/source + trackhd.json + presenter/trackhd + engage-download,engage-streaming,archive + Attachment + + @@ -74,7 +75,7 @@ - + diff --git a/utils/readme.md b/utils/readme.md index 296d6e6..5381dfd 100644 --- a/utils/readme.md +++ b/utils/readme.md @@ -2,41 +2,59 @@ In this folder you can find different utilities to work with Track4K, the utilities availible at the moment are: -* **ssh_track4k.py** : Python3 script built with Paramiko to execute remotely Track4K +* **Track4KPyro** : Python3 script built with Pyro4 to execute remotely Track4K by using remote objects in a Client/Server Scheme. * **opencast_workflows** : Samples of workflows to work with ssh_track4k.py +* **ssh_track4k.py** (Deprecated): Python3 script built with Paramiko to execute remotely Track4K +## Track4KPyro -## ssh_track4k +The idea behind Track4KPyro is because Track4 and cropvid were built to work in Ubuntu systems and it was need to run this programs over other Linux machines like CentOS or Debian based systems. -The idea behind ssh_track4k is because Track4 and cropvid were built to work in Ubuntu systems and it was need to run this programs over other Linux machines like CentOS or Debian based systems. +Track4KPyro makes possible to work with any other machine without installing dependencies not supported officially for the distribution or making custom builds that might be very troublesome. -ssh_track4k makes possible to work with any other machine without installing dependencies not supported officially for the distribution or making custom builds that might be very troublesome. - -One of the uses of ssh_track4k is to be part of an Opencast workflow, the next image shows how works: - -![ssh_track4K example](/utils/assets/track4k-opencast-example.png) +One of the uses of Track4KPyro is to be part of an Opencast workflow. ### Installation -Simply you need to have this dependencies: +Simply you need to install this dependencies in each machine, for opencast users: this includes all the workers and the admin: * Python 3 or newer -* Argparse and Paramiko libraries (Can be installed using PiP) +* Argparse and Pyro4 libraries (Can be installed using PiP for Python 3) -**Important:** Before the first execution, you need to: +**Important:** Before the first execution, you need to check: -* Put the *IP*, the *username* and the *password* of the machine that has track4K and cropvid installed inside the script. * If you use with an NFS share, you need to use the same username, group, uid and gid of the same user that will work with the processed assets. 
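For orientation before the per-machine steps below, here is a minimal sketch (not part of the scripts themselves) of the remote-object scheme that `trackhd_server.py` and `trackhd_client.py` implement. The object name `trackhd.prototype`, the `track4k()` signature and the sample media paths are taken from the scripts in this folder; the host address and port are placeholder assumptions that you have to replace with the values configured on your own server.

```python
import Pyro4

# Proxy to the remote trackhd object exposed by trackhd_server.py.
# Host and port below are placeholders; use your own server's values.
trackhd = Pyro4.Proxy("PYRO:trackhd.prototype@192.0.2.10:15236")

# The call runs track4k (and, in txt mode, cropvid) on the server machine;
# only the arguments and the [returncode, stdout] result cross the network,
# so the media files must live on storage both machines share (e.g. an NFS mount).
returncode, output = trackhd.track4k(
    "/mnt/opencast/4k_sample/presenter.mkv",  # input_file
    "/mnt/opencast/4k_sample/tracked.mkv",    # output_file
    "1920", "1080",                           # width_out, height_out (passed as strings)
    "txt")                                    # track_mode: txt = track + crop, json = track only
print(returncode, output)
```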
+#### In the machine with track4K and cropvid installed: + +* Copy and execute `trackhd_server-py`, you should see this in the terminal: + +``` +Object : + + uri = PYRO:trackhd.prototype@0.0.0.0:15236 +Pyro daemon running. +``` +\* The port and the IP from what accept the clients can be changed in the code. + +* Allow inbound TCP connection throught the port + +#### In the client machine: +* Set the *IP*, and *Port* of the machine that has `trackhd_server.py`running inside the `trackhd_client.py` script. +* for Opencast users: + - Allow the client script to be executed by adding in the Execute bundle configuration file `org.opencastproject.execute.impl.ExecuteServiceImpl.cfg` *In each worker and admin node*. + - Install the dependencies and the client script *In each worker and admin node*. + - Install or configure the scripts to use the `Execute-Once` or `Execute-many` WOH ### Usage -The options to use ssh_track4k are: +The options to use in `trackhd_client.py` are: ``` -usage: ssh_track4k.py [-h] - input_file output_file width_out height_out {txt,json} +usage: trackhd_client.py [-h] + input_file output_file width_out height_out + {txt,json} Executes track4K and cropvid in a remote machine @@ -46,14 +64,19 @@ positional arguments: width_out Output width of the video height_out Output height of the video {txt,json} Mode of the tracking, txt mode: Track + Video Crop. json mode: - Only Track in JSON format for use in applications that can use - that info + Only Track in JSON format for use in applications that can use that info optional arguments: -h, --help show this help message and exit ``` +### Future Work: + +* Integrate *trackhd_server.py* as a OS service. +* Exception handling in case of problems. + + ## Opencast Workflows -This workflows for opencast are a samples they how will work with **ssh_track4K**, you only have to remember to install and allow this script in each admin and worker. node of opencast. +This workflows for opencast are a samples they how will work with **Track4KPyro**, you only have to remember to install and allow this script in each admin and worker. node of opencast. diff --git a/utils/shellhandler.py b/utils/shellhandler.py deleted file mode 100644 index f56740a..0000000 --- a/utils/shellhandler.py +++ /dev/null @@ -1,68 +0,0 @@ -import paramiko -import os -import sys -import re - - -class ShellHandler: - - def __init__(self, host, user, psw): - self.ssh = paramiko.SSHClient() - self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - self.ssh.connect(host, username=user, password=psw, port=22) - - channel = self.ssh.invoke_shell() - self.stdin = channel.makefile('wb') - self.stdout = channel.makefile('r') - - def __del__(self): - self.ssh.close() - - def execute(self, cmd): - """ - - :param cmd: the command to be executed on the remote computer - :examples: execute('ls') - execute('finger') - execute('cd folder_name') - """ - cmd = cmd.strip('\n') - self.stdin.write(cmd + '\n') - finish = 'end of stdOUT buffer. finished with exit status' - echo_cmd = 'echo {} $?'.format(finish) - self.stdin.write(echo_cmd + '\n') - shin = self.stdin - self.stdin.flush() - - shout = [] - sherr = [] - exit_status = 0 - for line in self.stdout: - if str(line).startswith(cmd) or str(line).startswith(echo_cmd): - # up for now filled with shell junk from stdin - shout = [] - elif str(line).startswith(finish): - # our finish command ends with the exit status - exit_status = int(str(line).rsplit(maxsplit=1)[1]) - if exit_status: - # stderr is combined with stdout. 
- # thus, swap sherr with shout in a case of failure. - sherr = shout - shout = [] - break - else: - # get rid of 'coloring and formatting' special characters - shout.append(re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]').sub('', line). - replace('\b', '').replace('\r', '')) - - # first and last lines of shout/sherr contain a prompt - if shout and echo_cmd in shout[-1]: - shout.pop() - if shout and cmd in shout[0]: - shout.pop(0) - if sherr and echo_cmd in sherr[-1]: - sherr.pop() - if sherr and cmd in sherr[0]: - sherr.pop(0) - - return shin, shout, sherr diff --git a/utils/ssh_interactiveShell.py b/utils/ssh_interactiveShell.py deleted file mode 100644 index 41eb386..0000000 --- a/utils/ssh_interactiveShell.py +++ /dev/null @@ -1,60 +0,0 @@ -import threading, paramiko - -class ssh: - shell = None - client = None - transport = None - - def __init__(self, address, username, password): - print("Connecting to server on ip", str(address) + ".") - self.client = paramiko.client.SSHClient() - self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy()) - self.client.connect(address, username=username, password=password, look_for_keys=False) - self.transport = paramiko.Transport((address, 22)) - self.transport.connect(username=username, password=password) - - thread = threading.Thread(target=self.process) - thread.daemon = True - thread.start() - - def closeConnection(self): - if(self.client != None): - self.client.close() - self.transport.close() - - def openShell(self): - self.shell = self.client.invoke_shell() - - def sendShell(self, command): - if(self.shell): - self.shell.send(command + "\n") - else: - print("Shell not opened.") - - def process(self): - global connection - while True: - # Print data when available - if self.shell != None and self.shell.recv_ready(): - alldata = self.shell.recv(1024) - while self.shell.recv_ready(): - alldata += self.shell.recv(1024) - strdata = str(alldata, "utf8") - strdata.replace('\r', '') - print(strdata, end = "") - if(strdata.endswith("$ ")): - print("\n$ ", end = "") - - -sshUsername = "opencast" -sshPassword = "opencast" -sshServer = "134.95.68.60" - - -connection = ssh(sshServer, sshUsername, sshPassword) -connection.openShell() -while True: - command = input('$ ') - if command.startswith(" "): - command = command[1:] - connection.sendShell(command) From 012fdd7e792483ce1633ec44bce997c2300d71f1 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Thu, 2 Aug 2018 13:48:22 +0200 Subject: [PATCH 40/49] - Update to Track4KPyro Utility - Update to Readme.md from root folder and Utils folder --- .gitignore | 8 ++ README.md | 65 +++++----- utils/Track4KPyro/trackhd.service | 17 +++ utils/Track4KPyro/trackhd_client.py | 53 +++++++- utils/Track4KPyro/trackhd_server.py | 47 +++---- utils/TrackHD_Ansible_Playbook.yml | 172 +++++++++++++++++++++++++ utils/assets/Diagram-Track4K-Pyro4.png | Bin 0 -> 38147 bytes utils/readme.md | 31 +++-- 8 files changed, 325 insertions(+), 68 deletions(-) create mode 100644 utils/Track4KPyro/trackhd.service create mode 100644 utils/TrackHD_Ansible_Playbook.yml create mode 100644 utils/assets/Diagram-Track4K-Pyro4.png diff --git a/.gitignore b/.gitignore index 6e92899..6ca86c1 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,11 @@ boardCropCoordinates.txt Track4K *.pyc __pycache__/ +.DS_Store +.DS_Store? 
+._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db +.retry diff --git a/README.md b/README.md index ac38dda..e5ff0d8 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,9 @@ These instructions will help get the program and all its dependencies set up on > Please take note that this installation guide It was made for use under Ubuntu 16.04, some changes may apply for other distributions or Ubuntu variations. +> All the commands are run as normal user unless if its written as super user "\#" + + ### Prerequisites These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). **Track4K has been tested on Ubuntu 16.04** @@ -29,16 +32,16 @@ First go to any folder to work with the files that will be downloaded and instal ### Basic libraries ``` -$sudo apt update -$sudo apt install git build-essential libgtk2.0-dev pkg-config +sudo apt update +sudo apt install git build-essential libgtk2.0-dev pkg-config ``` ### CMAKE ``` -$ wget https://cmake.org/files/v3.11/cmake-3.11.4-Linux-x86_64.sh -$ sudo mkdir /opt/cmake$ sudo sh cmake-3.11.4-Linux-x86_64.sh --prefix=/opt/cmake --skip-license -$ sudo update-alternatives --install /usr/bin/cmake cmake /opt/cmake/bin/cmake 1 --force +wget https://cmake.org/files/v3.11/cmake-3.11.4-Linux-x86_64.sh +sudo mkdir /opt/cmake$ sudo sh cmake-3.11.4-Linux-x86_64.sh --prefix=/opt/cmake --skip-license +sudo update-alternatives --install /usr/bin/cmake cmake /opt/cmake/bin/cmake 1 --force ``` ### C and C++ @@ -47,15 +50,15 @@ The C and C++ libraries from Ubuntu's official repositories are older than the l #### Install the repository with the updated versions of C and C++ ``` -$ sudo add-apt-repository ppa:ubuntu-toolchain-r/test -$ sudo apt update +sudo add-apt-repository ppa:ubuntu-toolchain-r/test +sudo apt update ``` #### C Libraries installation ``` -$ sudo apt install gcc-7 -$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-7 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-7 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-7 -$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-5 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-5 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-5 +sudo apt install gcc-7 +sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-7 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-7 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-7 +sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-5 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-5 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-5 ``` #### C++ Libraries installation @@ -68,61 +71,61 @@ sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 60 #### FMPEG Main appllication ``` -$ sudo add-apt-repository ppa:jonathonf/ffmpeg-3 -$ sudo apt update -$ sudo apt install ffmpeg +sudo add-apt-repository ppa:jonathonf/ffmpeg-3 +sudo apt update +sudo apt install ffmpeg ``` #### FFMPEG Development libraries ``` -$ sudo apt install libavcodec-dev libavformat-dev libavfilter-dev -$ sudo apt install libx265-dev libx264-dev libvpx-dev libbz2-dev libvdpau-dev libva-dev liblzma-dev +sudo apt install libavcodec-dev libavformat-dev libavfilter-dev +sudo apt install libx265-dev libx264-dev libvpx-dev libbz2-dev 
libvdpau-dev libva-dev liblzma-dev ``` ## Installation of Track4K #### Clone the repositories: ``` -$ git clone https://github.com/opencv/opencv -$ git clone https://github.com/opencv/opencv_contrib -$ git clone https://github.com/cilt-uct/trackhd.git +git clone https://github.com/opencv/opencv +git clone https://github.com/opencv/opencv_contrib +git clone https://github.com/cilt-uct/trackhd.git ``` #### Install OpenCV > **Note: Track 4K works with version 3.4 of OpenCV, prerelease of V4.0 makes compilation errors.** -In the OpenCV directory, Change to the 3.4 branch, next, build the program +In the OpenCV directory, Change to the 3.4 branch, next, build the program ``` -$ cd opencv -$ git checkout --track remotes/origin/3.4 -$ mkdir build -$ cd build -$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. +cd opencv +git checkout --track remotes/origin/3.4 +mkdir build +cd build +cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. ``` Once complete perform the following command to run make faster (the number after the j-flag is the number of processors the job will use). If you are not sure how many processors the machine has use the following instruction to find out: ``` -$ cat /proc/cpuinfo | grep processor | wc -l +cat /proc/cpuinfo | grep processor | wc -l ``` Use the result from this in the j-flag ``` -$ make -j`processor_count` +make -j`processor_count` ``` Remain in the build folder and run the following cmake command to make the extra modules. The path decribed below is an example. Fill in the directory path on your machine which points to the OpenCV Extra modules folder. ``` -$ cmake -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ../ +cmake -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ../ ``` After that compile and install the files: ``` -$ make -j`processor_count` -$ sudo make install +make -j`processor_count` +sudo make install ``` #### Building Track4K @@ -174,8 +177,8 @@ Track4K runs in two parts: track4k analyzes a video file and produces a cropping video file according to the cropping information in the data file, using ffmpeg libraries. ``` -$ track4k -$ cropvid +track4k +cropvid ``` **Example:** diff --git a/utils/Track4KPyro/trackhd.service b/utils/Track4KPyro/trackhd.service new file mode 100644 index 0000000..37385c8 --- /dev/null +++ b/utils/Track4KPyro/trackhd.service @@ -0,0 +1,17 @@ +[Unit] +Description=Track HD Server +Requires=network.target +After=syslog.target network.target + +[Service] +Type=simple +User={{ server_user }} +Group={{ server_user }} +WorkingDirectory=/etc/ +ExecStart=/usr/bin/python3 -u {{ trackhd_installation_path }}/trackhd_server.py +StandardOutput=syslog +StandardError=syslog + + +[Install] +WantedBy=multi-user.target diff --git a/utils/Track4KPyro/trackhd_client.py b/utils/Track4KPyro/trackhd_client.py index 31b31bc..caa8d43 100644 --- a/utils/Track4KPyro/trackhd_client.py +++ b/utils/Track4KPyro/trackhd_client.py @@ -8,7 +8,10 @@ import Pyro4 import Pyro4.util import argparse +import logging +import sys +# Pyro 4 Exceptbook: Sends error messages from Server to client sys.excepthook = Pyro4.util.excepthook parser = argparse.ArgumentParser(description='Executes track4K and cropvid in a remote machine') @@ -34,11 +37,57 @@ args = parser.parse_args() -# Configure IP and port of the TrackHD serverself. -uri = 'PYRO:trackhd.prototype@:' +# Class to create logfiles in the client machine +class StreamToLogger(object): + """ + Fake file-like stream object that redirects writes to a logger instance. 
+ """ + def __init__(self, logger, log_level=logging.INFO): + self.logger = logger + self.log_level = log_level + self.linebuf = '' + + def write(self, buf): + for line in buf.rstrip().splitlines(): + self.logger.log(self.log_level, line.rstrip()) + + def flush(self): + pass + +# Log file store location, change it or configure linux to make it work in that location +logging.basicConfig( + level=logging.DEBUG, + format='%(asctime)s:%(levelname)s:%(name)s:%(message)s', + filename="/var/log/pyro4trackhd/trackhd_client.log", + filemode='a' +) + +stdout_logger = logging.getLogger('STDOUT') +sl = StreamToLogger(stdout_logger, logging.INFO) +sys.stdout = sl + +stderr_logger = logging.getLogger('STDERR') +sl = StreamToLogger(stderr_logger, logging.ERROR) +sys.stderr = sl + + + + +# Configure IP and port of the TrackHD server. +uri = 'PYRO:trackhd.prototype@{{ trackhd_ip }}:{{ trackhd_port }}' trackhd = Pyro4.Proxy(uri) #Run the application +print("Track HD Client Started") +print("Server IP address: " + "{{ trackhd_ip }}") +print("Server Port address: " + "{{ trackhd_port }}") +print (" ") +print('Input details:') +print('Input Filename: ' + args.input_file) +print('Output Filename: ' + args.output_file) +print('Desired tracking resolution: ' + args.width_out + 'x' + args.height_out) +print('Track output mode: ' + args.track_mode) + app = trackhd app.track4k(args.input_file, args.output_file, args.width_out, args.height_out, args.track_mode) diff --git a/utils/Track4KPyro/trackhd_server.py b/utils/Track4KPyro/trackhd_server.py index 3b1d143..ce97c94 100644 --- a/utils/Track4KPyro/trackhd_server.py +++ b/utils/Track4KPyro/trackhd_server.py @@ -9,23 +9,36 @@ import subprocess import os +# Expose the trackhd class throught Pyro4 interface @Pyro4.expose @Pyro4.behavior(instance_mode = 'single') class trackhd: - + # Method to crop files def cropvid(self, input_file, output_file, track_file): cmd = ['/usr/local/bin/cropvid', input_file, output_file, track_file] + print('Cropping file, please wait...') + print('Input details:') + print('Input Filename: ' + input_file) + print('Output Filename: ' + output_file) + print('Track File: ' + track_file) while True: app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) if app.returncode == 0: break return [app.returncode, app.stdout] + # Method to create the track file for crop or for auto zoom in paella player def track4k(self, input_file, output_file, width, height, mode): + print('Tracking File, please wait...') + print('Input details:') + print('Input Filename: ' + input_file) + print('Output Filename: ' + output_file) + print('Desired tracking resolution: ' + width + 'x' + height) + print('Track output mode: ' + mode) if mode == 'txt': output_track = output_file + '.txt' else: - output_track = 'trackhd.json' + output_track = output_file cmd = ['/usr/local/bin/track4k', input_file, output_track, width, height] while True: print('Processing video') @@ -41,42 +54,22 @@ def track4k(self, input_file, output_file, width, height, mode): -# def getNS(): -# """ -# Return a Pyro name server proxy. If there is no name server running, -# start one on 0.0.0.0 (all interfaces), as a background process. -# -# """ -# import Pyro4 -# try: -# return Pyro4.locateNS() -# except Pyro4.errors.NamingError: -# print("Pyro name server not found; starting a new one") -# os.system("python3 -m Pyro4.naming -n 0.0.0.0 -p 15236 &") -# # TODO: spawn a proper daemon ala http://code.activestate.com/recipes/278731/ ? 
-# # like this, if there's an error somewhere, we'll never know... (and the loop -# # below will block). And it probably doesn't work on windows, either. -# while True: -# try: -# return Pyro4.locateNS() -# except: -# pass - def main(): - -# getNS() - + # Start of Pyro4 Server Pyro4.Daemon.serveSimple( { trackhd: "trackhd.prototype" }, + + # Allow connection from any IP of the server host = '0.0.0.0', - port = 15236, + port = {{ trackhd_port }}, ns = False) if __name__=="__main__": main() +# Test lines, uncomment to Test the class without Pyro 4 #app = trackhd() #app.track4k(input_file='/mnt/opencast/4k_sample/presenter.mkv',output_file='/mnt/opencast/4k_sample/tracked.mkv',width='1920', height='1080', mode='json') diff --git a/utils/TrackHD_Ansible_Playbook.yml b/utils/TrackHD_Ansible_Playbook.yml new file mode 100644 index 0000000..aac20de --- /dev/null +++ b/utils/TrackHD_Ansible_Playbook.yml @@ -0,0 +1,172 @@ +# Insert here installation of procedures of +# track4k and cropvid for the server. + +- name: mount the shared nfs filesystem + mount: + src: "{{ nfs_name }}" + name: "{{ fstab_name }}" + fstype: nfs + opts: "intr,acl,nosuid" + state: mounted + ignore_errors: yes + tags: oc_nfs + +- name: Create a new group user + group: + name: {{ server_user }} + gid: {{ user_gid }} + tags: oc_prepare + +- name: Create a new user user + user: + name: {{ server_user }} + uid: {{ user_uid }} + group: {{ server_user }} + tags: oc_prepare + +- name: Install basic libraries + apt: + name: "{{ item }}" + state: latest + update_cache: yes + with_items: + - git + - build-essential + - libgtk2.0-dev + - pkg-config + tags: trackhd_install-basic_libs + +- name: download and install cmake + block: + - name: Download CMAKE + command: wget https://cmake.org/files/v3.11/cmake-3.11.4-Linux-x86_64.sh + args: + chdir: /home/{{ server_user }} + - name: Create a folder to install CMAKE + file: + path: /opt/cmake + state: directory + mode: 755 + - name: Install CMAKE + command: sh cmake-3.11.4-Linux-x86_64.sh --prefix=/opt/cmake --skip-license + args: + chdir: /home/{{ server_user }} + - name: Update ENV variables to use CMAKE + command: update-alternatives --install /usr/bin/cmake cmake /opt/cmake/bin/cmake 1 --force + tags: trackhd_install-cmake + +- name: download and install latest versions of C and C++ + block: + - name: Adding ubuntu-toolchain-r/test repository for GCC 7 and G++ 7 + apt_repository: + repo: ppa:ubuntu-toolchain-r/test + - name: Downloading GCC 7 + apt: + name: gcc-7 + - name: Update ENV variables to use gcc 7 + command: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-7 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-7 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-7 + - name: Update ENV variables of gcc 5 + command: update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 60 --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-5 --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-5 --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-5 + - name: Intalling G++ 7 + apt: + name: g++-7 + - name: Update ENV variables to use G++ 7 + command: update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 60 + tags: trackhd_install-gcc + +- name: Install FFMPEG 3 + block: + - name: Adding jonathonf/ffmpeg-3 repository + apt_repository: + repo: ppa:jonathonf/ffmpeg-3 + - name: Installing ffmpeg + ffmpeg dev libraries + apt: + name: "{{ item }}" + with_items: + - ffmpeg + - libavcodec-dev + - libavformat-dev + - libavfilter-dev + 
- libx265-dev + - libx264-dev + - libvpx-dev + - libbz2-dev + - libvdpau-dev + - libva-dev + - liblzma-dev + tags: trackhd_install-ffmpeg + + +- name: Clone git repositories of OpenCV and TrackHD + block: + - name: Cloning Opencv 3.4 repository + git: + repo: https://github.com/opencv/opencv.git + dest: /home/{{ server_user }}/opencv + version: 3.4 + - name: Cloning Opencv_contrib repository + git: + repo: https://github.com/opencv/opencv_contrib.git + dest: /home/{{ server_user }}/opencv_contrib + version: 3.4 + - name: Cloning TrackHD repository + git: + repo: "{{ trackhd_repo }}" + dest: /home/{{ server_user }}/trackhd + tags: trackhd_install-git + +- name: Install opencv + block: + - name: Create build folder for OpenCV + file: + path: /home/{{ server_user }}/opencv/build + state: directory + - name: Run CMAKE for OpenCV + command: cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. + args: + chdir: /home/{{ server_user }}/opencv/build + - name: Get number of logic CPUs in the machine + shell: cat /proc/cpuinfo | grep processor | wc -l + register: proc_count + - name: Building OpenCV + make: + chdir: /home/{{ server_user }}/opencv/build + params: + NUM_THREADS: "{{ proc_count }}" + - name: Adding Opencv_contrib modules + command: cmake -DOPENCV_EXTRA_MODULES_PATH=/home/{{ server_user }}/opencv_contrib/modules ../ + args: + chdir: /home/{{ server_user }}/opencv/build + - name: Building OpenCV with the Opencv_contrib modules + make: + chdir: /home/{{ server_user }}/opencv/build + params: + NUM_THREADS: "{{ proc_count }}" + - name: Installing the building of OpenCV to the system + command: make install + args: + chdir: /home/{{ server_user }}/opencv/build + tags: trackhd_install-opencv + +- name: Install Track4K + command: sh ./install_track4k.sh + args: + chdir: /home/{{ server_user }}/trackhd + +- name: Copy trackhd_server to desired machine + template: + src: "templates/trackhd_server.py.j2" + dest: /etc/trackhd_server.py + mode: "755" + tags: trackhd_install + +- name: TrackHD | Create Unit file + template: + src: "templates/trackhd.service.j2" + dest: /lib/systemd/system/trackhd.service + mode: "644" + notify: + - reload systemctl + +- name: TrackHD | Start TrackHD Server as a service + service: name=trackhd.service state=started enabled=yes diff --git a/utils/assets/Diagram-Track4K-Pyro4.png b/utils/assets/Diagram-Track4K-Pyro4.png new file mode 100644 index 0000000000000000000000000000000000000000..e3e36c20cea65681dbfd29499254d4bc16fff52b GIT binary patch literal 38147 zcmeFZd03O#^Dh`g#a0Aq7ZllSx>b}#WD^8}(jbZgg6u*RWZ##thlJLK7G&v0K|qKI zf~*nQw}2vG8z9P(00Dwx2oOSqkOUHv+&3V-e4m-;KKFO;%sex5{R@-xp0`d_ovKfL zPMzGhG{3ZWx72PB2(CaXm&<;}&NVsFyPT&)Ab%rtsgaBRs`NECJv4shX-(n$1 zNoLIpn^O|sThDy=%gw#-5sI(VpB8<;t5f*Ca==bWbBSFie>tM-w(H0Tp*@N63Beb= z-0vawINR@m9^7|3TH=`a?3LMIT8*FO8F_5KF*zo#oxRRpnF|e^Xm)mXhP$Y)e&vQU zDxFyQWs5QWE!Z~gp8!ezzw^&v%D7bhWXF<`Vvl&xy5;*NVUWy$=XbZAi#thL6-t%6 zq7w%SS{8ec;jd*q(v6yX+Hp`C%p>w`N;9&iMQ-wzrul3tym07G4Dh5->G$c|wpslo zzhisR6~o=2zyI82`Ur6VwDIZ2pZn5J2`PefPyXi!p;D`DR)&UU+lvs08{e4<0pqr2 z+nnORIG{)TcHWk3!1NCh2LM@s`8On%m$w0=-DdUt-*5f3hAxS=Sd2n^n4 zC0NOKrWZCQ+FZ)U(_0g5yi4#O5rHDM25zW_t+st*-pz$=trVDd;}zRBX8)&v8?$fC z3kWC>1#w{W>5Ym0R_f4_@aCenp4reOu+)FsWmBu)7P9%O|G@!<^Q%Ie=K0&;|DNLi z!1ls7|F!@irr~a2hAnk&s9{sd|8SrG;($#v1u=x?X5`j0vcL>U{ul}LFBeOP^5R?;4+-}mIfeW_405d;Znh=! 
zrm66U(|PwH>|t6xD`LH$$e7^6r{(y`G+8Wv21TD<Q)#AsYl^) zAHUl-6Uxtp^Aj)^w4?#WPA$a3l*EzrTcj;D%Xv(=`w$< z_9r24vF!?m(vB+TO=;N=N%WBP?9NX6<8bGl_uWmik#-5G681O>=CzknR{LV^k`(4k z(k*qn$^66V_@&n9^LlULp>?Tft-KyAta&Fdf{6v*l_x(-NFNpy+?IZ+MuW6?xjk2;f~!= zl8pB?sBEj_p&6v98iPk@(US@<{3`>A3;od$1;4&e{>0F~Oh26{2ru!c#Z282X3(IQ zVrQl)(KFsRQ5xo>X_l{0^N8BXRNbc!7}PyR(__zq-^V(*H#+7&LuM%l3gX-5zS-Dl zK_sEM5#Mx^bbH9%S}`%YH?0z(A(9p~+B|Q)kS3AdT`?Fr5gQ%nVzFSP&(VN`RqYZ= zs~>EsD`AGH#T$r1?~czKb+OhA@bi~EO;egSUrJBXkP}$&8~sDW^mpQlgaS4 zcJBKx+~EsA%-gV%%NwXEUG%;5olI@(Q^GyV?5Ct_Iqyl+OVJ))YvZl4ae81|kt)u% zfyO!OsEp$Ez;T2kAIvqyy^*7l`6@P!sK$q;pLs4k!G%b#GCfYexJf_{wr2oJQoTj2{Go@X8P)LL0u`;tzcFCwz4| zkdNd)*S6$z$??Zq$f7nb$20j^e3ok4dJ2Dy+#Zt>k`3Wy@JBJ&I7-$ff3g~`V0)3g zyuLr_L$zL;o=A$$zTOLNJh;#JyOZ1c)>~Vz=Br7)zQfb4nm+Q$T`3nSiFzcL22I^- ziW%g(!Iq%gk?x0c8cw^Y{xWG_|71sQd%q1fQn>-F+D`MZ1#*LmfqY+qgV@^pBr+~d zPU>aeDfvEiY53ulR8dn*kcLP+$7NcMH_B*XK&%XW%jAEAf2FOzOkE$pxyE0sUT+H-MeBC!Zsf2Ml=Sw@W1go1o(<`? zwTfzldL6#)!)tzW-I1uF_{&FW+e~dE-5=#E@mazHA@yDhW=9sg71!3Fbh@jFmrGUa zb!#3RHeGr)sO_FLRR#=NwKUW{5CFSV|M~HUrNi+zNHj$~C#?6qy1lCRq4H^wU8e22XY^Nc3OM=d;n{8h3?hgQmATo9o zxpzn&bGrxAd3@RvIm0IL*=ZUUn6#-gnjWzFZ3EIj&YV#PyFhX^+D-07xNRFS>il?C zz5>;yqbc7_=WuT#Q_9MN1hWxh935!`X($o;)$bNNgHjCslt!FSe+lGQ${l?FxBK?cazmG7%L6Z$TzT+e+S7+V!`+|wmO%r$f>@}2iwVueDWuS>CB z9eTXsv44(~x>@KM_JkUhKzT&z{p4{<LE#?u}q0BY#i zD^jF$XFOu>K&u~2Dvs#=6^55jA(P>p#gmXk6imq5Au5xkR&`%jQ@#?J(K)cZ1W2W) zP_J1NJ_{8GgxzN+@KtM(BGT1Ar48f0s4$_CId^-cb-R@?8lO~F+;9@d+{Mn&wH_Wk z=IQ4&-?Zqny_6w}Yg{*MH3`T-<9~y8`#u@eydneM%mRqyU8YV2x5U71$Gv=R(qHLy zwLzT+(1WYi)3uAXADUHxaNsrWr>JR;CpPF;-g8^Q1URty*# zMg#lewWrLwvEktY$9oyjcbUSt#wbg!ROR5pV1Blm`^nLHJdtfq1q>4LjJ9Vt2X_STdTsb8kff5D$@0I=dNQvhuK1xFWK=ACA=c}wl9RsH@1 z{(CT+OdK8o!XOyN`=k@qPN7{oUyD8=fFsQ(dprYx^;mXF(2r%4pyFx|d0J#;?39fsGC z3Du2lLP=<(MWsZ?uvJdT%kL1HQxqOxN9P0nq9-)Hn=h1YRKFg`)Ag{PL1D;Wo~nri=}0SO$wa5L=A-c{NIUFV zDj{nmRXV6XAD`nt)^Js9FTMA@^ba$zB+64-O=Gu1732(W`O=}h2k@s+g$oOSLxZG>F=rdv;;2K~FWm}8GiOxSx} z*Ylx~*`CnM8vSXH8}477?BdLMaXR}MBdBBYJF4`7=y;K?MgICS7Qi3wUsq4W&DlYC zy%4Tb;Kgnuj{sRmi5s}IEEKZgv`BUo;eAJK{&%^aDc!8)WgVryM|?RfhGW~_?YW3V z91w(`VUE0}%HA_!y1Y8u-anM@`fb&dS+eQ8aXBuJ&C&T;$`cm*%h8iGyc74##0V);6!Apr!4>+Ts)7wm|;9j z+d!OUavtxByRW4LxVKY|25l9%jb8x#mlhR67wu*1KJmtS&*<1IS=3;KfN&c6!hT&g zCPE)un_HP<^e#lpE6(L!WvT971CIuiRR_rKr_d(E+3{WuehgcXM2=-X-@Z|~sJ{HH zn@`$XTm_r)yJ^k*kX@sD@>@APVAja=(m$$xvES&;YkRx=-=(xKB!Oy0k6Q;v$L5bZ z^dCww)B+z;cXHe5E#@%YWTc&(^r0bE4nRJwCcZdd-$M%p1!V#pK9_O>?cs^8pZ&nN zf+}5Rn9_OX-`#^+ZpL>Y*4&i1m8q_$@LO;E=Yo=X@~t`p8SpQEM(T~d5OtrHDj-#0 zN9uJ~r9;PJT0M>wb$c^QA6Emp7rpP?SOHqAd<*MumPS=S%BMKwROV{QgmC+IoeO<@ zxaJbYp(wbs(5q0_w31K-Bn(1pV2tZdKUl`|*1WZqbv?Jb)>3O?-Hnp%UH@huwiihr zdcn{)2s;%+Nu?Xvh!k<^pNpAd&VR&z@*&DlSpE@{?KpQ3vo7sFq>u-scnDKBqkGR8 zeR6%0@6x|m0pyF}k`XznbY@;YQ4*J$x8~r2tj#w*wN>#*gYQ9sEPONAaBTN6OuCv# ze&uM+TJ^F9Q;-9{jC=^bYA6+vi6v5Ujis>3LE0isQBF?yODV7;yG?tZ45)5Rw82!w z?-4bfURKr_YZ=7S&j#jyTS)0l&7wie2V{R&*aS89z1On z_+sC3bP4pZ6y-fXlaqryN-PiTK7e*2mv~Mgr>TuX3}o;Nu4w z3LR<^ipcSFoUzn^x6zJYj`Hw!(Mn8aEDW_)*~$~5^}y|cYauNmBz}+Pb@K{D4%sH@aIi;3g}y zNx^Kehg+NUe-WLxy=dcIf?36Ybo`ekHq^s#+w~8kZ;J!_i}U~<+Mp2s3v`}Psh}JJ z-u4#-xHZ$i?eafA00HoX|6R83KP|hn@ox)k*b|uH{~D(D?|DHt<3x;0RaO4a$>}oZ zlvU#%zNJN~hxraA~UM zFZG?->xkO%67uJzK9TyEN0W(FQLwa@9(sGIoVLsXP*n!vK*wt5s_@iq_$T<(nvQAS zwb+0x1F*b*o5ztm=A+DAnOVcrHLepB3on#)>j~Hlc_j+q?WRsU2OH`V7>{OJct1zn zCbYj`9&?_Ji+vQeG^v`@)4YELl34Fe*HBtz68^#9Qj3YxxTHVFw-jDaajuSS?Vbo|Bl#*tySSZvm%?l%=#xuFQll7b zBSLt|-EAG3&7Tn({f&y8=n^fGcpGbu>i+D#LBAcfXT)%JWM==x;;Q-bz+~Q>t@(PjDLR0Nd1~8%mr!*TD@W?mEQf+z0cxLAe!{$ zs5caY#j*vvzT^)iX=3?eM!j!_nE3KD;>bZ7_u0H;IAqA&&Eq<5`hG}EOPGPsT!H$< 
zp6x{umqx0vyay<8FCstD;O|7N7dW;iT3()N#WBfXC{{STWuJX`zsAu+AC}up#Mz+KPqWpW z3j=Lb;P}YAs*fV~&ji@6q%E54yj-)wtSG7ac$^TAUYx&6!^AdJ((|>Dj8;4Piq^v? z+W9q7Ugc<31A&n&H@G_Zxn*J*J4YoQWbP=U=d&V_ID;_Zv_5P8s6)*&c%!mG#L^MR zBGxhItm0qW+zoYUYdT}B(fK{5S##6}_+}~wz1r%7p~ll9eAfHzZL!XK9~?XsGs%jo zraAkF6xHM-b3yl*C7oIy=@a&NKD^Z8c6<5G{mviNeA~LwmXUd5_ylDU`AU@a z$59Bd$81D)AW&g%5VgEQzZK2-8Y5TSt+h)e-8&A?@!(TnJ8to8pi;Tv?DOcnmU(Et z6b6;-9~ZEK#p%Dc#?KN`zKfnG4;5UMNzbJ2=V2y>kpqlbT}pn=1bJ}Ue7iXA6|8xjBGDN#vvyY?^R;)W>`G4WVjC}Y%7JoXUsD2xSN3U#Prq1k?N%W);o^tI-w_tI3_+I}1m_i{eZ>%ex@OYrs zdZ9&Qap(2Ku!l}1b>JfI>qRs*s!I2dk2=|mu+;fzzp&oHc&bCaug>Ae)oijK5lP=_`XN!d>QJ_rZ}5k@pKe#4fC&3{ER~Nc6a4=a(+6jaKyRah{GCw#Kdo zIkhz~nBZb>&$T0P(AOo%sxYx!+#a*SL?s08YopDr?45W~vM>~A&Cer&737^u=J4_| zA;?KySKX6q9~n2w2ge=BXX`9(6N{Om14b@xR|&mR$op)KVyu@nkO2DM-?ykn@gvtC zPKqQ|B5t4maDr0SBP-Q|vNdcjtuH_Lb9mR;^7(3%m}yoDLv`>I{)HbS;`1wu%JiyX zampT3dt9ubDCNx_P2ccgcF(vG$Df4Mft;O%7apmW^q57?ehRpnt`wUo|y z{}rngYbLC|d5Gs`+_e5`iEc&1lzn6^?ljo%X3%btLLJvIuuINo>BnL9Y%$oK27_t( z?d9`C{akg51rTz_e*;KVPQI8i%x(q!vcfE|^G!&C8_}EDBW6nREs%9>^353TU_qp0 zzQ77@Yk$yz9H0*5WXFJCxe=dtZW};bFVK$4^!B)(pFwgrb6MUw`8Erhh5v+@bS`D8 zm?rgyJCafwJWgEvuGq>)w3rsD) z;)Pm%QuSMUl`=6+{TT{ZrI{>O0uh6ZNSbLp2+AOy;>O(bK6G@N4E>|K;18%i+ zuea)H;F^45tD0A&#&KYNt}Dk8&sOHIo*~jl=b~LWe%%-V4d7`X5Vd;O%3FV1H=AB6 zafaF?p43fq)}=ZxOuH^)OZm|y9HoJ;(75@h=a@^1Z95Z#xq0Mgp%eJ1(*&vxV18Om z%Bt@=8gFlxR(X_T?_%dQJgAeDyG;9%qowdJ~b*p=aC3z_uElEEEC&eL>z+=y@VByEy4Yp0sD^tRO} z5y**<7bDa;zU+B5Q?{{iTgVkKe;3H4EOnbleZCwfbAeYQNP3(!X zxI{e=@ni_VFqpiP@+lNI1I=v@d*abIt^^wO6j(3Vr93S>$P4q1H5}92_D7XH@3#ED z`2q6IB4-P@d+-$H5&oz$fl@t2!U&KJvnM3Jo~ue5!5TK zImWX&xnXL3OI*ruhrFkA*pE0H5m&X(YfDne3hgEnXm^peSiWtHt{B!kdbR?-168}6i&dvkY*|d8Y!uLcYc8LRiz%DJvq4- z=Je~;%JWV%irjp2mV!c4Iw#Jx!`Wa-M;7<7= z=;Ynho>LDm+cZ`$dZ(-)uH0ugUk zIeXj&N_ri5-Vry14SNTH=~V2%lSx6v3!%oI~{ z^VZTQv5wuKv5b4j$Vx!pDO~+850F)3v}Hc0SS^h?iW+hF2SwIn_;Wxn$fvVC$+{#wj0sj!{}#yhvc z36Zop!fBnI-Ad@S`w-T6z*V{gN2M>UinjLoE$T>y=;9znX_35VJj?CA!!SXepfW8E z3NTq8hF+ccMWrqOnfjfz9sbV%AINF}2)XC@qw3~O#jDlA11hjSITWieouh=5NKf@{ zU(7Jbs@cQ-oma+cZPlTg=`MV4N3K`x*@th)Lym?6nHc1G2|#G75hg<3Pjd7R{02me z*|Q!nnC;y3Cv(ML@?lWr&#)Z1s639(%n3FIo-p0^m>B8w1=qT|t|9iL?<*D@dNdSA;|K#tHq7jzMGPmgy zv2p{q<6l@^QGEW;YQ7hsQ3c0!w5M0ImCGJ(eEPlMNET7P%d~UInv&-#9@PEBReVbl zG1}8G;ylku|0f~E4&(iT;&Yd&5J=`>8r1TT_f30%);nheFoV z0Db@l0d0aZ2N1acEq8`@h6e&v6=n$v$P}y^fa?GI;s3P?o9Y!CX|UBj35pe1-#?`R zu)x$dv60J$Wx(e86L5q8-&Xf^A&_KH2TowOsjw|tBgjL zF!-~kM+~!u=8MQ(xgiZ=4QGfY0cUwwAnG(8>6RlBQl%5~Pqijf=XPe!4N&epd(CQC zjn+Sdo{TLgNz}te2lLl77X>p=+;VB&s)(8(WVc8WOVCmaGDt)mnDRIbj}3e@nO}MM z!bE*^Ec6+RR0H&9w9BSx;9L$;qEyEGYB;*!IQ$6Kx=B5Mhsx_8Qp8Oq?Z$Qr+)M{> zv%WJtuVne^g9hE+^EDHVVZda?1{SDz%7?}}y@@u1zBW~PY7%~NU_SKr&%F*wI_r#O7hZGYQ}8Y~Fe*n;e9KyjD;E8b*nTupm7PyqV{D6k`*?5x zSry3_FfJk6tPr`k*FS0<__3R%)y0Ii&=`4+E+S8LI%29aPeaNNrUxY9ug$Frc^}Hi z%s)}<@I-=r=4J2Mw!JE>JXbKGvLT?nr|eo`Udu|j)&jjFre84CN?vvLQ{xwKFt~t%}{Wn;W zyy~0xk1sgPC#fyhmjI|ojlLZ4-cwOVRbJV`24Z(G`$7Y`PHr#=)Dj3eq)#}S*2Af( zs9$@HIy7}S!MvuueHr#23#u)bMWIfQNm?zo@Z7J^P+2+$(q%aZIOwB4Q}^HgQf*#M z8{lPEJ7eQg9pJi8C+3+>HiW|^OFe$X=Md!%H6@evZ9^9%(zkuRyt$DI{jV5G34L-~ z8fmQ-xA0yU9as;funQGo=0Mj4t)S#L@QR{cjJ@eS@7$1bk7|{7<~%neXb@o1`HAdu)Od<%H|dZ z`Y{As*x!2FK#A*kPG?KY3X>8tODb)5SBR8zKgpx`myq zH%AF9t|Z7ffD1cip~ZWf;{ko`p6OYPAVO|n=LR^|Si2UIQ(A+|bk#`JT8CCbBPFJd z>OS!t@9;;@tCP%QGzjrdiA^1hfj{Q)rMnr3grQ!ra-TI^jC0(ghr*c7#ZQkQiXEOO z`Bt?}zr%2UZyJ%6^_vfFY6bW)Dn=1lt9+04)_;Ew`>erJ(`@fXChKFqa8*zfKFpI} zOFv&6(me^pwI{!A0+i_)sP}60bN5Qr64@1dHkdjPOC32cCplH)KR|urA_xr*-}wEY z&Ss6+zUzn#r(%Lmn+t$JYk+o`4bX!Sm%XL>bOZU!3IccKQZ4|jW8VmAeP=2l#_WN3 
zMv!U;z$-$yf;o6W=&9hzICWm*N#~Hhs~{13p;D&sR3u=q?!a!jES7=K&aDqk|M6jh z%S6MN&(01Z+l_SZC4gkw(`lF(IZY!}9aBj9){Y|GlaLtsDvAn>FWI|xzrKLb=1Dp2 zJxB_GG@Ce~n0lU?A51EtV#*CNYc($!>YYCUCy9pv(WI4F9IY0#E02YaTt|DZaq>}g zUD@{GPD4571g9~^C9aIr%7EJw$(QtYigCBNU$|KnhGiFgGBE=X`R@(BPnUgV0TO#TW?>k8&)f<*tMO21*7;-b z{lV*>L0~gfNpJfB5r$1P*OzP*eID})IJxL*aRttSekT8vKd4J;<(W~?P~%I-W#MJa zz}`B0N#mon%#y|J$^*AQd7aX|jTO#&^Ge+z;#IRx`?I~W<)oS6m>UKq<;Uc_4b#cDSmI7n``;C ze50}Y#TL2dZ?dz$%PVT0woJGuaaJXXFv(mA98w3#NG98ImeOl@bQq-1KFGY)cM+kLZickQKgH`~r} zA&YKYwWCm5y$H&`#XT)NFiQ+KhP1^_Tr0#PquFoNbP8>f^MlQ0(o>!BWwTB+0!g3Q?lYnSuh}z6mwhR!l3cA=ZspNXDJT#kySI3Q~czfAmH__ zPTxu^1y99FeYDUsF>IclJ73SPhQWwKgAH)$Q8Dp3YPNswca-__U2&O{5F+#{p}wD| zdj|=UzEy`rtQ68bElBAVVuxtS!ceYcuPy?hp&RsNa&{fij&V`Q2M2~dmMu%gk3fgv z#{C?fnf~-jbEh^i)Cqi$u=ZF?{Boh&dDPga(vljTAtp4jUev1RiPg~j%P!Bid75iC zzHPO;4DPY>>@4^8usnkk^2#S1SMsXsuLROW+sA4amqQ$C=9jK9a>_MgGm4FHb^d;w zW~n_7{0aVrY2fhl-$Xr3^JLgZ!Fr=CO3DtNGkHYG|2};&Cgqxir^=hQ65;v?Ybp!& zQQ|(7k=K0I45fZC(c^6<^?JWlia$6EH(%z+Z!VbzxLBBvl6KfBtPhrr^X5aJO?7^Y z$0ESWF_-s6%u(ff(vGBOvQ+F#m~BEAqr~IYa9zu+E`vvuv=7w_?G3U=YpNc)1U_T8 z#`MG7oZSfF&UJmqw&FogBv?fEu(*C+Nz3n)fE`7Q*Lt!j;ep}b@QtT4oi*#J>Ci{- zkM3hY4VpFdgd5P-o)fY)&qBlC9raz1F@;?Lzt%!UB4-agT~^=sX?5@&tD){@Z>r|q zhZcg89N7Nn?17sjHQy_YC&#UY>IN7#UkmiKFrc#MD%HytPpo7BsKn|vwiK4abH_gZ z{0RYND6=dp;eLe78gIyPKM_I7+Mgb`!u8X0Nr=Gc=Ov!|N_^g+ZVs@6&_e5vIT8+X ztJAKtmAPm2i}1Iqph*}sv~qy+A;LE7j+MowkUTbuC?(SoWfAi>{O3!v>H8wAYaJlJ z5h5~*Z3$Uv4d$UqEcCk4&oOwJu}B%o6teZVxStx3nGl!|c9p|UBGmF{u9l3XRa0_G zKACyK!g9^mCV|id1SgsnTs9hwe~xdr>Q535jXHHztoMEUwya}rN)6*sgewe?Gb%b} z{@o94+;VT3A2TCd19vpI;TV>Foj|YdnD0}x@5UV}?Oy>CyCiIWmjiS=-@Kj&8qg>RriH$rY9QCJW(!Yq#iMWNpE!!w0-Y1_7Q20*Q>kmiufLBc5|Xji)x}7vI^Naa3Pud_hR-;>QwK zeJ83&M;(Ck1sLRJ>){3!DPju|7O1|`v}#-PxtbKP&aoN}XSIz*v$$1c&1MTJYvFSp zac_jeo6dXbX^U+Krmgw|u^HSx`-l5S;8!Q77h^2+-KL@|T1vZLi?^JTEAO-wQk(+C zd;%mYcmWjH<=+xZW28iw!HqO*cv??ngMp2KN7kebEATm-~ zf;-d(!s5TDmXXDyu2O+pRBk?inE(LWG7ANAvTW4dv~gOwgtn#QVmAsJGowaxi_UXqe_%f-qIu>r9e%(s= z0OD1dWKB*c7Rq*Zdov`ws-n?g4d%oFlmyWHCcJP^?pCOMj)mE3#hxpaZ-tEjC)@pG zo`(t?d8j-{NqM~7PM_z{KgVEO$k=3>e-iCc$tR%4Qe@<)PDlSBrK`M74rB)~wgOiU zw$Uk0f;No5%tZ7w;=u|VX6%frkl|Lqz&8~{V(q$dn(XI z6IAmp_kcF{{x_Pn51?5E=^5xn>iY)fL0@eS5q&t7wqh^0SpmY?0m1?FhB(N|ZvD-K zUCDSpLjuPbsi{J9vti}r36{b4A9OqBfdk1;z$*aSN*JMGK@|DTodiK!r1V%6nDu;T zitmcQa}~G%@^RMKE%! 
zSQ*=|VKJnsZ*gf^^SxzgwG%#6O0v~pG1Ix2D>PT#d3{6i2Z8!*d}tL9$UGzDdQ!gc zlUsgXRC|olbNpzI&cogsJz5g!VqB}J0>+Y29i6gEQ~bM2g8{j&=jWkqI-p|gcb8;t z37q3!;uIh>?2g!WL$6XspAmdUAAfXknO0wPY@et+;i7<24am{Zl1?ET+<>rl6iK8S zoClN#)ZCVU{{JZpY$3|!<_Qgv0?LY1dUy-1CnY0cai1J280jM1s}}JPd)p_!lKmvX z0&g)#o#U#}hefS6INCU%Kx`~sLIFXwPx!5+p(9*Xs5Jruk@|OB<|yHdK?F~U&pl+IB+KIB`XgcS(3X{spQYZ zs#V?m3{M|}=Pdk@O5wq?LrNC$;5*@-zRuoQq2uNhY{9~UFjb(4Z5=xVl-{OaM1(vY zX+A`beal+z{n)aiD;zpXe`4b{8lvx9w;nQ8$62ZL&0`0?(>RSL>TFta{P-sE3FNSi zy=V9>?*Esv;Q#3E{l*gfKN~{+-y;NVoq*!lQhtY)QKLrb(QcV*qR^NM^zeEwJj)L# ziXk$q20eUFeTwrT|DXV50Svih-dsS#}tIC>)3Eq6SKZBbXPxb%xEqz5wuc3_LFmed_XzeW) zRzI%HYUK3ZOlq0%EQ_lJh&1EX zy48+m{G~Vuo%zKfq2@j{LFZ3uX(EQUzX7-KwNb#c)W z$#Va|9vbw370^epuEwbMrT|36NU8T-*)c9Pj%C>^#hf@yG zdqwG?w4J}CJ%%1GIg2&fZXC>}|Ih}O(Q%1ziOn;5fj*KQ)pRXdeByiCEljLPsw0WSX{$xtzT*D{K*Br*M`VRLK7$d8qmkgzdPE0?7rD0CHT|zx2zut-F-C|rQ)AD&K zah@DvTM>a9tXgX2%##gpas|O_q4ldBl{Ai zIn}kC9RJkI+TXhE>h?3N7WemzEcG>yuI3+lA6PSbdM62zGe5V4b-9V_Q(u+Uw^amu z3FY)!#T=R*VKvpBashi-F!PSb%{EiWdIxD{QM&8TqQzyC-5ccA{ZPr!{%&5tWiI}E zW%wc4qsSEFrO5|toraV;3h`KZetpb~$&!&`@-vy{?rGA(hz%oB-;7;FR&Oq4Ql66| zF6xoTQKa+Pn^<=v=M$kji%6wi!R&CEcU=pe>}vafrkH@J=;+fY9W}RAL}x>z;eGEy zjzeV728Re}bMEZ`N~q?6cf)v_1u5!)=9?PZb6U(MWy}h1=&R8rdCVyKC|Vi(^~gMZ zD3bp}FB%1}kD#~|cm7g`3-3$fCOPyZ>to@UJ8#cFTT(|~qoHWg?PT>2uPSjz3H+ON z&aIviF=JKZC7jP|*kSCIriqA@`efE^m)5Kr3GjJJceu;Je-sPt6j4*BlHVgzUY6xw zAKh*AL(xsU_-jwLjpt0*C0-^ebrgL430)rs10a|vS?Y^(Lkc4!2+aq<>B@d z$&N?v`GZ%FNICMVOZwy}NNO#(NdHcY?HqNXXIC+0uxolvT*0MWObl}?)HZe|!9Z9U zYS=tKTH@g51d&4KRgqaA>z8UAY4G)NJhF>3B<7hIS8|ium-A1gRi!TP-*@$a@80gZ zT^Q?$mWi{Rs$_@uILeJLwlA#mT_KLP7=P>YH^+;nw-;edPTJIC>Pwv27oVwN{T<{+ z`Z*6vuBSDX_8+74+X^GgBe5PNEb~NcePJ6doCb+9Dt4*1}!u`*++0iQWHlLou!^%Yid zFMaokPXSN46o+@nBk4HVG7`p`oM=vk6n_G`b{NFxX(B}t3WR|3udcBTj2NPGzy&xS zI&uQY@8xaRH=5bMAyK0L_M+S&vHtlpsvggkFSYXAqGPV`V{e@YI%Qz$_HqL~z+L0o zIrq9r_V+?C_5Dr}W<=I_rrVAplBR3ca15-1lItO%SMMQ#D|tq1S-a7+*BKi=VTaO} z8{tRe{O;k5wBI)eF2GOAD+fkvGEHv7)UVSxapH?>>&!Mgw`~J0L+Ieahln9jE6@WX zU`hdA?l;L26E|&0`QVj5i3K^Y+|!Zxr4P7Le606zt_Bro7A%zs&@V{?erzDU(SHH| zs`?sf+PToV+n|ADkC*qK_|Y3z0tUt?Ox_)$XDc-J8#T-K_8=Hb` z;mpIRN!Xy)IGKG}+F?lyR1)jOFq06ETnm8#=T*yAAj|~wL7SKtF-G&ayz@Z<%&s*c zii=xyduzSKc=nN3?yLV8D6a&y+eR=ItidJu%>1!BTAWJ)T-ErbmRw|t!@-wBmmEDS~}}4NEKB^Pg1RA4yi~{UtJFL#CQ7!l=X;31(@9{wVotLEuk5|zGTfc;vYSE z-cWESR%LO3VtTyIWeBxzvllcXhC4gfC#rmbk`lFnYc_$HmRX z&#tYvm8@e#Amnsu%`7fJ`%G%R@w>`hfpd>~9O3*-ld?pk%86DiJ)ZWwn<@KhQO#Bu zOO5F}hWQ=4_AaJnyrw-_pYU>oUZWYV&lw9)TW-r?)m|XJiqHqwB%5$ll29o2&TIb+ z;YhLfIu6D7^Zr>v9|sR6XsE?B&@>`f=dqqoQo-#rGQb@^0X6citklsa)u~{tUuM}g zqTH&CcSDYue`;@G{^zJmI*0qzO{Pt(@X`Zs_r~f5p7;Ers<5T)*SvpE_pPnxA607B zN#fU2SS?8Gou4*q`M>JdEHCHvs{X9cj}Hh+{l5$k5I{Gdot0NUo%npM+p{oW;Ty5C z1)PKKv$qYrT27hV*$Gf7e+4Z8Ph_TXT%Nke%(MFYtuJo!g=_^~Q8daYr%K*1iVyrk z1@H*agalkfQ+z&j4M&Hp2J*RC(tph3K!@7_eDsZky7`BZMRlM{){r7|;|y)9L{|9- zR4%lC$8(@TN#!FN5k`|)P~Upq4WSede{{dTJI z_xhSO)D9^B6RG`oII;hz2EM))UfA(aED0!_^np;-pebO^5Ekb;jti9npGg9=L68%0 z%=fL42=R+RtpC~8Dny}N&_mrKKohH1HV!9IQgBevAHu0tmw{vPUj-#KE^-(ssWYU5 zG=zZz)0@D(xy?&cm%vkAJAnrq4+tDY9FQwDuH&Pqpqu%xD^${4MB$MSz-g_OAkKk- z7;%m*bV*&%1p0jIx>V4LaH@wPaExjO9GPw$qfX6=NcSOtHD_HEP$fWzQpZQ}b2&ml z&yvco-&QRU{QowAZk0g`KTAeaV$`oC0Ai6%w#2$&(WUwo#wcnMtWjEEd%+qX&l>=p zGc-*}!2z?vmcFc&2UihcrT-qf`Wv|xEDgh56{xLB05$|S&tl&mr;3|u=X@u)9RU2i z+lHMHX@^5|+6F0tW-h<2S8a55Vk8ZjmK zLcu}yMz0`nQ7uRm__>X54_j<4Nv+nrCL&s2AiDmR=shp50PPHqJT3rM3I*tZO)DMt zn1dPW4k`&mU)&O18e_DyhkGGZVB(+tGO-fLn`w>`^lK(>ot017zu$VfRl+-zV_zr<9ZvRWZbd(O;e*Gf+YL$^-|40H9&Zgg7 zQbrGlE@AZMqgeU^8~v@g-(uT7%DwrBT>o-{XcK+(kLKfwf;B5|&1pT+qxw~jJ9|kL z%$>7 
zfG+1Yk4Xq@hmC;tHRQG1a>hV%Bth%c&H{4+7j-+in z5Ln=+LO#G}!Jywhr~HA&YD)zct;o_-T+XU`2h)p7re?0~KCojq2njgPQ~n1yXdjt3 zULrTG3PF$i8UBXK+q;`{%f)JADsdpWZA>+z+`qQQ^|;#3V^jJ>er**0#OTZIHQP|s z*W2GT&=Koz1ZbPq&Ns-3KelKFXbIQzT%gOZ0I|KHq>lBsP5N>Io5;b})tX-)TeJp7 z8J$rr-B8kbV9U;eSL8tS@5w9caq7j$IQE=%V7JDXnQOsvos?Yd>~C71_@++8TH~J4 z*uwVpWlV$PTi9L{`KLB|@F{_+T1CF8>Wnx`U>OP?MfBOvYI>ZtGTF0fV!7(2U3x5HP$*_ETK$^fwavuIt25< zYx|Dor@RNTM#3TYxgnD9&J{0Soiog!9Hrl6Gt*{gF(lDszraqGz<$lYler4;2y6X} zyv_{`nOSOr1)lU$K_w2UqB3Gyw|B(@7JAR7au&8X*Y z5^`$-3^tZ;b|cyJOq-}fy!GqC={#P`r4#oyKTFn1)s|@eSxTUsrED5kQo-h9$q6sd z#kj!|eDLX$T-N$IK_$zt$!t=Csi9K78H?6J*o`zyd7_*HvnL*-PV>Q;-5X532h`B)@HsyW7I$9r8NfHDtc570x-Y;5O`Y<&f^LYn&c;8$Qn} zf!RM+=+QfAqi$qNwgD}J&<(i=vnjV5yK*)eEnm=w=K(sKpN<6dp;;8?bM*48p*#29 zF@|3HRMG7_O?)w=$sS@yqq`~Q`j%s7einkhTzEovF=xsiTF)@3dYqr2(cnzb)xR$- zYYmkg$`<#HmApVv$fCcfOZG{0(>s2B^{^^qeL^r$kk1M;AoA;|EDGS(!L3ON^p(19 zgKo`4Ucb1nLX_M3#GIggeaD}sUDfZu3$w5`k1_s7Sh_Cl3TX9pSPAn`xCn#j$r4Qy4wQ-&;|-7M9MwDXG3v|j_fAVMpFLm z0dM7>bBOOgmFziV3mRnBwd{Q`>S8FF8r&xzplsTx2eT;HP)DqAqi|Zgp9c3$6R^R< z7tqsAa%UM0ou{rWtJPmz8fi1FEJg|lUT#3cYg!!k$fBlSfqDy&f=JI}FY$0@*(!Lr zeP#$vafn?{QtN~NJZ&+Xg#{C4BsLvpz1xLDGnUK=+a zgDj5^1^(wqJt-g3@SVaBxkEssl-mdjLCdRve{x$=l7|H7X_wx0-P<8 zI=lD+;ih~T|2lO>KuPSo{Wd1EEeTx;lu#PFy(txl{K-Yuei8LP)~umQfEAFMx2AU_ zXVv@EOxc|PpG#l(-0?iQ#0FvC?NA4&{Fb6lWCIgQ$cv=^^!n%Kgz37~!FcwE-)Mw>G8(@GH~?!4S5K!*k_Fy@m} zw&K2}GRUS=Jeq8Og2kNCJD#*p2Nx~wMbSWTGzx4cK;pucsfBiop6}uX6{4w zN)9ZgHcth-%d!;r`1#vTw@9>phRu%GuhYf&I)zwnk$61~8=L?rNWgP6%hZQ4yM`wA zEze?=9N8aO-OJC-1#dfldpiXbjcJ`@--{3HZwj$`A4>fjZs0pdVHPp&9yJWQV4(*H ze-!kLoj;;e(Ux246$LR^v_2-xxGeIB_9|UM3~yp~rMX3FDcD4;BweO!d@toms;)X< zk6nu!yeKX1D@QAiv9`Fz%5SuPIp_%bO;YHH*Uu--%V(Viqpd^!DQmRSp*#Wj>Bs^I zv3!=ZK2eRFn2u1WtLs_TI+)*h-|+Dk7t1Z&+@!$J5p1$ZPaqbL<74}>)(c9-kw@6o zuM*5v@9>t1aZ~7_N5YPVI zznE|IY?%EmB)mcIFq=qK$Qq)0Nn5iKfoP%!{&xt_c|eU|Ah3EoPkm+9TZrekRDo}|FvwW-PD&y`4nXd# ztyZ&=u645S7|eBxu=ay*ORQX(^*iom3urb4#A+w|UK#sNgV#@PB*U-gkBY$bH_6{4 zPIn(XlOjvMIT?uLlk;gi?l8@juljjJN)0QD?0^w)_C+S?#ri~i*{W__7C|`2AAR*< z8WYDmgfN77L+ltxtk3ht@~psTRtDO1pbJb=gY@y8<5BnzFF>I-Yix%)l=eNAdny*<*Wz*8qkOCI*GxXhCyhAGA27V*p zDr-CP4L`S5^*)?B9t~LHS%M1$;yk1lkK;GXC=B2TKEx4uu&sBP+mLoZ!-^>4@Qu4c zAGx@Y%drHYN=r&kFW~{yo^QOhthMoVr7`w^fwr%$Vc;#k^LxV?x%Y9qwyQJ$> zPiFG#WGzmD!|x?l6n1+Qv32L+{^sw?GU-LU3Fy}!jyyueZ-maBB|>euo!^H?U;0fsUh@2;j{dE$||?dUWNih4EJC`IRNlbA^3ccdV`eo3mxfl7Esl+HVOt^1uD(p)AC^ z|KAC6))6#Oh%F#*AL%V0neG)S)c&Oh%tCFru_7yu=1qYK3#d#qgQC`Pjb@Z7>VT>^RW#^}1CdB+@#dR{a~VfT;>nuZEHXwiDXt@Q6|MXpI|xk8p}6 zt7Hel9IGx6w%-h|A%>^FNcat!Uv-J0*sdJ(Dh#+bPZ4c%yq3AhhtJT;(H96S>88O_ z+S}+RxaE1N;oU0@?}?Pxg(Zv&C_FGT&eFmyPnA`|hRyI@SvXfd+{8-X+z{Iqs*rY2 zq={Sp^)Y@L=riEYd^%knsxbEt9Jfl_=iwnb==~DZ@C33?mWvUX_TMZ`2E}*c?Bu&6 zd=aH2x4cV{?>m}kyY7(5E6$2yKb9cwzo`D=;k61C`iEh(6VlcZ1Kt52VnDO}1o2B| zBa=_zHBArsg5Pa2MamE|fb6dhQ!2F&q^u8{eY%z)Yu!e*8ve-^O|y^`7tO2_z3c=k zaw=~|*5kM3S=9D3MH=xj&DnB(uyZ6EL-(kQ<>h`@_h$DhX>D=(#5+VR^PuHFAh35I z5a>KW9Y^C)p>Eg^^&5ZF_ww}%FQgPss^hNZq>dK(S7*`oc_}@=iaH52K`wA=-rty$ zNZKk+H}k6-QPg^vGVWlMa`W-fn)I;}rj5zSWbAv1aL^4EMEdl>qPju!?f!OE@8YEo zBd1*ES9&zOcI~ngP^1pTF?0%JZNO6gLOpo9whR0K3s_$pP!Gc&1L+nh^`8>=c8i-z zS#O*V5Z2;mG-p@pfQ*)!smDq{g%v2Mm^#UUkS!F;`PK5>xs{`!Q6`AX`HJXQJ}IK# z)4m+9BNBs1`gHYm5xn37B)qUA^%rCxiBC_xMGQNx3tlS$F(9?WmcKE|1_Y?kJkfFd z@ccCNd_MfQOhIs zbTIz%^@GzRGM}@NZp_PD9UKbkj9_^-nq+^ieqVE_HM@=?IIqjgvyQ%Xg)1iZs~`VI z^SE+?1{W*$xBkPqLj}jS5@MVXgTXPl%jpSQRyKNX$g$W*Y$0t!lpmYL+C?y;$Q(FE zukUT;h%P8fM|A__1aZa1iN;Y`y-1S1=5c&_k<2V3N{Y53xbe7;V@Fz~0Ji7$Z+teq zp~#IgNFm)Fn!DwX<(n%~BWSGKl{466hinTfGFn{^ebAGo7wq}?l$$rytB zS^-yG^%-dI;NcdQjgm_ONlg3u?&pz)_qk>u`W7jr?ek=4a7V3f%dLU;c-O$>S=X69 
z(^6qgvvPv4W{o!5iIzZ18DHO!+v#|Y@=|||rEecAf>`#j@P6~5m{@WjZNa%Qf_K_qpH>h=7u<)r||vx{UA*n zMvNXaG4QRis`h7h$L?K1rGqzhu%I#hna630iNhIo%KF1E`!m*@d*mc_zr**H%`TwA zB9y9Vs)(_)(&%RXz7Iclci^jQvq z)EqgcOCDr*Mc{~P5r&dIIiR-7*IkD$+{Nt{bgF$pQ2Ds3ZXn+p5R5m`YEtaqK{ zn~`oubml|WC^-O6AE@^<<*Q@7EPKNKn*AAf?J!Rv$yMCJ{mDtp-5xo@-9}8O^~+Nx zc_lIj*t9Y2-i`t$WcmgMvWeN32q)yrnF@C3!+0OGS$U}uyOI~&Sl4LFs&V$=m+ykHL*iYe z$0BydJIwXaP1!BKN9zwTa1|r*fHRnpr($**))yV4Tsin@9>Fd<0Z_n+oG)7MX%jQ* zn|BQmwC6=v7QRHxb_o-=cPPt_@;pAHbAcObdI ztJ|9*UiJsj8+??0dLO6#r-D(fX&G=Uhln}uYrot8VG|V_d)j;A?+p9 zPpnaXb#6S-fe{M}8>Qq(WyD{PrW+|A;AZ!`VqZQbWy#FE^AKo(^;LZ}g4wbVqb6Dd zZ|1PUZBdpN9~yEjzBQkQz@erTjSe*@vW)*3%NaY1NLl`Hn}zzEbo0D z5-2(QpH;o}CHx>uR7Kcac@Pe+)9|9pE+N~h9>tJBg_C{Z-J93c#qNl^RLKYi{+I=% zjt(DSm_6;>O2SirDn=X|@JPA;0+&{4rdMWj-+yFXl|=W1p3zit77^UT98;!+%m9K?sM(ix> zG+ZO5@+)NT=0Kho*gH81zgdaV-eFbSjKi_pGtI7br&TfvK3#0vu=}A8I8^mX*RlZV zpV;e|_x!J8YUwPcc%Lub9SBmemPF;LFND|lw{wP+(>)DkR7dwMi5Tgjf>qX=2X-VL z!@9<1X7$3-)>>;v2B;$g<|56mpB1OSAZi}jKe8KVZd28J-hMRXsldWwZ3f?u+FJKl z=P7CoRJ<`~E0hG7<`%e^>`b*yX$^Exem>RYAF1NjT+vuQ+y0Ysy0mqt`M`%9>b?`T zY&o}pa5)nWx4lgH71=b6>-x@Y60W@3noV`WoJvu)i%4+~k2wScs(I)o)ipR@dyO^Q z*o8fd7(zlM^L$w=h}eZ1j!%ph%l)~vXPp{0FZd&i zTn-fUZXhMYg++l_g6yo}@9$^_8Pf61-KUjL=9K)d@`~o}iFIRVz|CZFt-(mC+`4A9 z=;pmLves6?z2OWY>{{cp)&j*V+1w;#LBv4Vu4KE1VVRsO`78a+_~FuYtKaZD5GPJTnVB~u8d5YeV~MP zt#+@*Kv@!IN-8P0(pV#|2kpw0dL zS^gi!4x7MaU-peyq(hiK2PtuJhibjp9@X@(ESHmYd6wJ>2$?VXikn$m5{{6fF0^HZAT^P_qY+ZAevsN}nd7c5q7Z~s#%ZzZ{ zbxpK2wsEQK+MN(Yr$9l)pG%@8BbbdzQgO&TNiQY++3R00!+_IvFQK zvI`#WQWZUwNnKgn4!NI!Gmr}g_(sLBb~xh#UP)XXf~+fA2mp72_=s*To+(D&47Gkr z(*I$u8F9{7WcT&4>I71|EC6$Slp;ikBH&!YDs!IF>wLcbYu7ABEzGYTd@cg9l|Fbh&l#4MmCKEQ+y zK+k=r1SJCHYOYPDd8;f3G(jmq35aiGvIszdx-4il@k>|3HH zTujoDXa`bFP(!NO)H#VKZwJ#m#ODWnvRim4pJ&UjYcbQ#8^7E+b90>2a8y^W2I+?B z2^*}sbaS0VzR3Ad|27(=9-gzuaej5~0~ccau+Pfh$NtJ;usIB@?Aq;;pz{L2*Iod> z&lmz6eOg`h&RL(>?iiE%p&#cWJLe)@%#aTG#8SeZRK3n61rvCUi_}4tHU8H?@q?bJQszuC=2lZ=;Eh2}-ZvR;_BdHE;NmR`VWP?Ve@x&e6xWfvxRRaB1td zn9QNRk<@YBD_wF!17EaoYYqSyVqH}MI?Ju@aF4C!JIj$b0&@mpHaL(qf}Dio=^+xW z{-h}NcCztaN=RO4 zb4&mxbQi2-g2$RF?~anKFMBup+W@z$>ohNREu$)-y9|;{z@&vh#vmW_WUkDC1apj5 zNy0Wq=LW{-W+&rhfimBs&5V7v&IT;tfzmSGL-zxjzP`ybjd(y zAUJXH%sYS$Ks?|Rs5h_0MY)6R--ma6@`6WmJSbFp@tAx;eM>>G6zSf9rnlzP4WbeOv2XL>%W2iR~ja9_vz5NosZsJmsh7>gL{`4)fpR`Uu#9JjgEvak4K!T|kE@R@3T) zCJy`AUisq6qP1rDn9@)1z^kvwPuEqI@!JCoUEPE&``)||rgx}ovk9w##|*{Jfafe) zcFP`&Qp&RT(*!PHI}&`@+aym<76!mrX#ni-s0o>qcST(gcK$-tGXMl%(2Hk?EeHqx z<#k64qb~MXXqV8Wf${f!gZs^i0`)P%!jF9NVa`F;#5Su$r*kb|fZ__g9Y$)SQiFCt!p%*bZs=$%zrGcRHy z*ACSB$oJKj8rDi%L+S_UMx8v^Nf0jht?G3&^?19fjsh?RK#t2nWIM@0bGDsDIa=-=E_HjX39p59azM#MI=1t^+Y5JeM zFom1me3!IGQ|#g($3vOMx^4yxN?&?W`Xnz$x579iKOjctHYmRqnV)ZFzxTAverz%I zf%Z+>Q2(|q*N0!jE8oLPyp(&UJT0sIi31;#y~3zo9`FB6`Jplc2J7wBV)>K?y`N+U zb{wUarRX&@>gRCM52oCFf=*M?P|X&3olB4KvJ0OxtzkN+<$A+aGRu}+h^Ob zk7|z1RP}+X$%rA&Sh2tB<%X|6<1m{%KCql^tC4gX2jX$vq zeV}OK_N%>*ZIEt0WTcVT8OIxFly2YA-LgBW*wjJ9J_eQNJxmWv9p_9;3V79=$j12K zk>05TWu&}#Znaifxvch;&nBj0xebk>k1j->zt}V8MU?NA!;-a3Pe;}!Gwl@4QM{Sj z=4FlX!eY2~v~Ut0xQIg*h3|4SFZm9yJA=hC)3AN%r4W--qG-Phy{y!~?3lEtN}NSt zoWEvCCo`mz-OC@p{1$${1}!l6i^7oQ5)92L>7^7|iB9rlKG$*7y|RDoJ*${0(?2N) z0}J^qd=y91d__j-U(W%}{44g9IY^GlW+0tc%7g2)KPr!g?M)vQkg05&7PhL!T!i$` zDm`J``oKt&Hho$8i9b`8RfRsCv#-7rV?34Y|B9Y^c;5g+;r@rQHz90~GA7TrX7>7s zza5Se%H_!^KMJYnch_-yA`VwjYKsJN}n@^u;FzagxQ8$`Iy@Lr0F z^Z-iwrg*$@m|K)Dg3B`qz9OE%)3n=vPMSVA6WNt=09R0}qlst@N!>o8pJ7^I#_1JQ z-@^#UkByjPc(MF|KSLv4!jn6i0vp<=dIc{|XIOk;I5UGjBl7M1nD?G zIh%C0jz$$}4rju0!MICe%kER8E{4=Ig)_Hl1{tQ0ZI~HpnITTNPCPA_DQE+`jCLz; 
zj|d;iCSJnUpyfifqZ>Y{kttRb0xL*xLV!nnaSl|xJsTso6wpGNw=%sTmGK$y-77-q z=`Up?rY)>#7!EHHeSfwO@n!8M^Z=tbH#?NaDeyU3xq9~HP%*7?XvUTlk-Li4bjrB^ z-OsH1i%T{ne9fkMD?j0+4>(I^(Mi0BZG^uZoSDF8UUt3NX)p1>=cfq7vGIU^YZmXV zggO?|>v(6WjllKTen*qk;B+Mt;Hi*YD}aB7y`&HE$DA?EI=2VLnRgp9f zigFxhbT;(76BCoeANh6v$w^m972z4lbG|Nd8w#;nBsZBZW*8Bl#Vy$~-djrHO_xgb z`JBaoKq}w=Sbp0%;^#DO+m!LXf$6O!u2@?WtvpZDxb2kd_WT#pW6q?NKR8T~X>WL9 z91>#^VgBjrf3de-?Vu>*Bvf7Z&FzBBynLpbly%1Lf9sKDwg1(Kd-M;k!IcdZrzX}w zWnMpQR*YTtHa3rY)&pwf`|tmHv@(mgzOJ>8eL14}(7*X;g$ESajWNEq;q24JgPixk z0pNH~27xAb$0}>ZAKX8Ziif7ix5T+BW6)EC*%1rTexV;11;z~WIvvkfZk-!J3S=ps z*8k*-w?H>>#MR{iNLnFnX{%N98Xw|z0^KF?-KT`;hl;HJMU_$G1TG8yXn?^=o}wl{ zD~&?8CO|OhyDu-ER9=7z72^D`=#P~@E^~fH8*zGl#sw(a_ygg^hM%pHu=)VP^Z)wN ze?XQfO7Z^|mS!q=>~Rzo?nH@_au72^Nx2kp15r}$BuH*T5zYAw-czUMAWKky7Zd`? z3AkYZ#pgjMh9#B_v;vG>*`mnhpsHEbp&FtGzA&`nGZi=nF4U7Hy76nO7P@2 zh(n&m_iQA|hs^({Qn6<1`JM<8bM`i?a3vi z3Giy0BS%NMjn%ndfsXRxQfhkLU(@T`ZJ|vLigbkhx#6WJ_F-e<+BG)dXnNC7p(j#; z`aWv9o^}X`e#3?hB4UO)q$2sZu{zrG>C_;_To6!xUJ4aRfFh8Wp0zImMkWw^LV{rd ziSU9yuFPaSeX`;P^m_jYqW2=7cHvO5%KJDV3TZ??%RWbqM|LiAhm9;$o6vP^&~$SQ z;;p|>p+PA4n}%s(KFluUA!YD=EmyjCVqEK^R?%8#*{+U68X`2vXtD;E3rtyMQ{Xi) zANb9VZFlNDD`)S$S`ZkebBJGhqi|=(r`!rt3z90U&;F@5_38PlWhVmIfl#LYodHf@ z@o`h!5c`pEW5MdfDm^(DGXM+AEYG@{wXSrJB zPpjpmG0^-6Dx`{hlMWXE>`TdJ=U%vWMyMj}T1#zg#6g4*gj0^b+yTneze&^8{F30< zuz?wJqU(|M{l^IRhxPh4j|^=)GdFDoU$w5WR&zyqb=i%`cbJg;jz|s6W|K06pL-+s z0jB&-hdS4nT*Sx?e_P(SwVP{Gh9#hh1Bldtu;o+_B)uoP9* zAvWDY3>C3dZd+esii0gAy;(V$3Vzj5Aow5blKhFiN2J+R}6jn6MyuWTH- zU2NPrGOWxfv)}g?B6_r|Q;>5qVqizwXjFd{m1)JN#bB!r3{=N^*;k$8CmfEa?ZsVd z9Aas3NAO*R2jRvxMivcpw$n!PhQx_v5PhkwUaBZUpUeMUf85W#eK~*JME|JIzA=+B z*2&v1d=(qxX(~Myh8d>QN5ws3%g$HbZJ(`;%2nq)+oS3#N6flZv-h@!RYUU@`fdLR zgc>bCSY#BCit#!wnRU~%=9H24mtMKTHUO5eb|B!3i?jV8=3h5n-42CBjK%5If4)2; z1NwBN9C$0&@~n=YXP1(10RNh&aLf5zTam2yr_*yP)vj!B<-7=x>&hMAxW~0R2C5d78^yKia};IH z*kVI)TZu+V&tq*A|FoYl4W)0cdC4u;L@C^zVI<T&aAtW-*b%_@Pw6hqcr>^Lx+(U8WP-gLFV3Ld>ZE5~bAfAa-r3(36H z_?}PgwkZ>x6dzLM2s}1ML)PDZJmR9(R08fb{t`BPV9E*GdB)Qsluw7lN=FO^!k+h> zdG5~b_9-F3XrVVo?AIm!4PDz41EIHu{gzQ1;uQTZ8NZ@F-|~Ue7}nOCHF7tm`7uZ~ zh)fvl57dL?M%n}a9-^n?7ScqoshQ~F1qv#J(FtC5VjAW?Qts(ws{gOY_A*Bd8S81c z{I{-OI3N&RP6D7;REaGnDv&M#H%R0Wr15ogDHCT|YstWxi)HQ!2WqzW0kFLSbE`5l zm^OEyQ1Lb-`Mz?dV9f#GPLIwUCc`IE8hJf?4`9;cVMUqXY#sqqL1ij5J93~Go&g!P z46Ba^2F3;Cv~Cl8DuVtxOCO7@1c?=~Q*x#o<#vfcJIEEqWj7BcUe5+-%F=ST>I>)z zgxQy8iJm|pXlQ*jip7=Ltff`7Qtaz)(Go!uMQDje+H%luC_irYJPddcy4c=8(VNt; zBQ+w7ly7|n>t=0UvhHmz%60A;UrqpFKAO`7j8l*eKp!8tGP0z`nMfxPibbh!5{-vS zgD%EJ^a5-qCa<)BvqaZf#6-3DWnjMxj0vWhPZEKeD$%*2x}iKCbQ5y^phOv>dENrS z@$-wda)1ZGWgzIX?~*fGywv#pjQ+1bT5G%df%GOZH7F9Zz-vptz)TpOEQtX6q_35k`{%9vxIBE~W!@NW1~+4xk!GKm}+lfD-bc243toNVcK2 z0U|)7#SQ`7Tf1&yv{)ucF^VRDMx#CecMY0AG`jZ7!UUqx9?: - uri = PYRO:trackhd.prototype@0.0.0.0:15236 + uri = PYRO:trackhd.prototype@0.0.0.0: Pyro daemon running. ``` \* The port and the IP from what accept the clients can be changed in the code. @@ -40,6 +43,12 @@ Pyro daemon running. * Allow inbound TCP connection throught the port +**If you installed Track HD as a Systemd service, like in the included ansible playbook, you can run it using this line:** + +```bash +# systemctl start trackhd +``` + #### In the client machine: * Set the *IP*, and *Port* of the machine that has `trackhd_server.py`running inside the `trackhd_client.py` script. @@ -72,11 +81,17 @@ optional arguments: ### Future Work: -* Integrate *trackhd_server.py* as a OS service. -* Exception handling in case of problems. +* [x] Integrate *trackhd_server.py* as a OS service. 
+* [x] Log files generated from the server and the client scripts
+* [ ] Exception handling in case of problems.
+
+
+## Ansible playbook
+
+Included in the [Utils folder](/utils), there is an Ansible Playbook that is ready to install Track HD together with the Pyro4 server script.
+**Important:** Note that this playbook has to be placed in an *Ansible folder structure*, and the variables and hosts have to be set before use.
 
 ## Opencast Workflows
 
-This workflows for opencast are a samples they how will work with **Track4KPyro**, you only have to remember to install and allow this script in each admin and worker. node of opencast.
+These workflows for Opencast are samples of how to work with **Track4KPyro**; you only have to remember to install and allow this script on each admin and each worker node of Opencast.
 

From c7e21cbdd14ebdc8a5397c70ebcf08807618fdb0 Mon Sep 17 00:00:00 2001
From: Maximiliano Lira Del Canto
Date: Thu, 2 Aug 2018 13:58:10 +0200
Subject: [PATCH 41/49] Correction to readme.md of utils

---
 utils/readme.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/readme.md b/utils/readme.md
index c031088..2f15e0c 100644
--- a/utils/readme.md
+++ b/utils/readme.md
@@ -15,7 +15,7 @@ Track4KPyro makes possible to work with any other machine without installing dep
 
 One of the uses of Track4KPyro is to be part of an Opencast workflow as an external script. Here is a diagram about how works with Opencast:
 
-![Track4K-Pyro4-Diagram](/assets/Diagram-Track4K-Pyro4.png)
+![Track4K-Pyro4-Diagram](/utils/assets/Diagram-Track4K-Pyro4.png)
 

From 0d7345873e205d657cb0f8a211d939904e372196 Mon Sep 17 00:00:00 2001
From: Maximiliano Lira Del Canto
Date: Thu, 2 Aug 2018 13:59:37 +0200
Subject: [PATCH 42/49] Minor corrrection to utils/readme.md

---
 utils/readme.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/readme.md b/utils/readme.md
index 2f15e0c..0ddf8cd 100644
--- a/utils/readme.md
+++ b/utils/readme.md
@@ -3,7 +3,7 @@
 In this folder you can find different utilities to work with Track4K, the utilities available at the moment are:
 
 * **Track4KPyro** : Python3 script built with Pyro4 to execute remotely Track4K by using remote objects in a Client/Server Scheme.
-* **opencast_workflows** : Samples of workflows to work with ssh_track4k.py
+* **opencast_workflows** : Samples of workflows to work with the scripts.
+* **Ansible Playbook** : An Ansible playbook to install TrackHD automatically with the **Track4KPyro** server script included.
* **ssh_track4k.py** (Deprecated): Python3 script built with Paramiko to execute remotely Track4K From 7cab5e9c3f709ebc5386e46f9e6d7008a16fc403 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Fri, 3 Aug 2018 16:28:07 +0200 Subject: [PATCH 43/49] Improve of Ansible Playbook --- README.md | 6 +- .../group_vars/trackhd_clients | 3 + .../group_vars/trackhd_server | 25 +++++ utils/Ansible_Playbook/hosts | 9 ++ utils/Ansible_Playbook/readme.md | 19 ++++ ...roject.execute.impl.ExecuteServiceImpl.cfg | 9 ++ .../roles/track4k_client/tasks/main.yml | 31 +++++++ .../roles/track4k_server/handlers/main.yml | 2 + .../roles/track4k_server/tasks/main.yml} | 19 ++++ .../templates/trackhd.service.j2 | 17 ++++ .../templates/trackhd_server.py.j2 | 93 +++++++++++++++++++ utils/Ansible_Playbook/trackhd_clnt.yml | 6 ++ utils/Ansible_Playbook/trackhd_srv.yml | 6 ++ 13 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 utils/Ansible_Playbook/group_vars/trackhd_clients create mode 100644 utils/Ansible_Playbook/group_vars/trackhd_server create mode 100644 utils/Ansible_Playbook/hosts create mode 100644 utils/Ansible_Playbook/readme.md create mode 100644 utils/Ansible_Playbook/roles/track4k_client/files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg create mode 100644 utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml create mode 100644 utils/Ansible_Playbook/roles/track4k_server/handlers/main.yml rename utils/{TrackHD_Ansible_Playbook.yml => Ansible_Playbook/roles/track4k_server/tasks/main.yml} (94%) create mode 100644 utils/Ansible_Playbook/roles/track4k_server/templates/trackhd.service.j2 create mode 100644 utils/Ansible_Playbook/roles/track4k_server/templates/trackhd_server.py.j2 create mode 100644 utils/Ansible_Playbook/trackhd_clnt.yml create mode 100644 utils/Ansible_Playbook/trackhd_srv.yml diff --git a/README.md b/README.md index e5ff0d8..0add637 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ Use the result from this in the j-flag make -j`processor_count` ``` -Remain in the build folder and run the following cmake command to make the extra modules. The path decribed below is an example. Fill in the directory path on your machine which points to the OpenCV Extra modules folder. +Remain in the build folder and run the following cmake command to make the extra modules. The path described below is an example. Fill in the directory path on your machine which points to the OpenCV Extra modules folder. ``` cmake -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ../ @@ -145,7 +145,7 @@ This will run all the steps listed in the manual method mentioned below. This method is for the case where the automatic method does not work. It does everything the shell script does manually. -The trackhd directory should have 2 main folders inside it: source and build. The source folder comntains all the header and source files while the build file contains all object files and executables. +The trackhd directory should have 2 main folders inside it: source and build. The source folder contains all the header and source files while the build file contains all object files and executables. The first step is to navigate into the build folder. Once inside run delete all files (if any) and then type the following command in terminal: ``` @@ -173,7 +173,7 @@ cp cropvid /usr/local/bin/ #### Running Track4K -Track4K runs in two parts: track4k analyzes a video file and produces a cropping data file in text format. 
cropvid crops the +Track4K runs in two parts: track4k analyses a video file and produces a cropping data file in text format. cropvid crops the video file according to the cropping information in the data file, using ffmpeg libraries. ``` diff --git a/utils/Ansible_Playbook/group_vars/trackhd_clients b/utils/Ansible_Playbook/group_vars/trackhd_clients new file mode 100644 index 0000000..3e39169 --- /dev/null +++ b/utils/Ansible_Playbook/group_vars/trackhd_clients @@ -0,0 +1,3 @@ +#TrackHD Server parameters +trackhd_ip: CHANGE_ME +trackhd_port: CHANGE_ME diff --git a/utils/Ansible_Playbook/group_vars/trackhd_server b/utils/Ansible_Playbook/group_vars/trackhd_server new file mode 100644 index 0000000..e2e852e --- /dev/null +++ b/utils/Ansible_Playbook/group_vars/trackhd_server @@ -0,0 +1,25 @@ +# TrackHD options + +### Pre-production and Development environments +# name of the NAS volume +nfs_name: CHANGE_ME + +# mount point for the NAS shared file system +## Same mount point of the opencast installation +fstab_name: CHANGE_ME + + + +### General Options + +#TrackHD Parameters + +server_user: CHANGE_ME +user_gid: CHANGE_ME +user_uid: CHANGE_ME + + +trackhd_repo: https://github.com/mliradelc/trackhd.git +trackhd_installation_path: /etc +trackhd_data_path: /etc +trackhd_port: CHANGE_ME diff --git a/utils/Ansible_Playbook/hosts b/utils/Ansible_Playbook/hosts new file mode 100644 index 0000000..8560583 --- /dev/null +++ b/utils/Ansible_Playbook/hosts @@ -0,0 +1,9 @@ +# In this file add the clients and the server of TrackHD + + +[trackhd_server] +#Insert IP or URL of the server below + + +[trackhd_clients] +#Insert IPs or URLs of the clients below diff --git a/utils/Ansible_Playbook/readme.md b/utils/Ansible_Playbook/readme.md new file mode 100644 index 0000000..21e78f9 --- /dev/null +++ b/utils/Ansible_Playbook/readme.md @@ -0,0 +1,19 @@ +# Ansible TrackHD Playbook + +This folder has the files you need to install TrackHD server and the comunication with Opencast. To use it first you need to have installed Ansible in the computer that will execute the orders to the remote hosts. + + +* In the `groups_vars` folder, modify the files according your existing setup. +* Add the URLs or IPs addresses for the clients and the server. + + +Enter to the folder and run the command in the terminal: + +``` +ansible-playbook -vv -i hosts trackhd_srv.yml -u --ask-sudo-pass + +ansible-playbook -vv -i hosts trackhd_clnt.yml -u --ask-sudo-pass +``` + + +Ansible will install TrackHD without hassle. diff --git a/utils/Ansible_Playbook/roles/track4k_client/files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg b/utils/Ansible_Playbook/roles/track4k_client/files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg new file mode 100644 index 0000000..a48de37 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_client/files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg @@ -0,0 +1,9 @@ +# Load factor +# Default: 0.1 +# Note that the load on the system will depend on which command is executed... +job.load.execute = 1.0 + +# The list of commands, separated by spaces, which may be run by the Execute Service. +# A value of * means any command is allowed. 
+# Default: empty (no commands allowed) +commands.allowed = /etc/opencast/trackhd_client.py diff --git a/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml new file mode 100644 index 0000000..f5136b4 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml @@ -0,0 +1,31 @@ +- name: Install pyhton 3 and PiP + yum: + name: "{{ item }}" + state: latest + with_items: + - python34 + - python34-pip + tags: trackhd_client + +- name: Install Pyro4 and Argparse libraries + pip: + executable: pip3.4 + name: "{{ item }}" + state: latest + with_items: + - Pyro4 + - argparse + tags: trackhd_client + +- name: Copy trackhd_client to the admin and worker nodes + template: + src: "templates/trackhd_client.py.j2" + dest: "/etc/opencast/trackhd_client.py" + mode: "755" + tags: trackhd_client-scripts + +- name: Allow trackhd_client.py to be run by opencast + copy: + src: "files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg" + dest: "/etc/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg" + tags: trackhd_client-execute diff --git a/utils/Ansible_Playbook/roles/track4k_server/handlers/main.yml b/utils/Ansible_Playbook/roles/track4k_server/handlers/main.yml new file mode 100644 index 0000000..0f6cd88 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_server/handlers/main.yml @@ -0,0 +1,2 @@ +- name: reload systemctl + command: systemctl daemon-reload diff --git a/utils/TrackHD_Ansible_Playbook.yml b/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml similarity index 94% rename from utils/TrackHD_Ansible_Playbook.yml rename to utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml index aac20de..f508660 100644 --- a/utils/TrackHD_Ansible_Playbook.yml +++ b/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml @@ -153,6 +153,25 @@ args: chdir: /home/{{ server_user }}/trackhd +- name: Install pyhton 3 and PiP + apt: + name: "{{ item }}" + state: latest + with_items: + - python3 + - python3-pip + tags: trackhd_script_libs + +- name: Install Pyro4 and Argparse libraries + pip: + executable: pip3 + name: "{{ item }}" + state: latest + with_items: + - Pyro4 + - argparse + tags: trackhd_script_libs + - name: Copy trackhd_server to desired machine template: src: "templates/trackhd_server.py.j2" diff --git a/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd.service.j2 b/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd.service.j2 new file mode 100644 index 0000000..e9e5bc5 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=Track HD Server +Requires=network.target +After=syslog.target network.target + +[Service] +Type=simple +User={{ server_user }} +Group={{ server_user }} +WorkingDirectory=/etc/ +ExecStart={{ trackhd_installation_path }}/trackhd_server.py +StandardOutput=syslog +StandardError=syslog + + +[Install] +WantedBy=multi-user.target diff --git a/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd_server.py.j2 b/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd_server.py.j2 new file mode 100644 index 0000000..0611100 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_server/templates/trackhd_server.py.j2 @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. To run in a standalone server (With Track4K and Cropvid installed). 
+# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + + + +import Pyro4 +import subprocess +import os + +@Pyro4.expose +@Pyro4.behavior(instance_mode = 'single') +class trackhd: + + def cropvid(self, input_file, output_file, track_file): + cmd = ['/usr/local/bin/cropvid', input_file, output_file, track_file] + print('Cropping file, please wait...') + print('Input details:') + print('Input Filename: ' + input_file) + print('Output Filename: ' + output_file) + print('Track File: ' + track_file) + while True: + app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) + if app.returncode == 0: + break + return [app.returncode, app.stdout] + + def track4k(self, input_file, output_file, width, height, mode): + print('Tracking File, please wait...') + print('Input details:') + print('Input Filename: ' + input_file) + print('Output Filename: ' + output_file) + print('Desired tracking resolution: ' + width + 'x' + height) + print('Track output mode: ' + mode) + if mode == 'txt': + output_track = output_file + '.txt' + else: + output_track = output_file + cmd = ['/usr/local/bin/track4k', input_file, output_track, width, height] + while True: + print('Processing video') + app = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE) + print(app.stdout) + if app.returncode == 0: + break + if mode == 'txt': + print('Executing Cropvid') + self.cropvid(input_file, output_file, output_track) + print('Crop succesfull, output file: ' + output_file) + return [app.returncode, app.stdout] + + + +# def getNS(): +# """ +# Return a Pyro name server proxy. If there is no name server running, +# start one on 0.0.0.0 (all interfaces), as a background process. +# +# """ +# import Pyro4 +# try: +# return Pyro4.locateNS() +# except Pyro4.errors.NamingError: +# print("Pyro name server not found; starting a new one") +# os.system("python3 -m Pyro4.naming -n 0.0.0.0 -p 15236 &") +# # TODO: spawn a proper daemon ala http://code.activestate.com/recipes/278731/ ? +# # like this, if there's an error somewhere, we'll never know... (and the loop +# # below will block). And it probably doesn't work on windows, either. 
+# while True: +# try: +# return Pyro4.locateNS() +# except: +# pass + +def main(): + +# getNS() + + Pyro4.Daemon.serveSimple( + { + trackhd: "trackhd.prototype" + }, + host = '0.0.0.0', + port = {{ trackhd_port }}, + ns = False) + +if __name__=="__main__": + main() + + +#app = trackhd() +#app.track4k(input_file='/mnt/opencast/4k_sample/presenter.mkv',output_file='/mnt/opencast/4k_sample/tracked.mkv',width='1920', height='1080', mode='json') diff --git a/utils/Ansible_Playbook/trackhd_clnt.yml b/utils/Ansible_Playbook/trackhd_clnt.yml new file mode 100644 index 0000000..cf89592 --- /dev/null +++ b/utils/Ansible_Playbook/trackhd_clnt.yml @@ -0,0 +1,6 @@ +- hosts: trackhd_clients + become: yes + become_method: sudo + become_user: root + roles: + - track4k_client diff --git a/utils/Ansible_Playbook/trackhd_srv.yml b/utils/Ansible_Playbook/trackhd_srv.yml new file mode 100644 index 0000000..d87da8b --- /dev/null +++ b/utils/Ansible_Playbook/trackhd_srv.yml @@ -0,0 +1,6 @@ +- hosts: trackhd_server + become: yes + become_method: sudo + become_user: root + roles: + - track4k_server From ffac72de997678076f356385b8a059e08557967b Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Tue, 7 Aug 2018 17:30:06 +0200 Subject: [PATCH 44/49] Improve in playbook of trackhd server and documentation --- README.md | 6 ++++++ .../roles/track4k_server/tasks/main.yml | 7 +++++++ utils/readme.md | 13 +++++++++---- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0add637..ce5e037 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ Track4K is an open source C++ project that takes a High Definition video of a lecture recording and then produces a smaller cropped output video, which frames the lecturer. This is done using image processing and computer vision algorithms to track the lecturer and uses the lecturer position information to pan the virtual camera. ## Getting Started + +> If you want to use right away go to [this link](/utils/Ansible_Playbook) to install Track4K automatically. + These instructions will help get the program and all its dependencies set up on your machine. > Please take note that this installation guide It was made for use under Ubuntu 16.04, some changes may apply for other distributions or Ubuntu variations. @@ -9,6 +12,9 @@ These instructions will help get the program and all its dependencies set up on > All the commands are run as normal user unless if its written as super user "\#" + + + ### Prerequisites These instructions are written with the assumption that the project will be installed on a Linux-based system (preferably a Debian version). 
**Track4K has been tested on Ubuntu 16.04**
diff --git a/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml b/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml
index f508660..ab6835d 100644
--- a/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml
+++ b/utils/Ansible_Playbook/roles/track4k_server/tasks/main.yml
@@ -172,6 +172,13 @@
     - argparse
   tags: trackhd_script_libs
 
+- name: Register libraries to the system
+  command: ldconfig -v
+  tags:
+    - trackhd_install-basic_libs
+    - trackhd_install-opencv
+    - trackhd_install-ffmpeg
+
 - name: Copy trackhd_server to desired machine
   template:
     src: "templates/trackhd_server.py.j2"
diff --git a/utils/readme.md b/utils/readme.md
index 0ddf8cd..44d4458 100644
--- a/utils/readme.md
+++ b/utils/readme.md
@@ -4,7 +4,7 @@ In this folder you can find different utilities to work with Track4K, the utilit
 
 * **Track4KPyro** : Python3 script built with Pyro4 to execute remotely Track4K by using remote objects in a Client/Server Scheme.
 * **opencast_workflows** : Samples of workflows to work with the scripts.
-* **Ansible Playbook** : An Ansible playbook to install TrackHD automatically with the **Track4KPyro** server script included.
+* **Ansible Playbooks** : An Ansible playbook to install TrackHD automatically with the **Track4KPyro** server script included.
 * **ssh_track4k.py** (Deprecated): Python3 script built with Paramiko to execute remotely Track4K
 
 ## Track4KPyro
@@ -85,11 +85,16 @@ optional arguments:
 * [x] Log files generated from server and the client scripts
 * [ ] Exception handling in case of problems.
 
-## Ansible playbook
+## Ansible playbooks
 
-Included in the [Utils folder](/utils), there is an Ansible Playbook that is ready to install Track HD with the Pyro4 script for the server side.
+Included in the [Utils folder](/utils/Ansible_Playbook), there is an Ansible Playbook that is ready to install Track HD with the Pyro4 script for the server side.
 
-**Important:** Take note that this playbook has to be put in an *Ansible folder structure* and the variables and hosts have to be set before use.
+**Important:** Take note that this playbook is already in an *Ansible folder structure*. The variables and hosts have to be set before use.
+
+To execute, simply run the following for the clients or the server:
+```
+ansible-playbook -vv -i hosts [trackhd_clnt.yml | trackhd_srv.yml] -u [user_with_sudo] --ask-sudo-pass
+```
 
 ## Opencast Workflows
 

From c162ec4543986f512cbc8b4b853e99772e62ce6c Mon Sep 17 00:00:00 2001
From: Maximiliano Lira Del Canto
Date: Tue, 7 Aug 2018 17:30:55 +0200
Subject: [PATCH 45/49] Bug corrections

---
 README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/README.md b/README.md
index ce5e037..1a92941 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,6 @@ These instructions are written with the assumption that the project will be inst
 * git (2.10.2 or future releases)
 
-<<<<<<< HEAD
 
 ## Installation of the requirements
 

From 4418c8de78c4bcc98812677329aa1da0d4d70b9a Mon Sep 17 00:00:00 2001
From: Maximiliano Lira Del Canto
Date: Wed, 8 Aug 2018 15:37:16 +0200
Subject: [PATCH 46/49] Improve Track4K scripts and Ansible playbook; clean up the code.
--- .../group_vars/trackhd_clients | 2 + .../roles/track4k_client/tasks/main.yml | 6 ++ .../templates/trackhd_client.py.j2 | 91 +++++++++++++++++++ utils/Track4KPyro/trackhd_client.py | 15 ++- utils/Track4KPyro/trackhd_server.py | 8 +- 5 files changed, 117 insertions(+), 5 deletions(-) create mode 100644 utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 diff --git a/utils/Ansible_Playbook/group_vars/trackhd_clients b/utils/Ansible_Playbook/group_vars/trackhd_clients index 3e39169..522dc42 100644 --- a/utils/Ansible_Playbook/group_vars/trackhd_clients +++ b/utils/Ansible_Playbook/group_vars/trackhd_clients @@ -1,3 +1,5 @@ #TrackHD Server parameters trackhd_ip: CHANGE_ME trackhd_port: CHANGE_ME +Log_file: output.log +Log_folder: /var/log/pyro4trackhd diff --git a/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml index f5136b4..dadce0a 100644 --- a/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml +++ b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml @@ -29,3 +29,9 @@ src: "files/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg" dest: "/etc/opencast/org.opencastproject.execute.impl.ExecuteServiceImpl.cfg" tags: trackhd_client-execute + +- name: Create Log file folder + file: + path: "{{ Log_folder }}" + state: directory + mode: 775 diff --git a/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 b/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 new file mode 100644 index 0000000..5b40d37 --- /dev/null +++ b/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# Track4k Script to execute Track4K and cropvid from a +# remote machine. For Opencast execution +# Author: Maximiliano Lira Del Canto | RRZK University of Cologne, Germany + + +import sys +import Pyro4 +import Pyro4.util +import argparse +import logging +import sys + + +sys.excepthook = Pyro4.util.excepthook + +parser = argparse.ArgumentParser(description='Executes track4K and cropvid in a remote machine') + +#Argparsers arguments and description + +parser.add_argument('input_file', type=str, + help ='Input filename') + +parser.add_argument('output_file', type=str, + help='Name of the output file') + +parser.add_argument('width_out', type=str, + help ='Output width of the video') + +parser.add_argument('height_out', type=str, + help ='Output height of the video') + +parser.add_argument('track_mode', type=str, choices=['txt', 'json'], + help='Mode of the tracking, txt mode: Track + Video Crop. json mode: Only Track in JSON format for use in applications that can use that info') + + +args = parser.parse_args() + + +class StreamToLogger(object): + """ + Fake file-like stream object that redirects writes to a logger instance. 
+ """ + def __init__(self, logger, log_level=logging.INFO): + self.logger = logger + self.log_level = log_level + self.linebuf = '' + + def write(self, buf): + for line in buf.rstrip().splitlines(): + self.logger.log(self.log_level, line.rstrip()) + + def flush(self): + pass + +logging.basicConfig( + level=logging.DEBUG, + format='%(asctime)s:%(levelname)s:%(name)s:%(message)s', + filename="{{ Log_folder }}/{{ Log_file }}", + filemode='a' +) + +stdout_logger = logging.getLogger('STDOUT') +sl = StreamToLogger(stdout_logger, logging.INFO) +sys.stdout = sl + +stderr_logger = logging.getLogger('STDERR') +sl = StreamToLogger(stderr_logger, logging.ERROR) +sys.stderr = sl + + + + +# Configure IP and port of the TrackHD serverself. +uri = 'PYRO:trackhd.prototype@{{ trackhd_ip }}:{{ trackhd_port }}' +trackhd = Pyro4.Proxy(uri) + + +#Run the application +print("Track HD Client Started") +print("Server IP address: " + "{{ trackhd_ip }}") +print("Server Port address: " + "{{ trackhd_port }}") +print ("") +print('Input details:') +print('Input Filename: ' + args.input_file) +print('Output Filename: ' + args.output_file) +print('Desired tracking resolution: ' + args.width_out + 'x' + args.height_out) +print('Track output mode: ' + args.track_mode) + +app = trackhd +app.track4k(args.input_file, args.output_file, args.width_out, args.height_out, args.track_mode) diff --git a/utils/Track4KPyro/trackhd_client.py b/utils/Track4KPyro/trackhd_client.py index caa8d43..9cab92e 100644 --- a/utils/Track4KPyro/trackhd_client.py +++ b/utils/Track4KPyro/trackhd_client.py @@ -11,6 +11,13 @@ import logging import sys +#Please Change this values before the first run +# trackhd_ip is an ip string. +# trackhd_port is an int. +trackhd_ip = 'CHANGE_ME' +trackhd_port = 'CHANGE_ME' +log_file = '/var/log/trackhd_client.log' + # Pyro 4 Exceptbook: Sends error messages from Server to client sys.excepthook = Pyro4.util.excepthook @@ -58,7 +65,7 @@ def flush(self): logging.basicConfig( level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s:%(message)s', - filename="/var/log/pyro4trackhd/trackhd_client.log", + filename= log_file, filemode='a' ) @@ -74,14 +81,14 @@ def flush(self): # Configure IP and port of the TrackHD server. -uri = 'PYRO:trackhd.prototype@{{ trackhd_ip }}:{{ trackhd_port }}' +uri = 'PYRO:trackhd.prototype@trackhd_ip:trackhd_port' trackhd = Pyro4.Proxy(uri) #Run the application print("Track HD Client Started") -print("Server IP address: " + "{{ trackhd_ip }}") -print("Server Port address: " + "{{ trackhd_port }}") +print("Server IP address: " + "trackhd_ip") +print("Server Port address: " + "trackhd_port") print (" ") print('Input details:') print('Input Filename: ' + args.input_file) diff --git a/utils/Track4KPyro/trackhd_server.py b/utils/Track4KPyro/trackhd_server.py index ce97c94..2e51efd 100644 --- a/utils/Track4KPyro/trackhd_server.py +++ b/utils/Track4KPyro/trackhd_server.py @@ -9,6 +9,12 @@ import subprocess import os +#Please Change this value before the first run +# trackhd_port is an int. 
+trackhd_port = 'CHANGE_ME' + + + # Expose the trackhd class throught Pyro4 interface @Pyro4.expose @Pyro4.behavior(instance_mode = 'single') @@ -63,7 +69,7 @@ def main(): # Allow connection from any IP of the server host = '0.0.0.0', - port = {{ trackhd_port }}, + port = trackhd_port, ns = False) if __name__=="__main__": From 9e3fa82927bf03af90a6a14e72096d96213711b7 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Wed, 8 Aug 2018 16:30:15 +0200 Subject: [PATCH 47/49] Minor adjustments for release --- utils/Track4KPyro/trackhd_client.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/utils/Track4KPyro/trackhd_client.py b/utils/Track4KPyro/trackhd_client.py index 9cab92e..69428f6 100644 --- a/utils/Track4KPyro/trackhd_client.py +++ b/utils/Track4KPyro/trackhd_client.py @@ -12,11 +12,9 @@ import sys #Please Change this values before the first run -# trackhd_ip is an ip string. -# trackhd_port is an int. trackhd_ip = 'CHANGE_ME' trackhd_port = 'CHANGE_ME' -log_file = '/var/log/trackhd_client.log' +log_file = 'CHANGE_ME' # Pyro 4 Exceptbook: Sends error messages from Server to client sys.excepthook = Pyro4.util.excepthook @@ -81,14 +79,14 @@ def flush(self): # Configure IP and port of the TrackHD server. -uri = 'PYRO:trackhd.prototype@trackhd_ip:trackhd_port' +uri = 'PYRO:trackhd.prototype@' + trackhd_ip +':'+ trackhd_port trackhd = Pyro4.Proxy(uri) #Run the application print("Track HD Client Started") -print("Server IP address: " + "trackhd_ip") -print("Server Port address: " + "trackhd_port") +print("Server IP address: " + trackhd_ip) +print("Server Port address: " + trackhd_port) print (" ") print('Input details:') print('Input Filename: ' + args.input_file) From b54db2b0c0eba98ddc2eb0dfe45fcdf2fcec1f5a Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Thu, 23 Aug 2018 15:52:06 +0200 Subject: [PATCH 48/49] Remove of the deprecated solutions in /utils. 
--- utils/assets/track4k-opencast-example.png | Bin 30342 -> 0 bytes utils/deprecated/readme.md | 50 --------- utils/deprecated/ssh_track4k.py | 73 -------------- utils/deprecated/ssh_track4k_v2.py | 117 ---------------------- utils/readme.md | 7 +- 5 files changed, 6 insertions(+), 241 deletions(-) delete mode 100644 utils/assets/track4k-opencast-example.png delete mode 100644 utils/deprecated/readme.md delete mode 100644 utils/deprecated/ssh_track4k.py delete mode 100644 utils/deprecated/ssh_track4k_v2.py diff --git a/utils/assets/track4k-opencast-example.png b/utils/assets/track4k-opencast-example.png deleted file mode 100644 index ffe528012f25f7d6a30024d461720b3209a9d078..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30342 zcmeFZcUY54*Dss^3WyX7f=Ce&k)j}q6oIHHs5Fs|G-ab;=oo=e5|v_u=$0l`M0yD* zp_gn>;|2w!BuMWFp+g7|lAH;;w?6xQ-*dj}eCJ%}JJ85wPt55q!Q6NbklczDC^9k9CSE-RSpc=?f42;trgJKF5zw1gFgT z8eL%nQvBcd=h5-@it1(bijM3Irwc5)Ed(MWkO763NL->p!RJLME{Aex0D}i*lVhyN z%N2BgWrRsjjb2}F3&E2-VGJs2z9okB2@TPgh3?@hir(o6WmAyGvvF8mLT}mpeE0vw zS1zzn+3)>8cmy252>bC@z-Kmv3=XT8&>cUXuqm8Ig9rbm44dx)nV_*(em3PUfgI%E>Y6b;;hRHjYI$=(>)~0kA2Y)(aQBk@-6`X#$n}fO)OuNO_tNpyO<)(^`4S5d|=j_w&<_*Ms7B-tOYYcm^wXe zhBn_5hIw2*0c&ELS(_W9j26io$tuk02WU#9+xzB)BG&BREs?IpLwL$>p<^1NZ~qo@ zC%=WuzLBzVmpM29gV)t9%hye0(R~Knwq3EIZMFibfY&p&`ewE$ODYB6x34P3YM|EcGGUZQY`(LoyMfaDn=h_ zY0ZHFV~#iuE^vmd`Bi_Nuo`o3Tw#tf8R;#kNz`gaYV(e=HK)2cSP(-9MaD!{HtUP( zZ1z(Tqm!lKz{?4f5Z3 z(pM{>A8&543J7gg)R2~9&lk2IdpX{r&iEkFU#-rt+*Z^WxN3d()m9D;tGMk&K4a)_ zNf2@zGaWUNMYu}w3!(FGT&E3ecy7qosp_ z>s=ZkhfO-|UlGjQc?&kk$CmFj)DWmkd%_)dbVvN?(&&z}6(6=*O@vzpESqSDh)3=& zLwTZOFo?;^UM{aYq6Kor`(kx8S;u;zpyODhR5EDITxG5|$Y4_MXCRqZP$QP8F`@K2 z?J3~^Gi$Pn-J3V|hM165& zOq5MwFoiw8IMseNW{+xJ91G)+zag#FIporexQ%^IM@z_`F%d()Bj_vbzW-7Kud&BI zCg=W|noj%DF#N?%`>{q+0yU5NLc}qqkz~qNUfx#$wXn{TgQd)!-y0dLQ((K05_f(S zte_1UwQ034pcZ^VmxQ0QC+lyql3rcQiF~27Ru@IisH&6@FNHbmLs8XhiMlPS%^>5E5 zrnj@4pe{Nc$j;ey-OgO9OJ+FMvx^F605fwDia=Zmr> z+px=fUNt^0nlP>k-0oKxQ+@d%I&04b<4EJw#`Nf;#fL~8ki34G2wQ=#3SD1wDNv!!uOzmNj4Nw9&eGsj&tyl%%* zot->hPAdLNyl&i~plw6NVeL!40gZOtftdz*`!sYz#GJqZMwP_GKEl?8BAstJTbnVo zCIo&Q?X}L}u-SNBByOJ4W`V)W-EA(OKfKk-IoiDDhWW<3Kdy|~2Bf~p4j|)3l@!e3 zMuxma!#1V_VskEgJ~G!iI9X@tiq^I%DYpEyXBLFn`z&Oey4P{c>o5hb`u>ZqX_kOE zPudmO*lfqyStpDMTRxF1{|aSyeVS9OYpC==d0LEXc)<=UrxwW{=su_k0#12^k||r# zPGt0aHs2W^h{YZuY|EDf-FTyo7N6~rHrR~N6Y6LU+~Kuilbd7RHg|2eH(tW4oEXEp zU2QhejqglPo;p3R1toU*#-HV4%fHG~G(6&bqAeOCZ=Bd9c{wS{%F=o;$mJcef^ZmW zL4M8=aW4r-gu`D^$#|0_<++sN%k=}v4*)MNm2qj%1ZKa-9+RHB=;Z88A!Z50?sxn2 zvA`+yY**#@8?KF<^SxJ1x9HDDW1g|6t#&u5ka&nB=+*u#TYc`y#ssT{B=2GxFfIRN zO6Aq8=4(6r1RVRW1EUv+aXeh(#9^_w#!T^%IL23wOM@yrRymR-J6<<&TCO(AX-?)L1B{bEZ| zzuu*~!g7lCZ21FB8ODY&sfJFeTbWwub6{4%!qn@AR2i8L8y=0QmejNHskCpG-4Dk% zlJ;U{?R#3Yq{hjgNp^EfS3_uXL5P%~=?(u{8PJ;n;URMp^yiyFM2w|mtGZNbbHUDoofyK>1iA5y zX>F;nOQ+PIi`jXLw*A@>))k8bO(AQz&WY8JBpEy#a5On(XDnZ=`@2;R`4uQPDr}8p z$;b*MYmsQmsSs)lxo-TaOq9i?Yv)Y0k_$txuC0|j^ujFRjo0|XhDQo!7Eh{&%1K@c zs13CV=rGf2lXGwi$>+GTuqJAhao5Cr3^(fZ0WU8hqYXSI0EyX+GDr9#A+%X+73I%| ze-FmOdTBv50UV|^d}bQQB9C&yyhX0@Jq4~)Cv`q>`ZMxMj>Jri&MUP9xjD=FA~VA! 
z`SgZMYUaiWH^Yc0L#83Y1*CxdMr{c#^D8V8Ks&opAwGBA+6MeiFtO-nZttbkH%WUp67~b zwm}k&4|08?h}p%&!*6_b6j{5hGOZD=JBBNWDu|;b%V8s89Ok0&M(k`q0_(~#IjE_q zr1`y9qN_=#1dnk#P;NKu6pbM?X+27_woaFeN{}O2aGE-0Qeu2Ryv#F@GPw56k}oU# zY^9IH6z-2rL?2w$26(3LOh;|~x@E!nz7}g{4##EnY1wq_l!=w|nPnf$)XThDKOLw= z?YJq)RQGA;$1mXw+EeN|< z#8(%t?d|nRMyWJYJ*5)%E6c!&``2H~r9TbVy)owMN2pI#rXP)p%GGm>$;}c-+2_b> z2ov4b{vj&q5!tCTz33nDNZ&^hD;>$pe6MZNlNX)56YK#7N&C`u*~I{DuDw?Yrg~yq z-n3X3XtH)!1fp6ke+eFau?)YFvsR8Fd-F>5RF z4->wy#ZqanM`YJX-l9evSjJ;+ji~XFxK`Q1GV=)YF{`zfRhy;Su?l4I%9}m$=X;^Q zk};Ln!Lh_^Aoyv>h&$BA9J^PL^c*bT+VOB*C-!{h+`JG?i98OMf=1G1%%Ata9?VxBX%X=m^q#|!yYFo%F z!L?GT65ZPNmW3PCH-|`<$DYbLT;;1+Obtj)qm*2_KwQ6l_Sflm^8+Y!eM~?mxcHf_ zgq`!UdC~#df2F<1k7D%pjmaV$TuZ*_MO|v5UU&K8_p^^pZpkwHO@D`_P0>a=>_xHt zF8xi;?3Wl=9&1h!)Jeo}E!m_t9{$(2wsc$1|{b%n0HY1z;`SpG^Uh3I0(E zgYcj)a#%qO0JL=62HgXB4lrH(S@ahCZqVe=jvq~ASxpZ7XkyB0a_&cy&0ZdYUN)Ps zdf5qj*=)k<<>0>u`aiBZG;LBj=o#DK)iPbynu2BTHI9=6OkFt=eh@4e8*~qzetWM^ zUoQAm{+xILtQ!ZL0_(MdU@a{@BUw|}557x1sss2|FUr50{97;oHirMtZ<1l7+pt^h zOm4p(=L=xx7xs&QO&TB!NYwdndRBz6f!K?iqGm71vXkRhi)E4Z9 zix=E3oaG&Eu`h1fhV}$h;1{n^GlVKbV_c}ZxtKb-_Ix1Smo1-y4 zAa%BUV}8BZ94&f2Tr(vry-#$hZ0h0BZ|3!xVaW;M>$CvFHh4Bt;25ZPq*$Ww9m^m5VMV- zS4v^e`-d?i7@XjAW@p3evBo=8BmSZzSNQknNmoqpV6l6sa-wee&ijP{%r5QFBM7Cp za}xV@IcD4~x-pGNYRhPc5-(pafp6@aIu=yJ*AoMg-D9b&x2s$PB> zwhRM06cK=ECqT#tJ}6k(bE-<;{>rb(6sV8O4z-gmLb{cZ-fI6Qd$%!=Ex(xLeJRd} z)5?hyrC~<;nlNtJtd4CizUZk_M+|llt&p)Bp-0ltKLwZOE;{-uRJ2OF*Tr1Ew<}^^ zD(bg{jP|=-hur++Rsqi%Src`v^0o&4mKeNmZYe$-)>-@{Ga=F+GrYSg&7G(>dok#U zv&fi+X!~J>pVA&R1g2;T3!9h3Wi@}+=~6x+U92x5@uRm8G|mWn|CKW;iEu&-|45RE ze5d7Ez(YhH$=vyhnZ|^^gnmIo=1n-PgpNt$AElQLBdxHEWTgY!1B6|$Rm(f)qXjbU zu~V|iDQDGIG>j(O)}TZkWYty+Q9+AB-^6ZHE}=>3Z}JmenQ}e-|BVC;-blisjV`*epwcKu&p@*$c1#4^NGr3I2^s9)_jKdWQ)B7* z{QW-Y9x2vi;8eUYycf0?40{7Kq6#p|OC~^@gZ3g%(^^4t-)jUsUYB6Y&wIhlE@ZYZ zKzToMs_bu)z|l;MUDZkeZs}Fq6azy3E%hcd{baD6a{kw4Y{1eP60;M-?{fN(>i&sv z3jlof0aA!HVoh=Ox+ee7+}Z*qthqfSwIS7`zP7@lbR9oZHl61Ss62KbQ{In(XXP`Z zuUwMN=-`ckz;CNXW2F_JEAwC!^FrEiwCAaNML&y7^!9$NfpAy>PGqwwhn1h87NU8lBVx#(a=4(QoIir$Pv+n%GLYnO+-}MRU50U3d5W&Xzee_k#Mm1jQ$WQ& zyJ+`qSIo?%Edb~68;jkIQ(Z;WgthCcH6E?Go_k?Fk=u?veuG!qw6wnmj< zS5O*D;_M;+-v|la=y9)hM^iHxO|ld~TJ?8m+n10`|sa)uTE`lFZ7ikPhj!>vPPp}qUR3kNT; zMsgQlayfoO#5S%Q`lVPUcWTLA89!y@rn;i?M9W^Zqcyic!nhUPdxc$jDVDug{D<0N zG>31!2q)c_YhPnq7A_Z3n%!72bQ
zTk7L@^T+ba>A(cVOz~t z|1d}f;j{3mo!d5jT)E2u~~8 z<@9X-(1o!r2X3Z%@%E|0pO#IIJ-;y(Sff?k(yC<@Mjeeus8O$twoLuea^lEOf7aVU z?Rl6WqA)}A8jVhSFJJd+>=|uE(+k$9w;%R;2(J{Pxo~W({p#Y%;}afciIB&KHf?~x z3EZE@6s=&vpYMu=l+JTWjH@l0V)to1(ERM{V^CuJd~O%DhcEc`yfSaZZv?I)R&mSb zLGcnglvz8V8+r(AhVv4&qebJz10Lcj@@XGUkuTaxxlkC>@!=7ZID57fpD1fCcz9+9 zF0)7Mltpjm6OkS&sZ@K!J~j|%G{TneTc5WGu*=eu0Bq@`dwgdYwZPT^WXGW6PUW-a zBS~)=6zvt9q4xS}4{*7gqf`*e(T1eh@rRK*`7B(bW_`-JtGcRDBciY znNzcs*nn3Tr?vhjURxLwEF^^yY)H$}4*eQVwv8&sG|h|HU}Z8$P-4Z9xk;gG|Ekti zOl;o$9K-TC+VV~fEsGw+r1S2tg>^z>WcLx3xWEeixzsz2yui>lg^(!FA0grb7^7tM zGM%@uHg3^`#pTv-zqx`#+JL9X#+2(O`yWk}3vVqkYLDs?E%ZY4K5;e|8!AdAU01@n zaHwN$io;3eh1MFi^GMB`dmoE1UTT*W=$xv?S&YLD`B`QsXZ2@$HE5DC>+79?7!Is_5ov zBXHIZRnL`um<^lvUPX%8$0(Jn z)VRlez)Xv7E8>$0(VI3Ft(%6yLzVf>{gqd&ya&{zXU*^(B5NJVt9>{gnj!b6Mz|a; zM#%#va8Rs@D!Wk_Nzkxp+C`90ue1oMu008OgJ62#W#&lxk+!w;1=xs{&JC$`N~*V5 z@1Kep4G+C?Qr-v&U5_GXmel80npSCld65`l)b8`L9VfEa@jdQ0)*j8=Vucv#NbuN$ z()Odb+{o+S=a{TPK<@waLiJvfZ&b|sfL_(|o#evU)j$PtUm^R6Lp7nUl(`)Dgi=0> zID5{Os$F&Z00y^2&WK~G>+F!H?iC0+t_^@p9>4?Z5}wTDnnFgwTD(2ONttgvUX{_e zH_lfLk?-H!<$%g%^rCj*@7eNKFPb*xF1_VVu3T4pyNo~l-c-u83OQE5Z{9!J} zH%evqJ5eK!i&?p+76biLGq=oRh9pip(+h4N#9wml-Ce)77dA2`65iaiwg5Ie3koe< zT5D+uW_l9sxMCXXDl8*Ws<9}CBQ2U1(}5DX`{9AQXcglI=EYOv8iTK(7JXl>LOtg# z2*26S)a7YJ8(*vHQr80Y5K&zX51T!b!k5HQN+b^>^gO`XcO;JIHWYe#P)0%alS2hmLn zb?i4ms@)KgIvo6<=SVyGS&0s{Odp#SSH{re|6vrc$Eo-F!fdiu46>BFwL-cn6@**# z2q%%fCnLFwnga~IiXr4KjYEXYcTZl(wPlp0)~f7NBjw=UUXKWD_tQ>oquMB0vDX)X z+J_=jrtMtrODFaDw-Tflc-TH27`aK{fEESi`8mVkb$jZR>bOB-0B(%vkzmr!?M%uz^06X>oO%dN}2q9KEq8~8?HPLom% zN(~yMWG;O(Yc4LR{j-ds`1wSLa`2bCCuu%$FR&eY434!l*S5nVpo*; zI&EpH_zmC60bUf+UmmYv;BV-)qL=~vU%9xxwPxYLJc`l>VK=e*QYY<#{R{yj1IMUaSYWSSWr#Oq5L&6IZ_M7TA$}W; zP&&b$Gl_D)&E|=}_h^MVlXtZX)$Z3h;+XTcfPuJ>ZSa#2F zwqc>(r0=MA3*OJ;^kUGIa@~5sxnIyQcn&|^%nNK6qH6S476j2k){4T12wPV@crP&Cm(ooz0QAjgYyvV+4l|pEDzF==uw(Yt zF7#HTW@R-`l4;j<=tb_xeP-@aNiYhA+yEV@h3?(3H9wv&2V zDk)bfEi>!bo91=ow1FEj_GjNrgd|#zZ1mwNh;=V~jmm!YPd3y#P5Q5|Uj;Et4#l54 zqjG9ZSp(jQs^RWY+}dm!CD@=0-SZN}&F~)x#P+d@shWYuOf09}``G|@FZ(%?k%;$Qrjqjok=9H+uVbqg8QTq?5Tt_A5^5b9#j z|3JKgP0L8FyhZVt_dWV}^RrpXzvn(FooZ`Z{=;-i_F>}a^HC@WlN~^UWfN-4HFD+d zJRNfyZWnB)>k{+D+PNU_XblXoX(()7=uMDq1Yv{w58!6|S+d3e+M7v&nsb-m8KMWe z)nw(=paYM2f-)Sg&3+&M==aue3BJ0t)07evq7Y#L&d@$3X%O^@2!L%;^H+CJuFAAifzG0qG zuSPh*%xItmYeIhwoLf4CYl-MDTfTGK;#gjk3F%}_(C^na;X5Sg2j<4)HKv)zg*UuD z&+H8v^=_2m7VpV&JGIY0gb1sfcVY+1FX#RV&;M(>q3pNgZz@|h_Lk#M#+6nMYuz>8 z4DhiG~4#&LXD`EVb#K?E)kDS`mp-H`_zc&NHb*9PJpCP-Ul@J!}R6tI> zJCERwNNLmWGWRqsblz1O>`@)%jO2dYcqeZPXHcOy*0r`4?4Qp=TDVNF38`u6L_GN_ zr2XW(Ww3+QDr40vpN&@DDL-qf%Rlz=)&Oje9&gP48||KNc?q@_6CZ7c%rDJA@Zi#0IOoN%H`u9|2TnCr_+j`+gT3D-!dnte;hpV})Pf|VFO>6n{zE)jxFeuu z5P2!<0ypCNqwd*ckUg!_4^K6B0`+F!>JgniN!?k9f~9#R;fae)=tzvslk(K1 zyCe;4q5BuL@vh*Z;h(EEsIi!xsaY?(^{jJEPDKy9;;&!%c$sJN$11@;EuSer^>eMf zwA9}<-Zi%t2#)(;xFxg-7lcyk4#o zMv`uYkDa$k{>`&$H>yL4gV$$&&KD9CZ3-p+X}tf_Pvt;X4;Mhl3wrLTj_cEB{^hwe zUgK2Bqknm1mmq9&-cQuApshu6Q2H~fV_J-m@@4=K&at0mk|*rl)~T}=QA>7(>>$Mg zAYzbSs<7i2u6$*30$0HoJoe1#t$fXL!DUY;9DC*;)29vn0>=C#yM+XV#f9Vy-Xuy)NYXXgq*2Lw9}v5uRGeW@B8l!d zD9x67)XwqXIB$*kGHfFS%A$85{^VV3`GD=}G{=-aBKrrI>TiLKQS9KzWC)qeI5j^= z*(9Gg6A4Q7G8|UYuex^I+{G%ZYYrUysx1W$Hh|;u%-i%be(^@NpM9ZG>3BSpS zFZR+dx%X48nYKy&@palKE1mZhE))-Vn%G?>+ekFvnzxqf)5%>bg|fA^s~WL%Z$Ox9 zy|P9%J5h1ABWZOGd8>1=JLddqOM}bDKjTj(dP$s&q~=%mJNrj2ASdXg0-9Qh=ibRO zE?Qw*k0V0Np6SFCzs*jHTX)jTAf?aSP?w4;QzLdz-%uDWTMN^*2XxA3kyS*B#MVO8 zu34wG)(1xDjGt+eWVZbCE~Xz01g8PGsJ9!i4!ITT7*X`uboFh&$FAl&{!5&Js=Y3IA;a#yV72!Ic3s zTIL7!;b702C_4(BYF2ZTpczi8$f=bGzn5-DVQBFN4Tx4ToCU{mvxIbr{?9bVDL@xw 
zRdQ(cJ#=^CQsTe+E`P_i7{ZOnW(2ov<=xBjrA>$nCc49-`@(Rp9EWwF#6qMY;`APB zB7Z2G^tpJ9+Dh7v)&h|pEeq;g4)Sy2Gv-nM{8`GYZ4Ym-&*`ZO6m>ucj@r&`e1wB@ zQik@S_Ob4Mv$L-PMkrV#TL()q&wjNDxBZuM5s`3x`r0sctNSZ?%)k|fmbDL2^!@A9@Ay|6T!;EtnjtK zn7yZyRHE&}c+)cct~hloTewa+w(qpSm&jGWKinJ#0{mMVy=~^^bddWF6S43 zW$gkN&Yd1O{VuTDVCI=Is1}C!D}>raaRpuPD$`z=`%?!|yUxhRa9F+l(DFWK|E-wr z^nMO27BvF+NPxv8MxL|fYqVNrl1$n4_0)`1MRT16t3PdJdvW{VQ`a!@rYTJwGPFhp ze@U+pN1?>SMSQ0{-UZJ@aqM@xm*Par+;!LJk|%p3ajmk$UWwVtRV2>YpVj%0BuJV^ zdh{~O2O1vIO7@ob7oYi(slf~xT<15xjN4o8>4b%OlznTVwU&c0HQA zsu8$~5qeUuRCcIA4wto(+&$MZ;*=#&T1Y>J)0+vpfi~ydK@qtE4%MuFY>p|HDbm`j zM*BvbeI{EBg@;gMZjU>`Pm3q(3iVQhEYK=E z69eJ!Tssi zw~XxgBA1-f-=(3)ZL&wE^q~}4TLW)%vU%xp%}T_whF2s!U2N;hsXMdgl$vVIH>eT= zEJ<5_jyZfzpfe0^_j;wH74Y?~^b71%onM`7275mCM#z0q$$aT*N>aIdoRM+Zd6w^D z#XMyoP8_(j_lP<1LLwEDoUrXNE5Q!-AjAWNPqi_kSF~P}Oc4><=b(QjPJ?*a6jtx} zJitxLV--tp>u+LxHhkP)iBeXy$3iN-L8+3c_22ACZg=iFO`;&IxRuP}h(l5(J0$5F z4}V|EA+)i=9Pl!T8p`R~4X4Mp(0t=@pv%FAeNsR^607Y(R{mPs(8e z=q6QNFx^5B`FqiV!%rM7dGp{DM1mv?Di_N^k_10P-Me*aNUxxMq4g4z9o=-|*XEZa zmVI4RNI)!`$S(zZAOqlb{#%hxH-!F#yca#6TNbui!`CqG-_-=I2UA93G219!-nL42I^7Yn( zfUGM&08BNQ!m?==x8(B+3@>;61=6!hD=bgYOzfBw1y||W4Hz3+ms3A|rLaDl;_bfi zusi53>6uRaS{S$sm^*_(X3y#|kZ~TWpSqVxU+w<+#x?g1Cy5)|J1x1?QrevFmx>y_ zADn8HK#>Npi(Qt7@MnL*u{LlOo%jL6eZ6saCGqy#^S6w#O%B-h$sE6>yH0V02|e)^ zg53(%iP}tlaLFI%X=}N?th7@lW~8h+rp%hxR;LEFhWr#@umQNfr=_tvdH!PlHL ze!b`)*8m;UrhnnYn1;)Mj<4BB^+U5@0z#k=eB#2qrqhPrDt^u$2Q=G5d<4y(-|c(W49HWor| z=w9`O8G615*PH8rYjq;BOHY+%uYWHoo35rEpZ&8H$4B3np+7VEd)}^|n8|zm><(b| zGKJR+9&r7fHn>|EY_$*NfkwD-Sa3&8Ju}W|Je+2?-UV(sV$(A?ZX3&h%Xqu!C)ugF z;66M^{nF%mX6gZ{`iW@vNdu2G(EVlE=G8Z?(AYXZDA9h;-hmVKF&YnEfGDY;(X1T# z?n@!IT#a`5@NEUWNTAq}wY99g?JRnlTz?6%&#I9CEI#BFmZU<2n8KeDj*5(y2sL&ngl+&B2_X@TIpTXd4J;Rx{9^6|Stlu{q8?r|XtySzR-_JoZcRDUa@)pEw z*)#)|SG&Yv739RG@B`KO?Z*!TKloSp4{}xmR`wCO`V$2$-(TtVX8&F2hFw4t)C2Q_ z6v1n$?q6$1FUruPF{Rr6zkUPweuRr4z#(4uH=QSNnzLz8^*#Sck+_{+JewV$l^3ib zZUy}AUsXsv=(#qu!GBDWRGrW&VU>CT;L~>*v|nvzvbWK*flKZ9_Pq*B8V|q+SCK~z z)Y@SCTTqczeg;6GO{QjbmiMDNi!K`63SQj=EWM`ezc=JlFi23Ki&g~w|3_8ZzNZ1m zeZST)Z9#7xmHyO?{AG&&QZx4#6#SdC%G?|R6o#_+q4xjY`LRhE;r;tC-OH$`uO5Y) zWtAbU6dWkb>rLv!bknm53?aM|ld>L;4QO{D-x$4asKOe#_Zd!osr>QrQu>3O z;Drq~1pq>YSRi!Yx&+=ZK1^aY4KE8z(fh4n9jh-KowX;{#0DgYs!`fTI<{FLtFirg z?6n@dE{{G~sk?2<(WE++FeM49K=Xj~=CF@hD3CSmq1&L&b8lYg8Dn~?xMKRL}^~4?5cR(}`VK-L&cKIZJs@{8rK4N?=l~u_B9i#)r`2hf5&Mgh;HtLeG z6t%|fQzM?N;lo@9T(fA|lV!Q|s^O1CfKksH)|SS1TM{nNfjrYirnQg1W7r{U95>`@lEtmgzHvr?lr`_;XQWJ)`jzf}&lo~f zuN^iJd23vg9cbNj%R|s+QHM3IDzEqiJnC3kd&93@}96=Mct0Evn5o!BaDXe}BbV%G2OlIx9yB zQI#y@$BA}j>SOiJf?sY?a?$ciQWh-YE8=6Hct|*sX0Cnbg|@$ffqc*H&SHt*A^Kp{ z_kf1nG=t;Bp$Kix({@aMzA~RxGevZNM!QsYvr zH}-Zyi5k^W$}!%8_Nkjafjg@|461a9vo7eY#XuMu^GFBaZ?1;R!# zGgB%hci2fgFy@88DbZy&=*PH=SV?J+xA;DdEPWWKwfecnA!ELHA4fB+EAATr9HKIw zsXAE%hChWNeY>yOoU*=Yul6y8&5;=HtizivlhW?E5KlVz&p)aY%gM^ zx3>J)Qs_C4Z>C(J#r%07F3ZEA=K$FT%;oW4wDEu)C|Sg6l${0IVFgxy6TYl)?SFQU zZpMoLJhNeDf0eP- zAr16=7OL`lyo;(x9NslIGL+mCM5KPziipkXk65h1o$^j{uInqUOKp*wo$Q*K{mG1f zjO?fGQc7h@9^}U zo)~WRd^=S}ZkueiMkDV1&8D+DyZh3q6GlC;(1meOvRcii1%P6>BYknA<@xRUm)};U z&%Hf1XU?5a%HGl*?f_f?*(<^UqL}#?I2yjw1zk&ddy5Ml-!fK zaySRsJ$&oj;WzHpoIO+id0t}1bX>vKdvQpc(V+_;mB(up+VmgbwqsiAaT62b>saI9 z)X1;Y_+vEjD$l07bARqEL4aSf30Msg-sM%$)-##AG#^|y;Gs6LLXmb)JI+*8M&uw>8S>AX?a@IDq|`fse;6OBjunLg zl;93?l2x;Bc5`1CZS}-($om=b2Ca~mZ{Mr3zzo0zjM2Wk`tWT-3b$u(%CJ31GeT*- z%Ht~>9dE%6P+}z!-erp-v?(dJ?>r(~F zceuU5&rjK?YINl366$6z^KY>-W=~!)6im6@dFSy|e4g(9l0vpnUkoL>Usd`RNJ!og z>8Y%g={x=O7(Z#BPNexQPH;t?*!~dx+ly|VbSitB1H~uV=Lk8p z(twFzeRik`_w!giw)}~^j+3tYziCjk)Jn294kg2`n1F;3BLeIQa7qREp(#oNA@8D= 
ziZuis#f&(tXuAlCG=j;+_d1<_iU;PGdnO&g#rwk|QUYBxYrkQaBzaM6qc_g2?$n7P znw2_1iPwG;Fy)ME;@M*L_yc`1caD%|{6Vymag4^-M=+&D)p%Cj^}0D746mUU%+b;y zc{n^Rt$S8BlD4}ojapP4JM$m(bVA=#&$T0Yz;2+}y)%$K-*n=EWV1GhigQ8_;|bYL zRbyncst^3!4h5bX5zJt&%=W9rxEqhmE6*2sf4JV8bAZkHrND!`!r8^x<6-LDPwR!x z{c-TFcg;mdj9FP#NlC|NLY4HF3oWLdh}r%KOy-%JeN1cuF|FEeS3bRxxWo(1i?WI^ z-e#4k$Y*8jyo$7^4``WRn{0_meG8vGwWqC{Hp@_UJiijT zeW=Z)2txC;Q)aBK-^$@d6VaDb4ck9F*VtYFbkIrKo=08sR2;6AYY;l&i3+@4V?1Mh zXJ%qLmKGKEN}pOoDWAD?X9sn~+s}E!E$gHsuia8!$dRUT6>MS)qqHaX^G!+xsNS(j z328*_-l70midbWZ@O*UIlFulsz}>vL%udeP_dECrwM_Wuj%KOSQ?lbyZ{2wIip4G=z#Q*a`Ca$G+JREt>35jMqe)Y1V3I#%4k`S4UyCr^*z-vy zMB%0%vaaVF33YzS_0=5H?QH7I*ZUhu&EkWUAXz96e&ZSgf@fpHc`HGUem#|oyNcU>UOf{w(3omY#a)ZkT$kq`A`V#308pZ`-4R`@bh~R82Ycy zffU2eJ=JamFdfa^e+>ZSEsmG|RoWSz3u13M6wus2;!hH6d%%`cJ?^QFUj~AY^Fw(c zJY10HhyD|l05#)ZbN{tZ1t5nE|JE`@$(3yndaYptir&J3z6d7$L(LF_1eScj(>Q|4 z#5ORI@4M&!w?j-Q*q4sUX!qv_R==0&i*h*qw+_9G^WxlTZ^Rr*gUj5j{Qo#Ht+z_oLfmITE9Lnj7jXeuIn+wJAl>)QQS8F1Id)4lsv|~{3kIc?p!FS(ghWL)3 zH14Erh*T2$29n<@u-mlp?j4=2k=E*esa#;w`;5NPXjz;S+Xy zppT>eUa%o?Vqaj{zBMX#2br=xU&FDW^1ron?SDyS{n|@qre;&0GR<3;w{g-mbJVF( zuo`QcPM68NU(g)I36;|D8iF<2U22+g)RbeLGOviHq9V}6q49=IAt4GD5h|i70s~v% zT{!QXy`S?3ob#R&KW+Bf>$~@Q7VG;w--o?7kDmW?fRZk8Y#2Mv?DxFUEYfiFE8Q}1 zQz)W!s&RZepEeURwU17hV~OozUrb99J*%biPHzJh+mT|;Po9v&EQgZ$9c2OOKJlUy z#um}mBei?&8!dmn_q8iDGkvAD;+>~U_enGasVcL6C&nX5}g zcF-Xl6kzqYagu}L(S3L-%46k`g1}d!#B5$(aX{;wCq+wEFFpNWGQDO)7j2oFTvk_w zg?W+6bNFg)#k;ijku<4f_zIO@F{5=ZWu9oCO>N#H+?6uy!)Z8U9-wiTBAAs_LQ+Q( zW^O>V^>%3&w1Ps4>Tag$PE=if40l|>w4_cGwb43JEPbEsXpGsb8(nCu`x-+CGdp{xoqfw4%4PiEK*8+~*lba9%f&qL+0&z>qil4coH#|!mp-G!zjRcLgpLn91=}~kGImyc>mO~XSB?whkVcgu= zF{U%F@0~G;O!%=nH3yI(p-@tx1S^GW{kOP4BTGu*k7 zHt@YeM36Bt*K514w|gw#r?0=+mP-N*mGo_O6VhC+L|hq~ROR#JcZG&SGBP(8c>~UYJi`pTZVFiuVuU{-x@g%8FdZ zH1aJ?vzT+ZS2n9GS`1;=s(Q9|Gf9f3UGXl9q{SRB1)HI8#&P(QgTx!!4XP&YRXDPH zCQUIEU=6!lOFx)xQF8#XKGAInJU*}wm)DQ;u%D-o`Na6zULg{47MBo3=#fN$1&!Tif`q*H&#yV1t`L(QAY;6{@2f2i8$+s)m!>xKjyUgbZI%XGM}ZltG)=KO~s$RhG77STcDK9iPRLPh(!d^FZsgqwuU zZ#AH;FZZz4NI%!f9i{Zq^1O+-=ql#E+lf_Sxi+App zRvWy)$~Mr60Mi#YA5-A6j$a&m`AJdybp)5MH{zt3r!ntFwrM5nLKB=T6@+8%MP ziQ@-yp%nUO;+`vVvNf_7j3kK@S)xtmcV5J<+U&)SwHU)0zadiz37j*vCkHz3#IDW_ zGQJ+?^h-}Qq@X8}JM!5ybp-roi{xA1yq0#6+_jo1y&Iwvqv$E2IO)mJwJzPvZES}Z zurRwWCMue(BnbsHbHvn%P;7j`u!N{0XaC0s+WKkV_L5hzIh|OgNSHUKdzbb*jAcKf zDIK2nc;dZ4Xxp|z&TvjP5tn230Yjq~8=`js(*CF9csOfir>o&Tda}^%UwCk&nyO9v zog>(yQinTQjh$P_=LWQ{HZ@uO542@uNS!&7B7naBjTx@!93fTEoJ;ysaXiKFiF<1)z$aH2cj?>seWiE z8BYlOXuxM~a`2c8Zdv!$=T-kV)P&-)@xVk}I4x0jIA^F6;ggm^CuP~ys=R$!=;sbB z?l<-obJsMCzcdhWbe-{l1mB*+EpX$#j^m~CW)8VwxwWa1!x=S!$~NsW(ifQ%Z*z#M zoqeIW7qYoEn(2r`;0|z0uI0RNH~8}+?ThHG)fkcI>_iuRLy#_=gwsxXc&oAMD~^q0 z_7XXfgp8JxCkJ9+tpX&i0A-Yi2M_zCs!WMOOslTwl+i7{BAcJm_ZSdN+U_Z8RY9Bt z%(!;=g*SaRPDOQWqzj8xMI>Rd-}-3m*et7$xOvCoFj@o?Q9J7(`g)C$B&An z1cW!6=DIH`V5*Xe9c~a*RKyb@j=RKO_EM8NT60^JCk(3+W{v~i8(LFeR)UQ(ABtyd z+O_6Xg|7sWPNy?-HQ2f+$xIhC@aP_W>J@V8NC%14dn&66c0e{sX4<0%d<1j(BObF_ z(q2XEs7lT9&u%Ac1m+VstSzLh_O<^Qj`xPumaEUNL*piv2w^KrtDG8Z-aIDT%Wau|bYPp0O2k}Tu?G$(j1YuY2bZ(T?1W=ax! 
zP0sdYIUeol;)MoUac0L^-5$_RhYP;d*P$enC;^pMfpW;^E^1Ryl1qevM7y9qTT<$y zd$0lC=9RT3$sW$==P5f@0RM`3S6;+vtAn9r@8m7YVb5I&p6}J&d5j@lhFO7JOEB3_T* zeOyPijF6@!JnoL)^sL5hQIRV)_z?jq@THGb=6ps?7-A3SC8=slS|qkdS`vWBqjZ z#WR@{J*111r9(6BFtd*?hp;?k&pFJxu;yWbMbdssXq5F2w?rB_zD{Lv5n)~bot5<< zHY}eRGeLo2QyImCMKw=8Us5AtKo9Bf+s0AFNo}!o7a?+6q&lKhyCGU0B`Aqpb?x0o z$X%fmq|Jk1o=C7W0`0zBO7Nb4Ob|a^%|Rxn0=d3PqTI`LBc61-uGe$RXH>{F3mOuEHa70yNjcgzw0sX5l6 zyS)93^nvu5R(p^UCRkBF80|5;PBGq(Nn+Rx7RI%n`AkVhIMzEoDJS7>_ugcsgZ=8O z=SpU#C%!T4)>{nq_z-Z*^(a1&c`@gGR5~sgA^36ByS%h5MirvTJQYi+ByBwHGKF*I z_xtx*4(oDU(MVRr&UF*jMgwg-<2A4*l&y4&o=pK z&#IeVuTMEe{#oT#Mt6QR-7^ZUVmjV?2shfSL4S7pkLh_AXC7iopYbA>kt|J7irn+9n#8QLv;U`I zV7Z@h)269TY9~-)WC8duxGSA+Zu*#8o`h8?d9trPh|rs zbsz2#L5^JY-*ygBu6DYB^Qw-|g8Ar;*NZ8=ozqiW5DLr{qgx+#t*QsLlVLZ=D;2WU zyYXWz^S=>-z%0U$?hSZ@lZTDgFlLeYKkst}(TI&}L;VaWpBtJ*t3CGoe)p!z7|#m5 z;sRbca|6G4o2ICocSxj2h|?O3q^{Wru3NbYjA#(`aFCX$@2@-#{;``}sMY@WIXV;y z-iNV-VF|S1T%F$Xat!mk1nv>D0}c}%rT5h+QzvE-I+Ylcy2*f&2cD?R-kT4+t&>Xj zx8P_{M9h_4F9+Ir2z-rYu>w91LN3*{7XP~#fS>O9p@Ojie!Sd, , ) - -if args.track_mode == 'txt': - cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.txt' + ' ' + args.width_out + ' ' + args.height_out - track = shell.execute(cmd) - print(type(track)) - - cmd = '/usr/local/bin/cropvid ' + args.input_file + ' ' + args.output_file + ' ' + args.output_file + '.txt' - track = shell.execute(cmd) - - -# Track4K in virtual cropping mode (Creates a JSON file with the tracking position) -if args.track_mode == 'json': - cmd = '/usr/local/bin/track4k ' + args.input_file + ' ' + args.output_file + '.json' + ' ' + args.width_out + ' ' + args.height_out - track = shell.execute(cmd) - print(type(track)) -# Close the SSH pipe after finishing -print('Cropped video ready') -exit() diff --git a/utils/readme.md b/utils/readme.md index 44d4458..c70559b 100644 --- a/utils/readme.md +++ b/utils/readme.md @@ -5,7 +5,7 @@ In this folder you can find different utilities to work with Track4K, the utilit * **Track4KPyro** : Python3 script built with Pyro4 to execute remotely Track4K by using remote objects in a Client/Server Scheme. * **opencast_workflows** : Samples of workflows to work with the scripts. * **Ansible Playbooks** : An Ansible playbook to install TrackHD automatically with the **Track4KPyro** server script included. -* **ssh_track4k.py** (Deprecated): Python3 script built with Paramiko to execute remotely Track4K + ## Track4KPyro @@ -79,6 +79,11 @@ optional arguments: -h, --help show this help message and exit ``` +### Known Issues: + +* If Opencast sends a non-video file to the execute service, the track HD server will enter to an infinite loop. +* Track HD server Syslog is not logging everything. + ### Future Work: * [x] Integrate *trackhd_server.py* as a OS service. 
From 5686bb774dc09145e14fffa32991f9bb75e8b7a3 Mon Sep 17 00:00:00 2001 From: Maximiliano Lira Del Canto Date: Mon, 24 Sep 2018 16:20:58 +0200 Subject: [PATCH 49/49] Hot_Fix: Log folder address in ansible is set --- utils/Ansible_Playbook/group_vars/trackhd_clients | 4 ++-- utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml | 2 +- .../roles/track4k_client/templates/trackhd_client.py.j2 | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/Ansible_Playbook/group_vars/trackhd_clients b/utils/Ansible_Playbook/group_vars/trackhd_clients index 522dc42..4614cbf 100644 --- a/utils/Ansible_Playbook/group_vars/trackhd_clients +++ b/utils/Ansible_Playbook/group_vars/trackhd_clients @@ -1,5 +1,5 @@ #TrackHD Server parameters trackhd_ip: CHANGE_ME trackhd_port: CHANGE_ME -Log_file: output.log -Log_folder: /var/log/pyro4trackhd +Log_file_client: output.log +Log_folder_client: /var/log/pyro4trackhd diff --git a/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml index dadce0a..8058da6 100644 --- a/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml +++ b/utils/Ansible_Playbook/roles/track4k_client/tasks/main.yml @@ -32,6 +32,6 @@ - name: Create Log file folder file: - path: "{{ Log_folder }}" + path: "{{ Log_folder_client }}" state: directory mode: 775 diff --git a/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 b/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 index 5b40d37..d669e6e 100644 --- a/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 +++ b/utils/Ansible_Playbook/roles/track4k_client/templates/trackhd_client.py.j2 @@ -56,7 +56,7 @@ class StreamToLogger(object): logging.basicConfig( level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s:%(message)s', - filename="{{ Log_folder }}/{{ Log_file }}", + filename="{{ Log_folder_client }}/{{ Log_file_client }}", filemode='a' )
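
For reference, here is a minimal sketch of calling the Pyro4 server directly from Python, without the `trackhd_client.py` wrapper. The host, port and file paths below are placeholders, while the object name `trackhd.prototype` and the `track4k(input, output, width, height, mode)` signature are the ones exposed by `trackhd_server.py`.

```python
# Minimal sketch: drive the Track4KPyro server from a Python shell.
# The server address and the file paths are placeholders; adjust them to your setup.
import Pyro4

uri = 'PYRO:trackhd.prototype@192.0.2.10:15236'  # placeholder IP and port
trackhd = Pyro4.Proxy(uri)

# track4k() returns [returncode, stdout] from the remote run.
returncode, output = trackhd.track4k('/mnt/opencast/presenter.mkv',
                                     '/mnt/opencast/tracked.mkv',
                                     '1920', '1080', 'json')
print('track4k exit code:', returncode)
```

In `txt` mode the server also runs `cropvid` on the tracking result, so the output path ends up as the cropped video; in `json` mode only the tracking data is produced.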