diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 3df16721..07e4937a 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -3,7 +3,7 @@ target_compile_definitions(math_test PUBLIC -DEXACT_MATH_TEST) if (MYSQL_FOUND) message(STATUS "mysql found, adding db_conn to exact_common library!") - add_library(exact_common arguments random exp db_conn color_table log files weight_initialize) + add_library(exact_common arguments random exp db_conn color_table log files weight_initialize string_format) else (MYSQL_FOUND) - add_library(exact_common arguments exp random color_table log files weight_initialize) + add_library(exact_common arguments exp random color_table log files weight_initialize string_format) endif (MYSQL_FOUND) diff --git a/common/log.cxx b/common/log.cxx index 928a46c0..50ae3b96 100644 --- a/common/log.cxx +++ b/common/log.cxx @@ -1,4 +1,5 @@ #include +#include using std::fprintf; using std::printf; using std::snprintf; @@ -17,12 +18,11 @@ using std::thread; #include "files.hxx" #include "log.hxx" - using std::cerr; using std::endl; -int32_t Log::std_message_level = INFO; -int32_t Log::file_message_level = INFO; +log_level_t Log::std_message_level = LOG_LEVEL_INFO; +log_level_t Log::file_message_level = LOG_LEVEL_INFO; bool Log::write_to_file = true; int32_t Log::max_header_length = 256; int32_t Log::max_message_length = 1024; @@ -45,23 +45,23 @@ void Log::register_command_line_arguments() { //CommandLine:: } -int8_t Log::parse_level_from_string(string level) { +log_level_t Log::parse_level_from_string(string level) { if (level.compare("0") == 0 || level.compare("NONE") == 0|| level.compare("none") == 0) { - return Log::NONE; + return LOG_LEVEL_NONE; } else if (level.compare("1") == 0 || level.compare("FATAL") == 0 || level.compare("fatal") == 0) { - return Log::FATAL; + return LOG_LEVEL_FATAL; } else if (level.compare("2") == 0 || level.compare("ERROR") == 0 || level.compare("error") == 0) { - return Log::ERROR; + return 
LOG_LEVEL_ERROR; } else if (level.compare("3") == 0 || level.compare("WARNING") == 0 || level.compare("warning") == 0) { - return Log::WARNING; + return LOG_LEVEL_WARNING; } else if (level.compare("4") == 0 || level.compare("INFO") == 0 || level.compare("info") == 0) { - return Log::INFO; + return LOG_LEVEL_INFO; } else if (level.compare("5") == 0 || level.compare("DEBUG") == 0 || level.compare("debug") == 0) { - return Log::DEBUG; + return LOG_LEVEL_DEBUG; } else if (level.compare("6") == 0 || level.compare("TRACE") == 0 || level.compare("trace") == 0) { - return Log::TRACE; + return LOG_LEVEL_TRACE; } else if (level.compare("7") == 0 || level.compare("ALL") == 0 || level.compare("all") == 0) { - return Log::ALL; + return LOG_LEVEL_ALL; } else { cerr << "ERROR: specified an incorrect message level for the Log: '" << level << "'" << endl; cerr << "Options are:" << endl; @@ -77,6 +77,29 @@ int8_t Log::parse_level_from_string(string level) { } } +string Log::get_level_str(log_level_t level) { + switch (level) { + case LOG_LEVEL_NONE: + return "NONE"; + case LOG_LEVEL_FATAL: + return "FATAL"; + case LOG_LEVEL_ERROR: + return "ERROR"; + case LOG_LEVEL_WARNING: + return "WARNING"; + case LOG_LEVEL_INFO: + return "INFO"; + case LOG_LEVEL_DEBUG: + return "DEBUG"; + case LOG_LEVEL_TRACE: + return "TRACE"; + case LOG_LEVEL_ALL: + return "ALL"; + default: + return ""; + } +} + void Log::initialize(const vector &arguments) { //TODO: should read these from the CommandLine (to be created) @@ -146,11 +169,12 @@ void Log::release_id(string human_readable_id) { log_ids_mutex.unlock(); } +void Log::log(const char *file, size_t filelen, const char *func, size_t funclen, long line, log_level_t level, const char *format, ...) 
{ -void Log::write_message(bool print_header, int8_t message_level, const char *message_type, const char *format, va_list arguments) { - + va_list arguments; + va_start(arguments, format); + thread::id id = std::this_thread::get_id(); - if (log_ids.count(id) == 0) { cerr << "ERROR: could not write message from thread '" << id << "' because it did not have a human readable id assigned (please use the Log::set_id(string) function before writing to the Log on any thread)." << endl; cerr << "message:" << endl; @@ -159,29 +183,32 @@ void Log::write_message(bool print_header, int8_t message_level, const char *mes exit(1); } - string human_readable_id = log_ids[id]; + // file is relative path to ~/. getting file name here + char filename_buffer[filelen]; + strcpy(filename_buffer, file); + char* file_token = strtok(filename_buffer, "/"); + char* file_name = file_token; + for (; (file_token = strtok(NULL, "/")) != NULL; file_name = file_token); - //print the message header into a string - char header_buffer[max_header_length]; - //we only need to print the header for some messages - if (print_header) { - //snprintf(header_buffer, max_header_length, "[%-8s %-20s]", message_type, human_readable_id.c_str()); - snprintf(header_buffer, max_header_length, "[%-7s %-21s]", message_type, human_readable_id.c_str()); - } +// TODO: find replace \r\n with ' '. 
ensure one new line at end of every message + + char func_name[funclen]; + strcpy(func_name, func); - //print the actual message contents into a string char message_buffer[max_message_length]; vsnprintf(message_buffer, max_message_length, format, arguments); - if (std_message_level >= message_level) { - if (print_header) { - printf("%s %s", header_buffer, message_buffer); - } else { - printf("%s", message_buffer); - } + string human_readable_id = log_ids[id]; + string level_str = Log::get_level_str(level); + + string log_str = ""; + log_str = string_format("[%s %s]: %s:%ld %s:\t %s", level_str.c_str(), human_readable_id.c_str(), file_name, line, func_name, message_buffer); + + if (std_message_level >= level) { + printf("%s", log_str.c_str()); } - if (file_message_level >= message_level) { + if (file_message_level >= level) { LogFile* log_file = NULL; //check and see if we've already opened a file for this human readable id, if we haven't @@ -199,161 +226,17 @@ void Log::write_message(bool print_header, int8_t message_level, const char *mes //lock this log_file in case multiple threads are trying to write //to the same file log_file->file_mutex.lock(); - if (print_header) { - fprintf(log_file->file, "%s %s", header_buffer, message_buffer); - } else { - fprintf(log_file->file, "%s", message_buffer); - } + fprintf(log_file->file, "%s", log_str.c_str()); fflush(log_file->file); log_file->file_mutex.unlock(); } } -bool Log::at_level(int8_t level) { +bool Log::at_level(log_level_t level) { return level >= std_message_level || level >= file_message_level; } -void Log::fatal(const char *format, ...) 
{ - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < FATAL && file_message_level < FATAL) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, FATAL, "FATAL", format, arguments); +bool Log::should_log(log_level_t level) { + return !(std_message_level < level && file_message_level < level) + && !(restricted_rank >= 0 && restricted_rank != process_rank); } - -void Log::error(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < ERROR && file_message_level < ERROR) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, ERROR, "ERROR", format, arguments); -} - -void Log::warning(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < WARNING && file_message_level < WARNING) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, WARNING, "WARNING", format, arguments); -} - -void Log::info(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < INFO && file_message_level < INFO) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, INFO, "INFO", format, arguments); -} - -void Log::debug(const char* format, ...) 
{ - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < DEBUG && file_message_level < DEBUG) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, DEBUG, "DEBUG", format, arguments); -} - -void Log::trace(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < TRACE && file_message_level < TRACE) return; - - va_list arguments; - va_start(arguments, format); - write_message(true, TRACE, "TRACE", format, arguments); -} - -void Log::fatal_no_header(const char *format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < FATAL && file_message_level < FATAL) return; - - va_list arguments; - va_start(arguments, format); - write_message(false, FATAL, "FATAL", format, arguments); -} - -void Log::error_no_header(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < ERROR && file_message_level < ERROR) return; - - va_list arguments; - va_start(arguments, format); - write_message(false, ERROR, "ERROR", format, arguments); -} - -void Log::warning_no_header(const char* format, ...) 
{ - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < WARNING && file_message_level < WARNING) return; - - va_list arguments; - va_start(arguments, format); - write_message(false, WARNING, "WARNING", format, arguments); -} - -void Log::info_no_header(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < INFO && file_message_level < INFO) return; - - va_list arguments; - va_start(arguments, format); - write_message(false, INFO, "INFO", format, arguments); -} - -void Log::debug_no_header(const char* format, ...) { - //don't write if this is the wrong process rank - if (restricted_rank >= 0 && restricted_rank != process_rank) return; - - //not writing this type of message to either std out or a file - if (std_message_level < DEBUG && file_message_level < DEBUG) return; - - va_list arguments; - va_start(arguments, format); - write_message(false, DEBUG, "DEBUG", format, arguments); -} - -void Log::trace_no_header(const char* format, ...) 
/**
 * The log levels, spaced out so additional levels can be inserted between
 * existing ones without renumbering. Larger values are more verbose.
 */
typedef enum log_level {
    LOG_LEVEL_NONE = 0,
    LOG_LEVEL_FATAL = 20,
    LOG_LEVEL_ERROR = 40,
    LOG_LEVEL_WARNING = 60,
    LOG_LEVEL_INFO = 80,
    LOG_LEVEL_DEBUG = 100,
    LOG_LEVEL_TRACE = 120,
    LOG_LEVEL_ALL = 140
} log_level_t;

/**
 * Checks should_log() before calling Log::log() so that the format
 * arguments are not even evaluated when the message would be discarded.
 *
 * Wrapped in do { ... } while (0) so the macro expands to a single
 * statement: the previous `{ if (...) { ... } }` form made
 * `if (cond) LOG_INFO(...); else ...` a syntax error, because the
 * trailing semicolon after the block terminated the `if` before the
 * `else`. sizeof(__FILE__)-1 / sizeof(__func__)-1 pass the string
 * lengths without the terminating '\0'.
 */
#define LOG_GUARD(level, format, ...)                                     \
    do {                                                                  \
        if (Log::should_log(level)) {                                     \
            Log::log(__FILE__, sizeof(__FILE__) - 1,                      \
                     __func__, sizeof(__func__) - 1, __LINE__,            \
                     level, format, ##__VA_ARGS__);                       \
        }                                                                 \
    } while (0)

#define LOG_FATAL(format, ...) \
    LOG_GUARD(LOG_LEVEL_FATAL, format, ##__VA_ARGS__)
#define LOG_ERROR(format, ...) \
    LOG_GUARD(LOG_LEVEL_ERROR, format, ##__VA_ARGS__)
#define LOG_WARNING(format, ...) \
    LOG_GUARD(LOG_LEVEL_WARNING, format, ##__VA_ARGS__)
#define LOG_INFO(format, ...) \
    LOG_GUARD(LOG_LEVEL_INFO, format, ##__VA_ARGS__)
#define LOG_DEBUG(format, ...) \
    LOG_GUARD(LOG_LEVEL_DEBUG, format, ##__VA_ARGS__)
#define LOG_TRACE(format, ...) \
    LOG_GUARD(LOG_LEVEL_TRACE, format, ##__VA_ARGS__)
*/ - static int32_t file_message_level; + static log_level_t file_message_level; /** * Specifies if the logs should also be written to a flie. @@ -99,26 +130,8 @@ class Log { static shared_mutex log_ids_mutex; - /** - * Potentially writes the message to either standard output or the log file if the message level is high enough. - * - * \param print_header specifies if the header to the message should be printed out - * \param message_level the level of the message to potentially be printed out - * \param message_type a string representation of this message type - * \param message_type the format string for this message (as in printf) - * \param arguments the arguments for the print statement - */ - static void write_message(bool print_header, int8_t message_level, const char *message_type, const char *format, va_list arguments); - public: - static const int8_t NONE = 0; /**< Specifies no messages will be logged. */ - static const int8_t FATAL = 1; /**< Specifies only fatal messages will be logged. */ - static const int8_t ERROR = 2; /**< Specifies error and above messages will be logged. */ - static const int8_t WARNING = 3; /**< Specifies warning and above messages will be logged. */ - static const int8_t INFO = 4; /**< Specifies info and above messages will be logged. */ - static const int8_t DEBUG = 5; /**< Specifies debug and above messages will be logged. */ - static const int8_t TRACE = 6; /**< Specifies trace and above messages will be logged. */ - static const int8_t ALL = 7; /**< Specifies all messages will be logged. */ + static string get_level_str(log_level_t level); /** * Registers used command line arguments and instructions with the CommandLine class. 
@@ -133,7 +146,7 @@ class Log { * * \return the message level as an int8_t (i.e., one of the message level constants) */ - static int8_t parse_level_from_string(string level); + static log_level_t parse_level_from_string(string level); /** * Initializes the Log given arguments retreived from the CommandLine class. @@ -186,7 +199,6 @@ class Log { */ static void release_id(string human_readable_id); - /** * Determines if either output level (the file or standard output) level * is above the level passed as a parameter. @@ -195,23 +207,34 @@ class Log { * * \return true if either the file or standard output level is greater than or equal to the passed level */ - static bool at_level(int8_t level); + static bool at_level(log_level_t level); - static void fatal(const char* format, ...); /**< Logs a fatal message. varargs are the same as in printf. */ - static void error(const char* format, ...); /**< Logs an error message. varargs are the same as in printf. */ - static void warning(const char* format, ...); /**< Logs a warning message. varargs are the same as in printf. */ - static void info(const char* format, ...); /**< Logs an info message. varargs are the same as in printf. */ - static void debug(const char* format, ...); /**< Logs a debug message. varargs are the same as in printf. */ - static void trace(const char* format, ...); /**< Logs a trace message. varargs are the same as in printf. */ + /** + * Determines if log should be invoked, including arg parameter functions calls + * + * Note: side effect includes not calling arg parameter functions such as prefix/postfix increment, e.g. i++ will be skipped! + * + * \return true if the level is greater than the file or standard output level and process rank is allowed to log + */ + static bool should_log(log_level_t level); - static void fatal_no_header(const char* format, ...); /**< Logs a fatal message. Does not print the message header (useful if doing multiple log prints to the same line). 
varargs are the same as in printf. */ - static void error_no_header(const char* format, ...); /**< Logs an error message. Does not print the message header (useful if doing multiple log prints to the same line). varargs are the same as in printf. */ - static void warning_no_header(const char* format, ...); /**< Logs a warning message. Does not print the message header (useful if doing multiple log prints to the same line). varargs are the same as in printf. */ - static void info_no_header(const char* format, ...); /**< Logs an info message. Does not print the message header (useful if doing multiple log prints to the same line). varargs are the same as in printf. */ - static void debug_no_header(const char* format, ...); /**< Logs a debug message. Does not print the message header (useful if doing multiple log prints to the same line). varargs are the same as in printf. */ - static void trace_no_header(const char* format, ...); /**< Logs a trace message. Does not print the message header (useful if doing multiple log prints to the same line). varargs are the same as in printf. */ + /** + * Don't use log directly. Use one of the log macros (info, debug, trace etc) + * The log macros will automatically provide the file, function and line of the calling function + * + * Potentially writes the message to either standard output or the log file if the message level is high enough. + * + * \param file the c string of the file name + * \param filelen the length of the file name string + * \param func the c string of the function name + * \param funclen the length of the function name + * \param line the line number where log was called + * \param level the log level to write to + * \param format the format string, e.g. 
#ifndef STRING_FORMAT_HXX
#define STRING_FORMAT_HXX

#include <cstdio>
#include <memory>
#include <stdexcept>
#include <string>

// TODO: this can go away when C++20 is adopted, std::format
// https://en.cppreference.com/w/cpp/utility/format/formatter#Standard_format_specification
// Credit to https://stackoverflow.com/a/26221725/4102299

/**
 * printf-style formatting into a std::string.
 *
 * \param format a printf format string, e.g. "my name is %s"
 * \param args the arguments for the format string
 * \return the formatted string (without a trailing '\0')
 * \throws std::runtime_error if snprintf reports an encoding error
 */
template<typename ... Args>
inline std::string string_format(const std::string& format, Args ... args) {
    // snprintf returns a negative value on error; that must be checked on a
    // SIGNED intermediate. The previous `size_t size = snprintf(...) + 1;
    // if (size <= 0)` could never detect the error: a negative return
    // wrapped around to a huge unsigned value instead.
    int size_s = std::snprintf(nullptr, 0, format.c_str(), args ...);
    if (size_s < 0) {
        throw std::runtime_error("Error during formatting.");
    }
    size_t size = static_cast<size_t>(size_s) + 1;  // extra space for '\0'
    std::unique_ptr<char[]> buf(new char[size]);
    std::snprintf(buf.get(), size, format.c_str(), args ...);
    return std::string(buf.get(), buf.get() + size - 1);  // we don't want the '\0' inside
}

#endif
LOG_DEBUG("probe returned message from: %d with tag: %d\n", source, tag); //if the message is a work request, send a genome @@ -138,25 +138,25 @@ void master(int max_rank) { if (genome == NULL) { //search was completed if it returns NULL for an individual //send terminate message - Log::info("terminating worker: %d\n", source); + LOG_INFO("terminating worker: %d\n", source); send_terminate_message(source); terminates_sent++; - Log::debug("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); + LOG_DEBUG("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); if (terminates_sent >= max_rank - 1) return; } else { //genome->write_to_file( examm->get_output_directory() + "/before_send_gen_" + to_string(genome->get_generation_id()) ); //send genome - Log::debug("sending genome to: %d\n", source); + LOG_DEBUG("sending genome to: %d\n", source); send_genome_to(source, genome); //delete this genome as it will not be used again delete genome; } } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome from: %d\n", source); + LOG_DEBUG("received genome from: %d\n", source); RNN_Genome *genome = receive_genome_from(source); examm_mutex.lock(); @@ -167,7 +167,7 @@ void master(int max_rank) { delete genome; //this genome will be deleted if/when removed from population } else { - Log::fatal("ERROR: received message from %d with unknown tag: %d", source, tag); + LOG_FATAL("ERROR: received message from %d with unknown tag: %d", source, tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -177,23 +177,23 @@ void worker(int rank) { Log::set_id("worker_" + to_string(rank)); while (true) { - Log::debug("sending work request!\n"); + LOG_DEBUG("sending work request!\n"); send_work_request(0); - Log::debug("sent work request!\n"); + LOG_DEBUG("sent work request!\n"); MPI_Status status; MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status); int tag = status.MPI_TAG; - Log::debug("probe received message with tag: %d\n", tag); + LOG_DEBUG("probe received message with tag: 
%d\n", tag); if (tag == TERMINATE_TAG) { - Log::debug("received terminate tag!\n"); + LOG_DEBUG("received terminate tag!\n"); receive_terminate_message(0); break; } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome!\n"); + LOG_DEBUG("received genome!\n"); RNN_Genome* genome = receive_genome_from(0); //have each worker write the backproagation to a separate log file @@ -209,7 +209,7 @@ void worker(int rank) { delete genome; } else { - Log::fatal("ERROR: received message with unknown tag: %d\n", tag); + LOG_FATAL("ERROR: received message with unknown tag: %d\n", tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -272,7 +272,7 @@ int main(int argc, char** argv) { int number_inputs = time_series_sets->get_number_inputs(); int number_outputs = time_series_sets->get_number_outputs(); - Log::debug("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); + LOG_DEBUG("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); int32_t population_size; get_argument(arguments, "--population_size", true, population_size); @@ -417,7 +417,7 @@ int main(int argc, char** argv) { finished = true; - Log::debug("rank %d completed!\n"); + LOG_DEBUG("rank %d completed!\n"); Log::release_id("main_" + to_string(rank)); MPI_Finalize(); diff --git a/mpi/examm_mpi_multi.cxx b/mpi/examm_mpi_multi.cxx index c939eb3e..37c89086 100644 --- a/mpi/examm_mpi_multi.cxx +++ b/mpi/examm_mpi_multi.cxx @@ -66,16 +66,16 @@ RNN_Genome* receive_genome_from(int source) { int length = length_message[0]; - Log::debug("receiving genome of length: %d from: %d\n", length, source); + LOG_DEBUG("receiving genome of length: %d from: %d\n", length, source); char* genome_str = new char[length + 1]; - Log::debug("receiving genome from: %d\n", source); + LOG_DEBUG("receiving genome from: %d\n", source); MPI_Recv(genome_str, length, MPI_CHAR, source, GENOME_TAG, MPI_COMM_WORLD, &status); genome_str[length] = '\0'; - Log::trace("genome_str:\n%s\n", genome_str); + 
LOG_TRACE("genome_str:\n%s\n", genome_str); RNN_Genome* genome = new RNN_Genome(genome_str, length); @@ -89,13 +89,13 @@ void send_genome_to(int target, RNN_Genome* genome) { genome->write_to_array(&byte_array, length); - Log::debug("sending genome of length: %d to: %d\n", length, target); + LOG_DEBUG("sending genome of length: %d to: %d\n", length, target); int length_message[1]; length_message[0] = length; MPI_Send(length_message, 1, MPI_INT, target, GENOME_LENGTH_TAG, MPI_COMM_WORLD); - Log::debug("sending genome to: %d\n", target); + LOG_DEBUG("sending genome to: %d\n", target); MPI_Send(byte_array, length, MPI_CHAR, target, GENOME_TAG, MPI_COMM_WORLD); free(byte_array); @@ -123,7 +123,7 @@ void master(int max_rank) { int source = status.MPI_SOURCE; int tag = status.MPI_TAG; - Log::debug("probe returned message from: %d with tag: %d\n", source, tag); + LOG_DEBUG("probe returned message from: %d with tag: %d\n", source, tag); //if the message is a work request, send a genome @@ -136,25 +136,25 @@ void master(int max_rank) { if (genome == NULL) { //search was completed if it returns NULL for an individual //send terminate message - Log::debug("terminating worker: %d\n", source); + LOG_DEBUG("terminating worker: %d\n", source); send_terminate_message(source); terminates_sent++; - Log::debug("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); + LOG_DEBUG("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); if (terminates_sent >= max_rank - 1) return; } else { //genome->write_to_file( examm->get_output_directory() + "/before_send_gen_" + to_string(genome->get_generation_id()) ); //send genome - Log::debug("sending genome to: %d\n", source); + LOG_DEBUG("sending genome to: %d\n", source); send_genome_to(source, genome); //delete this genome as it will not be used again delete genome; } } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome from: %d\n", source); + LOG_DEBUG("received genome from: %d\n", source); RNN_Genome 
*genome = receive_genome_from(source); examm_mutex.lock(); @@ -164,7 +164,7 @@ void master(int max_rank) { delete genome; //this genome will be deleted if/when removed from population } else { - Log::fatal("ERROR: received message from %d with unknown tag: %d\n", source, tag); + LOG_FATAL("ERROR: received message from %d with unknown tag: %d\n", source, tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -175,23 +175,23 @@ void worker(int rank) { Log::set_id(worker_id); while (true) { - Log::debug("sending work request!\n"); + LOG_DEBUG("sending work request!\n"); send_work_request(0); - Log::debug("sent work request!\n"); + LOG_DEBUG("sent work request!\n"); MPI_Status status; MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status); int tag = status.MPI_TAG; - Log::debug("probe received message with tag: %d\n", tag); + LOG_DEBUG("probe received message with tag: %d\n", tag); if (tag == TERMINATE_TAG) { - Log::debug("received terminate tag!\n"); + LOG_DEBUG("received terminate tag!\n"); receive_terminate_message(0); break; } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome!\n"); + LOG_DEBUG("received genome!\n"); RNN_Genome* genome = receive_genome_from(0); string log_id = "slice_" + to_string(global_slice) + "_repeat_" + to_string(global_repeat) + "_genome_" + to_string(genome->get_generation_id()) + "_worker_" + to_string(rank); @@ -206,7 +206,7 @@ void worker(int rank) { delete genome; } else { - Log::fatal("ERROR: received message with unknown tag: %d\n", tag); + LOG_FATAL("ERROR: received message with unknown tag: %d\n", tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -432,7 +432,7 @@ int main(int argc, char** argv) { string binary_file = slice_output_directory + "/repeat_best_" + to_string(k) + ".bin"; string graphviz_file = slice_output_directory + "/repeat_best_" + to_string(k) + ".gv"; - Log::debug("writing best genome to '%s' and '%s'\n", binary_file.c_str(), graphviz_file.c_str()); + LOG_DEBUG("writing best genome to '%s' and '%s'\n", binary_file.c_str(), 
graphviz_file.c_str()); best_genome->write_to_file(binary_file); best_genome->write_graphviz(graphviz_file); @@ -444,7 +444,7 @@ int main(int argc, char** argv) { Log::set_id("main_" + to_string(rank)); MPI_Barrier(MPI_COMM_WORLD); - Log::debug("rank %d completed slice %d of %d repeat %d of %d\n", rank, i, time_series_sets->get_number_series(), k, repeats); + LOG_DEBUG("rank %d completed slice %d of %d repeat %d of %d\n", rank, i, time_series_sets->get_number_series(), k, repeats); } slice_times_file.close(); diff --git a/mpi/examm_mpi_nlp.cxx b/mpi/examm_mpi_nlp.cxx index 493ffadb..de672275 100644 --- a/mpi/examm_mpi_nlp.cxx +++ b/mpi/examm_mpi_nlp.cxx @@ -63,16 +63,16 @@ RNN_Genome* receive_genome_from(int source) { int length = length_message[0]; - Log::debug("receiving genome of length: %d from: %d\n", length, source); + LOG_DEBUG("receiving genome of length: %d from: %d\n", length, source); char* genome_str = new char[length + 1]; - Log::debug("receiving genome from: %d\n", source); + LOG_DEBUG("receiving genome from: %d\n", source); MPI_Recv(genome_str, length, MPI_CHAR, source, GENOME_TAG, MPI_COMM_WORLD, &status); genome_str[length] = '\0'; - Log::trace("genome_str:\n%s\n", genome_str); + LOG_TRACE("genome_str:\n%s\n", genome_str); RNN_Genome* genome = new RNN_Genome(genome_str, length); @@ -86,13 +86,13 @@ void send_genome_to(int target, RNN_Genome* genome) { genome->write_to_array(&byte_array, length); - Log::debug("sending genome of length: %d to: %d\n", length, target); + LOG_DEBUG("sending genome of length: %d to: %d\n", length, target); int length_message[1]; length_message[0] = length; MPI_Send(length_message, 1, MPI_INT, target, GENOME_LENGTH_TAG, MPI_COMM_WORLD); - Log::debug("sending genome to: %d\n", target); + LOG_DEBUG("sending genome to: %d\n", target); MPI_Send(byte_array, length, MPI_CHAR, target, GENOME_TAG, MPI_COMM_WORLD); free(byte_array); @@ -112,7 +112,7 @@ void receive_terminate_message(int source) { void master(int max_rank) { //the 
"main" id will have already been set by the main function so we do not need to re-set it here - Log::debug("MAX INT: %d\n", numeric_limits::max()); + LOG_DEBUG("MAX INT: %d\n", numeric_limits::max()); int terminates_sent = 0; @@ -123,7 +123,7 @@ void master(int max_rank) { int source = status.MPI_SOURCE; int tag = status.MPI_TAG; - Log::debug("probe returned message from: %d with tag: %d\n", source, tag); + LOG_DEBUG("probe returned message from: %d with tag: %d\n", source, tag); //if the message is a work request, send a genome @@ -137,25 +137,25 @@ void master(int max_rank) { if (genome == NULL) { //search was completed if it returns NULL for an individual //send terminate message - Log::info("terminating worker: %d\n", source); + LOG_INFO("terminating worker: %d\n", source); send_terminate_message(source); terminates_sent++; - Log::debug("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); + LOG_DEBUG("sent: %d terminates of %d\n", terminates_sent, (max_rank - 1)); if (terminates_sent >= max_rank - 1) return; } else { //genome->write_to_file( examm->get_output_directory() + "/before_send_gen_" + to_string(genome->get_generation_id()) ); //send genome - Log::debug("sending genome to: %d\n", source); + LOG_DEBUG("sending genome to: %d\n", source); send_genome_to(source, genome); //delete this genome as it will not be used again delete genome; } } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome from: %d\n", source); + LOG_DEBUG("received genome from: %d\n", source); RNN_Genome *genome = receive_genome_from(source); examm_mutex.lock(); @@ -166,7 +166,7 @@ void master(int max_rank) { delete genome; //this genome will be deleted if/when removed from population } else { - Log::fatal("ERROR: received message from %d with unknown tag: %d", source, tag); + LOG_FATAL("ERROR: received message from %d with unknown tag: %d", source, tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -176,23 +176,23 @@ void worker(int rank) { Log::set_id("worker_" + 
to_string(rank)); while (true) { - Log::debug("sending work request!\n"); + LOG_DEBUG("sending work request!\n"); send_work_request(0); - Log::debug("sent work request!\n"); + LOG_DEBUG("sent work request!\n"); MPI_Status status; MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status); int tag = status.MPI_TAG; - Log::debug("probe received message with tag: %d\n", tag); + LOG_DEBUG("probe received message with tag: %d\n", tag); if (tag == TERMINATE_TAG) { - Log::debug("received terminate tag!\n"); + LOG_DEBUG("received terminate tag!\n"); receive_terminate_message(0); break; } else if (tag == GENOME_LENGTH_TAG) { - Log::debug("received genome!\n"); + LOG_DEBUG("received genome!\n"); RNN_Genome* genome = receive_genome_from(0); //have each worker write the backproagation to a separate log file @@ -211,7 +211,7 @@ void worker(int rank) { delete genome; } else { - Log::fatal("ERROR: received message with unknown tag: %d\n", tag); + LOG_FATAL("ERROR: received message with unknown tag: %d\n", tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -264,12 +264,12 @@ int main(int argc, char** argv) { corpus_sets->export_training_series(word_offset,training_inputs,training_outputs); corpus_sets->export_test_series(word_offset,validation_inputs,validation_outputs); - Log::info("exported word series.\n"); + LOG_INFO("exported word series.\n"); int number_inputs = corpus_sets->get_number_inputs(); int number_outputs = corpus_sets->get_number_outputs(); - Log::info("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); + LOG_INFO("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); int32_t population_size; get_argument(arguments, "--population_size", true, population_size); @@ -416,7 +416,7 @@ int main(int argc, char** argv) { finished = true; - Log::debug("rank %d completed!\n"); + LOG_DEBUG("rank %d completed!\n", rank); Log::release_id("main_" + to_string(rank)); MPI_Finalize(); diff --git a/mpi/rnn_kfold_sweep.cxx b/mpi/rnn_kfold_sweep.cxx index 
5517260c..05230cb1 100644 --- a/mpi/rnn_kfold_sweep.cxx +++ b/mpi/rnn_kfold_sweep.cxx @@ -109,7 +109,7 @@ void send_job_to(int target, int current_job) { int job_message[1]; job_message[0] = current_job; - Log::debug("sending job %d of %d to %d\n", current_job, results.size(), target); + LOG_DEBUG("sending job %d of %d to %d\n", current_job, results.size(), target); MPI_Send(job_message, 1, MPI_INT, target, JOB_TAG, MPI_COMM_WORLD); } @@ -121,7 +121,7 @@ int receive_job_from(int source) { int current_job = job_message[0]; - Log::debug("receiving current_job: %d from %d\n", current_job, source); + LOG_DEBUG("receiving current_job: %d from %d\n", current_job, source); return current_job; } @@ -199,7 +199,7 @@ int mkpath(const char *path, mode_t mode) { status = 0; pp = copypath; while (status == 0 && (sp = strchr(pp, '/')) != 0) { - Log::debug("trying to create directory: '%s'\n", copypath); + LOG_DEBUG("trying to create directory: '%s'\n", copypath); if (sp != pp) { /* Neither root nor double slash in path */ *sp = '\0'; @@ -222,7 +222,7 @@ int mkpath(const char *path, mode_t mode) { void master(int max_rank) { if (output_directory != "") { - Log::debug("creating directory: '%s'\n", output_directory.c_str()); + LOG_DEBUG("creating directory: '%s'\n", output_directory.c_str()); mkpath(output_directory.c_str(), 0777); mkdir(output_directory.c_str(), 0777); @@ -242,7 +242,7 @@ void master(int max_rank) { int message_source = status.MPI_SOURCE; int tag = status.MPI_TAG; - Log::debug("probe returned message from: %d with tag: %d\n", message_source, tag); + LOG_DEBUG("probe returned message from: %d with tag: %d\n", message_source, tag); //if the message is a work request, send a genome @@ -252,23 +252,23 @@ void master(int max_rank) { if (current_job >= last_job) { //no more jobs to process if the current job is >= the result vector //send terminate message - Log::debug("terminating worker: %d\n", message_source); + LOG_DEBUG("terminating worker: %d\n", message_source); 
send_terminate_to(message_source); terminates_sent++; - Log::debug("sent: %d terminates of: %d\n", terminates_sent, (max_rank - 1)); + LOG_DEBUG("sent: %d terminates of: %d\n", terminates_sent, (max_rank - 1)); if (terminates_sent >= max_rank - 1) return; } else { //send job - Log::debug("sending job to: %d\n", message_source); + LOG_DEBUG("sending job to: %d\n", message_source); send_job_to(message_source, current_job); //increment the current job for the next worker current_job++; } } else if (tag == RESULT_TAG) { - Log::debug("receiving job from: %d\n", message_source); + LOG_DEBUG("receiving job from: %d\n", message_source); ResultSet result = receive_result_from(message_source); results[result.job] = result; @@ -283,22 +283,21 @@ int32_t rnn_job_end = (rnn + 1) * jobs_per_rnn; bool rnn_finished = true; - Log::debug("testing finished for rnn: '%s'\n", rnn_types[rnn].c_str()); + LOG_DEBUG("testing finished for rnn: '%s'\n", rnn_types[rnn].c_str()); + + string log_str = ""; for (int i = rnn_job_start; i < rnn_job_end; i++) { - if (i == rnn_job_start) { - Log::debug(" %d", results[i].job); - } else { - Log::debug_no_header(" %d", results[i].job); - } + log_str = log_str + string_format(" %d", results[i].job); if (results[i].job < 0) { rnn_finished = false; break; } } - Log::debug_no_header("\n"); + log_str = log_str + "\n"; + LOG_DEBUG("%s", log_str.c_str()); - Log::debug("rnn '%s' finished? %d\n", rnn_types[rnn].c_str(), rnn_finished); + LOG_DEBUG("rnn '%s' finished? 
%d\n", rnn_types[rnn].c_str(), rnn_finished); if (rnn_finished) { ofstream outfile(output_directory + "/combined_" + rnn_types[rnn] + ".csv"); @@ -309,7 +308,7 @@ void master(int max_rank) { outfile << j << "," << k << "," << results[current].milliseconds << "," << results[current].training_mse << "," << results[current].training_mae << "," << results[current].test_mse << "," << results[current].test_mae << endl; - Log::debug("%s, tested on series[%d], repeat: %d, result: %s\n", rnn_types[rnn].c_str(), j, k, result_to_string(results[current]).c_str()); + LOG_DEBUG("%s, tested on series[%d], repeat: %d, result: %s\n", rnn_types[rnn].c_str(), j, k, result_to_string(results[current]).c_str()); current++; } } @@ -318,7 +317,7 @@ void master(int max_rank) { } else { - Log::fatal("ERROR: received message from %d with unknown tag: %d\n", message_source, tag); + LOG_FATAL("ERROR: received message from %d with unknown tag: %d\n", message_source, tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -336,7 +335,7 @@ ResultSet handle_job(int rank, int current_job) { //get repeat int32_t repeat = current_job % jobs_per_j; - Log::debug("evaluating rnn type '%s' with j: %d, repeat: %d\n", rnn_type.c_str(), j, repeat); + LOG_DEBUG("evaluating rnn type '%s' with j: %d, repeat: %d\n", rnn_type.c_str(), j, repeat); vector training_indexes; vector test_indexes; @@ -353,7 +352,7 @@ ResultSet handle_job(int rank, int current_job) { } } - Log::debug("test_indexes.size(): %d, training_indexes.size(): %d\n", test_indexes.size(), training_indexes.size()); + LOG_DEBUG("test_indexes.size(): %d, training_indexes.size(): %d\n", test_indexes.size(), training_indexes.size()); time_series_sets->set_training_indexes(training_indexes); time_series_sets->set_test_indexes(test_indexes); @@ -425,7 +424,7 @@ ResultSet handle_job(int rank, int current_job) { RNN* rnn = genome->get_rnn(); uint32_t number_of_weights = genome->get_number_weights(); - Log::debug("RNN INFO FOR '%s', nodes: %d, edges: %d, rec: %d, 
weights: %d\n", rnn_type.c_str(), genome->get_enabled_node_count(), genome->get_enabled_edge_count(), genome->get_enabled_recurrent_edge_count(), number_of_weights); + LOG_DEBUG("RNN INFO FOR '%s', nodes: %d, edges: %d, rec: %d, weights: %d\n", rnn_type.c_str(), genome->get_enabled_node_count(), genome->get_enabled_edge_count(), genome->get_enabled_recurrent_edge_count(), number_of_weights); vector min_bound(number_of_weights, -1.0); vector max_bound(number_of_weights, 1.0); @@ -465,7 +464,7 @@ ResultSet handle_job(int rank, int current_job) { Log::release_id(backprop_log_id); Log::set_id("worker_" + to_string(rank)); - Log::debug("deleting genome and rnn.\n"); + LOG_DEBUG("deleting genome and rnn.\n"); delete genome; delete rnn; @@ -478,7 +477,7 @@ ResultSet handle_job(int rank, int current_job) { result.test_mae = test_mae; result.milliseconds = milliseconds; - Log::debug("finished job, result: %s\n", result_to_string(result).c_str()); + LOG_DEBUG("finished job, result: %s\n", result_to_string(result).c_str()); return result; } @@ -488,33 +487,33 @@ void worker(int rank) { Log::set_id("worker_" + to_string(rank)); while (true) { - Log::debug("sending work request!\n"); + LOG_DEBUG("sending work request!\n"); send_work_request_to(master_rank); - Log::debug("sent work request!\n"); + LOG_DEBUG("sent work request!\n"); MPI_Status status; MPI_Probe(master_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &status); int tag = status.MPI_TAG; - Log::debug("probe received message with tag: %d\n", tag); + LOG_DEBUG("probe received message with tag: %d\n", tag); if (tag == TERMINATE_TAG) { - Log::debug("received terminate tag!\n"); + LOG_DEBUG("received terminate tag!\n"); receive_terminate_from(master_rank); break; } else if (tag == JOB_TAG) { - Log::debug("received genome!\n"); + LOG_DEBUG("received genome!\n"); int current_job = receive_job_from(master_rank); ResultSet result = handle_job(rank, current_job); - Log::debug("calculated_result: %s\n", result_to_string(result).c_str()); + 
LOG_DEBUG("calculated_result: %s\n", result_to_string(result).c_str()); send_result_to(master_rank, result); } else { - Log::fatal("ERROR: received message with unknown tag: %d\n", tag); + LOG_FATAL("ERROR: received message with unknown tag: %d\n", tag); MPI_Abort(MPI_COMM_WORLD, 1); } } @@ -538,7 +537,7 @@ int main(int argc, char **argv) { Log::set_rank(rank); Log::set_id("main_" + to_string(rank)); - Log::debug("process %d of %d\n", rank, max_rank); + LOG_DEBUG("process %d of %d\n", rank, max_rank); Log::restrict_to_rank(0); @@ -557,7 +556,7 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } diff --git a/mpi/test_stream_write.cxx b/mpi/test_stream_write.cxx index a708d439..2cf488eb 100644 --- a/mpi/test_stream_write.cxx +++ b/mpi/test_stream_write.cxx @@ -92,7 +92,7 @@ int main(int argc, char** argv) { int number_inputs = time_series_sets->get_number_inputs(); int number_outputs = time_series_sets->get_number_outputs(); - Log::debug("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); + LOG_DEBUG("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); int32_t population_size; get_argument(arguments, "--population_size", true, population_size); @@ -236,13 +236,13 @@ int main(int argc, char** argv) { genome->write_to_array(&byte_array, length); - Log::debug("write to array successful!\n"); + LOG_DEBUG("write to array successful!\n"); Log::set_id("main_" + to_string(rank)); finished = true; - Log::debug("rank %d completed!\n"); + LOG_DEBUG("rank %d completed!\n", rank); Log::release_id("main_" + to_string(rank)); MPI_Finalize(); diff --git a/multithreaded/examm_mt.cxx b/multithreaded/examm_mt.cxx index 
444c9490..f59ac6f3 100644 --- a/multithreaded/examm_mt.cxx +++ b/multithreaded/examm_mt.cxx @@ -99,12 +99,12 @@ int main(int argc, char** argv) { time_series_sets->export_training_series(time_offset, training_inputs, training_outputs); time_series_sets->export_test_series(time_offset, validation_inputs, validation_outputs); - Log::info("exported time series.\n"); + LOG_INFO("exported time series.\n"); int number_inputs = time_series_sets->get_number_inputs(); int number_outputs = time_series_sets->get_number_outputs(); - Log::info("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); + LOG_INFO("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); int32_t population_size; get_argument(arguments, "--population_size", true, population_size); @@ -251,7 +251,7 @@ int main(int argc, char** argv) { finished = true; - Log::info("completed!\n"); + LOG_INFO("completed!\n"); Log::release_id("main"); return 0; diff --git a/multithreaded/examm_mt_nlp.cxx b/multithreaded/examm_mt_nlp.cxx index 04efd7b7..2bdde5a3 100644 --- a/multithreaded/examm_mt_nlp.cxx +++ b/multithreaded/examm_mt_nlp.cxx @@ -100,14 +100,14 @@ int main(int argc, char **argv) corpus_sets->export_training_series(word_offset,training_inputs,training_outputs); corpus_sets->export_test_series(word_offset,validation_inputs,validation_outputs); - Log::info("exported word series.\n"); + LOG_INFO("exported word series.\n"); int number_inputs = corpus_sets->get_number_inputs(); int number_outputs = corpus_sets->get_number_outputs(); - Log::info("The size of the input is :: %d,%d,%d \n",training_inputs.size(),training_inputs[0].size(),training_inputs[0][0].size()); + LOG_INFO("The size of the input is :: %d,%d,%d \n",training_inputs.size(),training_inputs[0].size(),training_inputs[0][0].size()); - Log::info("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); + LOG_INFO("number_inputs: %d, number_outputs: %d\n", number_inputs, number_outputs); int32_t 
population_size; get_argument(arguments, "--population_size", true, population_size); @@ -255,7 +255,7 @@ int main(int argc, char **argv) finished = true; - Log::info("completed!\n"); + LOG_INFO("completed!\n"); Log::release_id("main"); return 0; diff --git a/multithreaded/examm_mt_single_series.cxx b/multithreaded/examm_mt_single_series.cxx index 7ce98061..80571ba6 100644 --- a/multithreaded/examm_mt_single_series.cxx +++ b/multithreaded/examm_mt_single_series.cxx @@ -261,15 +261,15 @@ int main(int argc, char** argv) { finished = true; - Log::info("completed!\n"); + LOG_INFO("completed!\n"); RNN_Genome *best_genome = examm->get_best_genome(); vector best_parameters = best_genome->get_best_parameters(); - Log::info("training MSE: %lf\n", best_genome->get_mse(best_parameters, training_inputs, training_outputs)); - Log::info("training MSE: %lf\n", best_genome->get_mae(best_parameters, training_inputs, training_outputs)); - Log::info("validation MSE: %lf\n", best_genome->get_mse(best_parameters, validation_inputs, validation_outputs)); - Log::info("validation MSE: %lf\n", best_genome->get_mae(best_parameters, validation_inputs, validation_outputs)); + LOG_INFO("training MSE: %lf\n", best_genome->get_mse(best_parameters, training_inputs, training_outputs)); + LOG_INFO("training MSE: %lf\n", best_genome->get_mae(best_parameters, training_inputs, training_outputs)); + LOG_INFO("validation MSE: %lf\n", best_genome->get_mse(best_parameters, validation_inputs, validation_outputs)); + LOG_INFO("validation MSE: %lf\n", best_genome->get_mae(best_parameters, validation_inputs, validation_outputs)); overall_results << setw(15) << fixed << best_genome->get_mse(best_parameters, training_inputs, training_outputs) << ", " << setw(15) << fixed << best_genome->get_mae(best_parameters, training_inputs, training_outputs) << ", " @@ -279,11 +279,11 @@ int main(int argc, char** argv) { best_genome->write_to_file(output_directory + "/" + output_filename + "_slice_" + to_string(i) + 
"_repeat_" + to_string(k) + ".bin"); best_genome->write_graphviz(output_directory + "/" + output_filename + "_slice_" + to_string(i) + "_repeat_" + to_string(k) + ".gv"); - Log::debug("deleting genome\n"); + LOG_DEBUG("deleting genome\n"); delete best_genome; - Log::debug("deleting exact\n"); + LOG_DEBUG("deleting exact\n"); delete examm; - Log::debug("deleted exact\n"); + LOG_DEBUG("deleted exact\n"); } overall_results << endl; } diff --git a/rnn/delta_node.cxx b/rnn/delta_node.cxx index d2ef65d8..9a4920ec 100644 --- a/rnn/delta_node.cxx +++ b/rnn/delta_node.cxx @@ -88,7 +88,7 @@ double Delta_Node::get_gradient(string gradient_name) { } else if (gradient_name == "z_hat_bias") { gradient_sum += d_z_hat_bias[i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -97,7 +97,7 @@ double Delta_Node::get_gradient(string gradient_name) { } void Delta_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } void Delta_Node::input_fired(int time, double incoming_output) { @@ -107,7 +107,7 @@ void Delta_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on Delta_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on Delta_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -154,7 +154,7 @@ void Delta_Node::input_fired(int time, double incoming_output) { void Delta_Node::try_update_deltas(int time) { if (outputs_fired[time] < total_outputs) return; else if 
(outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on Delta_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on Delta_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -254,7 +254,7 @@ void Delta_Node::set_weights(uint32_t &offset, const vector ¶meters) z_hat_bias = bound(parameters[offset++]); //uint32_t end_offset = offset; - //Log::trace("set weights from offset %d to %d on Delta_node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("set weights from offset %d to %d on Delta_node %d\n", start_offset, end_offset, innovation_number); } void Delta_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -269,7 +269,7 @@ void Delta_Node::get_weights(uint32_t &offset, vector ¶meters) const parameters[offset++] = z_hat_bias; //uint32_t end_offset = offset; - //Log::trace("got weights from offset %d to %d on Delta_node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("got weights from offset %d to %d on Delta_node %d\n", start_offset, end_offset, innovation_number); } diff --git a/rnn/enarc_node.cxx b/rnn/enarc_node.cxx index c1cd4833..7f9bdd17 100644 --- a/rnn/enarc_node.cxx +++ b/rnn/enarc_node.cxx @@ -131,7 +131,7 @@ double ENARC_Node::get_gradient(string gradient_name) { } else if (gradient_name == "w8") { gradient_sum += d_w8[i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -139,7 +139,7 @@ double ENARC_Node::get_gradient(string gradient_name) { } void ENARC_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), 
get_gradient(gradient_name)); } void ENARC_Node::input_fired(int time, double incoming_output) { @@ -149,7 +149,7 @@ void ENARC_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on ENARC_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on ENARC_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -220,7 +220,7 @@ void ENARC_Node::input_fired(int time, double incoming_output) { void ENARC_Node::try_update_deltas(int time){ if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on ENARC_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on ENARC_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -314,7 +314,7 @@ void ENARC_Node::set_weights(uint32_t &offset, const vector ¶meters) //uint32_t end_offset = offset; - //Log::trace("set weights from offset %d to %d on ENARC_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("set weights from offset %d to %d on ENARC_Node %d\n", start_offset, end_offset, innovation_number); } void ENARC_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -338,7 +338,7 @@ void ENARC_Node::get_weights(uint32_t &offset, vector ¶meters) const //uint32_t end_offset = offset; - //Log::trace("got weights from offset %d to %d on ENARC_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("got weights from offset %d to %d on ENARC_Node %d\n", start_offset, end_offset, innovation_number); } void ENARC_Node::get_gradients(vector &gradients) { diff --git 
a/rnn/enas_dag_node.cxx b/rnn/enas_dag_node.cxx index 095d7424..5c50d673 100644 --- a/rnn/enas_dag_node.cxx +++ b/rnn/enas_dag_node.cxx @@ -118,7 +118,7 @@ double ENAS_DAG_Node::get_gradient(string gradient_name) { } else if (gradient_name == "w8") { gradient_sum += d_weights[7][i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -126,7 +126,7 @@ double ENAS_DAG_Node::get_gradient(string gradient_name) { } void ENAS_DAG_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } double ENAS_DAG_Node::activation(double value, int act_operator) { @@ -136,7 +136,7 @@ double ENAS_DAG_Node::activation(double value, int act_operator) { if (act_operator == 3) return leakyReLU(value); if (act_operator == 4) return identity(value); - Log::fatal("ERROR: invalid act_operator: %d\n", act_operator); + LOG_FATAL("ERROR: invalid act_operator: %d\n", act_operator); exit(1); } @@ -147,7 +147,7 @@ double ENAS_DAG_Node::activation_derivative(double value, double input, int act_ if (act_operator == 3) return leakyReLU_derivative(input); if (act_operator == 4) return identity_derivative(); - Log::fatal("ERROR: invalid act_operator: %d\n", act_operator); + LOG_FATAL("ERROR: invalid act_operator: %d\n", act_operator); exit(1); } @@ -161,14 +161,14 @@ void ENAS_DAG_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on ENAS_DAG_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on ENAS_DAG_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, 
time, inputs_fired[time], total_inputs); exit(1); } //update the reset gate bias so its centered around 1 //r_bias += 1; int no_of_nodes = connections.size(); - Log::debug("ERROR: inputs_fired on ENAS_DAG_Node %d at time %d is %d and no_of_nodes is %d\n", innovation_number, time, inputs_fired[time], no_of_nodes); + LOG_DEBUG("ERROR: inputs_fired on ENAS_DAG_Node %d at time %d is %d and no_of_nodes is %d\n", innovation_number, time, inputs_fired[time], no_of_nodes); double x = input_values[time]; @@ -203,7 +203,7 @@ void ENAS_DAG_Node::input_fired(int time, double incoming_output) { // output_values[time] /= fan_out; - Log::debug("DEBUG: input_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_DEBUG("DEBUG: input_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); } @@ -211,7 +211,7 @@ void ENAS_DAG_Node::input_fired(int time, double incoming_output) { void ENAS_DAG_Node::try_update_deltas(int time){ if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -262,7 +262,7 @@ void ENAS_DAG_Node::try_update_deltas(int time){ // d_zw[time] = d_h*l_Nodes[0][time]*x; - Log::debug("DEBUG: output_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_DEBUG("DEBUG: output_fired on ENAS_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); } @@ -314,7 +314,7 @@ void 
ENAS_DAG_Node::set_weights(uint32_t &offset, const vector ¶mete else weights.at(new_node_weight) = bound(parameters[offset++]); } - Log::debug("DEBUG: no of weights on ENAS_DAG_Node %d at time %d is %d \n", innovation_number, time, weights.size()); + LOG_DEBUG("DEBUG: no of weights on ENAS_DAG_Node %d at time %d is %d \n", innovation_number, time, weights.size()); } diff --git a/rnn/examm.cxx b/rnn/examm.cxx index 854cae0d..0ccdfb3a 100644 --- a/rnn/examm.cxx +++ b/rnn/examm.cxx @@ -204,11 +204,11 @@ EXAMM::EXAMM( check_weight_initialize_validity(); - Log::info("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); - Log::info("weight inheritance: %s \n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); - Log::info("mutated component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_INFO("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); + LOG_INFO("weight inheritance: %s \n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); + LOG_INFO("mutated component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); - Log::info("Speciation method is: \"%s\" (Default is the island-based speciation strategy).\n", speciation_method.c_str()); + LOG_INFO("Speciation method is: \"%s\" (Default is the island-based speciation strategy).\n", speciation_method.c_str()); if (speciation_method.compare("island") == 0 || speciation_method.compare("") == 0) { //generate a minimal feed foward network as the seed genome @@ -360,7 +360,7 @@ EXAMM::EXAMM( } void EXAMM::print() { - if (Log::at_level(Log::INFO)) { + if (Log::at_level(LOG_LEVEL_INFO)) { speciation_strategy->print(); } } @@ -377,7 +377,7 @@ void EXAMM::update_log() { log_file = new ofstream(output_file, std::ios_base::app); if (!log_file->is_open()) { - Log::error("could not open EXAMM output log: '%s'\n", output_file.c_str()); + LOG_ERROR("could not open EXAMM output log: '%s'\n", output_file.c_str()); exit(1); } } @@ -390,7 +390,7 @@ 
void EXAMM::update_log() { op_log_file = new ofstream(output_file, std::ios_base::app); if (!op_log_file->is_open()) { - Log::error("could not open EXAMM output log: '%s'\n", output_file.c_str()); + LOG_ERROR("could not open EXAMM output log: '%s'\n", output_file.c_str()); exit(1); } } @@ -459,7 +459,7 @@ void EXAMM::set_possible_node_types(vector possible_node_type_strings) { } if (!found) { - Log::error("unknown node type: '%s'\n", node_type_s.c_str()); + LOG_ERROR("unknown node type: '%s'\n", node_type_s.c_str()); exit(1); } } @@ -489,10 +489,10 @@ RNN_Genome* EXAMM::get_worst_genome() { bool EXAMM::insert_genome(RNN_Genome* genome) { total_bp_epochs += genome->get_bp_iterations(); - // Log::info("genomes evaluated: %10d , attempting to insert: %s\n", (speciation_strategy->get_inserted_genomes() + 1), parse_fitness(genome->get_fitness()).c_str()); + // LOG_INFO("genomes evaluated: %10d , attempting to insert: %s\n", (speciation_strategy->get_inserted_genomes() + 1), parse_fitness(genome->get_fitness()).c_str()); if (!genome->sanity_check()) { - Log::error("genome failed sanity check on insert!\n"); + LOG_ERROR("genome failed sanity check on insert!\n"); exit(1); } @@ -526,7 +526,7 @@ bool EXAMM::insert_genome(RNN_Genome* genome) { } } else { if (generated_by != "initial") - Log::error("unrecognized generated_by string '%s'\n", generated_by.c_str()); + LOG_ERROR("unrecognized generated_by string '%s'\n", generated_by.c_str()); } } @@ -565,7 +565,7 @@ RNN_Genome* EXAMM::generate_genome() { //this is just a sanity check, can most likely comment out (checking to see //if all the paramemters are sane) - Log::debug("getting mu/sigma after random initialization of copy!\n"); + LOG_DEBUG("getting mu/sigma after random initialization of copy!\n"); double _mu, _sigma; genome->get_mu_sigma(genome->best_parameters, _mu, _sigma); @@ -585,7 +585,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { double mu, sigma; //g->write_graphviz("rnn_genome_premutate_" + 
to_string(g->get_generation_id()) + ".gv"); - Log::info("generating new genome by mutation.\n"); + LOG_INFO("generating new genome by mutation.\n"); g->get_mu_sigma(g->best_parameters, mu, sigma); g->clear_generated_by(); @@ -612,10 +612,10 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { double rng = rng_0_1(generator) * total; int new_node_type = get_random_node_type(); string node_type_str = NODE_TYPES[new_node_type]; - Log::debug( "rng: %lf, total: %lf, new node type: %d (%s)\n", rng, total, new_node_type, node_type_str.c_str()); + LOG_DEBUG( "rng: %lf, total: %lf, new node type: %d (%s)\n", rng, total, new_node_type, node_type_str.c_str()); if (rng < clone_rate) { - Log::debug("\tcloned\n"); + LOG_DEBUG("\tcloned\n"); g->set_generated_by("clone"); modified = true; continue; @@ -623,7 +623,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { rng -= clone_rate; if (rng < add_edge_rate) { modified = g->add_edge(mu, sigma, edge_innovation_count); - Log::debug("\tadding edge, modified: %d\n", modified); + LOG_DEBUG("\tadding edge, modified: %d\n", modified); if (modified) g->set_generated_by("add_edge"); continue; } @@ -632,7 +632,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < add_recurrent_edge_rate) { uniform_int_distribution dist = get_recurrent_depth_dist(); modified = g->add_recurrent_edge(mu, sigma, dist, edge_innovation_count); - Log::debug("\tadding recurrent edge, modified: %d\n", modified); + LOG_DEBUG("\tadding recurrent edge, modified: %d\n", modified); if (modified) g->set_generated_by("add_recurrent_edge"); continue; } @@ -640,7 +640,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < enable_edge_rate) { modified = g->enable_edge(); - Log::debug("\tenabling edge, modified: %d\n", modified); + LOG_DEBUG("\tenabling edge, modified: %d\n", modified); if (modified) g->set_generated_by("enable_edge"); continue; } @@ -648,7 +648,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome 
*g) { if (rng < disable_edge_rate) { modified = g->disable_edge(); - Log::debug("\tdisabling edge, modified: %d\n", modified); + LOG_DEBUG("\tdisabling edge, modified: %d\n", modified); if (modified) g->set_generated_by("disable_edge"); continue; } @@ -657,7 +657,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < split_edge_rate) { uniform_int_distribution dist = get_recurrent_depth_dist(); modified = g->split_edge(mu, sigma, new_node_type, dist, edge_innovation_count, node_innovation_count); - Log::debug("\tsplitting edge, modified: %d\n", modified); + LOG_DEBUG("\tsplitting edge, modified: %d\n", modified); if (modified) g->set_generated_by("split_edge(" + node_type_str + ")"); continue; } @@ -666,7 +666,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < add_node_rate) { uniform_int_distribution dist = get_recurrent_depth_dist(); modified = g->add_node(mu, sigma, new_node_type, dist, edge_innovation_count, node_innovation_count); - Log::debug("\tadding node, modified: %d\n", modified); + LOG_DEBUG("\tadding node, modified: %d\n", modified); if (modified) g->set_generated_by("add_node(" + node_type_str + ")"); continue; } @@ -674,7 +674,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < enable_node_rate) { modified = g->enable_node(); - Log::debug("\tenabling node, modified: %d\n", modified); + LOG_DEBUG("\tenabling node, modified: %d\n", modified); if (modified) g->set_generated_by("enable_node"); continue; } @@ -682,7 +682,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < disable_node_rate) { modified = g->disable_node(); - Log::debug("\tdisabling node, modified: %d\n", modified); + LOG_DEBUG("\tdisabling node, modified: %d\n", modified); if (modified) g->set_generated_by("disable_node"); continue; } @@ -691,7 +691,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < split_node_rate) { uniform_int_distribution dist = get_recurrent_depth_dist(); 
modified = g->split_node(mu, sigma, new_node_type, dist, edge_innovation_count, node_innovation_count); - Log::debug("\tsplitting node, modified: %d\n", modified); + LOG_DEBUG("\tsplitting node, modified: %d\n", modified); if (modified) g->set_generated_by("split_node(" + node_type_str + ")"); continue; } @@ -700,7 +700,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { if (rng < merge_node_rate) { uniform_int_distribution dist = get_recurrent_depth_dist(); modified = g->merge_node(mu, sigma, new_node_type, dist, edge_innovation_count, node_innovation_count); - Log::debug("\tmerging node, modified: %d\n", modified); + LOG_DEBUG("\tmerging node, modified: %d\n", modified); if (modified) g->set_generated_by("merge_node(" + node_type_str + ")"); continue; } @@ -716,7 +716,7 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { g->get_weights(new_parameters); g->initial_parameters = new_parameters; - if (Log::at_level(Log::DEBUG)) { + if (Log::at_level(LOG_LEVEL_DEBUG)) { g->get_mu_sigma(new_parameters, mu, sigma); } @@ -726,8 +726,8 @@ void EXAMM::mutate(int32_t max_mutations, RNN_Genome *g) { g->best_validation_mse = EXAMM_MAX_DOUBLE; g->best_validation_mae = EXAMM_MAX_DOUBLE; - if (Log::at_level(Log::DEBUG)) { - Log::debug("checking parameters after mutation\n"); + if (Log::at_level(LOG_LEVEL_DEBUG)) { + LOG_DEBUG("checking parameters after mutation\n"); g->get_mu_sigma(g->initial_parameters, mu, sigma); } @@ -749,21 +749,21 @@ void EXAMM::attempt_node_insert(vector &child_nodes, const void EXAMM::attempt_edge_insert(vector &child_edges, vector &child_nodes, RNN_Edge *edge, RNN_Edge *second_edge, bool set_enabled) { for (int32_t i = 0; i < (int32_t)child_edges.size(); i++) { if (child_edges[i]->get_innovation_number() == edge->get_innovation_number()) { - Log::fatal("ERROR in crossover! trying to push an edge with innovation_number: %d and it already exists in the vector!\n", edge->get_innovation_number()); + LOG_FATAL("ERROR in crossover! 
trying to push an edge with innovation_number: %d and it already exists in the vector!\n", edge->get_innovation_number()); - Log::fatal("vector innovation numbers: "); + LOG_FATAL("vector innovation numbers: "); for (int32_t i = 0; i < (int32_t)child_edges.size(); i++) { - Log::fatal("\t%d", child_edges[i]->get_innovation_number()); + LOG_FATAL("\t%d", child_edges[i]->get_innovation_number()); } - Log::fatal("This should never happen!\n"); + LOG_FATAL("This should never happen!\n"); exit(1); return; } else if (child_edges[i]->get_input_innovation_number() == edge->get_input_innovation_number() && child_edges[i]->get_output_innovation_number() == edge->get_output_innovation_number()) { - Log::debug("Not inserting edge in crossover operation as there was already an edge with the same input and output innovation numbers!\n"); + LOG_DEBUG("Not inserting edge in crossover operation as there was already an edge with the same input and output innovation numbers!\n"); return; } } @@ -774,7 +774,7 @@ void EXAMM::attempt_edge_insert(vector &child_edges, vectorweight - edge->weight) + edge->weight; - Log::trace("EDGE WEIGHT CROSSOVER :: better: %lf, worse: %lf, crossover_value: %lf, new_weight: %lf\n", edge->weight, second_edge->weight, crossover_value, new_weight); + LOG_TRACE("EDGE WEIGHT CROSSOVER :: better: %lf, worse: %lf, crossover_value: %lf, new_weight: %lf\n", edge->weight, second_edge->weight, crossover_value, new_weight); vector input_weights1, input_weights2, output_weights1, output_weights2; edge->get_input_node()->get_weights(input_weights1); @@ -791,12 +791,12 @@ void EXAMM::attempt_edge_insert(vector &child_edges, vector &child_edges, vector &child_recurrent_edges, vector &child_nodes, RNN_Recurrent_Edge *recurrent_edge, RNN_Recurrent_Edge *second_edge, bool set_enabled) { for (int32_t i = 0; i < (int32_t)child_recurrent_edges.size(); i++) { if (child_recurrent_edges[i]->get_innovation_number() == recurrent_edge->get_innovation_number()) { - Log::fatal("ERROR 
in crossover! trying to push an recurrent_edge with innovation_number: %d and it already exists in the vector!\n", recurrent_edge->get_innovation_number()); - Log::fatal("vector innovation numbers:\n"); + LOG_FATAL("ERROR in crossover! trying to push an recurrent_edge with innovation_number: %d and it already exists in the vector!\n", recurrent_edge->get_innovation_number()); + LOG_FATAL("vector innovation numbers:\n"); for (int32_t i = 0; i < (int32_t)child_recurrent_edges.size(); i++) { - Log::fatal("\t %d", child_recurrent_edges[i]->get_innovation_number()); + LOG_FATAL("\t %d", child_recurrent_edges[i]->get_innovation_number()); } - Log::fatal("This should never happen!\n"); + LOG_FATAL("This should never happen!\n"); exit(1); return; } else if (child_recurrent_edges[i]->get_input_innovation_number() == recurrent_edge->get_input_innovation_number() && child_recurrent_edges[i]->get_output_innovation_number() == recurrent_edge->get_output_innovation_number()) { - Log::debug("Not inserting recurrent_edge in crossover operation as there was already an recurrent_edge with the same input and output innovation numbers!\n"); + LOG_DEBUG("Not inserting recurrent_edge in crossover operation as there was already an recurrent_edge with the same input and output innovation numbers!\n"); return; } } @@ -845,7 +845,7 @@ void EXAMM::attempt_recurrent_edge_insert(vector &child_rec double crossover_value = rng_crossover_weight(generator); new_weight = crossover_value * (second_edge->weight - recurrent_edge->weight) + recurrent_edge->weight; - Log::debug("RECURRENT EDGE WEIGHT CROSSOVER :: better: %lf, worse: %lf, crossover_value: %lf, new_weight: %lf\n", recurrent_edge->weight, second_edge->weight, crossover_value, new_weight); + LOG_DEBUG("RECURRENT EDGE WEIGHT CROSSOVER :: better: %lf, worse: %lf, crossover_value: %lf, new_weight: %lf\n", recurrent_edge->weight, second_edge->weight, crossover_value, new_weight); vector input_weights1, input_weights2, output_weights1, 
output_weights2; recurrent_edge->get_input_node()->get_weights(input_weights1); @@ -859,12 +859,12 @@ void EXAMM::attempt_recurrent_edge_insert(vector &child_rec for (int32_t i = 0; i < (int32_t)new_input_weights.size(); i++) { new_input_weights[i] = crossover_value * (input_weights2[i] - input_weights1[i]) + input_weights1[i]; - Log::trace("\tnew input weights[%d]: %lf\n", i, new_input_weights[i]); + LOG_TRACE("\tnew input weights[%d]: %lf\n", i, new_input_weights[i]); } for (int32_t i = 0; i < (int32_t)new_output_weights.size(); i++) { new_output_weights[i] = crossover_value * (output_weights2[i] - output_weights1[i]) + output_weights1[i]; - Log::trace("\tnew output weights[%d]: %lf\n", i, new_output_weights[i]); + LOG_TRACE("\tnew output weights[%d]: %lf\n", i, new_output_weights[i]); } } else { @@ -888,20 +888,20 @@ void EXAMM::attempt_recurrent_edge_insert(vector &child_rec RNN_Genome* EXAMM::crossover(RNN_Genome *p1, RNN_Genome *p2) { - Log::debug("generating new genome by crossover!\n"); - Log::debug("p1->island: %d, p2->island: %d\n", p1->get_group_id(), p2->get_group_id()); - Log::debug("p1->number_inputs: %d, p2->number_inputs: %d\n", p1->get_number_inputs(), p2->get_number_inputs()); + LOG_DEBUG("generating new genome by crossover!\n"); + LOG_DEBUG("p1->island: %d, p2->island: %d\n", p1->get_group_id(), p2->get_group_id()); + LOG_DEBUG("p1->number_inputs: %d, p2->number_inputs: %d\n", p1->get_number_inputs(), p2->get_number_inputs()); for (uint32_t i = 0; i < p1->nodes.size(); i++) { - Log::debug("p1 node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d, reachable: %d, enabled: %d\n", i, p1->nodes[i]->get_innovation_number(), p1->nodes[i]->get_depth(), p1->nodes[i]->get_layer_type(), p1->nodes[i]->get_node_type(), p1->nodes[i]->is_reachable(), p1->nodes[i]->is_enabled()); + LOG_DEBUG("p1 node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d, reachable: %d, enabled: %d\n", i, p1->nodes[i]->get_innovation_number(), p1->nodes[i]->get_depth(), 
p1->nodes[i]->get_layer_type(), p1->nodes[i]->get_node_type(), p1->nodes[i]->is_reachable(), p1->nodes[i]->is_enabled()); } for (uint32_t i = 0; i < p2->nodes.size(); i++) { - Log::debug("p2 node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d, reachable: %d, enabled: %d\n", i, p2->nodes[i]->get_innovation_number(), p2->nodes[i]->get_depth(), p2->nodes[i]->get_layer_type(), p2->nodes[i]->get_node_type(), p2->nodes[i]->is_reachable(), p2->nodes[i]->is_enabled()); + LOG_DEBUG("p2 node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d, reachable: %d, enabled: %d\n", i, p2->nodes[i]->get_innovation_number(), p2->nodes[i]->get_depth(), p2->nodes[i]->get_layer_type(), p2->nodes[i]->get_node_type(), p2->nodes[i]->is_reachable(), p2->nodes[i]->is_enabled()); } double _mu, _sigma; - Log::debug("getting p1 mu/sigma!\n"); + LOG_DEBUG("getting p1 mu/sigma!\n"); if (p1->best_parameters.size() == 0) { p1->set_weights(p1->initial_parameters); p1->get_mu_sigma(p1->initial_parameters, _mu, _sigma); @@ -910,7 +910,7 @@ RNN_Genome* EXAMM::crossover(RNN_Genome *p1, RNN_Genome *p2) { p1->get_mu_sigma(p1->best_parameters, _mu, _sigma); } - Log::debug("getting p2 mu/sigma!\n"); + LOG_DEBUG("getting p2 mu/sigma!\n"); if (p2->best_parameters.size() == 0) { p2->set_weights(p2->initial_parameters); p2->get_mu_sigma(p2->initial_parameters, _mu, _sigma); @@ -931,13 +931,13 @@ RNN_Genome* EXAMM::crossover(RNN_Genome *p1, RNN_Genome *p2) { sort(p1_edges.begin(), p1_edges.end(), sort_RNN_Edges_by_innovation()); sort(p2_edges.begin(), p2_edges.end(), sort_RNN_Edges_by_innovation()); - Log::debug("\tp1 innovation numbers AFTER SORT:\n"); + LOG_DEBUG("\tp1 innovation numbers AFTER SORT:\n"); for (int32_t i = 0; i < (int32_t)p1_edges.size(); i++) { - Log::trace("\t\t%d\n", p1_edges[i]->innovation_number); + LOG_TRACE("\t\t%d\n", p1_edges[i]->innovation_number); } - Log::debug("\tp2 innovation numbers AFTER SORT:\n"); + LOG_DEBUG("\tp2 innovation numbers AFTER SORT:\n"); for (int32_t i = 
0; i < (int32_t)p2_edges.size(); i++) { - Log::debug("\t\t%d\n", p2_edges[i]->innovation_number); + LOG_DEBUG("\t\t%d\n", p2_edges[i]->innovation_number); } vector< RNN_Recurrent_Edge* > p1_recurrent_edges = p1->recurrent_edges; @@ -1085,12 +1085,12 @@ RNN_Genome* EXAMM::crossover(RNN_Genome *p1, RNN_Genome *p2) { // if weight_inheritance is same, all the weights of the child genome would be initialized as weight_initialize method if (weight_inheritance == weight_initialize) { - Log::debug("weight inheritance at crossover method is %s, setting weights to %s randomly \n", WEIGHT_TYPES_STRING[weight_inheritance].c_str(), WEIGHT_TYPES_STRING[weight_inheritance].c_str()); + LOG_DEBUG("weight inheritance at crossover method is %s, setting weights to %s randomly \n", WEIGHT_TYPES_STRING[weight_inheritance].c_str(), WEIGHT_TYPES_STRING[weight_inheritance].c_str()); child->initialize_randomly(); } child->get_weights(new_parameters); - Log::debug("getting mu/sigma before assign reachability\n"); + LOG_DEBUG("getting mu/sigma before assign reachability\n"); child->get_mu_sigma(new_parameters, mu, sigma); child->assign_reachability(); @@ -1105,7 +1105,7 @@ RNN_Genome* EXAMM::crossover(RNN_Genome *p1, RNN_Genome *p2) { child->get_weights(new_parameters); child->initial_parameters = new_parameters; - Log::debug("checking parameters after crossover\n"); + LOG_DEBUG("checking parameters after crossover\n"); child->get_mu_sigma(child->initial_parameters, mu, sigma); child->best_parameters.clear(); @@ -1120,27 +1120,27 @@ uniform_int_distribution EXAMM::get_recurrent_depth_dist() { void EXAMM::check_weight_initialize_validity() { if (weight_initialize < 0) { - Log::fatal("Weight initalization is set to NONE, this should not happen! \n"); + LOG_FATAL("Weight initalization is set to NONE, this should not happen! \n"); exit(1); } if (weight_inheritance < 0) { - Log::fatal("Weight inheritance is set to NONE, this should not happen! 
\n"); + LOG_FATAL("Weight inheritance is set to NONE, this should not happen! \n"); exit(1); } if (mutated_component_weight < 0) { - Log::fatal("Mutated component weight is set to NONE, this should not happen! \n"); + LOG_FATAL("Mutated component weight is set to NONE, this should not happen! \n"); exit(1); } if (weight_initialize == WeightType::LAMARCKIAN) { - Log::fatal("Weight initialization method is set to Lamarckian! \n"); + LOG_FATAL("Weight initialization method is set to Lamarckian! \n"); exit(1); } if (weight_inheritance != weight_initialize && weight_inheritance != WeightType::LAMARCKIAN) { - Log::fatal("Weight initialize is %s, weight inheritance is %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[weight_inheritance].c_str()); + LOG_FATAL("Weight initialize is %s, weight inheritance is %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[weight_inheritance].c_str()); exit(1); } if (mutated_component_weight != weight_initialize && mutated_component_weight != WeightType::LAMARCKIAN) { - Log::fatal("Weight initialize is %s, new component weight is %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_FATAL("Weight initialize is %s, new component weight is %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); exit(1); } diff --git a/rnn/generate_nn.cxx b/rnn/generate_nn.cxx index bd9b73a2..5cdbfbf8 100644 --- a/rnn/generate_nn.cxx +++ b/rnn/generate_nn.cxx @@ -21,7 +21,7 @@ using std::vector; #include "common/log.hxx" RNN_Genome* create_ff(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize, WeightType weight_inheritance, WeightType mutated_component_weight) { - Log::debug("creating feed forward network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent 
depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating feed forward network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -76,7 +76,7 @@ RNN_Genome* create_ff(const vector &input_parameter_names, int number_hi RNN_Genome* create_jordan(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating jordan neural network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating jordan neural network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector output_layer; vector< vector > layer_nodes(2 + number_hidden_layers); @@ -135,7 +135,7 @@ RNN_Genome* create_jordan(const vector &input_parameter_names, int numbe } RNN_Genome* create_elman(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating elman network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating elman network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: 
%d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector output_layer; vector< vector > layer_nodes(2 + number_hidden_layers); @@ -199,7 +199,7 @@ RNN_Genome* create_elman(const vector &input_parameter_names, int number } RNN_Genome* create_lstm(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating LSTM network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating LSTM network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -245,7 +245,7 @@ RNN_Genome* create_lstm(const vector &input_parameter_names, int number_ RNN_Genome* create_ugrnn(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating UGRNN network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating UGRNN network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -292,7 +292,7 @@ RNN_Genome* 
create_ugrnn(const vector &input_parameter_names, int number RNN_Genome* create_gru(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating GRU network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating GRU network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -337,7 +337,7 @@ RNN_Genome* create_gru(const vector &input_parameter_names, int number_h } RNN_Genome* create_enarc(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating ENARC network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating ENARC network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -382,7 +382,7 @@ RNN_Genome* create_enarc(const vector &input_parameter_names, int number } RNN_Genome* create_enas_dag(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - 
Log::debug("creating ENAS_DAG network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating ENAS_DAG network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -398,7 +398,7 @@ RNN_Genome* create_enas_dag(const vector &input_parameter_names, int num layer_nodes[current_layer].push_back(node); } current_layer++; - Log::debug("creating ENAS_DAG Node\n"); + LOG_DEBUG("creating ENAS_DAG Node\n"); for (int32_t i = 0; i < number_hidden_layers; i++) { for (int32_t j = 0; j < number_hidden_nodes; j++) { ENAS_DAG_Node *node = new ENAS_DAG_Node(++node_innovation_count, HIDDEN_LAYER, current_layer); @@ -428,7 +428,7 @@ RNN_Genome* create_enas_dag(const vector &input_parameter_names, int num RNN_Genome* create_random_dag(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating RANDOM_DAG network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating RANDOM_DAG network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -444,7 +444,7 @@ RNN_Genome* create_random_dag(const vector &input_parameter_names, int n 
layer_nodes[current_layer].push_back(node); } current_layer++; - Log::debug("creating RANDOM_DAG Node\n"); + LOG_DEBUG("creating RANDOM_DAG Node\n"); for (int32_t i = 0; i < number_hidden_layers; i++) { for (int32_t j = 0; j < number_hidden_nodes; j++) { RANDOM_DAG_Node *node = new RANDOM_DAG_Node(++node_innovation_count, HIDDEN_LAYER, current_layer); @@ -474,7 +474,7 @@ RNN_Genome* create_random_dag(const vector &input_parameter_names, int n RNN_Genome* create_mgu(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating MGU network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating MGU network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; @@ -520,7 +520,7 @@ RNN_Genome* create_mgu(const vector &input_parameter_names, int number_h RNN_Genome* create_delta(const vector &input_parameter_names, int number_hidden_layers, int number_hidden_nodes, const vector &output_parameter_names, int max_recurrent_depth, WeightType weight_initialize) { - Log::debug("creating delta network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); + LOG_DEBUG("creating delta network with inputs: %d, hidden: %dx%d, outputs: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_recurrent_depth); vector 
rnn_nodes; vector< vector > layer_nodes(2 + number_hidden_layers); vector rnn_edges; diff --git a/rnn/gru_node.cxx b/rnn/gru_node.cxx index 04f93875..cdfe2728 100644 --- a/rnn/gru_node.cxx +++ b/rnn/gru_node.cxx @@ -116,7 +116,7 @@ double GRU_Node::get_gradient(string gradient_name) { } else if (gradient_name == "h_bias") { gradient_sum += d_h_bias[i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -125,7 +125,7 @@ double GRU_Node::get_gradient(string gradient_name) { } void GRU_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } void GRU_Node::input_fired(int time, double incoming_output) { @@ -135,7 +135,7 @@ void GRU_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on GRU_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on GRU_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -182,7 +182,7 @@ void GRU_Node::input_fired(int time, double incoming_output) { void GRU_Node::try_update_deltas(int time) { if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on GRU_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on GRU_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -283,7 +283,7 @@ void 
GRU_Node::set_weights(uint32_t &offset, const vector ¶meters) { //uint32_t end_offset = offset; - //Log::trace("set weights from offset %d to %d on GRU_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("set weights from offset %d to %d on GRU_Node %d\n", start_offset, end_offset, innovation_number); } void GRU_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -302,7 +302,7 @@ void GRU_Node::get_weights(uint32_t &offset, vector ¶meters) const { parameters[offset++] = h_bias; //uint32_t end_offset = offset; - //Log::trace("got weights from offset %d to %d on GRU_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("got weights from offset %d to %d on GRU_Node %d\n", start_offset, end_offset, innovation_number); } diff --git a/rnn/island.cxx b/rnn/island.cxx index 53e67f68..c20e3d68 100644 --- a/rnn/island.cxx +++ b/rnn/island.cxx @@ -95,7 +95,7 @@ void Island::copy_two_random_genomes(uniform_real_distribution &rng_0_1, void Island::do_population_check(int line, int initial_size) { if (status == Island::FILLED && genomes.size() < max_size) { - Log::error("ERROR: do_population_check had issue on island.cxx line %d, status was FILLED and genomes.size() was: %d, size at beginning of insert was: %d\n", line, genomes.size(), initial_size); + LOG_ERROR("ERROR: do_population_check had issue on island.cxx line %d, status was FILLED and genomes.size() was: %d, size at beginning of insert was: %d\n", line, genomes.size(), initial_size); status = Island::INITIALIZING; } } @@ -108,20 +108,20 @@ int32_t Island::insert_genome(RNN_Genome *genome) { int initial_size = genomes.size(); if (genome->get_generation_id() <= erased_generation_id) { - Log::info("genome already erased, not inserting"); + LOG_INFO("genome already erased, not inserting"); do_population_check(__LINE__, initial_size); return -1; } - Log::debug("getting fitness of genome copy\n"); + LOG_DEBUG("getting fitness of genome copy\n"); double new_fitness = 
genome->get_fitness(); - Log::info("inserting genome with fitness: %s to island %d\n", parse_fitness(genome->get_fitness()).c_str(), id); + LOG_INFO("inserting genome with fitness: %s to island %d\n", parse_fitness(genome->get_fitness()).c_str(), id); //discard the genome if the island is full and it's fitness is worse than the worst in thte population if (is_full() && new_fitness > get_worst_fitness()) { - Log::info("ignoring genome, fitness: %lf > worst for island[%d] fitness: %lf\n", new_fitness, id, genomes.back()->get_fitness()); + LOG_INFO("ignoring genome, fitness: %lf > worst for island[%d] fitness: %lf\n", new_fitness, id, genomes.back()->get_fitness()); do_population_check(__LINE__, initial_size); return false; } @@ -133,13 +133,13 @@ int32_t Island::insert_genome(RNN_Genome *genome) { if (structure_map.count(structural_hash) > 0) { vector &potential_matches = structure_map.find(structural_hash)->second; - Log::info("potential duplicate for hash '%s', had %d potential matches.\n", structural_hash.c_str(), potential_matches.size()); + LOG_INFO("potential duplicate for hash '%s', had %d potential matches.\n", structural_hash.c_str(), potential_matches.size()); for (auto potential_match = potential_matches.begin(); potential_match != potential_matches.end(); ) { - Log::info("on potential match %d of %d\n", potential_match - potential_matches.begin(), potential_matches.size()); + LOG_INFO("on potential match %d of %d\n", potential_match - potential_matches.begin(), potential_matches.size()); if ((*potential_match)->equals(genome)) { if ((*potential_match)->get_fitness() > new_fitness) { - Log::info("REPLACING DUPLICATE GENOME, fitness of genome in search: %s, new fitness: %s\n", parse_fitness((*potential_match)->get_fitness()).c_str(), parse_fitness(genome->get_fitness()).c_str()); + LOG_INFO("REPLACING DUPLICATE GENOME, fitness of genome in search: %s, new fitness: %s\n", parse_fitness((*potential_match)->get_fitness()).c_str(), 
parse_fitness(genome->get_fitness()).c_str()); //we have an exact match for this genome in the island and its fitness is worse //than the genome we're trying to remove, so remove the duplicate it from the genomes //as well from the potential matches vector @@ -147,7 +147,7 @@ int32_t Island::insert_genome(RNN_Genome *genome) { auto duplicate_genome_iterator = lower_bound(genomes.begin(), genomes.end(), *potential_match, sort_genomes_by_fitness()); bool found = false; for (; duplicate_genome_iterator != genomes.end(); duplicate_genome_iterator++) { - Log::info("duplicate_genome_iterator: %p, (*potential_match): %p\n", (*duplicate_genome_iterator), (*potential_match)); + LOG_INFO("duplicate_genome_iterator: %p, (*potential_match): %p\n", (*duplicate_genome_iterator), (*potential_match)); if ((*duplicate_genome_iterator) == (*potential_match)) { found = true; @@ -156,24 +156,24 @@ int32_t Island::insert_genome(RNN_Genome *genome) { } if (!found) { - Log::fatal("ERROR: could not find duplicate genome even though its structural hash was in the island, this should never happen!\n"); + LOG_FATAL("ERROR: could not find duplicate genome even though its structural hash was in the island, this should never happen!\n"); exit(1); } - Log::info("potential_match->get_fitness(): %lf, duplicate_genome_iterator->get_fitness(): %lf, new_fitness: %lf\n", (*potential_match)->get_fitness(), (*duplicate_genome_iterator)->get_fitness(), new_fitness); + LOG_INFO("potential_match->get_fitness(): %lf, duplicate_genome_iterator->get_fitness(): %lf, new_fitness: %lf\n", (*potential_match)->get_fitness(), (*duplicate_genome_iterator)->get_fitness(), new_fitness); int32_t duplicate_genome_index = duplicate_genome_iterator - genomes.begin(); - Log::info("duplicate_genome_index: %d\n", duplicate_genome_index); + LOG_INFO("duplicate_genome_index: %d\n", duplicate_genome_index); //int32_t test_index = contains(genome); - //Log::info("test_index: %d\n", test_index); + //LOG_INFO("test_index: %d\n", 
test_index); RNN_Genome *duplicate = genomes[duplicate_genome_index]; - //Log::info("duplicate.equals(potential_match)? %d\n", duplicate->equals(*potential_match)); + //LOG_INFO("duplicate.equals(potential_match)? %d\n", duplicate->equals(*potential_match)); genomes.erase(genomes.begin() + duplicate_genome_index); - Log::info("potential_matches.size() before erase: %d\n", potential_matches.size()); + LOG_INFO("potential_matches.size() before erase: %d\n", potential_matches.size()); //erase the potential match from the structure map as well //returns an iterator to next element after the deleted one so @@ -182,17 +182,17 @@ int32_t Island::insert_genome(RNN_Genome *genome) { delete duplicate; - Log::info("potential_matches.size() after erase: %d\n", potential_matches.size()); - Log::info("structure_map[%s].size() after erase: %d\n", structural_hash.c_str(), structure_map[structural_hash].size()); + LOG_INFO("potential_matches.size() after erase: %d\n", potential_matches.size()); + LOG_INFO("structure_map[%s].size() after erase: %d\n", structural_hash.c_str(), structure_map[structural_hash].size()); if (potential_matches.size() == 0) { - Log::info("deleting the potential_matches vector for hash '%s' because it was empty.\n", structural_hash.c_str()); + LOG_INFO("deleting the potential_matches vector for hash '%s' because it was empty.\n", structural_hash.c_str()); structure_map.erase(structural_hash); break; //break because this vector is now empty and deleted } } else { - Log::info("island already contains a duplicate genome with a better fitness! not inserting.\n"); + LOG_INFO("island already contains a duplicate genome with a better fitness! 
not inserting.\n"); do_population_check(__LINE__, initial_size); return -1; } @@ -210,16 +210,16 @@ int32_t Island::insert_genome(RNN_Genome *genome) { copy->set_weights(best); } copy -> set_generation_id (genome -> get_generation_id()); - Log::info("created copy to insert to island: %d\n", copy->get_group_id()); + LOG_INFO("created copy to insert to island: %d\n", copy->get_group_id()); auto index_iterator = upper_bound(genomes.begin(), genomes.end(), copy, sort_genomes_by_fitness()); int32_t insert_index = index_iterator - genomes.begin(); - Log::info("inserting genome at index: %d\n", insert_index); + LOG_INFO("inserting genome at index: %d\n", insert_index); if (insert_index >= max_size) { //if we're going to insert this at the back of the population //its just going to get removed anyways, so we can delete //it and report it was not inserted. - Log::info("not inserting genome because it is worse than the worst fitness\n"); + LOG_INFO("not inserting genome because it is worse than the worst fitness\n"); delete copy; do_population_check(__LINE__, initial_size); return -1; @@ -231,12 +231,12 @@ int32_t Island::insert_genome(RNN_Genome *genome) { structural_hash = copy->get_structural_hash(); //add the genome to the vector for this structural hash structure_map[structural_hash].push_back(copy); - Log::info("adding to structure_map[%s] : %p\n", structural_hash.c_str(), ©); + LOG_INFO("adding to structure_map[%s] : %p\n", structural_hash.c_str(), ©); if (insert_index == 0) { //this was a new best genome for this island - Log::info("new best fitness for island: %d!\n", id); + LOG_INFO("new best fitness for island: %d!\n", id); if (genome->get_fitness() != EXAMM_MAX_DOUBLE) { //need to set the weights for non-initial genomes so we @@ -252,13 +252,13 @@ int32_t Island::insert_genome(RNN_Genome *genome) { status = Island::FILLED; } - Log::info("genomes.size(): %d, max_size: %d, status: %d\n", genomes.size(), max_size, status); + LOG_INFO("genomes.size(): %d, max_size: 
%d, status: %d\n", genomes.size(), max_size, status); if (genomes.size() > max_size) { //island was full before insert so now we need to //delete the worst genome in the island. - Log::debug("deleting worst genome\n"); + LOG_DEBUG("deleting worst genome\n"); RNN_Genome *worst = genomes.back(); genomes.pop_back(); structural_hash = worst->get_structural_hash(); @@ -268,20 +268,20 @@ int32_t Island::insert_genome(RNN_Genome *genome) { bool found = false; for (auto potential_match = potential_matches.begin(); potential_match != potential_matches.end(); ) { //make sure the addresses of the pointers are the same - Log::info("checking to remove worst from structure_map - &worst: %p, &(*potential_match): %p\n", worst, (*potential_match)); + LOG_INFO("checking to remove worst from structure_map - &worst: %p, &(*potential_match): %p\n", worst, (*potential_match)); if ((*potential_match) == worst) { found = true; - Log::info("potential_matches.size() before erase: %d\n", potential_matches.size()); + LOG_INFO("potential_matches.size() before erase: %d\n", potential_matches.size()); //erase the potential match from the structure map as well potential_match = potential_matches.erase(potential_match); - Log::info("potential_matches.size() after erase: %d\n", potential_matches.size()); - Log::info("structure_map[%s].size() after erase: %d\n", structural_hash.c_str(), structure_map[structural_hash].size()); + LOG_INFO("potential_matches.size() after erase: %d\n", potential_matches.size()); + LOG_INFO("structure_map[%s].size() after erase: %d\n", structural_hash.c_str(), structure_map[structural_hash].size()); //clean up the structure_map if no genomes in the population have this hash if (potential_matches.size() == 0) { - Log::info("deleting the potential_matches vector for hash '%s' because it was empty.\n", structural_hash.c_str()); + LOG_INFO("deleting the potential_matches vector for hash '%s' because it was empty.\n", structural_hash.c_str()); 
structure_map.erase(structural_hash); break; } @@ -291,7 +291,7 @@ int32_t Island::insert_genome(RNN_Genome *genome) { } if (!found) { - Log::info("could not erase from structure_map[%s], genome not found! This should never happen.\n", structural_hash.c_str()); + LOG_INFO("could not erase from structure_map[%s], genome not found! This should never happen.\n", structural_hash.c_str()); exit(1); } @@ -312,12 +312,12 @@ int32_t Island::insert_genome(RNN_Genome *genome) { } void Island::print(string indent) { - if (Log::at_level(Log::INFO)) { + if (Log::at_level(LOG_LEVEL_INFO)) { - Log::info("%s\t%s\n", indent.c_str(), RNN_Genome::print_statistics_header().c_str()); + LOG_INFO("%s\t%s\n", indent.c_str(), RNN_Genome::print_statistics_header().c_str()); for (int32_t i = 0; i < genomes.size(); i++) { - Log::info("%s\t%s\n", indent.c_str(), genomes[i]->print_statistics().c_str()); + LOG_INFO("%s\t%s\n", indent.c_str(), genomes[i]->print_statistics().c_str()); } } } @@ -333,10 +333,10 @@ void Island::erase_island() { genomes.clear(); erased = true; erase_again = 5; - Log::info("Worst island size after erased: %d\n", genomes.size()); + LOG_INFO("Worst island size after erased: %d\n", genomes.size()); if (genomes.size() != 0) { - Log::error("The worst island is not fully erased!\n"); + LOG_ERROR("The worst island is not fully erased!\n"); } } @@ -352,7 +352,7 @@ void Island::set_status(int32_t status_to_set) { if (status_to_set == Island::INITIALIZING || status_to_set == Island::FILLED || status_to_set == Island::REPOPULATING) { status = status_to_set; } else { - Log::error("Island::set_status: Wrong island status to set! %d\n", status_to_set); + LOG_ERROR("Island::set_status: Wrong island status to set! 
%d\n", status_to_set); exit(1); } } diff --git a/rnn/island_speciation_strategy.cxx b/rnn/island_speciation_strategy.cxx index f20972f9..5d050e36 100644 --- a/rnn/island_speciation_strategy.cxx +++ b/rnn/island_speciation_strategy.cxx @@ -188,7 +188,7 @@ bool IslandSpeciationStrategy::islands_full() const { //this will insert a COPY, original needs to be deleted //returns 0 if a new global best, < 0 if not inserted, > 0 otherwise int32_t IslandSpeciationStrategy::insert_genome(RNN_Genome* genome) { - Log::debug("inserting genome!\n"); + LOG_DEBUG("inserting genome!\n"); if (extinction_event_generation_number != 0){ if(inserted_genomes > 1 && inserted_genomes % extinction_event_generation_number == 0 && max_genomes - inserted_genomes >= extinction_event_generation_number) { if (island_ranking_method.compare("EraseWorst") == 0 || island_ranking_method.compare("") == 0){ @@ -197,12 +197,12 @@ int32_t IslandSpeciationStrategy::insert_genome(RNN_Genome* genome) { // int32_t worst_island = get_worst_island_by_best_genome(); for (int32_t i = 0; i < islands_to_exterminate; i++){ if (rank[i] >= 0){ - Log::info("found island: %d is not doing well \n",rank[0]); + LOG_INFO("found island: %d is not doing well \n",rank[0]); islands[rank[i]]->erase_island(); islands[rank[i]]->set_status(Island::REPOPULATING); // rank++; } - else Log::error("Didn't find the worst island!"); + else LOG_ERROR("Didn't find the worst island!"); // set this so the island would not be re-killed in 5 rounds if (!repeat_extinction) { set_erased_islands_status(); @@ -227,7 +227,7 @@ int32_t IslandSpeciationStrategy::insert_genome(RNN_Genome* genome) { inserted_genomes++; int32_t island = genome->get_group_id(); - Log::info("inserting genome to island: %d\n", island); + LOG_INFO("inserting genome to island: %d\n", island); int32_t insert_position = islands[island]->insert_genome(genome); @@ -260,8 +260,8 @@ vector IslandSpeciationStrategy::rank_islands() { // int32_t* island_rank = new 
int32_t[number_of_islands]; int32_t temp; double fitness_j1, fitness_j2; - Log::info("ranking islands \n"); - Log::info("repeat extinction: %s \n", repeat_extinction? "true":"false"); + LOG_INFO("ranking islands \n"); + LOG_INFO("repeat extinction: %s \n", repeat_extinction? "true":"false"); for (int32_t i = 0; i< number_of_islands; i++){ if (repeat_extinction) { @@ -273,9 +273,9 @@ vector IslandSpeciationStrategy::rank_islands() { } } - // Log::error("islands can get killed: \n"); + // LOG_ERROR("islands can get killed: \n"); // for (int32_t i = 0; i< island_rank.size(); i++){ - // Log::error("%d \n",island_rank[i]); + // LOG_ERROR("%d \n",island_rank[i]); // } for (int32_t i = 0; i < island_rank.size() - 1; i++) { for (int32_t j = 0; j < island_rank.size() - i - 1; j++) { @@ -288,9 +288,9 @@ vector IslandSpeciationStrategy::rank_islands() { } } } - Log::info("island rank: \n"); + LOG_INFO("island rank: \n"); for (int32_t i = 0; i< island_rank.size(); i++){ - Log::info("island: %d fitness %f \n", island_rank[i], islands[island_rank[i]]->get_best_fitness()); + LOG_INFO("island: %d fitness %f \n", island_rank[i], islands[island_rank[i]]->get_best_fitness()); } return island_rank; } @@ -301,18 +301,18 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< //robin fashion. 
RNN_Genome *genome = NULL; - Log::debug("getting island: %d\n", generation_island); + LOG_DEBUG("getting island: %d\n", generation_island); Island *island = islands[generation_island]; - Log::info("generating new genome for island[%d], island_size: %d, max_island_size: %d, mutation_rate: %lf, intra_island_crossover_rate: %lf, inter_island_crossover_rate: %lf\n", generation_island, island->size(), max_island_size, mutation_rate, intra_island_crossover_rate, inter_island_crossover_rate); + LOG_INFO("generating new genome for island[%d], island_size: %d, max_island_size: %d, mutation_rate: %lf, intra_island_crossover_rate: %lf, inter_island_crossover_rate: %lf\n", generation_island, island->size(), max_island_size, mutation_rate, intra_island_crossover_rate, inter_island_crossover_rate); - Log::debug("islands.size(): %d, selected island is null? %d\n", islands.size(), (island == NULL)); + LOG_DEBUG("islands.size(): %d, selected island is null? %d\n", islands.size(), (island == NULL)); if (island->is_initializing()) { - Log::info("island is initializing!\n"); + LOG_INFO("island is initializing!\n"); if (island->size() == 0) { - Log::debug("starting with minimal genome\n"); + LOG_DEBUG("starting with minimal genome\n"); RNN_Genome *genome_copy = seed_genome->copy(); //the architectures may be the same but we can give each copy of the minimal genome different //starting weights for more variety @@ -323,7 +323,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< // // Stir the seed genome if need be // if (this->number_stir_mutations) { - // Log::debug("Stirring seed genome for island %d by applying %d mutations!\n", + // LOG_DEBUG("Stirring seed genome for island %d by applying %d mutations!\n", // generation_island, this->number_stir_mutations); // mutate(this->number_stir_mutations, genome_copy); // } @@ -335,7 +335,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< 
genome_copy->set_generation_id(generated_genomes); genome_copy->set_group_id(generation_island); - Log::debug("inserting genome copy!\n"); + LOG_DEBUG("inserting genome copy!\n"); insert_genome(genome_copy); //return a copy of the minimal genome to be trained for each island genome = genome_copy->copy(); @@ -343,7 +343,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< generated_genomes++; return genome; } else { - Log::info("island is not empty, mutating a random genome\n"); + LOG_INFO("island is not empty, mutating a random genome\n"); while (genome == NULL) { island->copy_random_genome(rng_0_1, generator, &genome); @@ -387,7 +387,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< } else if (island->is_full()) { //generate a genome via crossover or mutation - Log::info("island is full\n"); + LOG_INFO("island is full\n"); while (genome == NULL) { genome = generate_for_filled_island(rng_0_1, generator, mutate, crossover); @@ -397,15 +397,15 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< //select two other islands (non-overlapping) at random, and select genomes //from within those islands and generate a child via crossover - Log::info("island is repopulating \n"); + LOG_INFO("island is repopulating \n"); while (genome == NULL) { if (repopulation_method.compare("randomParents") == 0 || repopulation_method.compare("randomparents") == 0){ - Log::info("island is repopulating through random parents method!\n"); + LOG_INFO("island is repopulating through random parents method!\n"); genome = parents_repopulation("random", rng_0_1, generator, mutate, crossover); } else if (repopulation_method.compare("bestParents") == 0 || repopulation_method.compare("bestparents") == 0){ - Log::info("island is repopulating through best parents method!\n"); + LOG_INFO("island is repopulating through best parents method!\n"); genome = parents_repopulation("best", rng_0_1, generator, 
mutate, crossover); } else if (repopulation_method.compare("bestGenome") == 0 || repopulation_method.compare("bestgenome") == 0){ @@ -418,17 +418,17 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< //copy the best island to the worst at once //after the worst island is filled, set the island status to filled //then generate a genome for filled status, so this function still return a generated genome - Log::info("island is repopulating through bestIsland method!\n"); - Log::info("island current size is: %d \n", islands[generation_island]->get_genomes().size()); + LOG_INFO("island is repopulating through bestIsland method!\n"); + LOG_INFO("island current size is: %d \n", islands[generation_island]->get_genomes().size()); RNN_Genome *best_genome = get_best_genome()->copy(); int32_t best_island_id = best_genome->get_group_id(); fill_island(best_island_id); if (island->is_full()) { - Log::info("island is full now, and generating a new one!\n"); + LOG_INFO("island is full now, and generating a new one!\n"); island->set_status(Island::FILLED); } else { - Log::error("Island is not full after coping the best island over!\n"); + LOG_ERROR("Island is not full after coping the best island over!\n"); } while (genome == NULL) { @@ -436,14 +436,14 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< } } else { - Log::fatal("Wrong repopulation_method argument"); + LOG_FATAL("Wrong repopulation_method argument"); exit(1); } } } else { - Log::fatal("ERROR: island was neither initializing, repopulating or full.\n"); - Log::fatal("This should never happen!\n"); + LOG_FATAL("ERROR: island was neither initializing, repopulating or full.\n"); + LOG_FATAL("This should never happen!\n"); } @@ -459,8 +459,8 @@ RNN_Genome* IslandSpeciationStrategy::generate_genome(uniform_real_distribution< islands[generation_island] -> set_latest_generation_id(generated_genomes); } else { - Log::fatal("ERROR: genome was NULL at the end of 
generate genome!\n"); - Log::fatal( "This should never happen.\n"); + LOG_FATAL("ERROR: genome was NULL at the end of generate genome!\n"); + LOG_FATAL( "This should never happen.\n"); exit(1); } @@ -474,7 +474,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_for_filled_island(uniform_real_di RNN_Genome* genome; double r = rng_0_1(generator); if (!islands_full() || r < mutation_rate) { - Log::info("performing mutation\n"); + LOG_INFO("performing mutation\n"); island->copy_random_genome(rng_0_1, generator, &genome); @@ -483,7 +483,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_for_filled_island(uniform_real_di } else if (r < intra_island_crossover_rate || number_of_islands == 1) { //intra-island crossover - Log::info("performing intra-island crossover\n"); + LOG_INFO("performing intra-island crossover\n"); //select two distinct parent genomes in the same island RNN_Genome *parent1 = NULL, *parent2 = NULL; @@ -494,7 +494,7 @@ RNN_Genome* IslandSpeciationStrategy::generate_for_filled_island(uniform_real_di delete parent2; } else { //inter-island crossover - // Log::info("performing inter-island crossover\n"); + // LOG_INFO("performing inter-island crossover\n"); //get a random genome from this island RNN_Genome *parent1 = NULL; @@ -529,9 +529,9 @@ RNN_Genome* IslandSpeciationStrategy::generate_for_filled_island(uniform_real_di void IslandSpeciationStrategy::print(string indent) const { - Log::info("%sIslands: \n", indent.c_str()); + LOG_INFO("%sIslands: \n", indent.c_str()); for (int32_t i = 0; i < (int32_t)islands.size(); i++) { - Log::info("%sIsland %d:\n", indent.c_str(), i); + LOG_INFO("%sIsland %d:\n", indent.c_str(), i); islands[i]->print(indent + "\t"); } } @@ -573,19 +573,19 @@ string IslandSpeciationStrategy::get_strategy_information_values() const { RNN_Genome* IslandSpeciationStrategy::parents_repopulation(string method, uniform_real_distribution &rng_0_1, minstd_rand0 &generator, function &mutate, function &crossover){ RNN_Genome* genome = NULL; - 
Log::info("generation island: %d \n", generation_island); + LOG_INFO("generation island: %d \n", generation_island); int32_t parent_island1; do { parent_island1 = (number_of_islands - 1) * rng_0_1(generator); } while (parent_island1 == generation_island); - Log::info("parent island 1: %d \n", parent_island1); + LOG_INFO("parent island 1: %d \n", parent_island1); int32_t parent_island2; do { parent_island2 = (number_of_islands - 1) * rng_0_1(generator); } while (parent_island2 == generation_island || parent_island2 == parent_island1); - Log::info("parent island 2: %d \n", parent_island2); + LOG_INFO("parent island 2: %d \n", parent_island2); RNN_Genome *parent1 = NULL; RNN_Genome *parent2 = NULL; @@ -634,7 +634,7 @@ void IslandSpeciationStrategy::set_erased_islands_status() { for (int i = 0; i < islands.size(); i++) { if (islands[i] -> get_erase_again_num() > 0) { islands[i] -> set_erase_again_num(); - Log::info("Island %d can be removed in %d rounds.\n", i, islands[i] -> get_erase_again_num()); + LOG_INFO("Island %d can be removed in %d rounds.\n", i, islands[i] -> get_erase_again_num()); } } } diff --git a/rnn/lstm_node.cxx b/rnn/lstm_node.cxx index 25694b84..d7365f90 100644 --- a/rnn/lstm_node.cxx +++ b/rnn/lstm_node.cxx @@ -141,7 +141,7 @@ double LSTM_Node::get_gradient(string gradient_name) { } else if (gradient_name == "cell_bias") { gradient_sum += d_cell_bias[i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -150,7 +150,7 @@ double LSTM_Node::get_gradient(string gradient_name) { } void LSTM_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } void LSTM_Node::input_fired(int time, double incoming_output) { @@ -160,7 +160,7 @@ void 
LSTM_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on LSTM_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on LSTM_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -210,7 +210,7 @@ void LSTM_Node::input_fired(int time, double incoming_output) { void LSTM_Node::try_update_deltas(int time) { if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on LSTM_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on LSTM_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -310,7 +310,7 @@ void LSTM_Node::set_weights(uint32_t &offset, const vector ¶meters) cell_bias = bound(parameters[offset++]); //uint32_t end_offset = offset; - //Log::trace("set weights from offset %d to %d on LSTM_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("set weights from offset %d to %d on LSTM_Node %d\n", start_offset, end_offset, innovation_number); } void LSTM_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -332,7 +332,7 @@ void LSTM_Node::get_weights(uint32_t &offset, vector ¶meters) const parameters[offset++] = cell_bias; //uint32_t end_offset = offset; - //Log::trace("got weights from offset %d to %d on LSTM_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("got weights from offset %d to %d on LSTM_Node %d\n", start_offset, end_offset, innovation_number); } diff --git a/rnn/mgu_node.cxx b/rnn/mgu_node.cxx index e6abc86e..47787836 100644 --- a/rnn/mgu_node.cxx +++ b/rnn/mgu_node.cxx @@ 
-93,7 +93,7 @@ double MGU_Node::get_gradient(string gradient_name) { } else if (gradient_name == "h_bias") { gradient_sum += d_h_bias[i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -102,7 +102,7 @@ double MGU_Node::get_gradient(string gradient_name) { } void MGU_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } void MGU_Node::input_fired(int time, double incoming_output) { @@ -112,7 +112,7 @@ void MGU_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on MGU_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on MGU_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -143,7 +143,7 @@ void MGU_Node::input_fired(int time, double incoming_output) { void MGU_Node::try_update_deltas(int time) { if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on MGU_Node %d at time %d is %d and total_outputs is %d\n:", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on MGU_Node %d at time %d is %d and total_outputs is %d\n:", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -227,7 +227,7 @@ void MGU_Node::set_weights(uint32_t &offset, const vector ¶meters) { //uint32_t end_offset = offset; - //Log::trace("set weights from offset %d to %d on MGU_Node %d\n", start_offset, end_offset, innovation_number); + 
//LOG_TRACE("set weights from offset %d to %d on MGU_Node %d\n", start_offset, end_offset, innovation_number); } void MGU_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -242,7 +242,7 @@ void MGU_Node::get_weights(uint32_t &offset, vector ¶meters) const { parameters[offset++] = h_bias; //uint32_t end_offset = offset; - //Log::trace("got weights from offset %d to %d on MGU_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_TRACE("got weights from offset %d to %d on MGU_Node %d\n", start_offset, end_offset, innovation_number); } diff --git a/rnn/neat_speciation_strategy.cxx b/rnn/neat_speciation_strategy.cxx index ca173b62..a7681393 100644 --- a/rnn/neat_speciation_strategy.cxx +++ b/rnn/neat_speciation_strategy.cxx @@ -56,14 +56,14 @@ NeatSpeciationStrategy::NeatSpeciationStrategy( intra_island_crossover_rate += mutation_rate; inter_island_crossover_rate += intra_island_crossover_rate; - Log::info("Neat speciation strategy, the species threshold is %f. \n", species_threshold); + LOG_INFO("Neat speciation strategy, the species threshold is %f. 
\n", species_threshold); //set the generation id for the initial minimal genome generated_genomes++; minimal_genome->set_generation_id(generated_genomes); // set the fitst species with minimal genome Neat_Species.push_back(new Species(species_count)); - Log::info("initialized the first species, current neat species size: %d \n", Neat_Species.size() ); + LOG_INFO("initialized the first species, current neat species size: %d \n", Neat_Species.size() ); species_count++; insert_genome(minimal_genome); @@ -152,14 +152,14 @@ int32_t NeatSpeciationStrategy::insert_genome(RNN_Genome* genome) { genome->set_weights(best); } - Log::info("inserting genome id %d!\n", genome->get_generation_id()); + LOG_INFO("inserting genome id %d!\n", genome->get_generation_id()); inserted_genomes++; int32_t insert_position; if (Neat_Species.size() == 1 && Neat_Species[0]->size() == 0) { // insert the first genome in the evolution insert_position = Neat_Species[0]->insert_genome(genome); - Log::info("first genome of this species inserted \n"); + LOG_INFO("first genome of this species inserted \n"); inserted = true; } @@ -168,25 +168,25 @@ int32_t NeatSpeciationStrategy::insert_genome(RNN_Genome* genome) { for (int i = 0; i < species_list.size(); i++){ Species* random_species = Neat_Species[species_list[i]]; if (random_species == NULL || random_species->size() == 0) { - Log::error("random_species is empty\n"); + LOG_ERROR("random_species is empty\n"); continue; } RNN_Genome* genome_representation = random_species->get_latested_genome(); if (genome_representation == NULL) { - Log::error("genome representation is null!\n"); + LOG_ERROR("genome representation is null!\n"); break; } if (genome_representation == NULL){ - Log::fatal("the latest genome is null, this should never happen!\n"); + LOG_FATAL("the latest genome is null, this should never happen!\n"); } double distance = get_distance(genome_representation, genome); - // Log::error("distance is %f \n", distance); + // LOG_ERROR("distance is 
%f \n", distance); if (distance < species_threshold) { - Log::info("inserting genome to species: %d\n", species_list[i]); + LOG_INFO("inserting genome to species: %d\n", species_list[i]); insert_position = random_species->insert_genome(genome); inserted = true; break; @@ -199,8 +199,8 @@ int32_t NeatSpeciationStrategy::insert_genome(RNN_Genome* genome) { species_count++; Neat_Species.push_back(new_species); if (species_count != Neat_Species.size()){ - Log::error("this should never happen, the species count is not the same as the number of species we have! \n"); - Log::error("num of species: %d, and species count is %d \n", Neat_Species.size(), species_count); + LOG_ERROR("this should never happen, the species count is not the same as the number of species we have! \n"); + LOG_ERROR("num of species: %d, and species count is %d \n", Neat_Species.size(), species_count); } insert_position = new_species->insert_genome(genome); inserted = true; @@ -230,7 +230,7 @@ RNN_Genome* NeatSpeciationStrategy::generate_genome(uniform_real_distribution= (signed) Neat_Species.size()) generation_species = 0; - Log::debug("getting species: %d\n", generation_species); + LOG_DEBUG("getting species: %d\n", generation_species); Species *currentSpecies = Neat_Species[generation_species]; @@ -239,11 +239,11 @@ RNN_Genome* NeatSpeciationStrategy::generate_genome(uniform_real_distributionget_distance(g1, g2); }; - Log::info("generating new genome for species[%d], species_size: %d, mutation_rate: %lf, intra_island_crossover_rate: %lf, inter_island_crossover_rate: %lf\n", generation_species, currentSpecies->size(), mutation_rate, intra_island_crossover_rate, inter_island_crossover_rate); + LOG_INFO("generating new genome for species[%d], species_size: %d, mutation_rate: %lf, intra_island_crossover_rate: %lf, inter_island_crossover_rate: %lf\n", generation_species, currentSpecies->size(), mutation_rate, intra_island_crossover_rate, inter_island_crossover_rate); if (currentSpecies->size() <= 2) { 
- Log::info("current species has less than 2 genomes, doing mutation!\n"); - Log::info("generating genome with id: %d \n", generated_genomes); + LOG_INFO("current species has less than 2 genomes, doing mutation!\n"); + LOG_INFO("generating genome with id: %d \n", generated_genomes); while (genome == NULL) { currentSpecies->copy_random_genome(rng_0_1, generator, &genome); @@ -270,8 +270,8 @@ RNN_Genome* NeatSpeciationStrategy::generate_genome(uniform_real_distributionfitness_sharing_remove(fitness_threshold, distance_function); } //generate a genome via crossover or mutation - Log::info("current species size %d, doing mutaion or crossover\n", currentSpecies->size()); - Log::info("generating genome with id: %d \n", generated_genomes); + LOG_INFO("current species size %d, doing mutaion or crossover\n", currentSpecies->size()); + LOG_INFO("generating genome with id: %d \n", generated_genomes); while (genome == NULL) { genome = generate_for_species(rng_0_1, generator, mutate, crossover); } @@ -288,8 +288,8 @@ RNN_Genome* NeatSpeciationStrategy::generate_genome(uniform_real_distributionset_latest_generation_id(generated_genomes); } else { - Log::fatal("ERROR: genome was NULL at the end of generate genome!\n"); - Log::fatal( "This should never happen.\n"); + LOG_FATAL("ERROR: genome was NULL at the end of generate genome!\n"); + LOG_FATAL( "This should never happen.\n"); exit(1); } @@ -303,7 +303,7 @@ RNN_Genome* NeatSpeciationStrategy::generate_for_species(uniform_real_distributi RNN_Genome* genome; double r = rng_0_1(generator); if ( r < mutation_rate) { - Log::info("performing mutation\n"); + LOG_INFO("performing mutation\n"); currentSpecies->copy_random_genome(rng_0_1, generator, &genome); @@ -312,7 +312,7 @@ RNN_Genome* NeatSpeciationStrategy::generate_for_species(uniform_real_distributi } else if (r < intra_island_crossover_rate || Neat_Species.size() == 1) { //intra-island crossover - Log::info("performing intra-island crossover\n"); + LOG_INFO("performing 
intra-island crossover\n"); //select two distinct parent genomes in the same island RNN_Genome *parent1 = NULL, *parent2 = NULL; @@ -323,7 +323,7 @@ RNN_Genome* NeatSpeciationStrategy::generate_for_species(uniform_real_distributi delete parent2; } else { //inter-island crossover - // Log::info("performing inter-island crossover\n"); + // LOG_INFO("performing inter-island crossover\n"); //get a random genome from this island RNN_Genome *parent1 = NULL; @@ -357,9 +357,9 @@ RNN_Genome* NeatSpeciationStrategy::generate_for_species(uniform_real_distributi } void NeatSpeciationStrategy::print(string indent) const { - Log::info("%NEAT Species: \n", indent.c_str()); + LOG_INFO("%NEAT Species: \n", indent.c_str()); for (int32_t i = 0; i < (int32_t)Neat_Species.size(); i++) { - Log::info("%sSpecies %d:\n", indent.c_str(), i); + LOG_INFO("%sSpecies %d:\n", indent.c_str(), i); Neat_Species[i]->print(indent + "\t"); } } @@ -409,7 +409,7 @@ vector NeatSpeciationStrategy::get_random_species_list() { } shuffle(species_list.begin(), species_list.end(), generator); - // Log::error("species shuffle list: \n"); + // LOG_ERROR("species shuffle list: \n"); // for (std::vector::iterator it=species_list.begin(); it!=species_list.end(); ++it) // std::cout << ' ' << *it; // std::cout << '\n'; @@ -428,7 +428,7 @@ double NeatSpeciationStrategy::get_distance(RNN_Genome* g1, RNN_Genome* g2) { double weight1 = g1-> get_avg_edge_weight(); double weight2 = g2-> get_avg_edge_weight(); double w = abs(weight1 - weight2); - Log::info("weight difference: %f \n", w); + LOG_INFO("weight difference: %f \n", w); if (innovation1.size() >= innovation2.size()){ N = innovation1.size(); @@ -451,7 +451,7 @@ double NeatSpeciationStrategy::get_distance(RNN_Genome* g1, RNN_Genome* g2) { D = setunion.size() - intersec.size() - E; distance = neat_c1 * E / N + neat_c2 * D / N + neat_c3 * w ; - Log::info("distance is %f \n", distance); + LOG_INFO("distance is %f \n", distance); return distance; } @@ -486,7 +486,7 @@ 
void NeatSpeciationStrategy::rank_species() { } } for (int32_t i = 0; i < Neat_Species.size() -1; i++) { - Log::error("Neat specis rank: %f \n", Neat_Species[i]->get_best_fitness()); + LOG_ERROR("Neat specis rank: %f \n", Neat_Species[i]->get_best_fitness()); } } @@ -496,20 +496,20 @@ bool NeatSpeciationStrategy::check_population() { // if so only save the top 2 species and erase the rest if (population_not_improving_count >= 3000) { - Log::error("the population fitness has not been improved for 3000 genomes, start to erasing \n"); + LOG_ERROR("the population fitness has not been improved for 3000 genomes, start to erasing \n"); rank_species(); Neat_Species.erase(Neat_Species.begin(), Neat_Species.end()-2); if (Neat_Species.size() != 2) { - Log::error("It should never happen, the population has %d number of species instead of 2! \n", Neat_Species.size()); + LOG_ERROR("It should never happen, the population has %d number of species instead of 2! \n", Neat_Species.size()); } for (int i = 0; i < 2; i++){ - Log::error("species %d size %d\n",i, Neat_Species[i]->size() ); - Log::error("species %d fitness %f\n",i, Neat_Species[i]->get_best_fitness() ); + LOG_ERROR("species %d size %d\n",i, Neat_Species[i]->size() ); + LOG_ERROR("species %d fitness %f\n",i, Neat_Species[i]->get_best_fitness() ); Neat_Species[i]->set_species_not_improving_count(0); } - Log::error("erase finished!\n"); - Log::error ("current number of species: %d \n", Neat_Species.size()); + LOG_ERROR("erase finished!\n"); + LOG_ERROR ("current number of species: %d \n", Neat_Species.size()); erased = true; } return erased; @@ -517,13 +517,13 @@ bool NeatSpeciationStrategy::check_population() { void NeatSpeciationStrategy::check_species() { - Log::info("checking speies \n"); + LOG_INFO("checking speies \n"); auto it = Neat_Species.begin(); while (it != Neat_Species.end()) { if (Neat_Species[it - Neat_Species.begin()]->get_species_not_improving_count() >= 2250) { - Log::error("Species at position %d hasn't 
been improving for 2250 genomes, erasing it \n", it - Neat_Species.begin() ); - Log::error ("current number of species: %d \n", Neat_Species.size()); + LOG_ERROR("Species at position %d hasn't been improving for 2250 genomes, erasing it \n", it - Neat_Species.begin() ); + LOG_ERROR ("current number of species: %d \n", Neat_Species.size()); // Neat_Species[Neat_Species.begin() - it]->erase_species(); it = Neat_Species.erase(it); } @@ -531,5 +531,5 @@ void NeatSpeciationStrategy::check_species() { ++it; } } - Log::info("finished checking species, current number of species: %d \n", Neat_Species.size()); + LOG_INFO("finished checking species, current number of species: %d \n", Neat_Species.size()); } diff --git a/rnn/random_dag_node.cxx b/rnn/random_dag_node.cxx index 0466c7ef..bfda3199 100644 --- a/rnn/random_dag_node.cxx +++ b/rnn/random_dag_node.cxx @@ -119,7 +119,7 @@ double RANDOM_DAG_Node::get_gradient(string gradient_name) { } else if (gradient_name == "w8") { gradient_sum += d_weights[7][i]; } else { - Log::fatal("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); + LOG_FATAL("ERROR: tried to get unknown gradient: '%s'\n", gradient_name.c_str()); exit(1); } } @@ -127,7 +127,7 @@ double RANDOM_DAG_Node::get_gradient(string gradient_name) { } void RANDOM_DAG_Node::print_gradient(string gradient_name) { - Log::info("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); + LOG_INFO("\tgradient['%s']: %lf\n", gradient_name.c_str(), get_gradient(gradient_name)); } double RANDOM_DAG_Node::activation(double value, int act_operator) { @@ -137,7 +137,7 @@ double RANDOM_DAG_Node::activation(double value, int act_operator) { if (act_operator == 3) return leakyReLU(value); if (act_operator == 4) return identity(value); - Log::fatal("ERROR: invalid act_operator: %d\n", act_operator); + LOG_FATAL("ERROR: invalid act_operator: %d\n", act_operator); exit(1); } @@ -148,7 +148,7 @@ double RANDOM_DAG_Node::activation_derivative(double 
value, double input, int ac if(act_operator == 3) return leakyReLU_derivative(input); if(act_operator == 4) return identity_derivative(); - Log::fatal("ERROR: invalid act_operator: %d\n", act_operator); + LOG_FATAL("ERROR: invalid act_operator: %d\n", act_operator); exit(1); } @@ -179,14 +179,14 @@ void RANDOM_DAG_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on RANDOM_DAG_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on RANDOM_DAG_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } //update the reset gate bias so its centered around 1 //r_bias += 1; - Log::debug("ERROR: inputs_fired on RANDOM_DAG_Node %d at time %d is %d and no_of_nodes is %d\n", innovation_number, time, inputs_fired[time], no_of_nodes); + LOG_DEBUG("ERROR: inputs_fired on RANDOM_DAG_Node %d at time %d is %d and no_of_nodes is %d\n", innovation_number, time, inputs_fired[time], no_of_nodes); double x = input_values[time]; @@ -235,7 +235,7 @@ void RANDOM_DAG_Node::input_fired(int time, double incoming_output) { // output_values[time] /= fan_out; - Log::debug("DEBUG: input_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_DEBUG("DEBUG: input_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); } @@ -243,11 +243,11 @@ void RANDOM_DAG_Node::input_fired(int time, double incoming_output) { void RANDOM_DAG_Node::try_update_deltas(int time){ if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", 
innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } - //Log::info(" trying to update\n"); + //LOG_INFO(" trying to update\n"); double error = error_values[time]; double x = input_values[time]; @@ -299,7 +299,7 @@ void RANDOM_DAG_Node::try_update_deltas(int time){ // d_zw[time] = d_h*l_Nodes[0][time]*x; - Log::debug("DEBUG: output_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_DEBUG("DEBUG: output_fired on RANDOM_DAG_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); } @@ -351,7 +351,7 @@ void RANDOM_DAG_Node::set_weights(uint32_t &offset, const vector ¶me else weights.at(new_node_weight) = bound(parameters[offset++]); } - Log::debug("DEBUG: no of weights on RANDOM_DAG_Node %d at time %d is %d \n", innovation_number, time, weights.size()); + LOG_DEBUG("DEBUG: no of weights on RANDOM_DAG_Node %d at time %d is %d \n", innovation_number, time, weights.size()); } diff --git a/rnn/rnn.cxx b/rnn/rnn.cxx index 2ab6096f..e65decee 100644 --- a/rnn/rnn.cxx +++ b/rnn/rnn.cxx @@ -45,38 +45,38 @@ using std::vector; #include "word_series/word_series.hxx" void RNN::validate_parameters(const vector &input_parameter_names, const vector &output_parameter_names) { - Log::debug("validating parameters -- input_parameter_names.size(): %d, output_parameter_names.size(): %d\n", input_parameter_names.size(), output_parameter_names.size()); - if (Log::at_level(Log::DEBUG)) { - Log::debug("\tinput_parameter_names:"); + LOG_DEBUG("validating parameters -- input_parameter_names.size(): %d, output_parameter_names.size(): %d\n", input_parameter_names.size(), output_parameter_names.size()); + if (Log::at_level(LOG_LEVEL_DEBUG)) { + 
LOG_DEBUG("\tinput_parameter_names:"); for (int32_t i = 0; i < input_parameter_names.size(); i++) { - Log::debug("\t\t'%s'\n", input_parameter_names[i].c_str()); + LOG_DEBUG("\t\t'%s'\n", input_parameter_names[i].c_str()); } - Log::debug("\tinput_node names:"); + LOG_DEBUG("\tinput_node names:"); for (int32_t i = 0; i < input_nodes.size(); i++) { - Log::debug("\t\t'%s'\n", input_nodes[i]->parameter_name.c_str()); + LOG_DEBUG("\t\t'%s'\n", input_nodes[i]->parameter_name.c_str()); } - Log::debug("\toutput_parameter_names:"); + LOG_DEBUG("\toutput_parameter_names:"); for (int32_t i = 0; i < output_parameter_names.size(); i++) { - Log::debug("\t\t'%s'\n", output_parameter_names[i].c_str()); + LOG_DEBUG("\t\t'%s'\n", output_parameter_names[i].c_str()); } - Log::debug("\toutput_node names:"); + LOG_DEBUG("\toutput_node names:"); for (int32_t i = 0; i < output_nodes.size(); i++) { - Log::debug("\t\t'%s'\n", output_nodes[i]->parameter_name.c_str()); + LOG_DEBUG("\t\t'%s'\n", output_nodes[i]->parameter_name.c_str()); } } if (input_nodes.size() != input_parameter_names.size()) { - Log::fatal("ERROR: number of input nodes (%d) != number of input parameters (%d)\n", input_nodes.size(), input_parameter_names.size()); + LOG_FATAL("ERROR: number of input nodes (%d) != number of input parameters (%d)\n", input_nodes.size(), input_parameter_names.size()); exit(1); } bool parameter_mismatch = false; for (int i = 0; i < input_nodes.size(); i++) { if (input_nodes[i]->parameter_name.compare(input_parameter_names[i]) != 0) { - Log::fatal("ERROR: input_nodes[%d]->parameter_name '%s' != input_parmater_names[%d] '%s'\n", i, input_nodes[i]->parameter_name.c_str(), i, input_parameter_names[i].c_str()); + LOG_FATAL("ERROR: input_nodes[%d]->parameter_name '%s' != input_parmater_names[%d] '%s'\n", i, input_nodes[i]->parameter_name.c_str(), i, input_parameter_names[i].c_str()); parameter_mismatch = true; } } @@ -85,14 +85,14 @@ void RNN::validate_parameters(const vector &input_parameter_names, 
const } if (output_nodes.size() != output_parameter_names.size()) { - Log::fatal("ERROR: number of output nodes (%d) != number of output parameters (%d)\n", output_nodes.size(), output_parameter_names.size()); + LOG_FATAL("ERROR: number of output nodes (%d) != number of output parameters (%d)\n", output_nodes.size(), output_parameter_names.size()); exit(1); } parameter_mismatch = false; for (int i = 0; i < output_nodes.size(); i++) { if (output_nodes[i]->parameter_name.compare(output_parameter_names[i]) != 0) { - Log::fatal("ERROR: output_nodes[%d]->parameter_name '%s' != output_parmater_names[%d] '%s'\n", i, output_nodes[i]->parameter_name.c_str(), i, output_parameter_names[i].c_str()); + LOG_FATAL("ERROR: output_nodes[%d]->parameter_name '%s' != output_parmater_names[%d] '%s'\n", i, output_nodes[i]->parameter_name.c_str(), i, output_parameter_names[i].c_str()); parameter_mismatch = true; } } @@ -104,36 +104,36 @@ void RNN::validate_parameters(const vector &input_parameter_names, const void RNN::fix_parameter_orders(const vector &input_parameter_names, const vector &output_parameter_names) { vector ordered_input_nodes; - Log::debug("fixing parameter orders -- input_parameter_names.size(): %d, output_parameter_names.size(): %d\n", input_parameter_names.size(), output_parameter_names.size()); - if (Log::at_level(Log::DEBUG)) { - Log::debug("\tinput_parameter_names:"); + LOG_DEBUG("fixing parameter orders -- input_parameter_names.size(): %d, output_parameter_names.size(): %d\n", input_parameter_names.size(), output_parameter_names.size()); + if (Log::at_level(LOG_LEVEL_DEBUG)) { + LOG_DEBUG("\tinput_parameter_names:"); for (int32_t i = 0; i < input_parameter_names.size(); i++) { - Log::debug("\t\t'%s'\n", input_parameter_names[i].c_str()); + LOG_DEBUG("\t\t'%s'\n", input_parameter_names[i].c_str()); } - Log::debug("\tinput_node names:"); + LOG_DEBUG("\tinput_node names:"); for (int32_t i = 0; i < input_nodes.size(); i++) { - Log::debug("\t\t'%s'\n", 
input_nodes[i]->parameter_name.c_str()); + LOG_DEBUG("\t\t'%s'\n", input_nodes[i]->parameter_name.c_str()); } - Log::debug("\toutput_parameter_names:"); + LOG_DEBUG("\toutput_parameter_names:"); for (int32_t i = 0; i < output_parameter_names.size(); i++) { - Log::debug("\t\t'%s'\n", output_parameter_names[i].c_str()); + LOG_DEBUG("\t\t'%s'\n", output_parameter_names[i].c_str()); } - Log::debug("\toutput_node names:"); + LOG_DEBUG("\toutput_node names:"); for (int32_t i = 0; i < output_nodes.size(); i++) { - Log::debug("\t\t'%s'\n", output_nodes[i]->parameter_name.c_str()); + LOG_DEBUG("\t\t'%s'\n", output_nodes[i]->parameter_name.c_str()); } } for (int i = 0; i < input_parameter_names.size(); i++) { for (int j = input_nodes.size() - 1; j >= 0; j--) { - Log::debug("checking input node name '%s' vs parameter name '%s'\n", input_nodes[j]->parameter_name.c_str(), input_parameter_names[i].c_str()); + LOG_DEBUG("checking input node name '%s' vs parameter name '%s'\n", input_nodes[j]->parameter_name.c_str(), input_parameter_names[i].c_str()); if (input_nodes[j]->parameter_name.compare(input_parameter_names[i]) == 0) { - Log::debug("erasing node!\n"); + LOG_DEBUG("erasing node!\n"); ordered_input_nodes.push_back(input_nodes[j]); input_nodes.erase(input_nodes.begin() + j); } @@ -182,24 +182,24 @@ RNN::RNN(vector &_nodes, vector &_edges, vector< //sort nodes by depth //sort edges by depth - Log::debug("creating rnn with %d nodes, %d edges\n", nodes.size(), edges.size()); + LOG_DEBUG("creating rnn with %d nodes, %d edges\n", nodes.size(), edges.size()); for (uint32_t i = 0; i < nodes.size(); i++) { if (nodes[i]->layer_type == INPUT_LAYER) { input_nodes.push_back(nodes[i]); - Log::debug("had input node!\n"); + LOG_DEBUG("had input node!\n"); } else if (nodes[i]->layer_type == OUTPUT_LAYER) { output_nodes.push_back(nodes[i]); - Log::debug("had output node!\n"); + LOG_DEBUG("had output node!\n"); } } - Log::debug("fixing parameter orders, input_node.size: %d\n", 
input_nodes.size()); + LOG_DEBUG("fixing parameter orders, input_node.size: %d\n", input_nodes.size()); fix_parameter_orders(input_parameter_names, output_parameter_names); - Log::debug("validating parameters, input_node.size: %d\n", input_nodes.size()); + LOG_DEBUG("validating parameters, input_node.size: %d\n", input_nodes.size()); validate_parameters(input_parameter_names, output_parameter_names); - Log::trace("got RNN with %d nodes, %d edges, %d recurrent edges\n", nodes.size(), edges.size(), recurrent_edges.size()); + LOG_TRACE("got RNN with %d nodes, %d edges, %d recurrent edges\n", nodes.size(), edges.size(), recurrent_edges.size()); } RNN::~RNN() { @@ -273,7 +273,7 @@ void RNN::get_weights(vector ¶meters) { void RNN::set_weights(const vector ¶meters) { if (parameters.size() != get_number_weights()) { - Log::fatal("ERROR! Trying to set weights where the RNN has %d weights, and the parameters vector has %d weights!\n", get_number_weights(), parameters.size()); + LOG_FATAL("ERROR! Trying to set weights where the RNN has %d weights, and the parameters vector has %d weights!\n", get_number_weights(), parameters.size()); exit(1); } @@ -325,9 +325,9 @@ void RNN::forward_pass(const vector< vector > &series_data, bool using_d series_length = series_data[0].size(); if (input_nodes.size() != series_data.size()) { - Log::fatal("ERROR: number of input nodes (%d) != number of time series data input fields (%d)\n", input_nodes.size(), series_data.size()); + LOG_FATAL("ERROR: number of input nodes (%d) != number of time series data input fields (%d)\n", input_nodes.size(), series_data.size()); for (int i = 0; i < nodes.size(); i++) { - Log::fatal("node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d\n", i, nodes[i]->get_innovation_number(), nodes[i]->get_depth(), nodes[i]->get_layer_type(), nodes[i]->get_node_type()); + LOG_FATAL("node[%d], in: %d, depth: %lf, layer_type: %d, node_type: %d\n", i, nodes[i]->get_innovation_number(), nodes[i]->get_depth(), 
nodes[i]->get_layer_type(), nodes[i]->get_node_type()); } exit(1); } @@ -531,8 +531,8 @@ vector RNN::get_predictions(const vector< vector > &series_data, void RNN::write_predictions(string output_filename, const vector &input_parameter_names, const vector &output_parameter_names, const vector< vector > &series_data, const vector< vector > &expected_outputs, TimeSeriesSets *time_series_sets, bool using_dropout, double dropout_probability) { forward_pass(series_data, using_dropout, false, dropout_probability); - Log::debug("series_length: %d, series_data.size(): %d, series_data[0].size(): %d\n", series_length, series_data.size(), series_data[0].size()); - Log::debug("input_nodes.size(): %d, output_nodes.size(): %d\n", input_nodes.size(), output_nodes.size()); + LOG_DEBUG("series_length: %d, series_data.size(): %d, series_data[0].size(): %d\n", series_length, series_data.size(), series_data[0].size()); + LOG_DEBUG("input_nodes.size(): %d, output_nodes.size(): %d\n", input_nodes.size(), output_nodes.size()); ofstream outfile(output_filename); outfile << "#"; @@ -541,21 +541,21 @@ void RNN::write_predictions(string output_filename, const vector &input_ if (i > 0) outfile << ","; outfile << input_parameter_names[i]; - Log::debug("input_parameter_names[%d]: '%s'\n", i, input_parameter_names[i].c_str()); + LOG_DEBUG("input_parameter_names[%d]: '%s'\n", i, input_parameter_names[i].c_str()); } for (uint32_t i = 0; i < output_nodes.size(); i++) { outfile << ","; outfile << "expected_" << output_parameter_names[i]; - Log::debug("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); + LOG_DEBUG("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); } for (uint32_t i = 0; i < output_nodes.size(); i++) { outfile << ","; outfile << "predicted_" << output_parameter_names[i]; - Log::debug("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); + LOG_DEBUG("output_parameter_names[%d]: '%s'\n", i, 
output_parameter_names[i].c_str()); } outfile << endl; @@ -585,8 +585,8 @@ void RNN::write_predictions(string output_filename, const vector &input_ void RNN::write_predictions(string output_filename, const vector &input_parameter_names, const vector &output_parameter_names, const vector< vector > &series_data, const vector< vector > &expected_outputs, Corpus *word_series_sets, bool using_dropout, double dropout_probability) { forward_pass(series_data, using_dropout, false, dropout_probability); - Log::debug("series_length: %d, series_data.size(): %d, series_data[0].size(): %d\n", series_length, series_data.size(), series_data[0].size()); - Log::debug("input_nodes.size(): %d, output_nodes.size(): %d\n", input_nodes.size(), output_nodes.size()); + LOG_DEBUG("series_length: %d, series_data.size(): %d, series_data[0].size(): %d\n", series_length, series_data.size(), series_data[0].size()); + LOG_DEBUG("input_nodes.size(): %d, output_nodes.size(): %d\n", input_nodes.size(), output_nodes.size()); ofstream outfile(output_filename); outfile << "#"; @@ -595,21 +595,21 @@ void RNN::write_predictions(string output_filename, const vector &input_ if (i > 0) outfile << ","; outfile << input_parameter_names[i]; - Log::debug("input_parameter_names[%d]: '%s'\n", i, input_parameter_names[i].c_str()); + LOG_DEBUG("input_parameter_names[%d]: '%s'\n", i, input_parameter_names[i].c_str()); } for (uint32_t i = 0; i < output_nodes.size(); i++) { outfile << ","; outfile << "expected_" << output_parameter_names[i]; - Log::debug("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); + LOG_DEBUG("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); } for (uint32_t i = 0; i < output_nodes.size(); i++) { outfile << ","; outfile << "predicted_" << output_parameter_names[i]; - Log::debug("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); + LOG_DEBUG("output_parameter_names[%d]: '%s'\n", i, output_parameter_names[i].c_str()); } 
outfile << endl; diff --git a/rnn/rnn_edge.cxx b/rnn/rnn_edge.cxx index c5dcf8ba..ddf4cd16 100644 --- a/rnn/rnn_edge.cxx +++ b/rnn/rnn_edge.cxx @@ -17,7 +17,7 @@ RNN_Edge::RNN_Edge(int _innovation_number, RNN_Node_Interface *_input_node, RNN_ input_node->total_outputs++; output_node->total_inputs++; - Log::debug("\t\tcreated edge %d from %d to %d\n", innovation_number, input_innovation_number, output_innovation_number); + LOG_DEBUG("\t\tcreated edge %d from %d to %d\n", innovation_number, input_innovation_number, output_innovation_number); } RNN_Edge::RNN_Edge(int _innovation_number, int _input_innovation_number, int _output_innovation_number, const vector &nodes) { @@ -31,7 +31,7 @@ RNN_Edge::RNN_Edge(int _innovation_number, int _input_innovation_number, int _ou for (int i = 0; i < nodes.size(); i++) { if (nodes[i]->innovation_number == _input_innovation_number) { if (input_node != NULL) { - Log::fatal("ERROR in copying RNN_Edge, list of nodes has multiple nodes with same input_innovation_number -- this should never happen.\n"); + LOG_FATAL("ERROR in copying RNN_Edge, list of nodes has multiple nodes with same input_innovation_number -- this should never happen.\n"); exit(1); } @@ -40,7 +40,7 @@ RNN_Edge::RNN_Edge(int _innovation_number, int _input_innovation_number, int _ou if (nodes[i]->innovation_number == _output_innovation_number) { if (output_node != NULL) { - Log::fatal("ERROR in copying RNN_Edge, list of nodes has multiple nodes with same output_innovation_number -- this should never happen.\n"); + LOG_FATAL("ERROR in copying RNN_Edge, list of nodes has multiple nodes with same output_innovation_number -- this should never happen.\n"); exit(1); } @@ -49,12 +49,12 @@ RNN_Edge::RNN_Edge(int _innovation_number, int _input_innovation_number, int _ou } if (input_node == NULL) { - Log::fatal("ERROR initializing RNN_Edge, input node with innovation number; %d was not found!\n", input_innovation_number); + LOG_FATAL("ERROR initializing RNN_Edge, input node with 
innovation number; %d was not found!\n", input_innovation_number); exit(1); } if (output_node == NULL) { - Log::fatal("ERROR initializing RNN_Edge, output node with innovation number; %d was not found!\n", output_innovation_number); + LOG_FATAL("ERROR initializing RNN_Edge, output node with innovation number; %d was not found!\n", output_innovation_number); exit(1); } } @@ -79,14 +79,14 @@ RNN_Edge* RNN_Edge::copy(const vector new_nodes) { void RNN_Edge::propagate_forward(int time) { if (input_node->inputs_fired[time] != input_node->total_inputs) { - Log::fatal("ERROR! propagate forward called on edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); - Log::fatal("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); + LOG_FATAL("ERROR! propagate forward called on edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); + LOG_FATAL("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); exit(1); } double output = input_node->output_values[time] * weight; - //Log::trace("propagating forward at time %d from %d to %d, value: %lf, input: %lf, weight: %lf\n", time, input_node->innovation_number, output_node->innovation_number, output, input_node->output_values[time], weight); + //LOG_TRACE("propagating forward at time %d from %d to %d, value: %lf, input: %lf, weight: %lf\n", time, input_node->innovation_number, output_node->innovation_number, output, input_node->output_values[time], weight); outputs[time] = output; output_node->input_fired(time, output); @@ -95,13 +95,13 @@ void RNN_Edge::propagate_forward(int time) { void RNN_Edge::propagate_forward(int time, bool training, double dropout_probability) { if 
(input_node->inputs_fired[time] != input_node->total_inputs) { - Log::fatal("ERROR! propagate forward called on edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); + LOG_FATAL("ERROR! propagate forward called on edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); exit(1); } double output = input_node->output_values[time] * weight; - //Log::trace("propagating forward at time %d from %d to %d, value: %lf, input: %lf, weight: %lf\n", time, input_node->innovation_number, output_node->innovation_number, output, input_node->output_values[time], weight); + //LOG_TRACE("propagating forward at time %d from %d to %d, value: %lf, input: %lf, weight: %lf\n", time, input_node->innovation_number, output_node->innovation_number, output, input_node->output_values[time], weight); if (training) { if (drand48() < dropout_probability) { @@ -120,13 +120,13 @@ void RNN_Edge::propagate_forward(int time, bool training, double dropout_probabi void RNN_Edge::propagate_backward(int time) { if (output_node->outputs_fired[time] != output_node->total_outputs) { - Log::fatal("ERROR! propagate backward called on edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); - Log::fatal("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); - Log::fatal("series_length: %d\n", input_node->series_length); + LOG_FATAL("ERROR! 
propagate backward called on edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); + LOG_FATAL("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); + LOG_FATAL("series_length: %d\n", input_node->series_length); exit(1); } - //Log::trace("propgating backward on edge %d at time %d from node %d to node %d\n", innovation_number, time, output_innovation_number, input_innovation_number); + //LOG_TRACE("propgating backward on edge %d at time %d from node %d to node %d\n", innovation_number, time, output_innovation_number, input_innovation_number); double delta = output_node->d_input[time]; @@ -137,13 +137,13 @@ void RNN_Edge::propagate_backward(int time) { void RNN_Edge::propagate_backward(int time, bool training, double dropout_probability) { if (output_node->outputs_fired[time] != output_node->total_outputs) { - Log::fatal("ERROR! propagate backward called on edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); - Log::fatal("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); - Log::fatal("series_length: %d\n", input_node->series_length); + LOG_FATAL("ERROR! 
propagate backward called on edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); + LOG_FATAL("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); + LOG_FATAL("series_length: %d\n", input_node->series_length); exit(1); } - //Log::trace("propgating backward on edge %d at time %d from node %d to node %d\n", innovation_number, time, output_innovation_number, input_innovation_number); + //LOG_TRACE("propgating backward on edge %d at time %d from node %d to node %d\n", innovation_number, time, output_innovation_number, input_innovation_number); double delta = output_node->d_input[time]; diff --git a/rnn/rnn_genome.cxx b/rnn/rnn_genome.cxx index 99b7ce78..82a6f886 100644 --- a/rnn/rnn_genome.cxx +++ b/rnn/rnn_genome.cxx @@ -535,7 +535,7 @@ void RNN_Genome::get_weights(vector ¶meters) { void RNN_Genome::set_weights(const vector ¶meters) { if (parameters.size() != get_number_weights()) { - Log::fatal("ERROR! Trying to set weights where the RNN has %d weights, and the parameters vector has %d weights!\n", get_number_weights(), parameters.size()); + LOG_FATAL("ERROR! 
Trying to set weights where the RNN has %d weights, and the parameters vector has %d weights!\n", get_number_weights(), parameters.size()); exit(1); } @@ -611,7 +611,7 @@ double RNN_Genome::get_avg_edge_weight() { for (int i = 0; i < edges.size(); i++) { if (edges[i] -> enabled) { if(edges[i] -> weight > 10) { - Log::error("ERROR: edge %d has weight %f \n", i, edges[i]-> weight); + LOG_ERROR("ERROR: edge %d has weight %f \n", i, edges[i]-> weight); } weights += edges[i] -> weight; } @@ -620,7 +620,7 @@ double RNN_Genome::get_avg_edge_weight() { for (int i = 0; i < recurrent_edges.size(); i++) { if (recurrent_edges[i] -> enabled) { if(recurrent_edges[i] -> weight > 10) { - Log::error("ERROR: recurrent edge %d has weight %f \n", i, recurrent_edges[i]-> weight); + LOG_ERROR("ERROR: recurrent edge %d has weight %f \n", i, recurrent_edges[i]-> weight); } weights += recurrent_edges[i] -> weight; } @@ -631,7 +631,7 @@ double RNN_Genome::get_avg_edge_weight() { } void RNN_Genome::initialize_randomly() { - Log::trace("initializing genome %d of group %d randomly!\n", generation_id, group_id); + LOG_TRACE("initializing genome %d of group %d randomly!\n", generation_id, group_id); int number_of_weights = get_number_weights(); initial_parameters.assign(number_of_weights, 0.0); @@ -651,7 +651,7 @@ void RNN_Genome::initialize_randomly() { } get_weights(initial_parameters); } else { - Log::fatal("ERROR: trying to initialize a genome randomly with unknown weight initalization strategy: '%d'\n", weight_initialize); + LOG_FATAL("ERROR: trying to initialize a genome randomly with unknown weight initalization strategy: '%d'\n", weight_initialize); exit(1); } @@ -738,7 +738,7 @@ void RNN_Genome::initialize_node_randomly(RNN_Node_Interface* n) { // random weight n->initialize_uniform_random(generator, rng); } else { - Log::fatal("weight initialize method %d is not set correctly \n", weight_initialize); + LOG_FATAL("weight initialize method %d is not set correctly \n", weight_initialize); 
exit(1); } } @@ -881,7 +881,7 @@ void forward_pass_thread_regression(RNN* rnn, const vector ¶meters, mses[i] = rnn->calculate_error_mse(outputs); //mses[i] = rnn->calculate_error_mae(outputs); - Log::trace("mse[%d]: %lf\n", i, mses[i]); + LOG_TRACE("mse[%d]: %lf\n", i, mses[i]); } void forward_pass_thread_classification(RNN* rnn, const vector ¶meters, const vector< vector > &inputs, const vector< vector > &outputs, uint32_t i, double *mses, bool use_dropout, bool training, double dropout_probability) { @@ -890,7 +890,7 @@ void forward_pass_thread_classification(RNN* rnn, const vector ¶mete mses[i] = rnn->calculate_error_softmax(outputs); //mses[i] = rnn->calculate_error_mae(outputs); - Log::trace("mse[%d]: %lf\n", i, mses[i]); + LOG_TRACE("mse[%d]: %lf\n", i, mses[i]); } void RNN_Genome::get_analytic_gradient(vector &rnns, const vector ¶meters, const vector< vector< vector > > &inputs, const vector< vector< vector > > &outputs, double &mse, vector &analytic_gradient, bool training) { @@ -1056,10 +1056,12 @@ void RNN_Genome::backpropagate(const vector< vector< vector > > &inputs, << " " << best_validation_mse << endl; } - Log::info("iteration %10d, mse: %10lf, v_mse: %10lf, bv_mse: %10lf, lr: %lf, norm: %lf, p_norm: %lf, v_norm: %lf", iteration, mse, validation_mse, best_validation_mse, learning_rate, norm, parameter_norm, velocity_norm); + string log_str = ""; + + log_str = log_str + string_format("iteration %10d, mse: %10lf, v_mse: %10lf, bv_mse: %10lf, lr: %lf, norm: %lf, p_norm: %lf, v_norm: %lf", iteration, mse, validation_mse, best_validation_mse, learning_rate, norm, parameter_norm, velocity_norm); if (use_reset_weights && prev_mse * 1.25 < mse) { - Log::info_no_header(", RESETTING WEIGHTS %d", reset_count); + log_str = log_str + string_format(", RESETTING WEIGHTS %d", reset_count); parameters = prev_parameters; //prev_velocity = prev_prev_velocity; prev_velocity.assign(parameters.size(), 0.0); @@ -1093,14 +1095,14 @@ void RNN_Genome::backpropagate(const 
vector< vector< vector > > &inputs, learning_rate *= 1.10; if (learning_rate > 1.0) learning_rate = 1.0; - Log::info_no_header(", INCREASING LR"); + log_str = log_str + ", INCREASING LR"; } } if (use_high_norm && norm > high_threshold) { double high_threshold_norm = high_threshold / norm; - Log::info_no_header(", OVER THRESHOLD, multiplier: %lf", high_threshold_norm); + log_str = log_str + string_format(", OVER THRESHOLD, multiplier: %lf", high_threshold_norm); for (int32_t i = 0; i < parameters.size(); i++) { analytic_gradient[i] = high_threshold_norm * analytic_gradient[i]; @@ -1113,7 +1115,7 @@ void RNN_Genome::backpropagate(const vector< vector< vector > > &inputs, } else if (use_low_norm && norm < low_threshold) { double low_threshold_norm = low_threshold / norm; - Log::info_no_header(", UNDER THRESHOLD, multiplier: %lf", low_threshold_norm); + log_str = log_str + string_format(", UNDER THRESHOLD, multiplier: %lf", low_threshold_norm); for (int32_t i = 0; i < parameters.size(); i++) { analytic_gradient[i] = low_threshold_norm * analytic_gradient[i]; @@ -1121,7 +1123,7 @@ void RNN_Genome::backpropagate(const vector< vector< vector > > &inputs, if (adapt_learning_rate) { if (prev_mse * 1.05 < mse) { - Log::info_no_header(", WORSE"); + log_str = log_str + ", WORSE"; learning_rate *= 0.5; if (learning_rate < 0.0000001) learning_rate = 0.0000001; } @@ -1130,7 +1132,7 @@ void RNN_Genome::backpropagate(const vector< vector< vector > > &inputs, if (reset_count > 0) { double reset_penalty = pow(5.0, -reset_count); - Log::info_no_header(", RESET PENALTY (%d): %lf", reset_count, reset_penalty); + log_str = log_str + string_format(", RESET PENALTY (%d): %lf", reset_count, reset_penalty); for (int32_t i = 0; i < parameters.size(); i++) { analytic_gradient[i] = reset_penalty * analytic_gradient[i]; @@ -1138,7 +1140,8 @@ void RNN_Genome::backpropagate(const vector< vector< vector > > &inputs, } - Log::info_no_header("\n"); + log_str = log_str + "\n"; + 
LOG_INFO(log_str.c_str()); if (use_nesterov_momentum) { for (int32_t i = 0; i < parameters.size(); i++) { @@ -1202,10 +1205,10 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > //initialize the initial previous values for (uint32_t i = 0; i < n_series; i++) { - Log::trace("getting analytic gradient for input/output: %d, n_series: %d, parameters.size: %d, inputs.size(): %d, outputs.size(): %d, log filename: '%s'\n", i, n_series, parameters.size(), inputs.size(), outputs.size(), log_filename.c_str()); + LOG_TRACE("getting analytic gradient for input/output: %d, n_series: %d, parameters.size: %d, inputs.size(): %d, outputs.size(): %d, log filename: '%s'\n", i, n_series, parameters.size(), inputs.size(), outputs.size(), log_filename.c_str()); rnn->get_analytic_gradient(parameters, inputs[i], outputs[i], mse, analytic_gradient, use_dropout, true, dropout_probability); - Log::trace("got analytic gradient.\n"); + LOG_TRACE("got analytic gradient.\n"); norm = 0.0; for (int32_t j = 0; j < parameters.size(); j++) { @@ -1217,7 +1220,7 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > prev_mse[i] = mse; prev_learning_rate[i] = learning_rate; } - Log::trace("initialized previous values.\n"); + LOG_TRACE("initialized previous values.\n"); //TODO: need to get validation mse on the RNN not the genome double validation_mse = get_mse(parameters, validation_inputs, validation_outputs); @@ -1225,14 +1228,14 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > best_validation_mae = get_mae(parameters, validation_inputs, validation_outputs); best_parameters = parameters; - Log::trace("got initial mses.\n"); + LOG_TRACE("got initial mses.\n"); - Log::trace("initial validation_mse: %lf, best validation mse: %lf\n", validation_mse, best_validation_mse); + LOG_TRACE("initial validation_mse: %lf, best validation mse: %lf\n", validation_mse, best_validation_mse); double m = 0.0, s = 0.0; get_mu_sigma(parameters, m, s); 
for (int32_t i = 0; i < parameters.size(); i++) { - Log::trace("parameters[%d]: %lf\n", i, parameters[i]); + LOG_TRACE("parameters[%d]: %lf\n", i, parameters[i]); } unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); @@ -1249,16 +1252,16 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > ostringstream memory_log; if (log_filename != "") { - Log::trace("creating new log stream for '%s'\n", log_filename.c_str()); + LOG_TRACE("creating new log stream for '%s'\n", log_filename.c_str()); output_log = new ofstream(log_filename); - Log::trace("testing to see if log file is valid.\n"); + LOG_TRACE("testing to see if log file is valid.\n"); if (!output_log->is_open()) { - Log::fatal("ERROR, could not open output log: '%s'\n", log_filename.c_str()); + LOG_FATAL("ERROR, could not open output log: '%s'\n", log_filename.c_str()); exit(1); } - Log::trace("opened log file '%s'\n", log_filename.c_str()); + LOG_TRACE("opened log file '%s'\n", log_filename.c_str()); } vector shuffle_order; @@ -1292,10 +1295,12 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > norm = sqrt(norm); avg_norm += norm; - Log::info("iteration %7d, series: %4d, mse: %5.10lf, lr: %lf, norm: %lf", iteration, random_selection, mse, learning_rate, norm); + string log_str = ""; + + log_str = string_format("iteration %7d, series: %4d, mse: %5.10lf, lr: %lf, norm: %lf", iteration, random_selection, mse, learning_rate, norm); if (use_reset_weights && prev_mse[random_selection] * 2 < mse) { - Log::info_no_header(", RESETTING WEIGHTS"); + log_str = log_str + ", RESETTING WEIGHTS"; parameters = prev_parameters; //prev_velocity = prev_prev_velocity; @@ -1331,13 +1336,13 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > learning_rate *= 1.10; if (learning_rate > 1.0) learning_rate = 1.0; - Log::info_no_header(", INCREASING LR"); + log_str = log_str + ", INCREASING LR"; } } if (use_high_norm && norm > high_threshold) { 
double high_threshold_norm = high_threshold / norm; - Log::info_no_header(", OVER THRESHOLD, multiplier: %lf", high_threshold_norm); + log_str = log_str + string_format(", OVER THRESHOLD, multiplier: %lf", high_threshold_norm); for (int32_t i = 0; i < parameters.size(); i++) { analytic_gradient[i] = high_threshold_norm * analytic_gradient[i]; @@ -1350,7 +1355,7 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > } else if (use_low_norm && norm < low_threshold) { double low_threshold_norm = low_threshold / norm; - Log::info_no_header(", UNDER THRESHOLD, multiplier: %lf", low_threshold_norm); + log_str = log_str + string_format(", UNDER THRESHOLD, multiplier: %lf", low_threshold_norm); for (int32_t i = 0; i < parameters.size(); i++) { analytic_gradient[i] = low_threshold_norm * analytic_gradient[i]; @@ -1358,14 +1363,16 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > if (adapt_learning_rate) { if (prev_mse[random_selection] * 1.05 < mse) { - Log::info_no_header(", WORSE"); + log_str = log_str + ", WORSE"; learning_rate *= 0.5; if (learning_rate < 0.0000001) learning_rate = 0.0000001; } } } - Log::info_no_header("\n"); + log_str = log_str + "\n"; + + LOG_INFO(log_str.c_str()); if (use_nesterov_momentum) { for (int32_t i = 0; i < parameters.size(); i++) { @@ -1403,7 +1410,7 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > validation_mse = get_softmax(parameters, validation_inputs, validation_outputs); } - //Log::info("iteration %7d, validation mse: %5.10lf\n", iteration, validation_mse); + //LOG_INFO("iteration %7d, validation mse: %5.10lf\n", iteration, validation_mse); if (validation_mse < best_validation_mse) { best_validation_mse = validation_mse; @@ -1422,10 +1429,10 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > delete output_log; output_log = new ofstream(log_filename, std::ios_base::app); - Log::trace("testing to see if log file valid for '%s'\n", 
log_filename.c_str()); + LOG_TRACE("testing to see if log file valid for '%s'\n", log_filename.c_str()); if (!output_log->is_open()) { - Log::fatal("ERROR, could not open output log: '%s'\n", log_filename.c_str()); + LOG_FATAL("ERROR, could not open output log: '%s'\n", log_filename.c_str()); exit(1); } } @@ -1447,7 +1454,7 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > << "," << avg_norm << endl; } - Log::info("iteration %4d, mse: %5.10lf, v_mse: %5.10lf, bv_mse: %5.10lf, avg_norm: %5.10lf\n", iteration, training_mse, validation_mse, best_validation_mse, avg_norm); + LOG_INFO("iteration %4d, mse: %5.10lf, v_mse: %5.10lf, bv_mse: %5.10lf, avg_norm: %5.10lf\n", iteration, training_mse, validation_mse, best_validation_mse, avg_norm); } if (log_filename != "") { @@ -1459,7 +1466,7 @@ void RNN_Genome::backpropagate_stochastic(const vector< vector< vector > delete rnn; this->set_weights(best_parameters); - Log::trace("backpropagation completed, getting mu/sigma\n"); + LOG_TRACE("backpropagation completed, getting mu/sigma\n"); double _mu, _sigma; get_mu_sigma(best_parameters, _mu, _sigma); } @@ -1477,13 +1484,13 @@ double RNN_Genome::get_softmax(const vector ¶meters, const vector< v avg_softmax += softmax; - Log::trace("series[%5d]: Softmax: %5.10lf\n", i, softmax); + LOG_TRACE("series[%5d]: Softmax: %5.10lf\n", i, softmax); } delete rnn; avg_softmax /= inputs.size(); - Log::trace("average Softmax: %5.10lf\n", avg_softmax); + LOG_TRACE("average Softmax: %5.10lf\n", avg_softmax); return avg_softmax; } @@ -1500,13 +1507,13 @@ double RNN_Genome::get_mse(const vector ¶meters, const vector< vecto avg_mse += mse; - Log::trace("series[%5d]: MSE: %5.10lf\n", i, mse); + LOG_TRACE("series[%5d]: MSE: %5.10lf\n", i, mse); } delete rnn; avg_mse /= inputs.size(); - Log::trace("average MSE: %5.10lf\n", avg_mse); + LOG_TRACE("average MSE: %5.10lf\n", avg_mse); return avg_mse; } @@ -1523,13 +1530,13 @@ double RNN_Genome::get_mae(const vector ¶meters, const 
vector< vecto avg_mae += mae; - Log::debug("series[%5d] MAE: %5.10lf\n", i, mae); + LOG_DEBUG("series[%5d] MAE: %5.10lf\n", i, mae); } delete rnn; avg_mae /= inputs.size(); - Log::debug("average MAE: %5.10lf\n", avg_mae); + LOG_DEBUG("average MAE: %5.10lf\n", avg_mae); return avg_mae; } @@ -1556,7 +1563,7 @@ void RNN_Genome::write_predictions(string output_directory, const vector for (uint32_t i = 0; i < inputs.size(); i++) { string filename = input_filenames[i]; - Log::info("input filename[%5d]: '%s'\n", i, filename.c_str()); + LOG_INFO("input filename[%5d]: '%s'\n", i, filename.c_str()); int last_dot_pos = filename.find_last_of("."); string extension = filename.substr(last_dot_pos); @@ -1566,7 +1573,7 @@ void RNN_Genome::write_predictions(string output_directory, const vector string output_filename = prefix + "_predictions" + extension; output_filename = output_directory + "/" + output_filename.substr(output_filename.find_last_of("/") + 1); - Log::info("output filename: '%s'\n", output_filename.c_str()); + LOG_INFO("output filename: '%s'\n", output_filename.c_str()); rnn->write_predictions(output_filename, input_parameter_names, output_parameter_names, inputs[i], outputs[i], time_series_sets, use_dropout, dropout_probability); } @@ -1584,7 +1591,7 @@ void RNN_Genome::write_predictions(string output_directory, const vector //one input vector per testing file for (uint32_t i = 0; i < inputs.size(); i++) { string filename = input_filenames[i]; - Log::info("input filename[%5d]: '%s'\n", i, filename.c_str()); + LOG_INFO("input filename[%5d]: '%s'\n", i, filename.c_str()); int last_dot_pos = filename.find_last_of("."); string extension = filename.substr(last_dot_pos); @@ -1594,7 +1601,7 @@ void RNN_Genome::write_predictions(string output_directory, const vector string output_filename = prefix + "_predictions" + extension; output_filename = output_directory + "/" + output_filename.substr(output_filename.find_last_of("/") + 1); - Log::info("output filename: '%s'\n", 
output_filename.c_str()); + LOG_INFO("output filename: '%s'\n", output_filename.c_str()); rnn->write_predictions(output_filename, input_parameter_names, output_parameter_names, inputs[i], outputs[i], word_series_sets, use_dropout, dropout_probability); } @@ -1633,8 +1640,8 @@ bool RNN_Genome::equals(RNN_Genome* other) { } void RNN_Genome::assign_reachability() { - Log::trace("assigning reachability!\n"); - Log::trace("%6d nodes, %6d edges, %6d recurrent edges\n", nodes.size(), edges.size(), recurrent_edges.size()); + LOG_TRACE("assigning reachability!\n"); + LOG_TRACE("%6d nodes, %6d edges, %6d recurrent edges\n", nodes.size(), edges.size(), recurrent_edges.size()); for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { nodes[i]->forward_reachable = false; @@ -1647,7 +1654,7 @@ void RNN_Genome::assign_reachability() { nodes[i]->forward_reachable = true; nodes[i]->total_inputs = 1; - Log::trace("\tsetting input node[%5d] reachable\n", i); + LOG_TRACE("\tsetting input node[%5d] reachable\n", i); } if (nodes[i]->layer_type == OUTPUT_LAYER) { @@ -1691,7 +1698,7 @@ void RNN_Genome::assign_reachability() { if (edges[i]->output_node->forward_reachable == false) { if (edges[i]->output_node->innovation_number == edges[i]->input_node->innovation_number) { - Log::fatal("ERROR, forward edge was circular -- this should never happen"); + LOG_FATAL("ERROR, forward edge was circular -- this should never happen"); exit(1); } edges[i]->output_node->forward_reachable = true; @@ -1783,23 +1790,23 @@ void RNN_Genome::assign_reachability() { } } - if (Log::at_level(Log::TRACE)) { - Log::trace("node reachabiltity:\n"); + if (Log::at_level(LOG_LEVEL_TRACE)) { + LOG_TRACE("node reachabiltity:\n"); for (int32_t i = 0; i < nodes.size(); i++) { RNN_Node_Interface *n = nodes[i]; - Log::trace("node %5d, e: %d, fr: %d, br: %d, ti: %5d, to: %5d\n", n->innovation_number, n->enabled, n->forward_reachable, n->backward_reachable, n->total_inputs, n->total_outputs); + LOG_TRACE("node %5d, e: %d, fr: 
%d, br: %d, ti: %5d, to: %5d\n", n->innovation_number, n->enabled, n->forward_reachable, n->backward_reachable, n->total_inputs, n->total_outputs); } - Log::trace("edge reachabiltity:\n"); + LOG_TRACE("edge reachabiltity:\n"); for (int32_t i = 0; i < edges.size(); i++) { RNN_Edge *e = edges[i]; - Log::trace("edge %5d, e: %d, fr: %d, br: %d\n", e->innovation_number, e->enabled, e->forward_reachable, e->backward_reachable); + LOG_TRACE("edge %5d, e: %d, fr: %d, br: %d\n", e->innovation_number, e->enabled, e->forward_reachable, e->backward_reachable); } - Log::trace("recurrent edge reachabiltity:\n"); + LOG_TRACE("recurrent edge reachabiltity:\n"); for (int32_t i = 0; i < recurrent_edges.size(); i++) { RNN_Recurrent_Edge *e = recurrent_edges[i]; - Log::trace("recurrent edge %5d, e: %d, fr: %d, br: %d\n", e->innovation_number, e->enabled, e->forward_reachable, e->backward_reachable); + LOG_TRACE("recurrent edge %5d, e: %d, fr: %d, br: %d\n", e->innovation_number, e->enabled, e->forward_reachable, e->backward_reachable); } } @@ -1826,7 +1833,7 @@ void RNN_Genome::assign_reachability() { } structural_hash = to_string(node_hash) + "_" + to_string(edge_hash) + "_" + to_string(recurrent_edge_hash); - //Log::info("genome had structural hash: '%s'\n", structural_hash.c_str()); + //LOG_INFO("genome had structural hash: '%s'\n", structural_hash.c_str()); } @@ -1844,7 +1851,7 @@ void RNN_Genome::get_mu_sigma(const vector &p, double &mu, double &sigma if (p.size() == 0) { mu = 0.0; sigma = 0.25; - Log::debug("\tmu: %lf, sigma: %lf, parameters.size() == 0\n", mu, sigma); + LOG_DEBUG("\tmu: %lf, sigma: %lf, parameters.size() == 0\n", mu, sigma); return; } @@ -1854,10 +1861,10 @@ void RNN_Genome::get_mu_sigma(const vector &p, double &mu, double &sigma for (int32_t i = 0; i < p.size(); i++) { /* if (p[i] < -10 || p[i] > 10) { - Log::fatal("ERROR in get_mu_sigma, parameter[%d] was out of bounds: %lf\n", i, p[i]); - Log::fatal("all parameters:\n"); + LOG_FATAL("ERROR in get_mu_sigma, 
parameter[%d] was out of bounds: %lf\n", i, p[i]); + LOG_FATAL("all parameters:\n"); for (int32_t i = 0; i < (int32_t)p.size(); i++) { - Log::fatal("\t%lf\n", p[i]); + LOG_FATAL("\t%lf\n", p[i]); } exit(1); } @@ -1877,19 +1884,19 @@ void RNN_Genome::get_mu_sigma(const vector &p, double &mu, double &sigma sigma /= (p.size() - 1); sigma = sqrt(sigma); - Log::debug("\tmu: %lf, sigma: %lf, parameters.size(): %d\n", mu, sigma, p.size()); + LOG_DEBUG("\tmu: %lf, sigma: %lf, parameters.size(): %d\n", mu, sigma, p.size()); if (std::isnan(mu) || std::isinf(mu) || std::isnan(sigma) || std::isinf(sigma)) { - Log::fatal("mu or sigma was not a number, all parameters:\n"); + LOG_FATAL("mu or sigma was not a number, all parameters:\n"); for (int32_t i = 0; i < (int32_t)p.size(); i++) { - Log::fatal("\t%lf\n", p[i]); + LOG_FATAL("\t%lf\n", p[i]); } exit(1); } if (mu < -11.0 || mu > 11.0 || sigma < -30.0 || sigma > 30.0) { - Log::fatal("mu or sigma exceeded possible bounds (11 or 30), all parameters:\n"); + LOG_FATAL("mu or sigma exceeded possible bounds (11 or 30), all parameters:\n"); for (int32_t i = 0; i < (int32_t)p.size(); i++) { - Log::fatal("\t%lf\n", p[i]); + LOG_FATAL("\t%lf\n", p[i]); } exit(1); } @@ -1899,7 +1906,7 @@ void RNN_Genome::get_mu_sigma(const vector &p, double &mu, double &sigma RNN_Node_Interface* RNN_Genome::create_node(double mu, double sigma, int node_type, int32_t &node_innovation_count, double depth) { RNN_Node_Interface *n = NULL; - Log::info("CREATING NODE, type: '%s'\n", NODE_TYPES[node_type].c_str()); + LOG_INFO("CREATING NODE, type: '%s'\n", NODE_TYPES[node_type].c_str()); if (node_type == LSTM_NODE) { n = new LSTM_Node(++node_innovation_count, HIDDEN_LAYER, depth); } else if (node_type == DELTA_NODE) { @@ -1919,18 +1926,18 @@ RNN_Node_Interface* RNN_Genome::create_node(double mu, double sigma, int node_ty } else if (node_type == SIMPLE_NODE || node_type == JORDAN_NODE || node_type == ELMAN_NODE) { n = new RNN_Node(++node_innovation_count, 
HIDDEN_LAYER, depth, node_type); } else { - Log::fatal("ERROR: attempted to create a node with an unknown node type: %d\n", node_type); + LOG_FATAL("ERROR: attempted to create a node with an unknown node type: %d\n", node_type); exit(1); } if (mutated_component_weight == WeightType::LAMARCKIAN) { - Log::debug("new component weight is lamarckian, setting new node weight to lamarckian \n"); + LOG_DEBUG("new component weight is lamarckian, setting new node weight to lamarckian \n"); n->initialize_lamarckian(generator, normal_distribution, mu, sigma); } else if (mutated_component_weight == weight_initialize) { - Log::debug("new component weight is %s, setting new node's weight randomly with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_DEBUG("new component weight is %s, setting new node's weight randomly with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); initialize_node_randomly(n); } else { - Log::fatal("new component weight is not set correctly, weight initialize: %s, new component weight: %s. \n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_FATAL("new component weight is not set correctly, weight initialize: %s, new component weight: %s. 
\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); exit(1); } @@ -1939,10 +1946,10 @@ RNN_Node_Interface* RNN_Genome::create_node(double mu, double sigma, int node_ty } bool RNN_Genome::attempt_edge_insert(RNN_Node_Interface *n1, RNN_Node_Interface *n2, double mu, double sigma, int32_t &edge_innovation_count) { - Log::info("\tadding edge between nodes %d and %d\n", n1->innovation_number, n2->innovation_number); + LOG_INFO("\tadding edge between nodes %d and %d\n", n1->innovation_number, n2->innovation_number); if (n1->depth == n2->depth) { - Log::info("\tcannot add edge between nodes as their depths are the same: %lf and %lf\n", n1->depth, n2->depth); + LOG_INFO("\tcannot add edge between nodes as their depths are the same: %lf and %lf\n", n1->depth, n2->depth); return false; } @@ -1951,7 +1958,7 @@ bool RNN_Genome::attempt_edge_insert(RNN_Node_Interface *n1, RNN_Node_Interface RNN_Node_Interface *temp = n2; n2 = n1; n1 = temp; - Log::info("\tswaping nodes, because n2->depth < n1->depth\n"); + LOG_INFO("\tswaping nodes, because n2->depth < n1->depth\n"); } @@ -1961,13 +1968,13 @@ bool RNN_Genome::attempt_edge_insert(RNN_Node_Interface *n1, RNN_Node_Interface edges[i]->output_innovation_number == n2->innovation_number) { if (!edges[i]->enabled) { //edge was disabled so we can enable it - Log::info("\tedge already exists but was disabled, enabling it.\n"); + LOG_INFO("\tedge already exists but was disabled, enabling it.\n"); edges[i]->enabled = true; // edges[i]->input_node->fan_out++; // edges[i]->output_node->fan_in++; return true; } else { - Log::info("\tedge already exists, not adding.\n"); + LOG_INFO("\tedge already exists, not adding.\n"); //edge was already enabled, so there will not be a change return false; } @@ -1976,34 +1983,34 @@ bool RNN_Genome::attempt_edge_insert(RNN_Node_Interface *n1, RNN_Node_Interface RNN_Edge *e = new RNN_Edge(++edge_innovation_count, n1, n2); if (mutated_component_weight 
== weight_initialize) { - Log::debug("setting new edge weight with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_DEBUG("setting new edge weight with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); if (weight_initialize == WeightType::XAVIER) { - Log::debug("setting new edge weight to Xavier \n"); + LOG_DEBUG("setting new edge weight to Xavier \n"); e->weight = get_xavier_weight(n2); } else if (weight_initialize == WeightType::KAIMING) { - Log::debug("setting new edge weight to Kaiming \n"); + LOG_DEBUG("setting new edge weight to Kaiming \n"); e->weight = get_kaiming_weight(n2); } else if (weight_initialize == WeightType::RANDOM) { - Log::debug("setting new edge weight to Random \n"); + LOG_DEBUG("setting new edge weight to Random \n"); e->weight = get_random_weight(); } else { - Log::fatal("weight initialization method %d is not set correctly \n", weight_initialize); + LOG_FATAL("weight initialization method %d is not set correctly \n", weight_initialize); } } else if (mutated_component_weight == WeightType::LAMARCKIAN){ - Log::debug("setting new edge weight with Lamarckian method \n"); + LOG_DEBUG("setting new edge weight with Lamarckian method \n"); e->weight = bound(normal_distribution.random(generator, mu, sigma)); } else { - Log::fatal("new component weight method is not set correctly, weight initialize: %s, new component weight: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_FATAL("new component weight method is not set correctly, weight initialize: %s, new component weight: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); } - Log::info("\tadding edge between nodes %d and %d, new edge weight: %lf\n", e->input_innovation_number, e->output_innovation_number, e->weight); + LOG_INFO("\tadding edge between nodes %d and %d, new edge weight: %lf\n", e->input_innovation_number, 
e->output_innovation_number, e->weight); edges.insert( upper_bound(edges.begin(), edges.end(), e, sort_RNN_Edges_by_depth()), e); return true; } bool RNN_Genome::attempt_recurrent_edge_insert(RNN_Node_Interface *n1, RNN_Node_Interface *n2, double mu, double sigma, uniform_int_distribution dist, int32_t &edge_innovation_count) { - Log::info("\tadding recurrent edge between nodes %d and %d\n", n1->innovation_number, n2->innovation_number); + LOG_INFO("\tadding recurrent edge between nodes %d and %d\n", n1->innovation_number, n2->innovation_number); //int32_t recurrent_depth = 1 + (rng_0_1(generator) * (max_recurrent_depth - 1)); int32_t recurrent_depth = dist(generator); @@ -2016,13 +2023,13 @@ bool RNN_Genome::attempt_recurrent_edge_insert(RNN_Node_Interface *n1, RNN_Node_ if (!recurrent_edges[i]->enabled) { //edge was disabled so we can enable it - Log::info("\trecurrent edge already exists but was disabled, enabling it.\n"); + LOG_INFO("\trecurrent edge already exists but was disabled, enabling it.\n"); recurrent_edges[i]->enabled = true; // recurrent_edges[i]->input_node->fan_out++; // recurrent_edges[i]->output_node->fan_in++; return true; } else { - Log::info("\tenabled recurrent edge already existed between selected nodes %d and %d at recurrent depth: %d\n", n1->innovation_number, n2->innovation_number, recurrent_depth); + LOG_INFO("\tenabled recurrent edge already existed between selected nodes %d and %d at recurrent depth: %d\n", n1->innovation_number, n2->innovation_number, recurrent_depth); //edge was already enabled, so there will not be a change return false; } @@ -2031,27 +2038,27 @@ bool RNN_Genome::attempt_recurrent_edge_insert(RNN_Node_Interface *n1, RNN_Node_ RNN_Recurrent_Edge *e = new RNN_Recurrent_Edge(++edge_innovation_count, recurrent_depth, n1, n2); if (mutated_component_weight == weight_initialize) { - Log::debug("setting new recurrent edge weight with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_DEBUG("setting 
new recurrent edge weight with %s method \n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); if (weight_initialize == WeightType::XAVIER) { - Log::debug("setting new recurrent edge weight to Xavier \n"); + LOG_DEBUG("setting new recurrent edge weight to Xavier \n"); e->weight = get_xavier_weight(n2); } else if (weight_initialize == WeightType::KAIMING) { - Log::debug("setting new recurrent edge weight to Kaiming \n"); + LOG_DEBUG("setting new recurrent edge weight to Kaiming \n"); e->weight = get_kaiming_weight(n2); } else if (weight_initialize == WeightType::RANDOM) { - Log::debug("setting new recurrent edge weight to Random \n"); + LOG_DEBUG("setting new recurrent edge weight to Random \n"); e->weight = get_random_weight(); } else { - Log::fatal("Weight initialization method %d is not set correctly \n", weight_initialize); + LOG_FATAL("Weight initialization method %d is not set correctly \n", weight_initialize); } } else if (mutated_component_weight == WeightType::LAMARCKIAN){ - Log::debug("setting new recurrent edge weight with Lamarckian method \n"); + LOG_DEBUG("setting new recurrent edge weight with Lamarckian method \n"); e->weight = bound(normal_distribution.random(generator, mu, sigma)); } else { - Log::fatal("new component weight method is not set correctly, weight initialize: %s, new component weight: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_FATAL("new component weight method is not set correctly, weight initialize: %s, new component weight: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str(), WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); } - Log::info("\tadding recurrent edge with innovation number %d between nodes %d and %d, new edge weight: %d\n", e->innovation_number, e->input_innovation_number, e->output_innovation_number, e->weight); + LOG_INFO("\tadding recurrent edge with innovation number %d between nodes %d and %d, new edge weight: %d\n", 
e->innovation_number, e->input_innovation_number, e->output_innovation_number, e->weight); recurrent_edges.insert( upper_bound(recurrent_edges.begin(), recurrent_edges.end(), e, sort_RNN_Recurrent_Edges_by_depth()), e); return true; @@ -2072,17 +2079,17 @@ void RNN_Genome::generate_recurrent_edges(RNN_Node_Interface *node, double mu, d } bool RNN_Genome::add_edge(double mu, double sigma, int32_t &edge_innovation_count) { - Log::info("\tattempting to add edge!\n"); + LOG_INFO("\tattempting to add edge!\n"); vector reachable_nodes; for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { if (nodes[i]->is_reachable()) reachable_nodes.push_back(nodes[i]); } - Log::info("\treachable_nodes.size(): %d\n", reachable_nodes.size()); + LOG_INFO("\treachable_nodes.size(): %d\n", reachable_nodes.size()); int position = rng_0_1(generator) * reachable_nodes.size(); RNN_Node_Interface *n1 = reachable_nodes[position]; - Log::info("\tselected first node %d with depth %d\n", n1->innovation_number, n1->depth); + LOG_INFO("\tselected first node %d with depth %d\n", n1->innovation_number, n1->depth); //printf("pos: %d, size: %d\n", position, reachable_nodes.size()); for (int i = 0; i < reachable_nodes.size();) { @@ -2096,26 +2103,26 @@ bool RNN_Genome::add_edge(double mu, double sigma, int32_t &edge_innovation_coun // for (auto i = reachable_nodes.begin(); i < reachable_nodes.end();) { // if ((*i)->depth == n1->depth) { - // Log::info("\t\terasing node %d with depth %d\n", (*i)->innovation_number, (*i)->depth); + // LOG_INFO("\t\terasing node %d with depth %d\n", (*i)->innovation_number, (*i)->depth); // reachable_nodes.erase(i); // } else { - // Log::info("\t\tkeeping node %d with depth %d\n", (*i)->innovation_number, (*i)->depth); + // LOG_INFO("\t\tkeeping node %d with depth %d\n", (*i)->innovation_number, (*i)->depth); // i++; // } // } - Log::info("\treachable_nodes.size(): %d\n", reachable_nodes.size()); + LOG_INFO("\treachable_nodes.size(): %d\n", reachable_nodes.size()); position = 
rng_0_1(generator) * reachable_nodes.size(); RNN_Node_Interface *n2 = reachable_nodes[position]; - Log::info("\tselected second node %d with depth %d\n", n2->innovation_number, n2->depth); + LOG_INFO("\tselected second node %d with depth %d\n", n2->innovation_number, n2->depth); return attempt_edge_insert(n1, n2, mu, sigma, edge_innovation_count); } bool RNN_Genome::add_recurrent_edge(double mu, double sigma, uniform_int_distribution dist, int32_t &edge_innovation_count) { - Log::info("\tattempting to add recurrent edge!\n"); + LOG_INFO("\tattempting to add recurrent edge!\n"); vector possible_input_nodes; vector possible_output_nodes; @@ -2129,8 +2136,8 @@ bool RNN_Genome::add_recurrent_edge(double mu, double sigma, uniform_int_distrib } } - Log::info("\tpossible_input_nodes.size(): %d\n", possible_input_nodes.size()); - Log::info("\tpossible_output_nodes.size(): %d\n", possible_output_nodes.size()); + LOG_INFO("\tpossible_input_nodes.size(): %d\n", possible_input_nodes.size()); + LOG_INFO("\tpossible_output_nodes.size(): %d\n", possible_output_nodes.size()); if (possible_input_nodes.size() == 0) return false; if (possible_output_nodes.size() == 0) return false; @@ -2140,10 +2147,10 @@ bool RNN_Genome::add_recurrent_edge(double mu, double sigma, uniform_int_distrib //no need to swap the nodes as recurrent connections can go backwards RNN_Node_Interface *n1 = possible_input_nodes[p1]; - Log::info("\tselected first node %d with depth %d\n", n1->innovation_number, n1->depth); + LOG_INFO("\tselected first node %d with depth %d\n", n1->innovation_number, n1->depth); RNN_Node_Interface *n2 = possible_output_nodes[p2]; - Log::info("\tselected second node %d with depth %d\n", n2->innovation_number, n2->depth); + LOG_INFO("\tselected second node %d with depth %d\n", n2->innovation_number, n2->depth); return attempt_recurrent_edge_insert(n1, n2, mu, sigma, dist, edge_innovation_count); } @@ -2213,7 +2220,7 @@ bool RNN_Genome::enable_edge() { bool 
RNN_Genome::split_edge(double mu, double sigma, int node_type, uniform_int_distribution dist, int32_t &edge_innovation_count, int32_t &node_innovation_count) { - Log::info("\tattempting to split an edge!\n"); + LOG_INFO("\tattempting to split an edge!\n"); vector enabled_edges; for (int32_t i = 0; i < edges.size(); i++) { if (edges[i]->enabled) enabled_edges.push_back(edges[i]); @@ -2264,7 +2271,7 @@ bool RNN_Genome::split_edge(double mu, double sigma, int node_type, uniform_int_ } bool RNN_Genome::connect_new_input_node(double mu, double sigma, RNN_Node_Interface *new_node, uniform_int_distribution dist, int32_t &edge_innovation_count) { - Log::info("\tattempting to connect a new input node (%d) for transfer learning!\n", new_node->innovation_number); + LOG_INFO("\tattempting to connect a new input node (%d) for transfer learning!\n", new_node->innovation_number); vector possible_outputs; @@ -2274,7 +2281,7 @@ bool RNN_Genome::connect_new_input_node(double mu, double sigma, RNN_Node_Interf for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { //can connect to output or hidden nodes if (nodes[i]->get_layer_type() == OUTPUT_LAYER || (nodes[i]->get_layer_type() == HIDDEN_LAYER && nodes[i]->is_reachable())) { - Log::info("\tpotential connection node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", nodes[i]->get_innovation_number(), nodes[i]->get_depth(), nodes[i]->get_total_inputs(), nodes[i]->get_total_outputs()); + LOG_INFO("\tpotential connection node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", nodes[i]->get_innovation_number(), nodes[i]->get_depth(), nodes[i]->get_total_inputs(), nodes[i]->get_total_outputs()); possible_outputs.push_back(nodes[i]); } @@ -2301,7 +2308,7 @@ bool RNN_Genome::connect_new_input_node(double mu, double sigma, RNN_Node_Interf int32_t max_outputs = fmax(1, 2.0 + normal_distribution.random(generator, avg_outputs, output_sigma)); - Log::info("\tadd new input node, max_outputs: %d\n", max_outputs); + LOG_INFO("\tadd new 
input node, max_outputs: %d\n", max_outputs); int32_t enabled_edges = get_enabled_edge_count(); int32_t enabled_recurrent_edges = get_enabled_recurrent_edge_count(); @@ -2309,7 +2316,7 @@ bool RNN_Genome::connect_new_input_node(double mu, double sigma, RNN_Node_Interf double recurrent_probability = (double)enabled_recurrent_edges / (double)(enabled_recurrent_edges + enabled_edges); //recurrent_probability = fmax(0.2, recurrent_probability); - Log::info("\tadd new node for transfer recurrent probability: %lf\n", recurrent_probability); + LOG_INFO("\tadd new node for transfer recurrent probability: %lf\n", recurrent_probability); while (possible_outputs.size() > max_outputs) { int32_t position = rng_0_1(generator) * possible_outputs.size(); @@ -2332,7 +2339,7 @@ bool RNN_Genome::connect_new_input_node(double mu, double sigma, RNN_Node_Interf } bool RNN_Genome::connect_new_output_node(double mu, double sigma, RNN_Node_Interface *new_node, uniform_int_distribution dist, int32_t &edge_innovation_count) { - Log::info("\tattempting to connect a new output node for transfer learning!\n"); + LOG_INFO("\tattempting to connect a new output node for transfer learning!\n"); vector possible_inputs; @@ -2343,7 +2350,7 @@ bool RNN_Genome::connect_new_output_node(double mu, double sigma, RNN_Node_Inter //can connect to input or hidden nodes if (nodes[i]->get_layer_type() == INPUT_LAYER || (nodes[i]->get_layer_type() == HIDDEN_LAYER && nodes[i]->is_reachable())) { possible_inputs.push_back(nodes[i]); - Log::info("\tpotential connection node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", nodes[i]->get_innovation_number(), nodes[i]->get_depth(), nodes[i]->get_total_inputs(), nodes[i]->get_total_outputs()); + LOG_INFO("\tpotential connection node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", nodes[i]->get_innovation_number(), nodes[i]->get_depth(), nodes[i]->get_total_inputs(), nodes[i]->get_total_outputs()); } if (nodes[i]->enabled) { @@ -2368,7 +2375,7 @@ bool 
RNN_Genome::connect_new_output_node(double mu, double sigma, RNN_Node_Inter input_sigma = sqrt(input_sigma); int32_t max_inputs = fmax(1, 2.0 + normal_distribution.random(generator, avg_inputs, input_sigma)); - Log::info("\tadd new output node, max_inputs: %d\n", max_inputs); + LOG_INFO("\tadd new output node, max_inputs: %d\n", max_inputs); int32_t enabled_edges = get_enabled_edge_count(); int32_t enabled_recurrent_edges = get_enabled_recurrent_edge_count(); @@ -2376,7 +2383,7 @@ bool RNN_Genome::connect_new_output_node(double mu, double sigma, RNN_Node_Inter double recurrent_probability = (double)enabled_recurrent_edges / (double)(enabled_recurrent_edges + enabled_edges); //recurrent_probability = fmax(0.2, recurrent_probability); - Log::info("\tadd new node for transfer recurrent probability: %lf\n", recurrent_probability); + LOG_INFO("\tadd new node for transfer recurrent probability: %lf\n", recurrent_probability); while (possible_inputs.size() > max_inputs) { int32_t position = rng_0_1(generator) * possible_inputs.size(); @@ -2470,7 +2477,7 @@ bool RNN_Genome::connect_node_to_hid_nodes( double mu, double sig, RNN_Node_Inte } e->weight = bound(normal_distribution.random(generator, mu, sigma)); - Log::debug("\tadding recurrent edge between nodes %d and %d, new edge weight: %d\n", e->input_innovation_number, e->output_innovation_number, e->weight); + LOG_DEBUG("\tadding recurrent edge between nodes %d and %d, new edge weight: %lf\n", e->input_innovation_number, e->output_innovation_number, e->weight); recurrent_edges.insert( upper_bound(recurrent_edges.begin(), recurrent_edges.end(), e, sort_RNN_Recurrent_Edges_by_depth()), e); initial_parameters.push_back(e->weight); @@ -2489,7 +2496,7 @@ bool RNN_Genome::connect_node_to_hid_nodes( double mu, double sig, RNN_Node_Inte // innovation_list.push_back(edge_innovation_count); } e->weight = bound(normal_distribution.random(generator, mu, sigma)); - Log::info("\tadding edge between nodes %d and %d, new edge weight: 
%lf\n", e->input_innovation_number, e->output_innovation_number, e->weight); + LOG_INFO("\tadding edge between nodes %d and %d, new edge weight: %lf\n", e->input_innovation_number, e->output_innovation_number, e->weight); edges.insert( upper_bound(edges.begin(), edges.end(), e, sort_RNN_Edges_by_depth()), e); initial_parameters.push_back(e->weight); @@ -2504,7 +2511,7 @@ bool RNN_Genome::connect_node_to_hid_nodes( double mu, double sig, RNN_Node_Inte /* ################# ################# ################# */ bool RNN_Genome::add_node(double mu, double sigma, int node_type, uniform_int_distribution dist, int32_t &edge_innovation_count, int32_t &node_innovation_count) { - Log::info("\tattempting to add a node!\n"); + LOG_INFO("\tattempting to add a node!\n"); double split_depth = rng_0_1(generator); vector possible_inputs; @@ -2551,7 +2558,7 @@ bool RNN_Genome::add_node(double mu, double sigma, int node_type, uniform_int_di int32_t max_inputs = fmax(1, 2.0 + normal_distribution.random(generator, avg_inputs, input_sigma)); int32_t max_outputs = fmax(1, 2.0 + normal_distribution.random(generator, avg_outputs, output_sigma)); - Log::info("\tadd node, split depth: %lf, max_inputs: %d, max_outputs: %d\n", split_depth, max_inputs, max_outputs); + LOG_INFO("\tadd node, split depth: %lf, max_inputs: %d, max_outputs: %d\n", split_depth, max_inputs, max_outputs); int32_t enabled_edges = get_enabled_edge_count(); int32_t enabled_recurrent_edges = get_enabled_recurrent_edge_count(); @@ -2559,7 +2566,7 @@ bool RNN_Genome::add_node(double mu, double sigma, int node_type, uniform_int_di double recurrent_probability = (double)enabled_recurrent_edges / (double)(enabled_recurrent_edges + enabled_edges); //recurrent_probability = fmax(0.2, recurrent_probability); - Log::info("\tadd node recurrent probability: %lf\n", recurrent_probability); + LOG_INFO("\tadd node recurrent probability: %lf\n", recurrent_probability); while (possible_inputs.size() > max_inputs) { int32_t position = 
rng_0_1(generator) * possible_inputs.size(); @@ -2604,7 +2611,7 @@ bool RNN_Genome::add_node(double mu, double sigma, int node_type, uniform_int_di } bool RNN_Genome::enable_node() { - Log::info("\tattempting to enable a node!\n"); + LOG_INFO("\tattempting to enable a node!\n"); vector possible_nodes; for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { if (!nodes[i]->enabled) possible_nodes.push_back(nodes[i]); @@ -2614,13 +2621,13 @@ bool RNN_Genome::enable_node() { int position = rng_0_1(generator) * possible_nodes.size(); possible_nodes[position]->enabled = true; - Log::info("\tenabling node %d at depth %lf\n", possible_nodes[position]->innovation_number, possible_nodes[position]->depth); + LOG_INFO("\tenabling node %d at depth %lf\n", possible_nodes[position]->innovation_number, possible_nodes[position]->depth); return true; } bool RNN_Genome::disable_node() { - Log::info("\tattempting to disable a node!\n"); + LOG_INFO("\tattempting to disable a node!\n"); vector possible_nodes; for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { if (nodes[i]->layer_type != OUTPUT_LAYER && nodes[i]->enabled) possible_nodes.push_back(nodes[i]); @@ -2630,13 +2637,13 @@ bool RNN_Genome::disable_node() { int position = rng_0_1(generator) * possible_nodes.size(); possible_nodes[position]->enabled = false; - Log::info("\tdisabling node %d at depth %lf\n", possible_nodes[position]->innovation_number, possible_nodes[position]->depth); + LOG_INFO("\tdisabling node %d at depth %lf\n", possible_nodes[position]->innovation_number, possible_nodes[position]->depth); return true; } bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_distribution dist, int32_t &edge_innovation_count, int32_t &node_innovation_count) { - Log::info("\tattempting to split a node!\n"); + LOG_INFO("\tattempting to split a node!\n"); vector possible_nodes; for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { if (nodes[i]->layer_type != INPUT_LAYER && nodes[i]->layer_type != 
OUTPUT_LAYER && @@ -2649,7 +2656,7 @@ bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ int position = rng_0_1(generator) * possible_nodes.size(); RNN_Node_Interface *selected_node = possible_nodes[position]; - Log::info("\tselected node: %d at depth %lf\n", selected_node->innovation_number, selected_node->depth); + LOG_INFO("\tselected node: %d at depth %lf\n", selected_node->innovation_number, selected_node->depth); vector input_edges; vector output_edges; @@ -2674,10 +2681,10 @@ bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ if (rng_0_1(generator) < 0.5) recurrent_edges_2.push_back(recurrent_edges[i]); } } - Log::info("\t\trecurrent_edges_1.size(): %d, recurrent_edges_2.size(): %d, input_edges.size(): %d, output_edges.size(): %d\n", recurrent_edges_1.size(), recurrent_edges_2.size(), input_edges.size(), output_edges.size()); + LOG_INFO("\t\trecurrent_edges_1.size(): %d, recurrent_edges_2.size(): %d, input_edges.size(): %d, output_edges.size(): %d\n", recurrent_edges_1.size(), recurrent_edges_2.size(), input_edges.size(), output_edges.size()); if (input_edges.size() == 0 || output_edges.size() == 0) { - Log::warning("\tthe input or output edges size was 0 for the selected node, we cannot split it\n"); + LOG_WARNING("\tthe input or output edges size was 0 for the selected node, we cannot split it\n"); //write_graphviz("error_genome.gv"); //exit(1); return false; @@ -2768,7 +2775,7 @@ bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ attempt_edge_insert(new_node_2, output_edges_2[i]->output_node, mu, sigma, edge_innovation_count); } - Log::debug("\tattempting recurrent edge inserts for split node\n"); + LOG_DEBUG("\tattempting recurrent edge inserts for split node\n"); for (int32_t i = 0; i < (int32_t)recurrent_edges_1.size(); i++) { if (recurrent_edges_1[i]->input_innovation_number == selected_node->innovation_number) { @@ -2776,7 +2783,7 @@ bool 
RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ } else if (recurrent_edges_1[i]->output_innovation_number == selected_node->innovation_number) { attempt_recurrent_edge_insert(recurrent_edges_1[i]->input_node, new_node_1, mu, sigma, dist, edge_innovation_count); } else { - Log::fatal("\trecurrent edge list for split had an edge which was not connected to the selected node! This should never happen.\n"); + LOG_FATAL("\trecurrent edge list for split had an edge which was not connected to the selected node! This should never happen.\n"); exit(1); } //disable the old recurrent edges @@ -2789,7 +2796,7 @@ bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ } else if (recurrent_edges_2[i]->output_innovation_number == selected_node->innovation_number) { attempt_recurrent_edge_insert(recurrent_edges_2[i]->input_node, new_node_2, mu, sigma, dist, edge_innovation_count); } else { - Log::fatal("\trecurrent edge list for split had an edge which was not connected to the selected node! This should never happen.\n"); + LOG_FATAL("\trecurrent edge list for split had an edge which was not connected to the selected node! 
This should never happen.\n"); exit(1); } //disable the old recurrent edges @@ -2821,7 +2828,7 @@ bool RNN_Genome::split_node(double mu, double sigma, int node_type, uniform_int_ } bool RNN_Genome::merge_node(double mu, double sigma, int node_type, uniform_int_distribution dist, int32_t &edge_innovation_count, int32_t &node_innovation_count) { - Log::info("\tattempting to merge a node!\n"); + LOG_INFO("\tattempting to merge a node!\n"); vector possible_nodes; for (int32_t i = 0; i < (int32_t)nodes.size(); i++) { if (nodes[i]->layer_type != INPUT_LAYER && nodes[i]->layer_type != OUTPUT_LAYER) possible_nodes.push_back(nodes[i]); @@ -2890,7 +2897,7 @@ bool RNN_Genome::merge_node(double mu, double sigma, int node_type, uniform_int_ } if (input_node->depth == output_node->depth) { - Log::info("\tskipping merged edge because the input and output nodes are the same depth\n"); + LOG_INFO("\tskipping merged edge because the input and output nodes are the same depth\n"); continue; } @@ -2984,7 +2991,7 @@ string RNN_Genome::get_color(double weight, bool is_recurrent) { } Color color = get_colormap(value); - Log::debug("weight: %lf, converted to value: %lf\n", weight, value); + LOG_DEBUG("weight: %lf, converted to value: %lf\n", weight, value); oss << hex << setw(2) << setfill('0') << color.red << hex << setw(2) << setfill('0') << color.green @@ -3167,7 +3174,7 @@ void write_map(ostream &out, map &m) { void write_binary_string(ostream &out, string s, string name) { int32_t n = s.size(); - Log::debug("writing %d %s characters '%s'\n", n, name.c_str(), s.c_str()); + LOG_DEBUG("writing %d %s characters '%s'\n", n, name.c_str(), s.c_str()); out.write((char*)&n, sizeof(int32_t)); if (n > 0) { out.write((char*)&s[0], sizeof(char) * s.size()); @@ -3178,7 +3185,7 @@ void read_binary_string(istream &in, string &s, string name) { int32_t n; in.read((char*)&n, sizeof(int32_t)); - Log::debug("reading %d %s characters.\n", n, name.c_str()); + LOG_DEBUG("reading %d %s characters.\n", n, 
name.c_str()); if (n > 0) { char* s_v = new char[n]; in.read((char*)s_v, sizeof(char) * n); @@ -3188,7 +3195,7 @@ void read_binary_string(istream &in, string &s, string name) { s.assign(""); } - Log::debug("read %d %s characters '%s'\n", n, name.c_str(), s.c_str()); + LOG_DEBUG("read %d %s characters '%s'\n", n, name.c_str(), s.c_str()); } @@ -3196,7 +3203,7 @@ RNN_Genome::RNN_Genome(string binary_filename) { ifstream bin_infile(binary_filename, ios::in | ios::binary); if (!bin_infile.good()) { - Log::fatal("ERROR: could not open RNN genome file '%s' for reading.\n", binary_filename.c_str()); + LOG_FATAL("ERROR: could not open RNN genome file '%s' for reading.\n", binary_filename.c_str()); exit(1); } @@ -3223,7 +3230,7 @@ void RNN_Genome::read_from_array(char *array, int32_t length) { } void RNN_Genome::read_from_stream(istream &bin_istream) { - Log::debug("READING GENOME FROM STREAM\n"); + LOG_DEBUG("READING GENOME FROM STREAM\n"); bin_istream.read((char*)&generation_id, sizeof(int32_t)); bin_istream.read((char*)&group_id, sizeof(int32_t)); @@ -3246,25 +3253,25 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { bin_istream.read((char*)&weight_inheritance, sizeof(int32_t)); bin_istream.read((char*)&mutated_component_weight, sizeof(int32_t)); - Log::debug("generation_id: %d\n", generation_id); - Log::debug("bp_iterations: %d\n", bp_iterations); - Log::debug("learning_rate: %lf\n", learning_rate); - Log::debug("adapt_learning_rate: %d\n", adapt_learning_rate); - Log::debug("use_nesterov_momentum: %d\n", use_nesterov_momentum); - Log::debug("use_reset_weights: %d\n", use_reset_weights); + LOG_DEBUG("generation_id: %d\n", generation_id); + LOG_DEBUG("bp_iterations: %d\n", bp_iterations); + LOG_DEBUG("learning_rate: %lf\n", learning_rate); + LOG_DEBUG("adapt_learning_rate: %d\n", adapt_learning_rate); + LOG_DEBUG("use_nesterov_momentum: %d\n", use_nesterov_momentum); + LOG_DEBUG("use_reset_weights: %d\n", use_reset_weights); - Log::debug("use_high_norm: %d\n", 
use_high_norm); - Log::debug("high_threshold: %lf\n", high_threshold); - Log::debug("use_low_norm: %d\n", use_low_norm); - Log::debug("low_threshold: %lf\n", low_threshold); + LOG_DEBUG("use_high_norm: %d\n", use_high_norm); + LOG_DEBUG("high_threshold: %lf\n", high_threshold); + LOG_DEBUG("use_low_norm: %d\n", use_low_norm); + LOG_DEBUG("low_threshold: %lf\n", low_threshold); - Log::debug("use_dropout: %d\n", use_regression); - Log::debug("use_dropout: %d\n", use_dropout); - Log::debug("dropout_probability: %lf\n", dropout_probability); + LOG_DEBUG("use_regression: %d\n", use_regression); + LOG_DEBUG("use_dropout: %d\n", use_dropout); + LOG_DEBUG("dropout_probability: %lf\n", dropout_probability); - Log::debug("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); - Log::debug("weight inheritance: %s\n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); - Log::debug("new component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_DEBUG("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); + LOG_DEBUG("weight inheritance: %s\n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); + LOG_DEBUG("new component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); read_binary_string(bin_istream, log_filename, "log_filename"); string generator_str; @@ -3293,7 +3300,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { int32_t n_initial_parameters; bin_istream.read((char*)&n_initial_parameters, sizeof(int32_t)); - Log::debug("reading %d initial parameters.\n", n_initial_parameters); + LOG_DEBUG("reading %d initial parameters.\n", n_initial_parameters); double* initial_parameters_v = new double[n_initial_parameters]; bin_istream.read((char*)initial_parameters_v, sizeof(double) * n_initial_parameters); initial_parameters.assign(initial_parameters_v, initial_parameters_v + n_initial_parameters); @@ -3301,7 +3308,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { int32_t 
n_best_parameters; bin_istream.read((char*)&n_best_parameters, sizeof(int32_t)); - Log::debug("reading %d best parameters.\n", n_best_parameters); + LOG_DEBUG("reading %d best parameters.\n", n_best_parameters); double* best_parameters_v = new double[n_best_parameters]; bin_istream.read((char*)best_parameters_v, sizeof(double) * n_best_parameters); best_parameters.assign(best_parameters_v, best_parameters_v + n_best_parameters); @@ -3311,7 +3318,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { input_parameter_names.clear(); int32_t n_input_parameter_names; bin_istream.read((char*)&n_input_parameter_names, sizeof(int32_t)); - Log::debug("reading %d input parameter names.\n", n_input_parameter_names); + LOG_DEBUG("reading %d input parameter names.\n", n_input_parameter_names); for (int32_t i = 0; i < n_input_parameter_names; i++) { string input_parameter_name; read_binary_string(bin_istream, input_parameter_name, "input_parameter_names[" + std::to_string(i) + "]"); @@ -3321,7 +3328,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { output_parameter_names.clear(); int32_t n_output_parameter_names; bin_istream.read((char*)&n_output_parameter_names, sizeof(int32_t)); - Log::debug("reading %d output parameter names.\n", n_output_parameter_names); + LOG_DEBUG("reading %d output parameter names.\n", n_output_parameter_names); for (int32_t i = 0; i < n_output_parameter_names; i++) { string output_parameter_name; read_binary_string(bin_istream, output_parameter_name, "output_parameter_names[" + std::to_string(i) + "]"); @@ -3332,7 +3339,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { int32_t n_nodes; bin_istream.read((char*)&n_nodes, sizeof(int32_t)); - Log::debug("reading %d nodes.\n", n_nodes); + LOG_DEBUG("reading %d nodes.\n", n_nodes); nodes.clear(); for (int32_t i = 0; i < n_nodes; i++) { @@ -3351,7 +3358,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { string parameter_name; read_binary_string(bin_istream, 
parameter_name, "parameter_name"); - Log::debug("NODE: %d %d %d %lf %d '%s'\n", innovation_number, layer_type, node_type, depth, enabled, parameter_name.c_str()); + LOG_DEBUG("NODE: %d %d %d %lf %d '%s'\n", innovation_number, layer_type, node_type, depth, enabled, parameter_name.c_str()); RNN_Node_Interface *node; if (node_type == LSTM_NODE) { @@ -3377,7 +3384,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { node = new RNN_Node(innovation_number, layer_type, depth, node_type, parameter_name); } } else { - Log::fatal("Error reading node from stream, unknown node_type: %d\n", node_type); + LOG_FATAL("Error reading node from stream, unknown node_type: %d\n", node_type); exit(1); } @@ -3388,7 +3395,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { int32_t n_edges; bin_istream.read((char*)&n_edges, sizeof(int32_t)); - Log::debug("reading %d edges.\n", n_edges); + LOG_DEBUG("reading %d edges.\n", n_edges); edges.clear(); for (int32_t i = 0; i < n_edges; i++) { @@ -3402,7 +3409,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { bin_istream.read((char*)&output_innovation_number, sizeof(int32_t)); bin_istream.read((char*)&enabled, sizeof(bool)); - Log::debug("EDGE: %d %d %d %d\n", innovation_number, input_innovation_number, output_innovation_number, enabled); + LOG_DEBUG("EDGE: %d %d %d %d\n", innovation_number, input_innovation_number, output_innovation_number, enabled); RNN_Edge *edge = new RNN_Edge(innovation_number, input_innovation_number, output_innovation_number, nodes); // innovation_list.push_back(innovation_number); @@ -3413,7 +3420,7 @@ void RNN_Genome::read_from_stream(istream &bin_istream) { int32_t n_recurrent_edges; bin_istream.read((char*)&n_recurrent_edges, sizeof(int32_t)); - Log::debug("reading %d recurrent_edges.\n", n_recurrent_edges); + LOG_DEBUG("reading %d recurrent_edges.\n", n_recurrent_edges); recurrent_edges.clear(); for (int32_t i = 0; i < n_recurrent_edges; i++) { @@ -3429,7 +3436,7 @@ void 
RNN_Genome::read_from_stream(istream &bin_istream) { bin_istream.read((char*)&output_innovation_number, sizeof(int32_t)); bin_istream.read((char*)&enabled, sizeof(bool)); - Log::debug("RECURRENT EDGE: %d %d %d %d %d\n", innovation_number, recurrent_depth, input_innovation_number, output_innovation_number, enabled); + LOG_DEBUG("RECURRENT EDGE: %d %d %d %d %d\n", innovation_number, recurrent_depth, input_innovation_number, output_innovation_number, enabled); RNN_Recurrent_Edge *recurrent_edge = new RNN_Recurrent_Edge(innovation_number, recurrent_depth, input_innovation_number, output_innovation_number, nodes); // innovation_list.push_back(innovation_number); @@ -3482,7 +3489,7 @@ void RNN_Genome::write_to_file(string bin_filename) { void RNN_Genome::write_to_stream(ostream &bin_ostream) { - Log::debug("WRITING GENOME TO STREAM\n"); + LOG_DEBUG("WRITING GENOME TO STREAM\n"); bin_ostream.write((char*)&generation_id, sizeof(int32_t)); bin_ostream.write((char*)&group_id, sizeof(int32_t)); bin_ostream.write((char*)&bp_iterations, sizeof(int32_t)); @@ -3504,25 +3511,25 @@ void RNN_Genome::write_to_stream(ostream &bin_ostream) { bin_ostream.write((char*)&weight_inheritance, sizeof(int32_t)); bin_ostream.write((char*)&mutated_component_weight, sizeof(int32_t)); - Log::debug("generation_id: %d\n", generation_id); - Log::debug("bp_iterations: %d\n", bp_iterations); - Log::debug("learning_rate: %lf\n", learning_rate); - Log::debug("adapt_learning_rate: %d\n", adapt_learning_rate); - Log::debug("use_nesterov_momentum: %d\n", use_nesterov_momentum); - Log::debug("use_reset_weights: %d\n", use_reset_weights); + LOG_DEBUG("generation_id: %d\n", generation_id); + LOG_DEBUG("bp_iterations: %d\n", bp_iterations); + LOG_DEBUG("learning_rate: %lf\n", learning_rate); + LOG_DEBUG("adapt_learning_rate: %d\n", adapt_learning_rate); + LOG_DEBUG("use_nesterov_momentum: %d\n", use_nesterov_momentum); + LOG_DEBUG("use_reset_weights: %d\n", use_reset_weights); - Log::debug("use_high_norm: 
%d\n", use_high_norm); - Log::debug("high_threshold: %lf\n", high_threshold); - Log::debug("use_low_norm: %d\n", use_low_norm); - Log::debug("low_threshold: %lf\n", low_threshold); + LOG_DEBUG("use_high_norm: %d\n", use_high_norm); + LOG_DEBUG("high_threshold: %lf\n", high_threshold); + LOG_DEBUG("use_low_norm: %d\n", use_low_norm); + LOG_DEBUG("low_threshold: %lf\n", low_threshold); - Log::debug("use_dropout: %d\n", use_regression); - Log::debug("use_dropout: %d\n", use_dropout); - Log::debug("dropout_probability: %lf\n", dropout_probability); + LOG_DEBUG("use_regression: %d\n", use_regression); + LOG_DEBUG("use_dropout: %d\n", use_dropout); + LOG_DEBUG("dropout_probability: %lf\n", dropout_probability); - Log::debug("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); - Log::debug("weight inheritance: %s\n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); - Log::debug("new component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); + LOG_DEBUG("weight initialize: %s\n", WEIGHT_TYPES_STRING[weight_initialize].c_str()); + LOG_DEBUG("weight inheritance: %s\n", WEIGHT_TYPES_STRING[weight_inheritance].c_str()); + LOG_DEBUG("new component weight: %s\n", WEIGHT_TYPES_STRING[mutated_component_weight].c_str()); write_binary_string(bin_ostream, log_filename, "log_filename"); @@ -3545,7 +3552,7 @@ void RNN_Genome::write_to_stream(ostream &bin_ostream) { bin_ostream.write((char*)&best_validation_mae, sizeof(double)); int32_t n_initial_parameters = initial_parameters.size(); - Log::debug("writing %d initial parameters.\n", n_initial_parameters); + LOG_DEBUG("writing %d initial parameters.\n", n_initial_parameters); bin_ostream.write((char*)&n_initial_parameters, sizeof(int32_t)); bin_ostream.write((char*)&initial_parameters[0], sizeof(double) * initial_parameters.size()); @@ -3569,30 +3576,30 @@ void RNN_Genome::write_to_stream(ostream &bin_ostream) { int32_t n_nodes = nodes.size(); bin_ostream.write((char*)&n_nodes, 
sizeof(int32_t)); - Log::debug("writing %d nodes.\n", n_nodes); + LOG_DEBUG("writing %d nodes.\n", n_nodes); for (uint32_t i = 0; i < nodes.size(); i++) { - Log::debug("NODE: %d %d %d %lf '%s'\n", nodes[i]->innovation_number, nodes[i]->layer_type, nodes[i]->node_type, nodes[i]->depth, nodes[i]->parameter_name.c_str()); + LOG_DEBUG("NODE: %d %d %d %lf '%s'\n", nodes[i]->innovation_number, nodes[i]->layer_type, nodes[i]->node_type, nodes[i]->depth, nodes[i]->parameter_name.c_str()); nodes[i]->write_to_stream(bin_ostream); } int32_t n_edges = edges.size(); bin_ostream.write((char*)&n_edges, sizeof(int32_t)); - Log::debug("writing %d edges.\n", n_edges); + LOG_DEBUG("writing %d edges.\n", n_edges); for (uint32_t i = 0; i < edges.size(); i++) { - Log::debug("EDGE: %d %d %d\n", edges[i]->innovation_number, edges[i]->input_innovation_number, edges[i]->output_innovation_number); + LOG_DEBUG("EDGE: %d %d %d\n", edges[i]->innovation_number, edges[i]->input_innovation_number, edges[i]->output_innovation_number); edges[i]->write_to_stream(bin_ostream); } int32_t n_recurrent_edges = recurrent_edges.size(); bin_ostream.write((char*)&n_recurrent_edges, sizeof(int32_t)); - Log::debug("writing %d recurrent edges.\n", n_recurrent_edges); + LOG_DEBUG("writing %d recurrent edges.\n", n_recurrent_edges); for (uint32_t i = 0; i < recurrent_edges.size(); i++) { - Log::debug("RECURRENT EDGE: %d %d %d %d\n", recurrent_edges[i]->innovation_number, recurrent_edges[i]->recurrent_depth, recurrent_edges[i]->input_innovation_number, recurrent_edges[i]->output_innovation_number); + LOG_DEBUG("RECURRENT EDGE: %d %d %d %d\n", recurrent_edges[i]->innovation_number, recurrent_edges[i]->recurrent_depth, recurrent_edges[i]->input_innovation_number, recurrent_edges[i]->output_innovation_number); recurrent_edges[i]->write_to_stream(bin_ostream); } @@ -3640,11 +3647,11 @@ void RNN_Genome::update_innovation_counts(int32_t &node_innovation_count, int32_ if (max_node_innovation_count == -1) { // Fatal log 
message - Log::fatal("Seed genome had max node innovation number of -1 - this should never happen (unless the genome is empty :)"); + LOG_FATAL("Seed genome had max node innovation number of -1 - this should never happen (unless the genome is empty :)"); } if (max_edge_innovation_count == -1) { // Fatal log message - Log::fatal("Seed genome had max node innovation number of -1 - this should never happen (and the genome isn't empty since max_node_innovation_count > -1)"); + LOG_FATAL("Seed genome had max edge innovation number of -1 - this should never happen (and the genome isn't empty since max_node_innovation_count > -1)"); } // One more than the highest we've seen should be good enough. @@ -3693,12 +3700,12 @@ int RNN_Genome::get_max_edge_innovation_count() { void RNN_Genome::transfer_to(const vector &new_input_parameter_names, const vector &new_output_parameter_names, string transfer_learning_version, bool epigenetic_weights, int32_t min_recurrent_depth, int32_t max_recurrent_depth) { - Log::info("DOING TRANSFER OF GENOME!\n"); + LOG_INFO("DOING TRANSFER OF GENOME!\n"); double mu, sigma; set_weights(best_parameters); get_mu_sigma(best_parameters, mu, sigma); - Log::info("before transfer, mu: %lf, sigma: %lf\n", mu, sigma); + LOG_INFO("before transfer, mu: %lf, sigma: %lf\n", mu, sigma); //make sure we don't duplicate new node/edge innovation numbers int node_innovation_count = get_max_node_innovation_count() + 1; @@ -3709,36 +3716,42 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co //work backwards so we don't skip removing anything for (int i = nodes.size() - 1; i >= 0; i--) { - Log::info("checking node: %d\n", i); + LOG_INFO("checking node: %d\n", i); //add all the input and output nodes to the input_nodes and output_nodes vectors, //and remove them from the node vector for the time being RNN_Node_Interface *node = nodes[i]; if (node->layer_type == INPUT_LAYER) { input_nodes.push_back(node); - Log::info("erasing node: %d of %d\n", 
i, nodes.size()); + LOG_INFO("erasing node: %d of %d\n", i, nodes.size()); nodes.erase(nodes.begin() + i); - Log::info("input node with parameter name: '%s'\n", node->parameter_name.c_str()); + LOG_INFO("input node with parameter name: '%s'\n", node->parameter_name.c_str()); } else if (node->layer_type == OUTPUT_LAYER) { output_nodes.push_back(node); - Log::info("erasing node: %d of %d\n", i, nodes.size()); + LOG_INFO("erasing node: %d of %d\n", i, nodes.size()); nodes.erase(nodes.begin() + i); - Log::info("output node with parameter name: '%s'\n", node->parameter_name.c_str()); + LOG_INFO("output node with parameter name: '%s'\n", node->parameter_name.c_str()); } } - Log::info("original input parameter names:\n"); + string log_str = ""; + + log_str = "original input parameter names:\n"; for (int i = 0; i < input_parameter_names.size(); i++) { - Log::info_no_header(" %s", input_parameter_names[i].c_str()); + log_str = log_str + string_format(" %s", input_parameter_names[i].c_str()); } - Log::info_no_header("\n"); + log_str = log_str + "\n"; + LOG_INFO(log_str.c_str()); - Log::info("new input parameter names:\n"); + log_str = ""; + + log_str = "new input parameter names:\n"; for (int i = 0; i < new_input_parameter_names.size(); i++) { - Log::info_no_header(" %s", new_input_parameter_names[i].c_str()); + log_str = log_str + string_format(" %s", new_input_parameter_names[i].c_str()); } - Log::info_no_header("\n"); + log_str = log_str + "\n"; + LOG_INFO(log_str.c_str()); //first figure out which input nodes we're keeping, and add new input @@ -3759,7 +3772,7 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co } if (parameter_position >= 0) { - Log::info("keeping input node for parameter '%s'\n", new_input_parameter_names[i].c_str()); + LOG_INFO("keeping input node for parameter '%s'\n", new_input_parameter_names[i].c_str()); //this input node already existed in the genome new_input_nodes.push_back(input_nodes[parameter_position]); 
new_inputs.push_back(false); @@ -3768,7 +3781,7 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co //re-use it input_nodes.erase(input_nodes.begin() + parameter_position); } else { - Log::info("creating new input node for parameter '%s'\n", new_input_parameter_names[i].c_str()); + LOG_INFO("creating new input node for parameter '%s'\n", new_input_parameter_names[i].c_str()); //create a new input node for this parameter new_inputs.push_back(true); RNN_Node *node = new RNN_Node(++node_innovation_count, INPUT_LAYER, 0.0 /*input nodes should be depth 0*/, SIMPLE_NODE, new_input_parameter_names[i]); @@ -3776,31 +3789,31 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co } } - Log::info("new input node parameter names (should be the same as new input parameter names):\n"); + LOG_INFO("new input node parameter names (should be the same as new input parameter names):\n"); for (int i = 0; i < new_input_nodes.size(); i++) { - Log::info("\t%s (new: %s)\n", new_input_nodes[i]->parameter_name.c_str(), new_inputs[i] ? "true" : "false"); + LOG_INFO("\t%s (new: %s)\n", new_input_nodes[i]->parameter_name.c_str(), new_inputs[i] ? 
"true" : "false"); } //delete all the input nodes that were not kept in the transfer process for (int i = input_nodes.size() - 1; i >= 0; i--) { - Log::info("deleting outgoing edges for input node[%d] with parameter name: '%s' and innovation number %d\n", i, input_nodes[i]->parameter_name.c_str(), input_nodes[i]->innovation_number); + LOG_INFO("deleting outgoing edges for input node[%d] with parameter name: '%s' and innovation number %d\n", i, input_nodes[i]->parameter_name.c_str(), input_nodes[i]->innovation_number); //first delete any outgoing edges from the input node to be deleted for (int j = edges.size() - 1; j >= 0; j--) { if (edges[j]->input_innovation_number == input_nodes[i]->innovation_number) { - Log::info("deleting edges[%d] with innovation number: %d and input_innovation_number %d\n", j, edges[j]->innovation_number, edges[j]->input_innovation_number); + LOG_INFO("deleting edges[%d] with innovation number: %d and input_innovation_number %d\n", j, edges[j]->innovation_number, edges[j]->input_innovation_number); delete edges[j]; edges.erase(edges.begin() + j); } } - Log::info("deleting recurrent edges\n"); + LOG_INFO("deleting recurrent edges\n"); //do the same for any outgoing recurrent edges for (int j = recurrent_edges.size() - 1; j >= 0; j--) { //recurrent edges shouldn't go into input nodes, but check to see if it has a connection either way to the node being deleted if (recurrent_edges[j]->input_innovation_number == input_nodes[i]->innovation_number || recurrent_edges[j]->output_innovation_number == input_nodes[i]->innovation_number) { - Log::info("deleting recurrent_edges[%d] with innovation number: %d and input_innovation_number %d\n", j, recurrent_edges[j]->innovation_number, recurrent_edges[j]->input_innovation_number); + LOG_INFO("deleting recurrent_edges[%d] with innovation number: %d and input_innovation_number %d\n", j, recurrent_edges[j]->innovation_number, recurrent_edges[j]->input_innovation_number); delete recurrent_edges[j]; 
recurrent_edges.erase(recurrent_edges.begin() + j); } @@ -3811,18 +3824,23 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co } - Log::info("original output parameter names:\n"); + log_str = ""; + + log_str = "original output parameter names:\n"; for (int i = 0; i < output_parameter_names.size(); i++) { - Log::info_no_header(" %s", output_parameter_names[i].c_str()); + log_str = log_str + string_format(" %s", output_parameter_names[i].c_str()); } - Log::info_no_header("\n"); + log_str = log_str + "\n"; + LOG_INFO(log_str.c_str()); - Log::info("new output parameter names:\n"); + log_str = ""; + + log_str = "new output parameter names:\n"; for (int i = 0; i < new_output_parameter_names.size(); i++) { - Log::info_no_header(" %s", new_output_parameter_names[i].c_str()); + log_str = log_str + string_format(" %s", new_output_parameter_names[i].c_str()); } - Log::info_no_header("\n"); - + log_str = log_str + "\n"; + LOG_INFO(log_str.c_str()); //first figure out which output nodes we're keeping, and add new output //nodes as needed @@ -3830,23 +3848,23 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co vector new_outputs; //this will track if new output node was new (true) or added from the genome (false) for (int i = 0; i < new_output_parameter_names.size(); i++) { - Log::info("finding output node with parameter name: '%s\n", new_output_parameter_names[i].c_str()); + LOG_INFO("finding output node with parameter name: '%s\n", new_output_parameter_names[i].c_str()); int parameter_position = -1; //iterate through the output parameter names to find the output //node related to this new output paramter name, if it is //not found we need to make a new node for it for (int j = 0; j < output_nodes.size(); j++) { - Log::info("\tchecking output_nodes[%d]->parameter_name: '%s'\n", j, output_nodes[j]->parameter_name.c_str()); + LOG_INFO("\tchecking output_nodes[%d]->parameter_name: '%s'\n", j, output_nodes[j]->parameter_name.c_str()); 
if (output_nodes[j]->parameter_name.compare(new_output_parameter_names[i]) == 0) { - Log::info("\t\tMATCH!\n"); + LOG_INFO("\t\tMATCH!\n"); parameter_position = j; break; } } if (parameter_position >= 0) { - Log::info("keeping output node for parameter '%s'\n", new_output_parameter_names[i].c_str()); + LOG_INFO("keeping output node for parameter '%s'\n", new_output_parameter_names[i].c_str()); //this output node already existed in the genome new_output_nodes.push_back(output_nodes[parameter_position]); new_outputs.push_back(false); @@ -3855,7 +3873,7 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co //re-use it output_nodes.erase(output_nodes.begin() + parameter_position); } else { - Log::info("creating new output node for parameter '%s'\n", new_output_parameter_names[i].c_str()); + LOG_INFO("creating new output node for parameter '%s'\n", new_output_parameter_names[i].c_str()); //create a new output node for this parameter new_outputs.push_back(true); RNN_Node *node = new RNN_Node(++node_innovation_count, OUTPUT_LAYER, 1.0 /*output nodes should be depth 1*/, SIMPLE_NODE, new_output_parameter_names[i]); @@ -3863,37 +3881,37 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co } } - Log::info("new output node parameter names (should be the same as new output parameter names):\n"); + LOG_INFO("new output node parameter names (should be the same as new output parameter names):\n"); for (int i = 0; i < new_output_nodes.size(); i++) { - Log::info("\t%s (new: %s)\n", new_output_nodes[i]->parameter_name.c_str(), new_outputs[i] ? "true" : "false"); + LOG_INFO("\t%s (new: %s)\n", new_output_nodes[i]->parameter_name.c_str(), new_outputs[i] ? 
"true" : "false"); } //delete all the output nodes that were not kept in the transfer process for (int i = output_nodes.size() - 1; i >= 0; i--) { - Log::info("deleting incoming edges for output node[%d] with parameter name: '%s' and innovation number %d\n", i, output_nodes[i]->parameter_name.c_str(), output_nodes[i]->innovation_number); + LOG_INFO("deleting incoming edges for output node[%d] with parameter name: '%s' and innovation number %d\n", i, output_nodes[i]->parameter_name.c_str(), output_nodes[i]->innovation_number); //first delete any incoming edges to the output node to be deleted for (int j = edges.size() - 1; j >= 0; j--) { if (edges[j]->output_innovation_number == output_nodes[i]->innovation_number) { - Log::info("deleting edges[%d] with innovation number: %d and output_innovation_number %d\n", j, edges[j]->innovation_number, edges[j]->output_innovation_number); + LOG_INFO("deleting edges[%d] with innovation number: %d and output_innovation_number %d\n", j, edges[j]->innovation_number, edges[j]->output_innovation_number); delete edges[j]; edges.erase(edges.begin() + j); } } - Log::info("doing recurrent edges\n"); + LOG_INFO("doing recurrent edges\n"); //do the same for any outgoing recurrent edges for (int j = recurrent_edges.size() - 1; j >= 0; j--) { //output nodes can be the input to a recurrent edge so we need to delete those recurrent edges too if the output node is being deleted if (recurrent_edges[j]->output_innovation_number == output_nodes[i]->innovation_number || recurrent_edges[j]->input_innovation_number == output_nodes[i]->innovation_number) { - Log::info("deleting recurrent_edges[%d] with innovation number: %d and output_innovation_number %d\n", j, recurrent_edges[j]->innovation_number, recurrent_edges[j]->output_innovation_number); + LOG_INFO("deleting recurrent_edges[%d] with innovation number: %d and output_innovation_number %d\n", j, recurrent_edges[j]->innovation_number, recurrent_edges[j]->output_innovation_number); delete 
recurrent_edges[j]; recurrent_edges.erase(recurrent_edges.begin() + j); } } - Log::info("deleting output_nodes[%d]\n", i); + LOG_INFO("deleting output_nodes[%d]\n", i); delete output_nodes[i]; output_nodes.erase(output_nodes.begin() + i); @@ -3904,15 +3922,15 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co - V2: new inputs and new outputs to random hidden */ - Log::info("starting transfer learning versions\n"); + LOG_INFO("starting transfer learning versions\n"); if (transfer_learning_version.compare("v1") != 0 && transfer_learning_version.compare("v2") != 0 && transfer_learning_version.compare("v1+v2") != 0) { - Log::fatal("ERROR: unknown transfer learning version specified, '%s', options are:\n", transfer_learning_version.c_str()); - Log::fatal("v1: connects all new inputs to all outputs and all new outputs to all inputs\n"); - Log::fatal("v2: randomly connects all new inputs and outputs to hidden nodes\n"); - Log::fatal("v1+v2: does both v1 and v2\n"); + LOG_FATAL("ERROR: unknown transfer learning version specified, '%s', options are:\n", transfer_learning_version.c_str()); + LOG_FATAL("v1: connects all new inputs to all outputs and all new outputs to all inputs\n"); + LOG_FATAL("v2: randomly connects all new inputs and outputs to hidden nodes\n"); + LOG_FATAL("v1+v2: does both v1 and v2\n"); exit(1); } @@ -3921,10 +3939,10 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co sort_recurrent_edges_by_depth(); if (transfer_learning_version.compare("v1") == 0 || transfer_learning_version.compare("v1+v2") == 0) { - Log::info("doing transfer v1\n"); + LOG_INFO("doing transfer v1\n"); for (int i = 0; i < new_input_nodes.size(); i++) { if (!new_inputs[i]) continue; - Log::info("adding connections for new input node[%d] '%s'\n", i, new_input_nodes[i]->parameter_name.c_str()); + LOG_INFO("adding connections for new input node[%d] '%s'\n", i, new_input_nodes[i]->parameter_name.c_str()); for (int j = 0; j < 
new_output_nodes.size(); j++) { attempt_edge_insert(new_input_nodes[i], new_output_nodes[j], mu, sigma, edge_innovation_count); @@ -3933,7 +3951,7 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co for (int i = 0; i < new_output_nodes.size(); i++) { if (!new_outputs[i]) continue; - Log::info("adding connections for new output node[%d] '%s'\n", i, new_output_nodes[i]->parameter_name.c_str()); + LOG_INFO("adding connections for new output node[%d] '%s'\n", i, new_output_nodes[i]->parameter_name.c_str()); for (int j = 0; j < new_input_nodes.size(); j++) { attempt_edge_insert(new_input_nodes[j], new_output_nodes[i], mu, sigma, edge_innovation_count); @@ -3946,27 +3964,27 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co uniform_int_distribution rec_depth_dist(min_recurrent_depth, max_recurrent_depth); if (transfer_learning_version.compare("v2") == 0 || transfer_learning_version.compare("v1+v2") == 0) { - Log::info("doing transfer v2\n"); + LOG_INFO("doing transfer v2\n"); for (auto node : new_input_nodes) { - Log::debug("BEFORE -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); + LOG_DEBUG("BEFORE -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); connect_new_input_node(mu, sigma, node, rec_depth_dist, edge_innovation_count); - Log::debug("AFTER -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); + LOG_DEBUG("AFTER -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); } for (auto node : new_output_nodes) { - Log::debug("BEFORE -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); + LOG_DEBUG("BEFORE -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); connect_new_output_node(mu, sigma, node, rec_depth_dist, edge_innovation_count); - Log::debug("AFTER -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); + LOG_DEBUG("AFTER -- CHECK EDGE INNOVATION COUNT: %d\n", edge_innovation_count); } } - Log::info("adding new_input_nodes and new_output_nodes to 
nodes\n"); + LOG_INFO("adding new_input_nodes and new_output_nodes to nodes\n"); //add the new input and new output nodes back into the genome's node vector nodes.insert(nodes.begin(), new_input_nodes.begin(), new_input_nodes.end()); nodes.insert(nodes.end(), new_output_nodes.begin(), new_output_nodes.end()); - Log::info("assigning reachability\n"); + LOG_INFO("assigning reachability\n"); //need to recalculate the reachability of each node assign_reachability(); @@ -3975,40 +3993,40 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co //need to make sure that each input and each output has at least one connection for (auto node : nodes) { - Log::info("node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", node->get_innovation_number(), node->get_depth(), node->get_total_inputs(), node->get_total_outputs()); + LOG_INFO("node[%d], depth: %lf, total_inputs: %d, total_outputs: %d\n", node->get_innovation_number(), node->get_depth(), node->get_total_inputs(), node->get_total_outputs()); if (node->get_layer_type() == INPUT_LAYER) { if (node->get_total_outputs() == 0) { - Log::info("input node[%d] had no outputs, connecting it!\n", node->get_innovation_number()); + LOG_INFO("input node[%d] had no outputs, connecting it!\n", node->get_innovation_number()); //if an input has no outgoing edges randomly connect it connect_new_input_node(mu, sigma, node, rec_depth_dist, edge_innovation_count); } } else if (node->get_layer_type() == OUTPUT_LAYER) { if (node->get_total_inputs() == 0) { - Log::info("output node[%d] had no inputs, connecting it!\n", node->get_innovation_number()); + LOG_INFO("output node[%d] had no inputs, connecting it!\n", node->get_innovation_number()); //if an output has no incoming edges randomly connect it connect_new_output_node(mu, sigma, node, rec_depth_dist, edge_innovation_count); } } } - Log::info("assigning reachability again\n"); + LOG_INFO("assigning reachability again\n"); //update the reachabaility again 
assign_reachability(); - Log::info("new_parameters.size() before get weights: %d\n", initial_parameters.size()); + LOG_INFO("new_parameters.size() before get weights: %d\n", initial_parameters.size()); //update the new and best parameter lengths because this will have added edges vector updated_genome_parameters; get_weights(updated_genome_parameters); if (!epigenetic_weights) { - Log::info("resetting genome parameters to randomly betwen -0.5 and 0.5\n"); + LOG_INFO("resetting genome parameters to randomly betwen -0.5 and 0.5\n"); for (int i = 0; i < updated_genome_parameters.size(); i++) { updated_genome_parameters[i] = rng_0_1(generator) - 0.5; } } else { - Log::info("not resetting weights\n"); + LOG_INFO("not resetting weights\n"); } set_initial_parameters( updated_genome_parameters ); set_best_parameters( updated_genome_parameters ); @@ -4017,10 +4035,10 @@ void RNN_Genome::transfer_to(const vector &new_input_parameter_names, co best_validation_mae = EXAMM_MAX_DOUBLE; get_mu_sigma(best_parameters, mu, sigma); - Log::info("after transfer, mu: %lf, sigma: %lf\n", mu, sigma); + LOG_INFO("after transfer, mu: %lf, sigma: %lf\n", mu, sigma); //make sure we don't duplicate new node/edge innovation numbers - Log::info("new_parameters.size() after get weights: %d\n", updated_genome_parameters.size()); + LOG_INFO("new_parameters.size() after get weights: %d\n", updated_genome_parameters.size()); - Log::info("FINISHING PREPARING INITIAL GENOME\n"); + LOG_INFO("FINISHING PREPARING INITIAL GENOME\n"); } diff --git a/rnn/rnn_node.cxx b/rnn/rnn_node.cxx index f56102e3..dd7cf23c 100644 --- a/rnn/rnn_node.cxx +++ b/rnn/rnn_node.cxx @@ -12,7 +12,7 @@ RNN_Node::RNN_Node(int _innovation_number, int _layer_type, double _depth, int _ //node type will be simple, jordan or elman node_type = _node_type; - Log::trace("created node: %d, layer type: %d, node type: %d\n", innovation_number, layer_type, node_type); + LOG_TRACE("created node: %d, layer type: %d, node type: %d\n", 
innovation_number, layer_type, node_type); } @@ -20,7 +20,7 @@ RNN_Node::RNN_Node(int _innovation_number, int _layer_type, double _depth, int _ //node type will be simple, jordan or elman node_type = _node_type; - Log::trace("created node: %d, layer type: %d, node type: %d\n", innovation_number, layer_type, node_type); + LOG_TRACE("created node: %d, layer type: %d, node type: %d\n", innovation_number, layer_type, node_type); } @@ -52,11 +52,11 @@ void RNN_Node::input_fired(int time, double incoming_output) { if (inputs_fired[time] < total_inputs) return; else if (inputs_fired[time] > total_inputs) { - Log::fatal("ERROR: inputs_fired on RNN_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on RNN_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } - //Log::trace("node %d - input value[%d]: %lf\n", innovation_number, time, input_values[time]); + //LOG_TRACE("node %d - input value[%d]: %lf\n", innovation_number, time, input_values[time]); output_values[time] = tanh(input_values[time] + bias); ld_output[time] = tanh_derivative(output_values[time]); @@ -66,9 +66,9 @@ void RNN_Node::input_fired(int time, double incoming_output) { #ifdef NAN_CHECKS if (isnan(output_values[time]) || isinf(output_values[time])) { - Log::fatal("ERROR: output_value[%d] becaome %lf on RNN node: %d\n", time, output_values[time], innovation_number); - Log::fatal("\tinput_value[%dd]: %lf\n", time, input_values[time]); - Log::Fatal("\tnode bias: %lf", bias); + LOG_FATAL("ERROR: output_value[%d] becaome %lf on RNN node: %d\n", time, output_values[time], innovation_number); + LOG_FATAL("\tinput_value[%dd]: %lf\n", time, input_values[time]); + LOG_FATAL("\tnode bias: %lf", bias); exit(1); } #endif @@ -77,7 +77,7 @@ void RNN_Node::input_fired(int time, double incoming_output) { void RNN_Node::try_update_deltas(int time) { if 
(outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on RNN_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on RNN_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -89,7 +89,7 @@ void RNN_Node::try_update_deltas(int time) { void RNN_Node::error_fired(int time, double error) { outputs_fired[time]++; - //Log::trace("error fired at time: %d on node %d, d_input: %lf, ld_output %lf, error_values: %lf, output_values: %lf\n", time, innovation_number, d_input[time], ld_output[time], error_values[time], output_values[time]); + //LOG_TRACE("error fired at time: %d on node %d, d_input: %lf, ld_output %lf, error_values: %lf, output_values: %lf\n", time, innovation_number, d_input[time], ld_output[time], error_values[time], output_values[time]); d_input[time] += error_values[time] * error; diff --git a/rnn/rnn_node_interface.cxx b/rnn/rnn_node_interface.cxx index b1f0b06b..29e40c21 100644 --- a/rnn/rnn_node_interface.cxx +++ b/rnn/rnn_node_interface.cxx @@ -80,7 +80,7 @@ RNN_Node_Interface::RNN_Node_Interface(int32_t _innovation_number, int32_t _laye //outputs don't have an official output node but //deltas are passed in via the output_fired method if (layer_type != HIDDEN_LAYER) { - Log::fatal("ERROR: Attempted to create a new RNN_Node that was an input or output node without using the constructor which specifies it's parameter name"); + LOG_FATAL("ERROR: Attempted to create a new RNN_Node that was an input or output node without using the constructor which specifies it's parameter name"); exit(1); } } @@ -93,7 +93,7 @@ RNN_Node_Interface::RNN_Node_Interface(int32_t _innovation_number, int32_t _laye backward_reachable = false; if (layer_type == HIDDEN_LAYER) { - Log::fatal("ERROR: assigned a parameter name '%s' to a hidden 
node! This should never happen.", parameter_name.c_str()); + LOG_FATAL("ERROR: assigned a parameter name '%s' to a hidden node! This should never happen.", parameter_name.c_str()); exit(1); } diff --git a/rnn/rnn_recurrent_edge.cxx b/rnn/rnn_recurrent_edge.cxx index c22ce8bd..67927610 100644 --- a/rnn/rnn_recurrent_edge.cxx +++ b/rnn/rnn_recurrent_edge.cxx @@ -7,10 +7,10 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu recurrent_depth = _recurrent_depth; if (recurrent_depth <= 0) { - Log::fatal( "ERROR, trying to create a recurrent edge with recurrent depth <= 0\n"); - Log::fatal("innovation number: %d\n", innovation_number); - Log::fatal("input_node->innovation_number: %d\n", input_node->get_innovation_number()); - Log::fatal("output_node->innovation_number: %d\n", output_node->get_innovation_number()); + LOG_FATAL( "ERROR, trying to create a recurrent edge with recurrent depth <= 0\n"); + LOG_FATAL("innovation number: %d\n", innovation_number); + LOG_FATAL("input_node->innovation_number: %d\n", input_node->get_innovation_number()); + LOG_FATAL("output_node->innovation_number: %d\n", output_node->get_innovation_number()); exit(1); } @@ -27,7 +27,7 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu forward_reachable = true; backward_reachable = true; - Log::debug("\t\tcreated recurrent edge %d from %d to %d\n", innovation_number, input_innovation_number, output_innovation_number); + LOG_DEBUG("\t\tcreated recurrent edge %d from %d to %d\n", innovation_number, input_innovation_number, output_innovation_number); } RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recurrent_depth, int32_t _input_innovation_number, int32_t _output_innovation_number, const vector &nodes) { @@ -38,10 +38,10 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu output_innovation_number = _output_innovation_number; if (recurrent_depth <= 0) { - Log::fatal( "ERROR, 
trying to create a recurrent edge with recurrent depth <= 0\n"); - Log::fatal("innovation number: %d\n", innovation_number); - Log::fatal("input_node->innovation_number: %d\n", input_node->get_innovation_number()); - Log::fatal("output_node->innovation_number: %d\n", output_node->get_innovation_number()); + LOG_FATAL( "ERROR, trying to create a recurrent edge with recurrent depth <= 0\n"); + LOG_FATAL("innovation number: %d\n", innovation_number); + LOG_FATAL("input_node->innovation_number: %d\n", input_node->get_innovation_number()); + LOG_FATAL("output_node->innovation_number: %d\n", output_node->get_innovation_number()); exit(1); } @@ -50,7 +50,7 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu for (int32_t i = 0; i < nodes.size(); i++) { if (nodes[i]->innovation_number == _input_innovation_number) { if (input_node != NULL) { - Log::fatal("ERROR in copying RNN_Recurrent_Edge, list of nodes has multiple nodes with same input_innovation_number -- this should never happen.\n"); + LOG_FATAL("ERROR in copying RNN_Recurrent_Edge, list of nodes has multiple nodes with same input_innovation_number -- this should never happen.\n"); exit(1); } @@ -59,7 +59,7 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu if (nodes[i]->innovation_number == _output_innovation_number) { if (output_node != NULL) { - Log::fatal("ERROR in copying RNN_Recurrent_Edge, list of nodes has multiple nodes with same output_innovation_number -- this should never happen.\n"); + LOG_FATAL("ERROR in copying RNN_Recurrent_Edge, list of nodes has multiple nodes with same output_innovation_number -- this should never happen.\n"); exit(1); } @@ -68,12 +68,12 @@ RNN_Recurrent_Edge::RNN_Recurrent_Edge(int32_t _innovation_number, int32_t _recu } if (input_node == NULL) { - Log::fatal("ERROR initializing RNN_Edge, input node with innovation number; %d was not found!\n", input_innovation_number); + LOG_FATAL("ERROR initializing RNN_Edge, input 
node with innovation number; %d was not found!\n", input_innovation_number); exit(1); } if (output_node == NULL) { - Log::fatal("ERROR initializing RNN_Edge, output node with innovation number; %d was not found!\n", output_innovation_number); + LOG_FATAL("ERROR initializing RNN_Edge, output node with innovation number; %d was not found!\n", output_innovation_number); exit(1); } } @@ -130,14 +130,14 @@ void RNN_Recurrent_Edge::first_propagate_forward() { void RNN_Recurrent_Edge::propagate_forward(int32_t time) { if (input_node->inputs_fired[time] != input_node->total_inputs) { - Log::fatal("ERROR! propagate forward called on recurrent edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); + LOG_FATAL("ERROR! propagate forward called on recurrent edge %d where input_node->inputs_fired[%d] (%d) != total_inputs (%d)\n", innovation_number, time, input_node->inputs_fired[time], input_node->total_inputs); exit(1); } double output = input_node->output_values[time] * weight; if (time < series_length - recurrent_depth) { - //Log::trace("propagating forward on recurrent edge %d from time %d to time %d from node %d to node %d\n", innovation_number, time, time + recurrent_depth, input_innovation_number, output_innovation_number); + //LOG_TRACE("propagating forward on recurrent edge %d from time %d to time %d from node %d to node %d\n", innovation_number, time, time + recurrent_depth, input_innovation_number, output_innovation_number); outputs[time + recurrent_depth] = output; output_node->input_fired(time + recurrent_depth, output); @@ -148,7 +148,7 @@ void RNN_Recurrent_Edge::propagate_forward(int32_t time) { //output fireds are correct void RNN_Recurrent_Edge::first_propagate_backward() { for (uint32_t i = 0; i < recurrent_depth; i++) { - //Log::trace("FIRST propagating backward on recurrent edge %d to time %d from node %d to node %d\n", innovation_number, series_length - 1 - 
i, output_innovation_number, input_innovation_number); + //LOG_TRACE("FIRST propagating backward on recurrent edge %d to time %d from node %d to node %d\n", innovation_number, series_length - 1 - i, output_innovation_number, input_innovation_number); input_node->output_fired(series_length - 1 - i, 0.0); } } @@ -160,15 +160,15 @@ void RNN_Recurrent_Edge::propagate_backward(int32_t time) { /* if (output_node->outputs_fired[time] != (output_node->total_outputs - 1)) { - Log::fatal("ERROR! propagate backward called on recurrent edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); - Log::fatal("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); + LOG_FATAL("ERROR! propagate backward called on recurrent edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); + LOG_FATAL("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); exit(1); } */ //} else { - Log::fatal("ERROR! propagate backward called on recurrent edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); - Log::fatal("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); + LOG_FATAL("ERROR! 
propagate backward called on recurrent edge %d where output_node->outputs_fired[%d] (%d) != total_outputs (%d)\n", innovation_number, time, output_node->outputs_fired[time], output_node->total_outputs); + LOG_FATAL("input innovation number: %d, output innovation number: %d\n", input_node->innovation_number, output_node->innovation_number); exit(1); //} } @@ -176,7 +176,7 @@ void RNN_Recurrent_Edge::propagate_backward(int32_t time) { double delta = output_node->d_input[time]; if (time - recurrent_depth >= 0) { - //Log::trace("propagating backward on recurrent edge %d from time %d to time %d from node %d to node %d\n", innovation_number, time, time - recurrent_depth, output_innovation_number, input_innovation_number); + //LOG_TRACE("propagating backward on recurrent edge %d from time %d to time %d from node %d to node %d\n", innovation_number, time, time - recurrent_depth, output_innovation_number, input_innovation_number); d_weight += delta * input_node->output_values[time - recurrent_depth]; deltas[time] = delta * weight; diff --git a/rnn/species.cxx b/rnn/species.cxx index 6a7d0d52..d0fd9627 100644 --- a/rnn/species.cxx +++ b/rnn/species.cxx @@ -91,7 +91,7 @@ void Species::copy_two_random_genomes(uniform_real_distribution &rng_0_1 //inserts a copy of the genome, caller of the function will need to delete their //pointer int32_t Species::insert_genome(RNN_Genome *genome) { - Log::info("inserting genome with fitness: %s to species %d\n", parse_fitness(genome->get_fitness()).c_str(), id); + LOG_INFO("inserting genome with fitness: %s to species %d\n", parse_fitness(genome->get_fitness()).c_str(), id); // inorder insert the new individual RNN_Genome *copy = genome->copy(); @@ -111,7 +111,7 @@ int32_t Species::insert_genome(RNN_Genome *genome) { if (insert_index == 0) { // this was a new best genome for this island - Log::info("new best fitness for island: %d!\n", id); + LOG_INFO("new best fitness for island: %d!\n", id); if (genome->get_fitness() != EXAMM_MAX_DOUBLE) 
{ // need to set the weights for non-initial genomes so we // can generate a proper graphviz file @@ -125,14 +125,14 @@ int32_t Species::insert_genome(RNN_Genome *genome) { inserted_genome_id.push_back( copy->get_generation_id()); - Log::info("Inserted genome %d at index %d\n", genome->get_generation_id(), insert_index); + LOG_INFO("Inserted genome %d at index %d\n", genome->get_generation_id(), insert_index); return insert_index; } void Species::print(string indent) { - Log::info("%s\t%s\n", indent.c_str(), RNN_Genome::print_statistics_header().c_str()); + LOG_INFO("%s\t%s\n", indent.c_str(), RNN_Genome::print_statistics_header().c_str()); for (int32_t i = 0; i < genomes.size(); i++) { - Log::info("%s\t%s\n", indent.c_str(), genomes[i]->print_statistics().c_str()); + LOG_INFO("%s\t%s\n", indent.c_str(), genomes[i]->print_statistics().c_str()); } } @@ -207,7 +207,7 @@ void Species::fitness_sharing_remove(double fitness_threshold, function total_inputs) { - Log::fatal("ERROR: inputs_fired on UGRNN_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); + LOG_FATAL("ERROR: inputs_fired on UGRNN_Node %d at time %d is %d and total_inputs is %d\n", innovation_number, time, inputs_fired[time], total_inputs); exit(1); } @@ -146,7 +146,7 @@ void UGRNN_Node::input_fired(int time, double incoming_output) { void UGRNN_Node::try_update_deltas(int time) { if (outputs_fired[time] < total_outputs) return; else if (outputs_fired[time] > total_outputs) { - Log::fatal("ERROR: outputs_fired on UGRNN_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); + LOG_FATAL("ERROR: outputs_fired on UGRNN_Node %d at time %d is %d and total_outputs is %d\n", innovation_number, time, outputs_fired[time], total_outputs); exit(1); } @@ -233,7 +233,7 @@ void UGRNN_Node::set_weights(uint32_t &offset, const vector ¶meters) g_bias = bound(parameters[offset++]); //uint32_t end_offset = 
offset; - //Log::debug("set weights from offset %d to %d on UGRNN_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_DEBUG("set weights from offset %d to %d on UGRNN_Node %d\n", start_offset, end_offset, innovation_number); } void UGRNN_Node::get_weights(uint32_t &offset, vector ¶meters) const { @@ -248,7 +248,7 @@ void UGRNN_Node::get_weights(uint32_t &offset, vector ¶meters) const parameters[offset++] = g_bias; //uint32_t end_offset = offset; - //Log::debug("got weights from offset %d to %d on UGRNN_Node %d\n", start_offset, end_offset, innovation_number); + //LOG_DEBUG("got weights from offset %d to %d on UGRNN_Node %d\n", start_offset, end_offset, innovation_number); } diff --git a/rnn_examples/evaluate_rnn.cxx b/rnn_examples/evaluate_rnn.cxx index 441e6dce..d8249789 100644 --- a/rnn_examples/evaluate_rnn.cxx +++ b/rnn_examples/evaluate_rnn.cxx @@ -48,7 +48,7 @@ int main(int argc, char** argv) { get_argument_vector(arguments, "--testing_filenames", true, testing_filenames); TimeSeriesSets *time_series_sets = TimeSeriesSets::generate_test(testing_filenames, genome->get_input_parameter_names(), genome->get_output_parameter_names()); - Log::debug("got time series sets.\n"); + LOG_DEBUG("got time series sets.\n"); string normalize_type = genome->get_normalize_type(); if (normalize_type.compare("min_max") == 0) { @@ -57,7 +57,7 @@ int main(int argc, char** argv) { time_series_sets->normalize_avg_std_dev(genome->get_normalize_avgs(), genome->get_normalize_std_devs(), genome->get_normalize_mins(), genome->get_normalize_maxs()); } - Log::info("normalized type: %s \n", normalize_type.c_str()); + LOG_INFO("normalized type: %s \n", normalize_type.c_str()); int32_t time_offset = 1; get_argument(arguments, "--time_offset", true, time_offset); @@ -65,23 +65,23 @@ int main(int argc, char** argv) { time_series_sets->export_test_series(time_offset, testing_inputs, testing_outputs); vector best_parameters = genome->get_best_parameters(); - Log::info("MSE: %lf\n", 
genome->get_mse(best_parameters, testing_inputs, testing_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); genome->write_predictions(output_directory, testing_filenames, best_parameters, testing_inputs, testing_outputs, time_series_sets); - if (Log::at_level(Log::DEBUG)) { + if (Log::at_level(LOG_LEVEL_DEBUG)) { int length; char *byte_array; genome->write_to_array(&byte_array, length); - Log::debug("WROTE TO BYTE ARRAY WITH LENGTH: %d\n", length); + LOG_DEBUG("WROTE TO BYTE ARRAY WITH LENGTH: %d\n", length); RNN_Genome *duplicate_genome = new RNN_Genome(byte_array, length); vector best_parameters_2 = duplicate_genome->get_best_parameters(); - Log::debug("duplicate MSE: %lf\n", duplicate_genome->get_mse(best_parameters_2, testing_inputs, testing_outputs)); - Log::debug("duplicate MAE: %lf\n", duplicate_genome->get_mae(best_parameters_2, testing_inputs, testing_outputs)); + LOG_DEBUG("duplicate MSE: %lf\n", duplicate_genome->get_mse(best_parameters_2, testing_inputs, testing_outputs)); + LOG_DEBUG("duplicate MAE: %lf\n", duplicate_genome->get_mae(best_parameters_2, testing_inputs, testing_outputs)); duplicate_genome->write_predictions(output_directory, testing_filenames, best_parameters_2, testing_inputs, testing_outputs, time_series_sets); } diff --git a/rnn_examples/evaluate_rnns_multi_offset.cxx b/rnn_examples/evaluate_rnns_multi_offset.cxx index e09484ad..844a2014 100644 --- a/rnn_examples/evaluate_rnns_multi_offset.cxx +++ b/rnn_examples/evaluate_rnns_multi_offset.cxx @@ -60,7 +60,7 @@ int main(int argc, char** argv) { vector genomes; for (int32_t i = 0; i < genome_filenames.size(); i++) { - Log::info("reading genome filename: %s\n", genome_filenames[i].c_str()); + LOG_INFO("reading genome filename: %s\n", 
genome_filenames[i].c_str()); genomes.push_back(new RNN_Genome(genome_filenames[i])); } @@ -68,7 +68,7 @@ int main(int argc, char** argv) { get_argument_vector(arguments, "--time_offsets", true, time_offsets); if (time_offsets.size() != genome_filenames.size()) { - Log::fatal("ERROR: number of time_offsets (%d) != number of genome_files: (%d)\n", time_offsets.size(), genome_filenames.size()); + LOG_FATAL("ERROR: number of time_offsets (%d) != number of genome_files: (%d)\n", time_offsets.size(), genome_filenames.size()); exit(1); } @@ -101,13 +101,13 @@ int main(int argc, char** argv) { time_series_sets->export_series_by_name(output_parameter_name, full_series); all_series.push_back(full_series[0]); - Log::debug("output_parameter_name: %s, full_series.size(): %d, full_series[0].size(): %d\n", output_parameter_name.c_str(), full_series.size(), full_series[0].size()); + LOG_DEBUG("output_parameter_name: %s, full_series.size(): %d, full_series[0].size(): %d\n", output_parameter_name.c_str(), full_series.size(), full_series[0].size()); delete time_series_sets; for (int32_t i = 0; i < genomes.size(); i++) { time_series_sets = TimeSeriesSets::generate_test(testing_filenames, genomes[i]->get_input_parameter_names(), genomes[i]->get_output_parameter_names()); - Log::debug("got time series sets.\n"); + LOG_DEBUG("got time series sets.\n"); string normalize_type = genomes[i]->get_normalize_type(); if (normalize_type.compare("min_max") == 0) { time_series_sets->normalize_min_max(genomes[i]->get_normalize_mins(), genomes[i]->get_normalize_maxs()); @@ -115,24 +115,24 @@ int main(int argc, char** argv) { time_series_sets->normalize_avg_std_dev(genomes[i]->get_normalize_avgs(), genomes[i]->get_normalize_std_devs(), genomes[i]->get_normalize_mins(), genomes[i]->get_normalize_maxs()); } - Log::debug("normalized time series.\n"); + LOG_DEBUG("normalized time series.\n"); time_series_sets->export_test_series(time_offsets[i], testing_inputs, testing_outputs); vector best_parameters = 
genomes[i]->get_best_parameters(); - Log::info("MSE: %lf\n", genomes[i]->get_mse(best_parameters, testing_inputs, testing_outputs)); - Log::info("MAE: %lf\n", genomes[i]->get_mae(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MSE: %lf\n", genomes[i]->get_mse(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MAE: %lf\n", genomes[i]->get_mae(best_parameters, testing_inputs, testing_outputs)); vector< vector > predictions = genomes[i]->get_predictions(best_parameters, testing_inputs, testing_outputs); - Log::debug("predictions.size(): %d\n", predictions.size()); + LOG_DEBUG("predictions.size(): %d\n", predictions.size()); if (predictions.size() != 1) { - Log::fatal("ERROR: had more than one testing file, currently only one supported.\n"); + LOG_FATAL("ERROR: had more than one testing file, currently only one supported.\n"); exit(1); } - Log::debug("genomes[%d] had %d outputs.\n", i, predictions[0].size()); + LOG_DEBUG("genomes[%d] had %d outputs.\n", i, predictions[0].size()); all_series.push_back(predictions[0]); @@ -148,10 +148,10 @@ int main(int argc, char** argv) { } outfile << endl; - Log::debug("all_series.size(): %d\n", all_series.size()); + LOG_DEBUG("all_series.size(): %d\n", all_series.size()); for (int32_t row = 0; row < all_series[0].size(); row++) { for (int32_t i = 0; i < all_series.size(); i++) { - Log::debug("all_series[%d].size(): %d\n", i, all_series[i].size()); + LOG_DEBUG("all_series[%d].size(): %d\n", i, all_series[i].size()); if (i == 0) outfile << all_series[0][row]; else { diff --git a/rnn_examples/jordan_rnn.cxx b/rnn_examples/jordan_rnn.cxx index 4ae087fc..39511332 100644 --- a/rnn_examples/jordan_rnn.cxx +++ b/rnn_examples/jordan_rnn.cxx @@ -80,7 +80,7 @@ int main(int argc, char **argv) { vector input_parameter_names = time_series_sets->get_input_parameter_names(); vector output_parameter_names = time_series_sets->get_output_parameter_names(); - Log::info("creating jordan neural network with inputs: %d, hidden: 
%dx%d, outputs: %d, max input lags: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_input_lags, max_recurrent_depth); + LOG_INFO("creating jordan neural network with inputs: %d, hidden: %dx%d, outputs: %d, max input lags: %d, max recurrent depth: %d\n", input_parameter_names.size(), number_hidden_layers, number_hidden_nodes, output_parameter_names.size(), max_input_lags, max_recurrent_depth); vector rnn_nodes; vector output_layer; vector< vector > layer_nodes(2 + number_hidden_layers); @@ -145,7 +145,7 @@ int main(int argc, char **argv) { uint32_t number_of_weights = genome->get_number_weights(); - Log::info("RNN has %d weights.\n", number_of_weights); + LOG_INFO("RNN has %d weights.\n", number_of_weights); vector min_bound(number_of_weights, -1.0); vector max_bound(number_of_weights, 1.0); @@ -185,15 +185,15 @@ int main(int argc, char **argv) { genome->write_to_file(output_filename); genome->get_weights(best_parameters); - Log::info("best test MSE: %lf\n", genome->get_fitness()); + LOG_INFO("best test MSE: %lf\n", genome->get_fitness()); rnn->set_weights(best_parameters); - Log::info("TRAINING ERRORS:\n"); - Log::info("MSE: %lf\n", genome->get_mse(best_parameters, training_inputs, training_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, training_inputs, training_outputs)); + LOG_INFO("TRAINING ERRORS:\n"); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, training_inputs, training_outputs)); + LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, training_inputs, training_outputs)); - Log::info("TEST ERRORS:"); - Log::info("MSE: %lf\n", genome->get_mse(best_parameters, test_inputs, test_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, test_inputs, test_outputs)); + LOG_INFO("TEST ERRORS:"); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, test_inputs, test_outputs)); + LOG_INFO("MAE: %lf\n", 
genome->get_mae(best_parameters, test_inputs, test_outputs)); Log::release_id("main"); } diff --git a/rnn_examples/rnn_heatmap.cxx b/rnn_examples/rnn_heatmap.cxx index 95004058..89930c65 100644 --- a/rnn_examples/rnn_heatmap.cxx +++ b/rnn_examples/rnn_heatmap.cxx @@ -54,37 +54,37 @@ int main(int argc, char** argv) { int32_t time_offset = 1; get_argument(arguments, "--time_offset", true, time_offset); - Log::info("input directory: '%s'\n", input_directory.c_str()); - Log::info("testing directory: '%s'\n", testing_directory.c_str()); - Log::info("output directory: '%s'\n", output_directory.c_str()); + LOG_INFO("input directory: '%s'\n", input_directory.c_str()); + LOG_INFO("testing directory: '%s'\n", testing_directory.c_str()); + LOG_INFO("output directory: '%s'\n", output_directory.c_str()); string output_filename = output_directory + "heatmap_output.csv"; ofstream output_file(output_filename); for (int cyclone = 1; cyclone <= 12; cyclone++) { string cyclone_directory = input_directory + "cyclone_" + to_string(cyclone); - Log::info("analyzing cyclone %d with directory: '%s'\n", cyclone, cyclone_directory.c_str()); + LOG_INFO("analyzing cyclone %d with directory: '%s'\n", cyclone, cyclone_directory.c_str()); for (int target_cyclone = 1; target_cyclone <= 12; target_cyclone++) { double average_mae = 0.0; for (int repeat = 0; repeat < 20; repeat++) { string repeat_directory = cyclone_directory + "/" + to_string(repeat); - Log::trace("\tgetting genome file from repeat directory: '%s'\n", repeat_directory.c_str()); + LOG_TRACE("\tgetting genome file from repeat directory: '%s'\n", repeat_directory.c_str()); string genome_filename = ""; for (const auto &entry : fs::directory_iterator(repeat_directory)) { - Log::trace("\t\trepeat directory entry: '%s'\n", entry.path().c_str()); + LOG_TRACE("\t\trepeat directory entry: '%s'\n", entry.path().c_str()); string path = entry.path(); if (path.find("rnn_genome") != std::string::npos) { - Log::trace("\t\tgot genome file: '%s'\n", 
path.c_str()); + LOG_TRACE("\t\tgot genome file: '%s'\n", path.c_str()); genome_filename = path; break; } } - Log::info("\tgenome filename: '%s'\n", genome_filename.c_str()); + LOG_INFO("\tgenome filename: '%s'\n", genome_filename.c_str()); RNN_Genome *genome = new RNN_Genome(genome_filename); @@ -94,7 +94,7 @@ int main(int argc, char** argv) { testing_filenames.push_back(testing_filename); TimeSeriesSets *time_series_sets = TimeSeriesSets::generate_test(testing_filenames, genome->get_input_parameter_names(), genome->get_output_parameter_names()); - Log::debug("got time series sets.\n"); + LOG_DEBUG("got time series sets.\n"); string normalize_type = genome->get_normalize_type(); if (normalize_type.compare("min_max") == 0) { @@ -103,14 +103,14 @@ int main(int argc, char** argv) { time_series_sets->normalize_avg_std_dev(genome->get_normalize_avgs(), genome->get_normalize_std_devs(), genome->get_normalize_mins(), genome->get_normalize_maxs()); } - Log::info("normalized type: %s \n", normalize_type.c_str()); + LOG_INFO("normalized type: %s \n", normalize_type.c_str()); time_series_sets->export_test_series(time_offset, testing_inputs, testing_outputs); vector best_parameters = genome->get_best_parameters(); - //Log::info("MSE: %lf\n", genome->get_mse(best_parameters, testing_inputs, testing_outputs)); - //Log::info("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); + //LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, testing_inputs, testing_outputs)); + //LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); double mae = genome->get_mae(best_parameters, testing_inputs, testing_outputs); cout << "MAE: " << mae << endl; @@ -140,7 +140,7 @@ int main(int argc, char** argv) { get_argument_vector(arguments, "--testing_filenames", true, testing_filenames); TimeSeriesSets *time_series_sets = TimeSeriesSets::generate_test(testing_filenames, genome->get_input_parameter_names(), 
genome->get_output_parameter_names()); - Log::debug("got time series sets.\n"); + LOG_DEBUG("got time series sets.\n"); string normalize_type = genome->get_normalize_type(); if (normalize_type.compare("min_max") == 0) { @@ -149,7 +149,7 @@ int main(int argc, char** argv) { time_series_sets->normalize_avg_std_dev(genome->get_normalize_avgs(), genome->get_normalize_std_devs(), genome->get_normalize_mins(), genome->get_normalize_maxs()); } - Log::info("normalized type: %s \n", normalize_type.c_str()); + LOG_INFO("normalized type: %s \n", normalize_type.c_str()); int32_t time_offset = 1; get_argument(arguments, "--time_offset", true, time_offset); @@ -157,8 +157,8 @@ int main(int argc, char** argv) { time_series_sets->export_test_series(time_offset, testing_inputs, testing_outputs); vector best_parameters = genome->get_best_parameters(); - Log::info("MSE: %lf\n", genome->get_mse(best_parameters, testing_inputs, testing_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, testing_inputs, testing_outputs)); + LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, testing_inputs, testing_outputs)); genome->write_predictions(output_directory, testing_filenames, best_parameters, testing_inputs, testing_outputs, time_series_sets); */ diff --git a/rnn_examples/rnn_statistics.cxx b/rnn_examples/rnn_statistics.cxx index d50c3eab..4d48adb6 100644 --- a/rnn_examples/rnn_statistics.cxx +++ b/rnn_examples/rnn_statistics.cxx @@ -46,7 +46,7 @@ int main(int argc, char** argv) { double avg_weights = 0.0; for (int32_t i = 0; i < (int32_t)rnn_filenames.size(); i++) { - Log::info("reading file: %s\n", rnn_filenames[i].c_str()); + LOG_INFO("reading file: %s\n", rnn_filenames[i].c_str()); RNN_Genome *genome = new RNN_Genome(rnn_filenames[i]); int32_t nodes = genome->get_enabled_node_count(); @@ -54,9 +54,9 @@ int main(int argc, char** argv) { int32_t rec_edges = 
genome->get_enabled_recurrent_edge_count(); int32_t weights = genome->get_number_weights(); - Log::info("RNN INFO FOR '%s', nodes: %d, edges: %d, rec: %d, weights: %d\n", rnn_filenames[i].c_str(), nodes, edges, rec_edges, weights); - Log::info("\t%s\n", genome->print_statistics_header().c_str()); - Log::info("\t%s\n", genome->print_statistics().c_str()); + LOG_INFO("RNN INFO FOR '%s', nodes: %d, edges: %d, rec: %d, weights: %d\n", rnn_filenames[i].c_str(), nodes, edges, rec_edges, weights); + LOG_INFO("\t%s\n", genome->print_statistics_header().c_str()); + LOG_INFO("\t%s\n", genome->print_statistics().c_str()); avg_nodes += nodes; avg_edges += edges; @@ -69,7 +69,7 @@ int main(int argc, char** argv) { avg_rec_edges /= rnn_filenames.size(); avg_weights /= rnn_filenames.size(); - Log::info("AVG INFO, nodes: %d, edges: %d, rec: %d, weights: %d\n", avg_nodes, avg_edges, avg_rec_edges, avg_weights); + LOG_INFO("AVG INFO, nodes: %d, edges: %d, rec: %d, weights: %d\n", avg_nodes, avg_edges, avg_rec_edges, avg_weights); Log::release_id("main"); diff --git a/rnn_examples/train_rnn.cxx b/rnn_examples/train_rnn.cxx index f4b74e78..c8b18570 100644 --- a/rnn_examples/train_rnn.cxx +++ b/rnn_examples/train_rnn.cxx @@ -60,7 +60,7 @@ double test_objective_function(const vector ¶meters) { double error = rnn->prediction_mse(test_inputs[i], test_outputs[i], false, true, 0.0); total_error += error; - Log::info("output for series[%d]: %lf\n", i, error); + LOG_INFO("output for series[%d]: %lf\n", i, error); } return -total_error; @@ -124,14 +124,14 @@ int main(int argc, char **argv) { genome = create_elman(input_parameter_names, 1, number_inputs, output_parameter_names, max_recurrent_depth, weight_initialize); } else { - Log::fatal("ERROR: incorrect rnn type\n"); - Log::fatal("Possibilities are:\n"); - Log::fatal(" one_layer_lstm\n"); - Log::fatal(" two_layer_lstm\n"); - Log::fatal(" one_layer_gru\n"); - Log::fatal(" two_layer_gru\n"); - Log::fatal(" one_layer_ff\n"); - Log::fatal(" 
two_layer_ff\n"); + LOG_FATAL("ERROR: incorrect rnn type\n"); + LOG_FATAL("Possibilities are:\n"); + LOG_FATAL(" one_layer_lstm\n"); + LOG_FATAL(" two_layer_lstm\n"); + LOG_FATAL(" one_layer_gru\n"); + LOG_FATAL(" two_layer_gru\n"); + LOG_FATAL(" one_layer_ff\n"); + LOG_FATAL(" two_layer_ff\n"); exit(1); } @@ -142,7 +142,7 @@ int main(int argc, char **argv) { uint32_t number_of_weights = genome->get_number_weights(); - Log::info("RNN has %d weights.\n", number_of_weights); + LOG_INFO("RNN has %d weights.\n", number_of_weights); vector min_bound(number_of_weights, -1.0); vector max_bound(number_of_weights, 1.0); @@ -183,15 +183,15 @@ int main(int argc, char **argv) { } genome->get_weights(best_parameters); - Log::info("best test MSE: %lf\n", genome->get_fitness()); + LOG_INFO("best test MSE: %lf\n", genome->get_fitness()); rnn->set_weights(best_parameters); - Log::info("TRAINING ERRORS:\n"); - Log::info("MSE: %lf\n", genome->get_mse(best_parameters, training_inputs, training_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, training_inputs, training_outputs)); + LOG_INFO("TRAINING ERRORS:\n"); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, training_inputs, training_outputs)); + LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, training_inputs, training_outputs)); - Log::info("TEST ERRORS:"); - Log::info("MSE: %lf\n", genome->get_mse(best_parameters, test_inputs, test_outputs)); - Log::info("MAE: %lf\n", genome->get_mae(best_parameters, test_inputs, test_outputs)); + LOG_INFO("TEST ERRORS:"); + LOG_INFO("MSE: %lf\n", genome->get_mse(best_parameters, test_inputs, test_outputs)); + LOG_INFO("MAE: %lf\n", genome->get_mae(best_parameters, test_inputs, test_outputs)); Log::release_id("main"); } diff --git a/rnn_tests/gradient_test.cxx b/rnn_tests/gradient_test.cxx index e7031100..1dbff672 100644 --- a/rnn_tests/gradient_test.cxx +++ b/rnn_tests/gradient_test.cxx @@ -54,20 +54,20 @@ void gradient_test(string name, RNN_Genome *genome, 
const vector< vector vector parameters; vector analytic_gradient, empirical_gradient; - Log::info("\ttesting gradient on '%s'...\n", name.c_str()); + LOG_INFO("\ttesting gradient on '%s'...\n", name.c_str()); bool failed = false; RNN* rnn = genome->get_rnn(); - Log::debug("got genome \n"); + + LOG_DEBUG("got genome \n\n"); rnn->enable_use_regression(true); for (int32_t i = 0; i < test_iterations; i++) { - if (i == 0) Log::debug_no_header("\n"); - Log::debug("\tAttempt %d USING REGRESSION\n", i); + LOG_DEBUG("\tAttempt %d USING REGRESSION\n", i); generate_random_vector(rnn->get_number_weights(), parameters); - Log::debug("DEBUG: firing weights are %d \n", rnn->get_number_weights()); + LOG_DEBUG("DEBUG: firing weights are %d \n", rnn->get_number_weights()); rnn->get_analytic_gradient(parameters, inputs, outputs, analytic_mse, analytic_gradient, false, true, 0.0); rnn->get_empirical_gradient(parameters, inputs, outputs, empirical_mse, empirical_gradient, false, true, 0.0); @@ -80,28 +80,29 @@ void gradient_test(string name, RNN_Genome *genome, const vector< vector if (fabs(difference) > 10e-10) { failed = true; iteration_failed = true; - Log::info("\t\tFAILED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, REGRESSION\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); + LOG_INFO("\t\tFAILED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, REGRESSION\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); //exit(1); } else { - Log::debug("\t\tPASSED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, REGRESSION\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); + LOG_DEBUG("\t\tPASSED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, REGRESSION\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); } } if (iteration_failed) { - Log::info("\tITERATION %d FAILED!\n\n", i); + LOG_INFO("\tITERATION %d FAILED!\n\n", i); } 
else { - Log::debug("\tITERATION %d PASSED!\n\n", i); + LOG_DEBUG("\tITERATION %d PASSED!\n\n", i); } } rnn->enable_use_regression(false); + LOG_DEBUG("\n"); + for (int32_t i = 0; i < test_iterations; i++) { - if (i == 0) Log::debug_no_header("\n"); - Log::debug("\tAttempt %d USING SOFTMAX\n", i); + LOG_DEBUG("\tAttempt %d USING SOFTMAX\n", i); generate_random_vector(rnn->get_number_weights(), parameters); - Log::debug("DEBUG: firing weights are %d \n", rnn->get_number_weights()); + LOG_DEBUG("DEBUG: firing weights are %d \n", rnn->get_number_weights()); rnn->get_analytic_gradient(parameters, inputs, outputs, analytic_mse, analytic_gradient, false, true, 0.0); rnn->get_empirical_gradient(parameters, inputs, outputs, empirical_mse, empirical_gradient, false, true, 0.0); @@ -114,25 +115,25 @@ void gradient_test(string name, RNN_Genome *genome, const vector< vector if (fabs(difference) > 10e-10) { failed = true; iteration_failed = true; - Log::info("\t\tFAILED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, SOFTMAX\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); + LOG_INFO("\t\tFAILED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, SOFTMAX\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); //exit(1); } else { - Log::debug("\t\tPASSED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, SOFTMAX\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); + LOG_DEBUG("\t\tPASSED analytic gradient[%d]: %lf, empirical gradient[%d]: %lf, difference: %lf, SOFTMAX\n", j, analytic_gradient[j], j, empirical_gradient[j], difference); } } if (iteration_failed) { - Log::info("\tITERATION %d FAILED!\n\n", i); + LOG_INFO("\tITERATION %d FAILED!\n\n", i); } else { - Log::debug("\tITERATION %d PASSED!\n\n", i); + LOG_DEBUG("\tITERATION %d PASSED!\n\n", i); } } delete rnn; if (!failed) { - Log::info("ALL PASSED!\n"); + LOG_INFO("ALL PASSED!\n"); } else { - 
Log::info("SOME FAILED!\n"); + LOG_INFO("SOME FAILED!\n"); } } diff --git a/rnn_tests/test_delta_gradients.cxx b/rnn_tests/test_delta_gradients.cxx index 9c74349a..ae7277bf 100644 --- a/rnn_tests/test_delta_gradients.cxx +++ b/rnn_tests/test_delta_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING DELTA\n"); + LOG_INFO("TESTING DELTA\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,11 +55,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_elman_gradients.cxx b/rnn_tests/test_elman_gradients.cxx index 3f7d9510..8e964267 100644 --- a/rnn_tests/test_elman_gradients.cxx +++ b/rnn_tests/test_elman_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING ELMAN\n"); + LOG_INFO("TESTING ELMAN\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,11 +55,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - 
Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_enarc_gradients.cxx b/rnn_tests/test_enarc_gradients.cxx index f21d9f1d..5e789bd9 100644 --- a/rnn_tests/test_enarc_gradients.cxx +++ b/rnn_tests/test_enarc_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING ENARC\n"); + LOG_INFO("TESTING ENARC\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,12 +55,12 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_enas_dag_gradients.cxx b/rnn_tests/test_enas_dag_gradients.cxx index ccffa220..b2508c83 100644 --- a/rnn_tests/test_enas_dag_gradients.cxx +++ b/rnn_tests/test_enas_dag_gradients.cxx @@ -41,7 +41,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING ENAS_DAG\n"); + LOG_INFO("TESTING ENAS_DAG\n"); vector< vector > inputs; vector< vector > outputs; @@ -56,12 +56,12 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong 
\n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); @@ -78,7 +78,7 @@ int main(int argc, char **argv) { delete genome; genome = create_enas_dag(inputs1, 1, 1, outputs1, max_recurrent_depth, weight_initialize); - Log::debug("created enas dag\n"); + LOG_DEBUG("created enas dag\n"); gradient_test("ENAS_DAG: 1 Input, 1x1 Hidden, 1 Output", genome, inputs, outputs); delete genome; diff --git a/rnn_tests/test_feed_forward_gradients.cxx b/rnn_tests/test_feed_forward_gradients.cxx index d1f5952f..0aa4549c 100644 --- a/rnn_tests/test_feed_forward_gradients.cxx +++ b/rnn_tests/test_feed_forward_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING FEED FORWARD\n"); + LOG_INFO("TESTING FEED FORWARD\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,11 +55,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_gru_gradients.cxx b/rnn_tests/test_gru_gradients.cxx index 7ccbbbb1..6bb903b5 100644 --- a/rnn_tests/test_gru_gradients.cxx +++ b/rnn_tests/test_gru_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome 
*genome; - Log::info("TESTING GRU\n"); + LOG_INFO("TESTING GRU\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,11 +55,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_jordan_gradients.cxx b/rnn_tests/test_jordan_gradients.cxx index d600652e..fd9f8fdc 100644 --- a/rnn_tests/test_jordan_gradients.cxx +++ b/rnn_tests/test_jordan_gradients.cxx @@ -42,7 +42,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING JORDAN\n"); + LOG_INFO("TESTING JORDAN\n"); vector< vector > inputs; vector< vector > outputs; @@ -57,11 +57,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_lstm_gradients.cxx b/rnn_tests/test_lstm_gradients.cxx index 9c642d5c..97e53ba9 100644 --- 
a/rnn_tests/test_lstm_gradients.cxx +++ b/rnn_tests/test_lstm_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING LSTM\n"); + LOG_INFO("TESTING LSTM\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,12 +55,12 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/rnn_tests/test_mgu_gradients.cxx b/rnn_tests/test_mgu_gradients.cxx index 47ea4fd3..bd28a30f 100644 --- a/rnn_tests/test_mgu_gradients.cxx +++ b/rnn_tests/test_mgu_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING MGU\n"); + LOG_INFO("TESTING MGU\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,12 +55,12 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git 
a/rnn_tests/test_random_dag_gradients.cxx b/rnn_tests/test_random_dag_gradients.cxx index 2209185b..162a0391 100644 --- a/rnn_tests/test_random_dag_gradients.cxx +++ b/rnn_tests/test_random_dag_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING RANDOM_DAG\n"); + LOG_INFO("TESTING RANDOM_DAG\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,12 +55,12 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); @@ -77,7 +77,7 @@ int main(int argc, char **argv) { delete genome; genome = create_random_dag(inputs1, 1, 1, outputs1, max_recurrent_depth, weight_initialize); - Log::debug("created enas dag\n"); + LOG_DEBUG("created enas dag\n"); gradient_test("RANDOM_DAG: 1 Input, 1x1 Hidden, 1 Output", genome, inputs, outputs); delete genome; diff --git a/rnn_tests/test_ugrnn_gradients.cxx b/rnn_tests/test_ugrnn_gradients.cxx index ef80fa9e..78e4de16 100644 --- a/rnn_tests/test_ugrnn_gradients.cxx +++ b/rnn_tests/test_ugrnn_gradients.cxx @@ -40,7 +40,7 @@ int main(int argc, char **argv) { RNN_Genome *genome; - Log::info("TESTING UGRNN\n"); + LOG_INFO("TESTING UGRNN\n"); vector< vector > inputs; vector< vector > outputs; @@ -55,11 +55,11 @@ int main(int argc, char **argv) { weight_initialize = get_enum_from_string(weight_initialize_string); if (weight_initialize < 0 || weight_initialize >= NUM_WEIGHT_TYPES - 1) { - 
Log::fatal("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); + LOG_FATAL("weight initialization method %s is set wrong \n", weight_initialize_string.c_str()); } for (int32_t max_recurrent_depth = 1; max_recurrent_depth <= 5; max_recurrent_depth++) { - Log::info("testing with max recurrent depth: %d\n", max_recurrent_depth); + LOG_INFO("testing with max recurrent depth: %d\n", max_recurrent_depth); inputs.resize(1); outputs.resize(1); diff --git a/sample_script.sh b/sample_script.sh index 1fd25a9e..8dd842a1 100644 --- a/sample_script.sh +++ b/sample_script.sh @@ -57,7 +57,7 @@ # Sample script for examm: out_dir="./test_output/" mkdir -p $out_dir - mpirun -np 8 ./build/mpi/examm_mpi --training_filenames ./datasets/2018_coal/burner_[0-9].csv \ + mpirun -np 4 ./build/mpi/examm_mpi --training_filenames ./datasets/2018_coal/burner_[0-9].csv \ --test_filenames ./datasets/2018_coal/burner_1[0-1].csv \ --time_offset 1 \ --input_parameter_names Conditioner_Inlet_Temp Conditioner_Outlet_Temp Coal_Feeder_Rate Primary_Air_Flow Primary_Air_Split System_Secondary_Air_Flow_Total Secondary_Air_Flow Secondary_Air_Split Tertiary_Air_Split Total_Comb_Air_Flow Supp_Fuel_Flow Main_Flm_Int \ @@ -69,8 +69,8 @@ --bp_iterations 10 \ --output_directory $out_dir \ --possible_node_types simple UGRNN MGU GRU delta LSTM \ - --std_message_level INFO \ - --file_message_level INFO + --std_message_level DEBUG \ + --file_message_level DEBUG # Sample script for speciation repopulation: # out_dir="./test_output/" diff --git a/time_series/correlation_heatmap.cxx b/time_series/correlation_heatmap.cxx index 0a10411b..a143dbe0 100644 --- a/time_series/correlation_heatmap.cxx +++ b/time_series/correlation_heatmap.cxx @@ -31,7 +31,7 @@ int main(int argc, char** argv) { get_argument(arguments, "--output_directory", true, output_directory); TimeSeriesSets *time_series_sets = TimeSeriesSets::generate_from_arguments(arguments); - Log::debug("got time series sets.\n"); + 
LOG_DEBUG("got time series sets.\n"); int32_t max_lag = 0; get_argument(arguments, "--max_lag", true, max_lag); diff --git a/time_series/time_series.cxx b/time_series/time_series.cxx index 54e64fc4..5ab4acd6 100644 --- a/time_series/time_series.cxx +++ b/time_series/time_series.cxx @@ -81,7 +81,7 @@ void TimeSeries::calculate_statistics() { } void TimeSeries::print_statistics() { - Log::info("\t%25s stats, min: %lf, avg: %lf, max: %lf, min_change: %lf, max_change: %lf, std_dev: %lf, variance: %lf\n", name.c_str(), min, average, max, min_change, max_change, std_dev, variance); + LOG_INFO("\t%25s stats, min: %lf, avg: %lf, max: %lf, min_change: %lf, max_change: %lf, std_dev: %lf, variance: %lf\n", name.c_str(), min, average, max, min_change, max_change, std_dev, variance); } int TimeSeries::get_number_values() const { @@ -117,15 +117,15 @@ double TimeSeries::get_max_change() const { } void TimeSeries::normalize_min_max(double min, double max) { - Log::debug("normalizing time series '%s' with min: %lf and max: %lf, series min: %lf, series max: %lf\n", name.c_str(), min, max, this->min, this->max); + LOG_DEBUG("normalizing time series '%s' with min: %lf and max: %lf, series min: %lf, series max: %lf\n", name.c_str(), min, max, this->min, this->max); for (int i = 0; i < values.size(); i++) { if (values[i] < min) { - Log::warning("normalizing series %s, value[%d] %lf was less than min for normalization: %lf\n", name.c_str(), i, values[i], min); + LOG_WARNING("normalizing series %s, value[%d] %lf was less than min for normalization: %lf\n", name.c_str(), i, values[i], min); } if (values[i] > max) { - Log::warning("normalizing series %s, value[%d] %lf was greater than max for normalization: %lf\n", name.c_str(), i, values[i], max); + LOG_WARNING("normalizing series %s, value[%d] %lf was greater than max for normalization: %lf\n", name.c_str(), i, values[i], max); } values[i] = (values[i] - min) / (max - min); @@ -134,7 +134,7 @@ void TimeSeries::normalize_min_max(double 
min, double max) { //divide by the normalized max to make things between -1 and 1 void TimeSeries::normalize_avg_std_dev(double avg, double std_dev, double norm_max) { - Log::debug("normalizing time series '%s' with avg: %lf, std_dev: %lf and normalized max: %lf, series avg: %lf, series std_dev: %lf\n", name.c_str(), avg, std_dev, norm_max, this->average, this->std_dev); + LOG_DEBUG("normalizing time series '%s' with avg: %lf, std_dev: %lf and normalized max: %lf, series avg: %lf, series std_dev: %lf\n", name.c_str(), avg, std_dev, norm_max, this->average, this->std_dev); for (int i = 0; i < values.size(); i++) { values[i] = ((values[i] - avg) / std_dev) / norm_max; @@ -210,7 +210,7 @@ void TimeSeriesSet::add_time_series(string name) { if (time_series.count(name) == 0) { time_series[name] = new TimeSeries(name); } else { - Log::error("ERROR! Trying to add a time series to a time series set with name '%s' which already exists in the set!\n", name.c_str()); + LOG_ERROR("ERROR! Trying to add a time series to a time series set with name '%s' which already exists in the set!\n", name.c_str()); } } @@ -222,7 +222,7 @@ TimeSeriesSet::TimeSeriesSet(string _filename, const vector &_fields) { string line; if (!getline(ts_file, line)) { - Log::error("ERROR! Could not get headers from the CSV file. File potentially empty!\n"); + LOG_ERROR("ERROR! Could not get headers from the CSV file. 
File potentially empty!\n"); exit(1); } @@ -239,37 +239,43 @@ TimeSeriesSet::TimeSeriesSet(string _filename, const vector &_fields) { for (int32_t i = 0; i < (int32_t)fields.size(); i++) { if (find(file_fields.begin(), file_fields.end(), fields[i]) == file_fields.end()) { //one of the given fields didn't exist in the time series file - Log::fatal("ERROR: could not find specified field '%s' in time series file: '%s'\n", fields[i].c_str(), _filename.c_str()); - Log::fatal("file's fields:\n"); + LOG_FATAL("ERROR: could not find specified field '%s' in time series file: '%s'\n", fields[i].c_str(), _filename.c_str()); + LOG_FATAL("file's fields:\n"); for (int32_t j = 0; j < (int32_t)file_fields.size(); j++) { - Log::fatal("'%s'\n", file_fields[j].c_str()); + LOG_FATAL("'%s'\n", file_fields[j].c_str()); } exit(1); } } - Log::debug("fields.size(): %d, file_fields.size(): %d\n", fields.size(), file_fields.size()); + LOG_DEBUG("fields.size(): %d, file_fields.size(): %d\n", fields.size(), file_fields.size()); + + string log_str = ""; //specify which of the file fields (columns) are used vector file_fields_used(file_fields.size(), true); for (int32_t i = 0; i < (int32_t)file_fields.size(); i++) { - Log::debug("\tchecking to see if '%s' was in specified fields, file_fields_used[%d]: %d", file_fields[i].c_str(), i, file_fields_used[i]); + +// TODO: log_str = string_format("\tchecking to see if '%s' was in specified fields, file_fields_used[%d]: %d", file_fields[i].c_str(), i, file_fields_used[i]); if (find(fields.begin(), fields.end(), file_fields[i]) == fields.end()) { //the ith file field wasn't found in the specified fields - Log::debug_no_header(" -- field was not found!\n"); + log_str = log_str + " -- field was not found!\n"; file_fields_used[i] = false; } else { - Log::debug_no_header(" -- Field was found!\n"); + log_str = log_str + " -- Field was found!\n"; file_fields_used[i] = true; } + + LOG_DEBUG(log_str.c_str()); } + - Log::debug("number fields: %d\n", 
fields.size()); + LOG_DEBUG("number fields: %d\n", fields.size()); for (uint32_t i = 0; i < fields.size(); i++) { - Log::debug("\t%s used: %d\n", fields[i].c_str(), file_fields_used[i]); + LOG_DEBUG("\t%s used: %d\n", fields[i].c_str(), file_fields_used[i]); add_time_series(fields[i]); } @@ -285,19 +291,19 @@ TimeSeriesSet::TimeSeriesSet(string _filename, const vector &_fields) { string_split(line, ',', parts); if (parts.size() != file_fields.size()) { - Log::fatal("ERROR! number of values in row %d was %d, but there were %d fields in the header.\n", row, parts.size(), file_fields.size()); + LOG_FATAL("ERROR! number of values in row %d was %d, but there were %d fields in the header.\n", row, parts.size(), file_fields.size()); exit(1); } for (uint32_t i = 0; i < parts.size(); i++) { if (!file_fields_used[i]) continue; - Log::trace("parts[%d]: %s being added to '%s'\n", i, parts[i].c_str(), file_fields[i].c_str()); + LOG_TRACE("parts[%d]: %s being added to '%s'\n", i, parts[i].c_str(), file_fields[i].c_str()); try { time_series[ file_fields[i] ]->add_value( stod(parts[i]) ); } catch (const invalid_argument& ia) { - Log::error("file: '%s' -- invalid argument: '%s' on row %d and column %d: '%s', value: '%s'\n", filename.c_str(), ia.what(), row, i, file_fields[i].c_str(), parts[i].c_str()); + LOG_ERROR("file: '%s' -- invalid argument: '%s' on row %d and column %d: '%s', value: '%s'\n", filename.c_str(), ia.what(), row, i, file_fields[i].c_str(), parts[i].c_str()); } } @@ -306,15 +312,15 @@ TimeSeriesSet::TimeSeriesSet(string _filename, const vector &_fields) { number_rows = time_series.begin()->second->get_number_values(); if (number_rows <= 0) { - Log::fatal("ERROR, number rows: %d <= 0\n", number_rows); + LOG_FATAL("ERROR, number rows: %d <= 0\n", number_rows); exit(1); } for (auto series = time_series.begin(); series != time_series.end(); series++) { series->second->calculate_statistics(); if (series->second->get_min_change() == 0 && series->second->get_max_change() 
== 0) { - Log::warning("WARNING: unchanging series: '%s'\n", series->first.c_str()); - //Log::warning("removing unchanging series: '%s'\n", series->first.c_str()); + LOG_WARNING("WARNING: unchanging series: '%s'\n", series->first.c_str()); + //LOG_WARNING("removing unchanging series: '%s'\n", series->first.c_str()); series->second->print_statistics(); //time_series.erase(series); } else { @@ -324,11 +330,11 @@ TimeSeriesSet::TimeSeriesSet(string _filename, const vector &_fields) { int series_rows = series->second->get_number_values(); if (series_rows != number_rows) { - Log::error("ERROR! number of rows for field '%s' (%d) doesn't equal number of rows in first field '%s' (%d)\n", series->first.c_str(), series->second->get_number_values(), time_series.begin()->first.c_str(), time_series.begin()->second->get_number_values()); + LOG_ERROR("ERROR! number of rows for field '%s' (%d) doesn't equal number of rows in first field '%s' (%d)\n", series->first.c_str(), series->second->get_number_values(), time_series.begin()->first.c_str(), time_series.begin()->second->get_number_values()); } } - Log::info("read time series '%s' with number rows: %d\n", filename.c_str(), number_rows); + LOG_INFO("read time series '%s' with number rows: %d\n", filename.c_str(), number_rows); } TimeSeriesSet::~TimeSeriesSet(){ @@ -410,20 +416,20 @@ double TimeSeriesSet::get_correlation(string field1, string field2, int32_t lag) * Time offset > 0 generates output data. 
Do not use the first values */ void TimeSeriesSet::export_time_series(vector< vector > &data, const vector &requested_fields, const vector &shift_fields, int32_t time_offset) { - Log::debug("clearing data\n"); + LOG_DEBUG("clearing data\n"); data.clear(); //for some reason fabs is not working right int abs_time_offset = time_offset; if (abs_time_offset < 0) abs_time_offset *= -1; - Log::debug("resizing '%s' (number rows: %d, time offset: %d) to %d by %d\n", filename.c_str(), number_rows, time_offset, requested_fields.size(), number_rows - abs_time_offset); - Log::debug("time offset: %d\n", time_offset); - Log::debug("abs_time_offset: %d\n", abs_time_offset); + LOG_DEBUG("resizing '%s' (number rows: %d, time offset: %d) to %d by %d\n", filename.c_str(), number_rows, time_offset, requested_fields.size(), number_rows - abs_time_offset); + LOG_DEBUG("time offset: %d\n", time_offset); + LOG_DEBUG("abs_time_offset: %d\n", abs_time_offset); data.resize(requested_fields.size(), vector(number_rows - abs_time_offset, 0.0)); - Log::debug("resized! time_offset = %d\n", time_offset); + LOG_DEBUG("resized! 
time_offset = %d\n", time_offset); if (time_offset == 0) { for (int i = 0; i != requested_fields.size(); i++) { @@ -443,16 +449,16 @@ void TimeSeriesSet::export_time_series(vector< vector > &data, const vec } else if (time_offset < 0) { //input data, ignore the last N values for (int i = 0; i != requested_fields.size(); i++) { - Log::debug("exporting for field: '%s'\n", requested_fields[i].c_str()); + LOG_DEBUG("exporting for field: '%s'\n", requested_fields[i].c_str()); if (find(shift_fields.begin(), shift_fields.end(), requested_fields[i]) != shift_fields.end()) { - Log::debug("doing shift for field: '%s'\n", requested_fields[i].c_str()); + LOG_DEBUG("doing shift for field: '%s'\n", requested_fields[i].c_str()); //shift the shifted fields to the same as the output, not the input for (int j = -time_offset; j < number_rows; j++) { data[i][j + time_offset] = time_series[ requested_fields[i] ]->get_value(j); - //Log::info("\tdata[%d][%d]: %lf\n", i, j + time_offset, data[i][j + time_offset]); + //LOG_INFO("\tdata[%d][%d]: %lf\n", i, j + time_offset, data[i][j + time_offset]); } } else { - Log::debug("not doing shift for field: '%s'\n", requested_fields[i].c_str()); + LOG_DEBUG("not doing shift for field: '%s'\n", requested_fields[i].c_str()); for (int j = 0; j < number_rows + time_offset; j++) { data[i][j] = time_series[ requested_fields[i] ]->get_value(j); } @@ -514,7 +520,7 @@ void TimeSeriesSet::split(int slices, vector &sub_series) { slice->cut((int32_t)start, (int32_t)stop); sub_series.push_back( this->copy() ); - Log::info("split series from time %d to %d\n", (int32_t)start, (int32_t)stop); + LOG_INFO("split series from time %d to %d\n", (int32_t)start, (int32_t)stop); start += slice_size; stop += slice_size; @@ -524,7 +530,7 @@ void TimeSeriesSet::split(int slices, vector &sub_series) { void TimeSeriesSet::select_parameters(const vector ¶meter_names) { for (auto series = time_series.begin(); series != time_series.end(); series++) { if 
(std::find(parameter_names.begin(), parameter_names.end(), series->first) == parameter_names.end()) { - Log::info("removing series: '%s'\n", series->first.c_str()); + LOG_INFO("removing series: '%s'\n", series->first.c_str()); time_series.erase(series->first); } } @@ -538,30 +544,30 @@ void TimeSeriesSet::select_parameters(const vector &input_parameter_name } void TimeSeriesSets::help_message() { - Log::info("TimeSeriesSets initialization options from arguments:\n"); - Log::info("\tFile input:\n"); - Log::info("\t\t\t--filenames * : list of input CSV files\n"); - Log::info("\t\tWith the following are optional unless you want to split the data into training/testing sets:\n"); - Log::info("\t\t\t--training_indexes : array of ints (starting at 0) specifying which files are training files\n"); - Log::info("\t\t\t--test_indexes : array of ints (starting at 0) specifying which files are test files\n"); - Log::info("\tOR:\n"); - Log::info("\t\t\t--training_filenames : list of input CSV files for training time series\n"); - Log::info("\t\t\t--test_filenames : list of input CSV files for test time series\n"); - - Log::info("\tSpecifying parameters:\n"); - Log::info("\t\t\t--input_parameter_names *: parameters to be used as inputs\n"); - Log::info("\t\t\t--output_parameter_names *: parameters to be used as outputs\n"); - Log::info("\t\t\t--shift_parameter_names *: parameters to shift to same timestep as output\n"); - Log::info("\t\tOR:\n"); - Log::info("\t\t\t --parameters * : list of parameters, with a settings string and potentially bounds\n"); - Log::info("\t\t\t\tThe settings string should consist of only the characters 'i', 'o', and 'b'.\n"); - Log::info("\t\t\t\t'i' denotes the parameter as an input parameter.\n"); - Log::info("\t\t\t\t'o' denoting the parameter as an output parameter.\n"); - Log::info("\t\t\t\t'b' denoting the parameter as having user specified bounds, if this is specified the following two values should be the min and max bounds for the 
parameter.\n"); - Log::info("\t\t\t\tThe settings string requires at one of 'i' or 'o'.\n"); - - Log::info("\tNormalization:\n"); - Log::info("\t\t--normalize : (optional) normalize the data. Types can be 'min_max' or 'avg_std_dev'. 'min_max' will take each parameter and subtract the min, then divide by max-min. 'avg_std_dev' will subtract the average, divide by the standard deviation and then divide by the normalized max to ensure values are between -1 and 1.\n"); + LOG_INFO("TimeSeriesSets initialization options from arguments:\n"); + LOG_INFO("\tFile input:\n"); + LOG_INFO("\t\t\t--filenames * : list of input CSV files\n"); + LOG_INFO("\t\tWith the following are optional unless you want to split the data into training/testing sets:\n"); + LOG_INFO("\t\t\t--training_indexes : array of ints (starting at 0) specifying which files are training files\n"); + LOG_INFO("\t\t\t--test_indexes : array of ints (starting at 0) specifying which files are test files\n"); + LOG_INFO("\tOR:\n"); + LOG_INFO("\t\t\t--training_filenames : list of input CSV files for training time series\n"); + LOG_INFO("\t\t\t--test_filenames : list of input CSV files for test time series\n"); + + LOG_INFO("\tSpecifying parameters:\n"); + LOG_INFO("\t\t\t--input_parameter_names *: parameters to be used as inputs\n"); + LOG_INFO("\t\t\t--output_parameter_names *: parameters to be used as outputs\n"); + LOG_INFO("\t\t\t--shift_parameter_names *: parameters to shift to same timestep as output\n"); + LOG_INFO("\t\tOR:\n"); + LOG_INFO("\t\t\t --parameters * : list of parameters, with a settings string and potentially bounds\n"); + LOG_INFO("\t\t\t\tThe settings string should consist of only the characters 'i', 'o', and 'b'.\n"); + LOG_INFO("\t\t\t\t'i' denotes the parameter as an input parameter.\n"); + LOG_INFO("\t\t\t\t'o' denoting the parameter as an output parameter.\n"); + LOG_INFO("\t\t\t\t'b' denoting the parameter as having user specified bounds, if this is specified the following two values 
should be the min and max bounds for the parameter.\n"); + LOG_INFO("\t\t\t\tThe settings string requires at one of 'i' or 'o'.\n"); + + LOG_INFO("\tNormalization:\n"); + LOG_INFO("\t\t--normalize : (optional) normalize the data. Types can be 'min_max' or 'avg_std_dev'. 'min_max' will take each parameter and subtract the min, then divide by max-min. 'avg_std_dev' will subtract the average, divide by the standard deviation and then divide by the normalized max to ensure values are between -1 and 1.\n"); } TimeSeriesSets::TimeSeriesSets() : normalize_type("none") { @@ -597,7 +603,7 @@ void TimeSeriesSets::parse_parameters_string(const vector &p) { i++; if (settings.find_first_not_of("iob")) { - Log::fatal("Settings string for parameter '%s' was invalid, should consist only of characters 'i', 'o', or 'b'; i : input, o : output, b : bounded.\n", parameter.c_str()); + LOG_FATAL("Settings string for parameter '%s' was invalid, should consist only of characters 'i', 'o', or 'b'; i : input, o : output, b : bounded.\n", parameter.c_str()); help_message(); exit(1); } @@ -629,17 +635,22 @@ void TimeSeriesSets::parse_parameters_string(const vector &p) { } if (!has_input && !has_output) { - Log::fatal("Settings string for parameter '%s' was invalid, did not contain an 'i' for input or 'o' for output.\n", parameter.c_str()); + LOG_FATAL("Settings string for parameter '%s' was invalid, did not contain an 'i' for input or 'o' for output.\n", parameter.c_str()); help_message(); exit(1); } - Log::info("parsed parameter '%s' as "); - if (has_input) Log::info_no_header("input"); - if (has_output && has_input) Log::info_no_header(", "); - if (has_output) Log::info_no_header("output"); - if (has_bounds) Log::info_no_header(", min_bound: %lf, max_bound: %lf", min_bound, max_bound); - Log::info_no_header("\n"); + string log_str = ""; + + log_str = log_str + string_format("parsed parameter '%s' as ", parameter.c_str()); + + if (has_input) log_str = log_str + "input"; + if (has_output && 
has_input) log_str = log_str + ", "; + if (has_output) log_str = log_str + "output"; + if (has_bounds) log_str = log_str + string_format(", min_bound: %lf, max_bound: %lf", min_bound, max_bound); + + log_str = log_str + "\n"; + LOG_INFO(log_str.c_str()); } } @@ -647,24 +658,24 @@ void TimeSeriesSets::parse_parameters_string(const vector &p) { void TimeSeriesSets::load_time_series() { int32_t rows = 0; time_series.clear(); - if (Log::at_level(Log::DEBUG)) { - Log::debug("loading time series with parameters:\n"); + if (Log::at_level(LOG_LEVEL_DEBUG)) { + LOG_DEBUG("loading time series with parameters:\n"); for (uint32_t i = 0; i < all_parameter_names.size(); i++) { - Log::debug("\t'%s'\n", all_parameter_names[i].c_str()); + LOG_DEBUG("\t'%s'\n", all_parameter_names[i].c_str()); } - Log::debug("got time series filenames:\n"); + LOG_DEBUG("got time series filenames:\n"); } for (uint32_t i = 0; i < filenames.size(); i++) { - Log::debug("\t%s\n", filenames[i].c_str()); + LOG_DEBUG("\t%s\n", filenames[i].c_str()); TimeSeriesSet *ts = new TimeSeriesSet(filenames[i], all_parameter_names); time_series.push_back( ts ); rows += ts->get_number_rows(); } - Log::debug("number of time series files: %d, total rows: %d\n", filenames.size(), rows); + LOG_DEBUG("number of time series files: %d, total rows: %d\n", filenames.size(), rows); } @@ -699,7 +710,7 @@ TimeSeriesSets* TimeSeriesSets::generate_from_arguments(const vector &ar } } else { - Log::fatal("Could not find the '--filenames' or the '--training_filenames' and '--test_filenames' command line arguments. Usage instructions:\n"); + LOG_FATAL("Could not find the '--filenames' or the '--training_filenames' and '--test_filenames' command line arguments. 
Usage instructions:\n"); help_message(); exit(1); } @@ -727,24 +738,24 @@ TimeSeriesSets* TimeSeriesSets::generate_from_arguments(const vector &ar merge_parameter_names(tss->input_parameter_names, tss->output_parameter_names, tss->all_parameter_names); - if (Log::at_level(Log::DEBUG)) { - Log::debug("input parameter names:\n"); + if (Log::at_level(LOG_LEVEL_DEBUG)) { + LOG_DEBUG("input parameter names:\n"); for (int i = 0; i < tss->input_parameter_names.size(); i++) { - Log::debug("\t%s\n", tss->input_parameter_names[i].c_str()); + LOG_DEBUG("\t%s\n", tss->input_parameter_names[i].c_str()); } - Log::debug("output parameter names:\n"); + LOG_DEBUG("output parameter names:\n"); for (int i = 0; i < tss->output_parameter_names.size(); i++) { - Log::debug("\t%s\n", tss->output_parameter_names[i].c_str()); + LOG_DEBUG("\t%s\n", tss->output_parameter_names[i].c_str()); } - Log::debug("all parameter names:\n"); + LOG_DEBUG("all parameter names:\n"); for (int i = 0; i < tss->all_parameter_names.size(); i++) { - Log::debug("\t%s\n", tss->all_parameter_names[i].c_str()); + LOG_DEBUG("\t%s\n", tss->all_parameter_names[i].c_str()); } } } else { - Log::fatal("Could not find the '--parameters' or the '--input_parameter_names' and '--output_parameter_names' command line arguments. Usage instructions:\n"); + LOG_FATAL("Could not find the '--parameters' or the '--input_parameter_names' and '--output_parameter_names' command line arguments. 
Usage instructions:\n"); help_message(); exit(1); } @@ -759,15 +770,15 @@ TimeSeriesSets* TimeSeriesSets::generate_from_arguments(const vector &ar } if (tss->normalize_type.compare("none") == 0) { - Log::debug("not normalizing time series.\n"); + LOG_DEBUG("not normalizing time series.\n"); } else if (tss->normalize_type.compare("min_max") == 0) { - Log::debug("doing min max normalization on the time series.\n"); + LOG_DEBUG("doing min max normalization on the time series.\n"); tss->normalize_min_max(); } else if (tss->normalize_type.compare("avg_std_dev") == 0) { - Log::debug("doing avg std dev normalization on the time series.\n"); + LOG_DEBUG("doing avg std dev normalization on the time series.\n"); tss->normalize_avg_std_dev(); } else { - Log::fatal("Unknown normalize type: '%s'\n", tss->normalize_type.c_str()); + LOG_FATAL("Unknown normalize type: '%s'\n", tss->normalize_type.c_str()); help_message(); exit(1); } @@ -828,13 +839,13 @@ double TimeSeriesSets::denormalize(string field_name, double value) { return value; } else { - Log::fatal("Unknown normalize type on denormalize for '%s' and '%lf', '%s', this should never happen.\n", field_name.c_str(), value, normalize_type.c_str()); + LOG_FATAL("Unknown normalize type on denormalize for '%s' and '%lf', '%s', this should never happen.\n", field_name.c_str(), value, normalize_type.c_str()); exit(1); } } void TimeSeriesSets::normalize_min_max() { - Log::info("doing min/max normalization:\n"); + LOG_INFO("doing min/max normalization:\n"); for (int i = 0; i < all_parameter_names.size(); i++) { string parameter_name = all_parameter_names[i]; @@ -849,7 +860,7 @@ void TimeSeriesSets::normalize_min_max() { min = normalize_mins[parameter_name]; max = normalize_maxs[parameter_name]; - Log::info("user specified bounds for "); + LOG_INFO("user specified bounds for "); } else { for (int j = 0; j < time_series.size(); j++) { @@ -863,10 +874,10 @@ void TimeSeriesSets::normalize_min_max() { normalize_mins[parameter_name] = min; 
normalize_maxs[parameter_name] = max; - Log::info("calculated bounds for "); + LOG_INFO("calculated bounds for "); } - Log::info_no_header("%30s, min: %22.10lf, max: %22.10lf\n", parameter_name.c_str(), min, max); + LOG_INFO("%30s, min: %22.10lf, max: %22.10lf\n", parameter_name.c_str(), min, max); //for each series, subtract min, divide by (max - min) for (int j = 0; j < time_series.size(); j++) { @@ -886,20 +897,20 @@ void TimeSeriesSets::normalize_min_max(const map &_normalize_mins if (normalize_mins.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); - Log::fatal("normalize min fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); + LOG_FATAL("normalize min fields/values:\n"); for (auto iterator = normalize_mins.begin(); iterator != normalize_mins.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_maxs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); - Log::fatal("normalize max fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); + LOG_FATAL("normalize max fields/values:\n"); for (auto iterator = normalize_maxs.begin(); iterator != normalize_maxs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } @@ -914,7 +925,7 @@ void TimeSeriesSets::normalize_min_max(const map &_normalize_mins } void TimeSeriesSets::normalize_avg_std_dev() { - Log::info("doing min/max normalization:\n"); + LOG_INFO("doing min/max normalization:\n"); for (int i = 0; i < 
all_parameter_names.size(); i++) { string parameter_name = all_parameter_names[i]; @@ -931,7 +942,7 @@ void TimeSeriesSets::normalize_avg_std_dev() { avg = normalize_avgs[parameter_name]; std_dev = normalize_std_devs[parameter_name]; - Log::info("user specified avg/std dev for "); + LOG_INFO("user specified avg/std dev for "); } else { double numerator_average = 0.0; @@ -969,7 +980,7 @@ void TimeSeriesSets::normalize_avg_std_dev() { normalize_std_devs[parameter_name] = std_dev; - Log::info("calculated bounds for "); + LOG_INFO("calculated bounds for "); } double norm_min = (min - avg) / std_dev; @@ -977,7 +988,7 @@ void TimeSeriesSets::normalize_avg_std_dev() { norm_max = fmax(norm_min, norm_max); - Log::info_no_header("%30s, min: %22.10lf, max: %22.10lf, norm_max; %22.10lf, combined average: %22.10lf, combined std_dev: %22.10lf\n", parameter_name.c_str(), min, max, avg, norm_max, std_dev); + LOG_INFO("%30s, min: %22.10lf, max: %22.10lf, norm_max; %22.10lf, combined average: %22.10lf, combined std_dev: %22.10lf\n", parameter_name.c_str(), min, max, avg, norm_max, std_dev); //for each series, subtract min, divide by (max - min) for (int j = 0; j < time_series.size(); j++) { @@ -999,40 +1010,40 @@ void TimeSeriesSets::normalize_avg_std_dev(const map &_normalize_ if (normalize_avgs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize avg values.\n", field.c_str()); - Log::fatal("normalize avg fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize avg values.\n", field.c_str()); + LOG_FATAL("normalize avg fields/values:\n"); for (auto iterator = normalize_avgs.begin(); iterator != normalize_avgs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_std_devs.count(field) == 0) { //field doesn't exist in the normalize values, 
report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize std_dev values.\n", field.c_str()); - Log::fatal("normalize std_dev fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize std_dev values.\n", field.c_str()); + LOG_FATAL("normalize std_dev fields/values:\n"); for (auto iterator = normalize_std_devs.begin(); iterator != normalize_std_devs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_mins.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); - Log::fatal("normalize min fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); + LOG_FATAL("normalize min fields/values:\n"); for (auto iterator = normalize_mins.begin(); iterator != normalize_mins.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_maxs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); - Log::fatal("normalize max fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); + LOG_FATAL("normalize max fields/values:\n"); for (auto iterator = normalize_maxs.begin(); iterator != normalize_maxs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } @@ -1079,7 +1090,7 @@ void TimeSeriesSets::export_time_series(const vector &series_indexes, int t */ void TimeSeriesSets::export_training_series(int time_offset, vector< vector< 
vector > > &inputs, vector< vector< vector > > &outputs) { if (training_indexes.size() == 0) { - Log::fatal("ERROR: attempting to export training time series, however the training_indexes were not specified.\n"); + LOG_FATAL("ERROR: attempting to export training time series, however the training_indexes were not specified.\n"); exit(1); } @@ -1091,7 +1102,7 @@ void TimeSeriesSets::export_training_series(int time_offset, vector< vector< vec */ void TimeSeriesSets::export_test_series(int time_offset, vector< vector< vector > > &inputs, vector< vector< vector > > &outputs) { if (test_indexes.size() == 0) { - Log::fatal("ERROR: attempting to export test time series, however the test_indexes were not specified.\n"); + LOG_FATAL("ERROR: attempting to export test time series, however the test_indexes were not specified.\n"); exit(1); } diff --git a/word_series/word_series.cxx b/word_series/word_series.cxx index 59bd9cc5..c23325d4 100644 --- a/word_series/word_series.cxx +++ b/word_series/word_series.cxx @@ -90,7 +90,7 @@ void WordSeries::calculate_statistics() { } void WordSeries::print_statistics() { - Log::info("\t%25s stats, min: %lf, avg: %lf, max: %lf, min_change: %lf, max_change: %lf, std_dev: %lf, variance: %lf\n", name.c_str(), min, average, max, min_change, max_change, std_dev, variance); + LOG_INFO("\t%25s stats, min: %lf, avg: %lf, max: %lf, min_change: %lf, max_change: %lf, std_dev: %lf, variance: %lf\n", name.c_str(), min, average, max, min_change, max_change, std_dev, variance); } int WordSeries::get_number_values() const { @@ -126,15 +126,15 @@ double WordSeries::get_max_change() const { } void WordSeries::normalize_min_max(double min, double max) { - Log::debug("normalizing time series '%s' with min: %lf and max: %lf, series min: %lf, series max: %lf\n", name.c_str(), min, max, this->min, this->max); + LOG_DEBUG("normalizing time series '%s' with min: %lf and max: %lf, series min: %lf, series max: %lf\n", name.c_str(), min, max, this->min, this->max); for 
(int i = 0; i < values.size(); i++) { if (values[i] < min) { - Log::warning("normalizing series %s, value[%d] %lf was less than min for normalization: %lf\n", name.c_str(), i, values[i], min); + LOG_WARNING("normalizing series %s, value[%d] %lf was less than min for normalization: %lf\n", name.c_str(), i, values[i], min); } if (values[i] > max) { - Log::warning("normalizing series %s, value[%d] %lf was greater than max for normalization: %lf\n", name.c_str(), i, values[i], max); + LOG_WARNING("normalizing series %s, value[%d] %lf was greater than max for normalization: %lf\n", name.c_str(), i, values[i], max); } values[i] = (values[i] - min) / (max - min); @@ -143,7 +143,7 @@ void WordSeries::normalize_min_max(double min, double max) { //divide by the normalized max to make things between -1 and 1 void WordSeries::normalize_avg_std_dev(double avg, double std_dev, double norm_max) { - Log::debug("normalizing time series '%s' with avg: %lf, std_dev: %lf and normalized max: %lf, series avg: %lf, series std_dev: %lf\n", name.c_str(), avg, std_dev, norm_max, this->average, this->std_dev); + LOG_DEBUG("normalizing time series '%s' with avg: %lf, std_dev: %lf and normalized max: %lf, series avg: %lf, series std_dev: %lf\n", name.c_str(), avg, std_dev, norm_max, this->average, this->std_dev); for (int i = 0; i < values.size(); i++) { values[i] = ((values[i] - avg) / std_dev) / norm_max; @@ -219,7 +219,7 @@ void SentenceSeries::add_word_series(string name) { if (word_series.count(name) == 0) { word_series[name] = new WordSeries(name); } else { - Log::error("ERROR! Trying to add a time series to a time series set with name '%s' which already exists in the set!\n", name.c_str()); + LOG_ERROR("ERROR! 
Trying to add a time series to a time series set with name '%s' which already exists in the set!\n", name.c_str()); } } @@ -268,8 +268,8 @@ SentenceSeries::SentenceSeries(const string _filename,const vector & _wo for (auto series = word_series.begin(); series != word_series.end(); series++) { series->second->calculate_statistics(); if (series->second->get_min_change() == 0 && series->second->get_max_change() == 0) { - Log::warning("WARNING: unchanging series: '%s'\n", series->first.c_str()); - Log::warning("removing unchanging series: '%s'\n", series->first.c_str()); + LOG_WARNING("WARNING: unchanging series: '%s'\n", series->first.c_str()); + LOG_WARNING("removing unchanging series: '%s'\n", series->first.c_str()); series->second->print_statistics(); //word_series.erase(series); } else { @@ -279,12 +279,12 @@ SentenceSeries::SentenceSeries(const string _filename,const vector & _wo int series_rows = series->second->get_number_values(); if (series_rows != number_rows) { - Log::error("ERROR! number of rows for field '%s' (%d) doesn't equal number of rows in first field '%s' (%d)\n", series->first.c_str(), series->second->get_number_values(), word_series.begin()->first.c_str(), word_series.begin()->second->get_number_values()); + LOG_ERROR("ERROR! 
number of rows for field '%s' (%d) doesn't equal number of rows in first field '%s' (%d)\n", series->first.c_str(), series->second->get_number_values(), word_series.begin()->first.c_str(), word_series.begin()->second->get_number_values()); } } - Log::info("read time series '%s' with number rows: %d\n", filename.c_str(), number_rows); + LOG_INFO("read time series '%s' with number rows: %d\n", filename.c_str(), number_rows); } @@ -429,7 +429,7 @@ SentenceSeries* SentenceSeries::copy(){ void SentenceSeries::select_parameters(const vector ¶meter_names) { for (auto series = word_series.begin(); series != word_series.end(); series++) { if (std::find(parameter_names.begin(), parameter_names.end(), series->first) == parameter_names.end()) { - Log::info("removing series: '%s'\n", series->first.c_str()); + LOG_INFO("removing series: '%s'\n", series->first.c_str()); word_series.erase(series->first); } } @@ -510,7 +510,7 @@ void Corpus::load_word_library(){ for (uint32_t i = 0; i < filenames.size(); i++) { - Log::debug("\t%s\n", filenames[i].c_str()); + LOG_DEBUG("\t%s\n", filenames[i].c_str()); SentenceSeries *ss = new SentenceSeries(filenames[i], word_index,vocab); sent_series.push_back( ss ); @@ -549,7 +549,7 @@ Corpus* Corpus::generate_from_arguments(const vector &arguments){ } } else { - Log::fatal("Could not find the '--filenames' or the '--training_filenames' and '--test_filenames' command line arguments. Usage instructions:\n"); + LOG_FATAL("Could not find the '--filenames' or the '--training_filenames' and '--test_filenames' command line arguments. 
Usage instructions:\n"); //help_message(); exit(1); } @@ -618,13 +618,13 @@ double Corpus::denormalize(string field_name, double value) { return value; } else { - Log::fatal("Unknown normalize type on denormalize for '%s' and '%lf', '%s', this should never happen.\n", field_name.c_str(), value, normalize_type.c_str()); + LOG_FATAL("Unknown normalize type on denormalize for '%s' and '%lf', '%s', this should never happen.\n", field_name.c_str(), value, normalize_type.c_str()); exit(1); } } void Corpus::normalize_min_max() { - Log::info("doing min/max normalization:\n"); + LOG_INFO("doing min/max normalization:\n"); for (int i = 0; i < all_parameter_names.size(); i++) { string parameter_name = all_parameter_names[i]; @@ -635,11 +635,13 @@ void Corpus::normalize_min_max() { //get the min of all series of the same name //get the max of all series of the same name + string log_str = ""; + if (normalize_mins.count(parameter_name) > 0) { min = normalize_mins[parameter_name]; max = normalize_maxs[parameter_name]; - Log::info("user specified bounds for "); + log_str = "user specified bounds for "; } else { for (int j = 0; j < sent_series.size(); j++) { @@ -653,10 +655,12 @@ void Corpus::normalize_min_max() { normalize_mins[parameter_name] = min; normalize_maxs[parameter_name] = max; - Log::info("calculated bounds for "); + log_str = "calculated bounds for "; } - Log::info_no_header("%30s, min: %22.10lf, max: %22.10lf\n", parameter_name.c_str(), min, max); + log_str = log_str + string_format("%30s, min: %22.10lf, max: %22.10lf\n", parameter_name.c_str(), min, max); + + LOG_INFO("%s", log_str.c_str()); //for each series, subtract min, divide by (max - min) for (int j = 0; j < sent_series.size(); j++) { @@ -676,20 +680,20 @@ void Corpus::normalize_min_max(const map &_normalize_mins, const if (normalize_mins.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); - 
Log::fatal("normalize min fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); + LOG_FATAL("normalize min fields/values:\n"); for (auto iterator = normalize_mins.begin(); iterator != normalize_mins.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_maxs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); - Log::fatal("normalize max fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); + LOG_FATAL("normalize max fields/values:\n"); for (auto iterator = normalize_maxs.begin(); iterator != normalize_maxs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } @@ -704,7 +708,7 @@ void Corpus::normalize_min_max(const map &_normalize_mins, const } void Corpus::normalize_avg_std_dev() { - Log::info("doing min/max normalization:\n"); + LOG_INFO("doing avg/std dev normalization:\n"); for (int i = 0; i < all_parameter_names.size(); i++) { string parameter_name = all_parameter_names[i]; @@ -715,13 +719,15 @@ void Corpus::normalize_avg_std_dev() { double avg = 0.0; double std_dev = 0.0; + string log_str = ""; + if (normalize_avgs.count(parameter_name) > 0) { min = normalize_mins[parameter_name]; max = normalize_maxs[parameter_name]; avg = normalize_avgs[parameter_name]; std_dev = normalize_std_devs[parameter_name]; - Log::info("user specified avg/std dev for "); + log_str = "user specified avg/std dev for "; } else { double numerator_average = 0.0; @@ -759,7 +765,7 @@ void Corpus::normalize_avg_std_dev() { normalize_std_devs[parameter_name] = std_dev; - 
log_str = "calculated bounds for "; } double norm_min = (min - avg) / std_dev; @@ -767,7 +773,9 @@ norm_max = fmax(norm_min, norm_max); - Log::info_no_header("%30s, min: %22.10lf, max: %22.10lf, norm_max; %22.10lf, combined average: %22.10lf, combined std_dev: %22.10lf\n", parameter_name.c_str(), min, max, avg, norm_max, std_dev); + log_str = log_str + string_format("%30s, min: %22.10lf, max: %22.10lf, norm_max: %22.10lf, combined average: %22.10lf, combined std_dev: %22.10lf\n", parameter_name.c_str(), min, max, norm_max, avg, std_dev); + + LOG_INFO("%s", log_str.c_str()); //for each series, subtract min, divide by (max - min) for (int j = 0; j < sent_series.size(); j++) { @@ -789,40 +797,40 @@ void Corpus::normalize_avg_std_dev(const map &_normalize_avgs, co if (normalize_avgs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize avg values.\n", field.c_str()); - Log::fatal("normalize avg fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize avg values.\n", field.c_str()); + LOG_FATAL("normalize avg fields/values:\n"); for (auto iterator = normalize_avgs.begin(); iterator != normalize_avgs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_std_devs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize std_dev values.\n", field.c_str()); - Log::fatal("normalize std_dev fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize std_dev values.\n", field.c_str()); + LOG_FATAL("normalize std_dev fields/values:\n"); for (auto iterator = normalize_std_devs.begin(); iterator != normalize_std_devs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), 
iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_mins.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); - Log::fatal("normalize min fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize min values.\n", field.c_str()); + LOG_FATAL("normalize min fields/values:\n"); for (auto iterator = normalize_mins.begin(); iterator != normalize_mins.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } if (normalize_maxs.count(field) == 0) { //field doesn't exist in the normalize values, report an error - Log::fatal("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); - Log::fatal("normalize max fields/values:\n"); + LOG_FATAL("ERROR, couldn't find field '%s' in normalize max values.\n", field.c_str()); + LOG_FATAL("normalize max fields/values:\n"); for (auto iterator = normalize_maxs.begin(); iterator != normalize_maxs.end(); iterator++) { - Log::fatal("\t%s: %lf\n", iterator->first.c_str(), iterator->second); + LOG_FATAL("\t%s: %lf\n", iterator->first.c_str(), iterator->second); } exit(1); } @@ -916,7 +924,7 @@ void Corpus::export_sent_series(const vector &series_indexes, int word_offs */ void Corpus::export_training_series(int word_offset, vector< vector< vector > > &inputs, vector< vector< vector > > &outputs) { if (training_indexes.size() == 0) { - Log::fatal("ERROR: attempting to export training time series, however the training_indexes were not specified.\n"); + LOG_FATAL("ERROR: attempting to export training time series, however the training_indexes were not specified.\n"); exit(1); } @@ -928,7 +936,7 @@ void Corpus::export_training_series(int word_offset, vector< vector< vector > > &inputs, vector< vector< vector 
> > &outputs) { if (test_indexes.size() == 0) { - Log::fatal("ERROR: attempting to export test time series, however the test_indexes were not specified.\n"); + LOG_FATAL("ERROR: attempting to export test time series, however the test_indexes were not specified.\n"); exit(1); }