From f0092115e1b3cc22645f528ff8f92bae6d9f1c1c Mon Sep 17 00:00:00 2001
From: masterdezign
Date: Wed, 3 Mar 2021 17:48:30 +0100
Subject: [PATCH] Fix TF deprecation warnings

---
 batch_norm.py   | 12 ++++++------
 eval.py         |  2 +-
 gmm.py          |  4 ++--
 infer_model.py  |  6 +++---
 lets_start.py   |  2 +-
 model.py        | 14 +++++++-------
 ops.py          |  2 +-
 reactions.py    | 31 ++++++++++++++++---------------
 realreaction.py |  8 ++++----
 rnn.py          | 47 ++++++++++++++++++++++++-----------------------
 util.py         |  6 +++---
 11 files changed, 68 insertions(+), 66 deletions(-)

diff --git a/batch_norm.py b/batch_norm.py
index 30d5235..e746c5d 100755
--- a/batch_norm.py
+++ b/batch_norm.py
@@ -32,7 +32,7 @@ def _set_default_initializer(self, var_name):
 
     def _build_statistics_variance(self, input_batch,
                                    reduction_indices, use_batch_stats):
-        self._moving_mean = tf.get_variable(
+        self._moving_mean = tf.compat.v1.get_variable(
             "moving_mean",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -40,7 +40,7 @@ def _build_statistics_variance(self, input_batch,
             initializer=tf.zeros_initializer,
             trainable=False)
 
-        self._moving_variance = tf.get_variable(
+        self._moving_variance = tf.compat.v1.get_variable(
             "moving_variance",
             shape=self._mean_shape,
             collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -81,7 +81,7 @@ def build_moving_stats():
 
     def _build_statistics_second_moment(self, input_batch,
                                         reduction_indices, use_batch_stats):
-        self._moving_mean = tf.get_variable(
+        self._moving_mean = tf.compat.v1.get_variable(
            "moving_mean",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -89,7 +89,7 @@ def _build_statistics_second_moment(self, input_batch,
            initializer=tf.zeros_initializer,
            trainable=False)
 
-        self._moving_second_moment = tf.get_variable(
+        self._moving_second_moment = tf.compat.v1.get_variable(
            "moving_second_moment",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
@@ -252,7 +252,7 @@ def _build(self, input_batch, is_training=True, test_local_stats=True):
         # Set up optional scale and offset factors.
         if self._offset:
             self._set_default_initializer(self.BETA)
-            self._beta = tf.get_variable(
+            self._beta = tf.compat.v1.get_variable(
                 self.BETA,
                 shape=self._mean_shape,
                 initializer=self._initializers[self.BETA])
@@ -261,7 +261,7 @@ def _build(self, input_batch, is_training=True, test_local_stats=True):
 
         if self._scale:
             self._set_default_initializer(self.GAMMA)
-            self._gamma = tf.get_variable(
+            self._gamma = tf.compat.v1.get_variable(
                 self.GAMMA,
                 shape=self._mean_shape,
                 initializer=self._initializers[self.GAMMA])
diff --git a/eval.py b/eval.py
index 5df7fdc..1dd741a 100755
--- a/eval.py
+++ b/eval.py
@@ -23,7 +23,7 @@ def main():
     config = json.load(config_file,
                        object_hook=lambda d:namedtuple('x', d.keys())(*d.values()))
     num_unrolls = config.num_steps // config.unroll_length
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         model = util.load_model(sess, config, logger)
         all_y = []
         for i in range(10):
diff --git a/gmm.py b/gmm.py
index 55fc2cf..813cb7e 100755
--- a/gmm.py
+++ b/gmm.py
@@ -69,10 +69,10 @@ def test_tf():
     xr = list(np.arange(0, 1, 0.02))
     X = np.array(list(product(xr, repeat=2)))
     Y = []
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         gmm = tf_GMM(batch_size=1, ncoef=6, num_dims=2, cov=0.5)
         y = gmm(tf.placeholder(tf.float32, shape=[1, 2], name='x'))
-        sess.run(tf.global_variables_initializer())
+        sess.run(tf.compat.v1.global_variables_initializer())
         for x in X:
             Y.append(sess.run(y, feed_dict={'x:0':x.reshape((1, 2))}))
diff --git a/infer_model.py b/infer_model.py
index 11c53ba..8ff95bd 100755
--- a/infer_model.py
+++ b/infer_model.py
@@ -28,7 +28,7 @@ def __init__(self, cell, func, ndim, nsteps, ckpt_path, logger, constraints):
         self.init_state = self.cell.get_initial_state(1, tf.float32)
         self.results = self.build_graph()
-        self.saver = tf.train.Saver(tf.global_variables())
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
 
     def get_state_shapes(self):
         return [(s[0].get_shape().as_list(), s[1].get_shape().as_list())
@@ -78,7 +78,7 @@ def get_init(self):
         return x, y, init_state
 
     def run(self):
-        with tf.Session() as sess:
+        with tf.compat.v1.Session() as sess:
             self.load(sess, self.ckpt_path)
             x, y, state = self.get_init()
             x_array = np.zeros((self.nsteps + 1, self.ndim))
@@ -147,6 +147,6 @@ def main():
     ax2.plot(x_array[:, 0], x_array[:, 1], x_array[:, 2])
     fig2.show()
     plt.show()
-    
+
 if __name__ == '__main__':
     main()
diff --git a/lets_start.py b/lets_start.py
index 6b530a9..63f63ef 100755
--- a/lets_start.py
+++ b/lets_start.py
@@ -22,7 +22,7 @@ def main():
     logger.info(str(json.load(config_file)))
     config_file.close()
     num_unrolls = config.num_steps // config.unroll_length
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         # tf.get_default_graph().finalize()
         model = util.create_model(sess, config, logger)
         step, loss, reset, fx_array, x_array = model.step()
diff --git a/model.py b/model.py
index b584ada..9af3f02 100755
--- a/model.py
+++ b/model.py
@@ -17,17 +17,17 @@ def __init__(self, cell, logger, func, ndim, batch_size, unroll_len,
         self.make_loss(func, ndim, batch_size, unroll_len)
         loss_func = self.get_loss_func(loss_type, direction)
         self.loss = loss_func(self.fx_array)
-        optimizer = getattr(tf.train, optimizer + 'Optimizer')(lr)
+        optimizer = getattr(tf.compat.v1.train, optimizer + 'Optimizer')(lr)
         gvs = optimizer.compute_gradients(self.loss)
         capped_gvs = [(tf.clip_by_value(grad, -0.1, 0.1), var)
                       for grad, var in gvs]
         self.opt = optimizer.apply_gradients(capped_gvs)
         # self.opt = optimizer.minimize(self.loss)
-        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=3)
         logger.info('model variable:')
-        logger.info(str([var.name for var in tf.global_variables()]))
+        logger.info(str([var.name for var in tf.compat.v1.global_variables()]))
         logger.info('trainable variables:')
-        logger.info(str([var.name for var in tf.trainable_variables()]))
+        logger.info(str([var.name for var in tf.compat.v1.trainable_variables()]))
 
         self.fx_array = self.fx_array.stack()
         self.x_array = self.x_array.stack()
@@ -35,11 +35,11 @@ def __init__(self, cell, logger, func, ndim, batch_size, unroll_len,
     def make_discount(self, gamma, unroll_len):
         df = [(gamma ** (unroll_len - i)) for i in range(unroll_len + 1)]
         return tf.constant(df, shape=[unroll_len + 1, 1], dtype=tf.float32)
-    
+
     def make_loss(self, func, ndim, batch_size, unroll_len):
         self.unroll_len = unroll_len
 
-        x = tf.get_variable('x', shape=[batch_size, ndim],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, ndim],
             initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.2),
             trainable=self.trainable_init)
         constants = func.get_parameters()
@@ -81,7 +81,7 @@ def step(t, x, state, fx_array, x_array):
         variables = [x,] + constants
 
         # Empty array as part of the reset process.
-        self.reset = [tf.variables_initializer(variables),
+        self.reset = [tf.compat.v1.variables_initializer(variables),
                       self.fx_array.close(),
                       self.x_array.close()]
         return self.fx_array, self.x_array
diff --git a/ops.py b/ops.py
index bbd231b..ad0996f 100755
--- a/ops.py
+++ b/ops.py
@@ -5,7 +5,7 @@ def wrap_variable_creation(func, custom_getter):
     """Provides a custom getter for all variable creations."""
-    original_get_variable = tf.get_variable
+    original_get_variable = tf.compat.v1.get_variable
 
     def custom_get_variable(*args, **kwargs):
         if hasattr(kwargs, 'custom_getter'):
             raise AttributeError('Custom getters are not supported for '
diff --git a/reactions.py b/reactions.py
index 59f41eb..9b7eeed 100755
--- a/reactions.py
+++ b/reactions.py
@@ -1,6 +1,7 @@
 import os
 os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
 import tensorflow as tf
+import tensorflow_probability as tfp
 import numpy as np
 
 class ConstraintQuadratic:
@@ -8,11 +9,11 @@ class ConstraintQuadratic:
     def __init__(self, batch_size=128, num_dims=3, ptype='convex',
                  random=0.05, dtype=tf.float32):
         self.ptype = ptype
-        self.w = tf.get_variable('w', shape=[batch_size, num_dims, num_dims],
+        self.w = tf.compat.v1.get_variable('w', shape=[batch_size, num_dims, num_dims],
                                  dtype=dtype,
                                  initializer=tf.random_normal_initializer(),
                                  trainable=False)
-        self.a = tf.get_variable('y', shape=[batch_size, num_dims],
+        self.a = tf.compat.v1.get_variable('y', shape=[batch_size, num_dims],
                                  dtype=dtype,
                                  initializer=tf.random_uniform_initializer(minval=0.01, maxval=0.99),
                                  trainable=False)
@@ -41,7 +42,7 @@ def _barrier(self, var):
 
     def __call__(self, x):
         '''
-        x = tf.get_variable('x', shape=[batch_size, num_dims],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, num_dims],
                 dtype=dtype, initializer=tf.random_normal_initializer(stddev=stdev))
         '''
         res = (self._func(x) / self.normalizer + self.e + self._barrier(x))
@@ -56,21 +57,21 @@ def __init__(self, batch_size=128, ncoef=6, num_dims=3, random=None,
         self.num_dim = num_dims
         self.batch_size = batch_size
         self.dtype = dtype
-        with tf.variable_scope('func_gmm'):
-            self.m = [tf.get_variable('mu_{}'.format(i), shape=[batch_size, num_dims],
+        with tf.compat.v1.variable_scope('func_gmm'):
+            self.m = [tf.compat.v1.get_variable('mu_{}'.format(i), shape=[batch_size, num_dims],
                                       dtype=dtype,
                                       initializer=tf.random_uniform_initializer(minval=0.01, maxval=0.99),
                                       trainable=False)
                       for i in range(ncoef)]
-            self.cov = [tf.get_variable('cov_{}'.format(i), shape=[batch_size, num_dims],
+            self.cov = [tf.compat.v1.get_variable('cov_{}'.format(i), shape=[batch_size, num_dims],
                                         dtype=dtype,
                                         initializer=tf.truncated_normal_initializer(
                                             mean=cov, stddev=cov/5),
                                         trainable=False)
                         for i in range(ncoef)]
-            self.coef = tf.get_variable('coef', shape=[ncoef, 1], dtype=dtype,
+            self.coef = tf.compat.v1.get_variable('coef', shape=[ncoef, 1], dtype=dtype,
                                         initializer=tf.random_normal_initializer(stddev=0.2),
                                         trainable=False)
@@ -91,7 +92,7 @@ def get_parameters(self):
         return self.m + self.cov + [self.coef]
 
     def __call__(self, x):
-        dist = [tf.contrib.distributions.MultivariateNormalDiag(
+        dist = [tfp.distributions.MultivariateNormalDiag(
                     self.m[i], self.cov[i], name='MultVarNorm_{}'.format(i))
                 for i in range(self.ncoef)]
         p = tf.concat([tf.reshape(dist[i].prob(x), [-1, 1])
@@ -101,7 +102,7 @@ def __call__(self, x):
         result = (fx / self.cst - self.bots) / (self.tops - self.bots)
         # import pdb; pdb.set_trace()
         if self.random:
-            result = result + tf.random_normal(shape=[self.batch_size, 1], 
+            result = result + tf.random_normal(shape=[self.batch_size, 1],
                 stddev=self.random, dtype=self.dtype, name='error')
         return result
@@ -113,11 +114,11 @@ class Quadratic:
     def __init__(self, batch_size=128, num_dims=3, ptype='convex',
                  random=0.05, dtype=tf.float32):
         self.ptype = ptype
-        self.w = tf.get_variable('w', shape=[batch_size, num_dims, num_dims],
+        self.w = tf.compat.v1.get_variable('w', shape=[batch_size, num_dims, num_dims],
                                  dtype=dtype,
                                  initializer=tf.random_normal_initializer(),
                                  trainable=False)
-        self.a = tf.get_variable('y', shape=[batch_size, num_dims],
+        self.a = tf.compat.v1.get_variable('y', shape=[batch_size, num_dims],
                                  dtype=dtype,
                                  initializer=tf.truncated_normal_initializer(mean=0.5, stddev=0.2),
                                  trainable=False)
@@ -144,7 +145,7 @@ def _func(self, var):
 
     def __call__(self, x):
         '''
-        x = tf.get_variable('x', shape=[batch_size, num_dims],
+        x = tf.compat.v1.get_variable('x', shape=[batch_size, num_dims],
                 dtype=dtype, initializer=tf.random_normal_initializer(stddev=stdev))
         '''
         res = (self._func(x) / self.normalizer + self.e)
@@ -195,7 +196,7 @@ def __call__(self, x):
         if self.record:
             self.history['x'].append(x)
             self.history['y'].append(res)
-        return res 
+        return res
 
 class ConstraintQuadraticEval:
     def __init__(self, num_dim=3, random=0.5, ptype='convex',
@@ -233,7 +234,7 @@ def __call__(self, x):
             res = 1 - res
         print('Output:')
         print(res)
-        return res 
+        return res
 
 
 class RealReaction:
@@ -264,4 +265,4 @@ def __call__(self, x):
         result = float(input('Input the reaction yield:'))
 
         return self.y_convert(result)
-    
+
diff --git a/realreaction.py b/realreaction.py
index b0d0a84..e6be51e 100755
--- a/realreaction.py
+++ b/realreaction.py
@@ -25,7 +25,7 @@ def __init__(self, cell, func, ndim, nsteps, ckpt_path, logger, constraints):
         self.init_state = self.cell.get_initial_state(1, tf.float32)
         self.results = self.build_graph()
-        self.saver = tf.train.Saver(tf.global_variables())
+        self.saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
 
     def get_state_shapes(self):
         return [(s[0].get_shape().as_list(), s[1].get_shape().as_list())
@@ -75,7 +75,7 @@ def get_init(self):
         return x, y, init_state
 
     def run(self):
-        with tf.Session() as sess:
+        with tf.compat.v1.Session() as sess:
             self.load(sess, self.ckpt_path)
             x, y, state = self.get_init()
             x_array = np.zeros((self.nsteps + 1, self.ndim))
@@ -110,10 +110,10 @@ def main():
                          constraints=config.constraints)
     x_array, y_array = optimizer.run()
-    
+
     # plt.figure(1)
     # plt.plot(y_array)
     # plt.show()
-    
+
 if __name__ == '__main__':
     main()
diff --git a/rnn.py b/rnn.py
index 21311d1..5d0ac1b 100755
--- a/rnn.py
+++ b/rnn.py
@@ -1,4 +1,5 @@
 import tensorflow as tf
+import tensorflow_probability as tfp
 import batch_norm
 import util
 import pdb
@@ -33,8 +34,8 @@ def __call__(self, x, y, state, scope=None):
         inputs = tf.concat([x, tf.reshape(y, (-1, 1))], axis=1, name='inputs')
         output, nstate = self.cell(inputs, state)
-        with tf.variable_scope('proj'):
-            w = tf.get_variable('proj_weight', [self._num_units, x_dim])
+        with tf.compat.v1.variable_scope('proj'):
+            w = tf.compat.v1.get_variable('proj_weight', [self._num_units, x_dim])
             x = tf.matmul(output, w)
         return x, nstate
@@ -59,15 +60,15 @@ def __init__(self, cell, kwargs, nlayers=1, reuse=False):
                                     for i in range(nlayers)])
 
     def __call__(self, x, y, state, scope=None):
-        with tf.variable_scope(scope or 'multi_input_rnn'):
+        with tf.compat.v1.variable_scope(scope or 'multi_input_rnn'):
             x_dim = int(x.get_shape()[1])
             y = tf.tile(tf.reshape(y, [-1, 1]), [1, x_dim])
             inputs = tf.concat([x, y], axis=1, name='inputs')
             output, nstate = self.rnncell(inputs, state)
-            with tf.variable_scope('proj'):
-                w = tf.get_variable('proj_weight',
+            with tf.compat.v1.variable_scope('proj'):
+                w = tf.compat.v1.get_variable('proj_weight',
                                     [self.cell.output_size.as_list()[0], x_dim])
-                b = tf.get_variable('proj_bias', [x_dim])
+                b = tf.compat.v1.get_variable('proj_bias', [x_dim])
                 x = tf.matmul(output, w) + b
         return x, nstate
@@ -97,19 +98,19 @@ def __init__(self, cell, kwargs, nlayers=1, reuse=False):
     def __call__(self, x, y, state, scope=None):
         hidden_size = self.cell.output_size.as_list()[0]
         batch_size = x.get_shape().as_list()[0]
-        with tf.variable_scope(scope or 'multi_input_rnn'):
+        with tf.compat.v1.variable_scope(scope or 'multi_input_rnn'):
             x_dim = int(x.get_shape()[1])
             y = tf.tile(tf.reshape(y, [-1, 1]), [1, x_dim])
             inputs = tf.concat([x, y], axis=1, name='inputs')
             output, nstate = self.rnncell(inputs, state)
             tot_dim = x_dim * (x_dim + 1)
-            with tf.variable_scope('proj'):
-                w = tf.get_variable('proj_weight', [hidden_size, tot_dim])
-                b = tf.get_variable('proj_bias', [tot_dim])
+            with tf.compat.v1.variable_scope('proj'):
+                w = tf.compat.v1.get_variable('proj_weight', [hidden_size, tot_dim])
+                b = tf.compat.v1.get_variable('proj_bias', [tot_dim])
                 out = tf.matmul(output, w) + b
             mean, var = tf.split(out, [x_dim, x_dim ** 2], axis=1)
             var = tf.reshape(var, [batch_size, x_dim, x_dim])
-            dist = tf.contrib.distributions.MultivariateNormalTriL(
+            dist = tfp.distributions.MultivariateNormalTriL(
                 mean, var, name='x_dist')
             x = dist.sample()
@@ -153,7 +154,7 @@ def __init__(self, name="lstm"):
         super(LSTM, self).__init__()
         self.name_ = name
-        self._template = tf.make_template(self.name_, self._build,
+        self._template = tf.compat.v1.make_template(self.name_, self._build,
                                           create_scope_now_=True)
         self._hidden_size = hidden_size
         self._forget_bias = forget_bias
@@ -267,25 +268,25 @@ def _create_batch_norm_variables(self, dtype):
         gamma_initializer = tf.constant_initializer(0.1)
 
         if self._use_batch_norm_h:
-            self._gamma_h = tf.get_variable(
+            self._gamma_h = tf.compat.v1.get_variable(
                 LSTM.GAMMA_H,
                 shape=[4 * self._hidden_size],
                 dtype=dtype,
                 initializer=(self._initializers.get(LSTM.GAMMA_H, gamma_initializer)))
         if self._use_batch_norm_x:
-            self._gamma_x = tf.get_variable(
+            self._gamma_x = tf.compat.v1.get_variable(
                 LSTM.GAMMA_X,
                 shape=[4 * self._hidden_size],
                 dtype=dtype,
                 initializer=(self._initializers.get(LSTM.GAMMA_X, gamma_initializer)))
         if self._use_batch_norm_c:
-            self._gamma_c = tf.get_variable(
+            self._gamma_c = tf.compat.v1.get_variable(
                 LSTM.GAMMA_C,
                 shape=[self._hidden_size],
                 dtype=dtype,
                 initializer=(
                     self._initializers.get(LSTM.GAMMA_C, gamma_initializer)))
-            self._beta_c = tf.get_variable(
+            self._beta_c = tf.compat.v1.get_variable(
                 LSTM.BETA_C,
                 shape=[self._hidden_size],
                 dtype=dtype,
@@ -304,23 +305,23 @@ def _create_gate_variables(self, input_shape, dtype):
         initializer = util.create_linear_initializer(equiv_input_size)
 
         if self._use_batch_norm_h or self._use_batch_norm_x:
-            self._w_h = tf.get_variable(
+            self._w_h = tf.compat.v1.get_variable(
                 LSTM.W_GATES + "_H",
                 shape=[self._hidden_size, 4 * self._hidden_size],
                 dtype=dtype,
                 initializer=self._initializers.get(LSTM.W_GATES, initializer))
-            self._w_x = tf.get_variable(
+            self._w_x = tf.compat.v1.get_variable(
                 LSTM.W_GATES + "_X",
                 shape=[input_size, 4 * self._hidden_size],
                 dtype=dtype,
                 initializer=self._initializers.get(LSTM.W_GATES, initializer))
         else:
-            self._w_xh = tf.get_variable(
+            self._w_xh = tf.compat.v1.get_variable(
                 LSTM.W_GATES,
                 shape=[self._hidden_size + input_size, 4 * self._hidden_size],
                 dtype=dtype,
                 initializer=self._initializers.get(LSTM.W_GATES, initializer))
-        self._b = tf.get_variable(
+        self._b = tf.compat.v1.get_variable(
             LSTM.B_GATES,
             shape=b_shape,
             dtype=dtype,
@@ -328,17 +329,17 @@ def _create_gate_variables(self, input_shape, dtype):
 
     def _create_peephole_variables(self, dtype):
         """Initialize the variables used for the peephole connections."""
-        self._w_f_diag = tf.get_variable(
+        self._w_f_diag = tf.compat.v1.get_variable(
             LSTM.W_F_DIAG,
             shape=[self._hidden_size],
             dtype=dtype,
             initializer=self._initializers.get(LSTM.W_F_DIAG))
-        self._w_i_diag = tf.get_variable(
+        self._w_i_diag = tf.compat.v1.get_variable(
             LSTM.W_I_DIAG,
             shape=[self._hidden_size],
             dtype=dtype,
             initializer=self._initializers.get(LSTM.W_I_DIAG))
-        self._w_o_diag = tf.get_variable(
+        self._w_o_diag = tf.compat.v1.get_variable(
             LSTM.W_O_DIAG,
             shape=[self._hidden_size],
            dtype=dtype,
diff --git a/util.py b/util.py
index 71582a7..7f5b45d 100755
--- a/util.py
+++ b/util.py
@@ -19,7 +19,7 @@ def create_model(sess, config, logger):
     if not config.save_path == None:
         if not os.path.exists(config.save_path):
             os.mkdir(config.save_path)
-        copyfile('config.json', os.path.join(config.save_path, 'config.json')) 
+        copyfile('config.json', os.path.join(config.save_path, 'config.json'))
 
     if config.opt_direction == 'max':
         problem_type = 'concave'
@@ -77,7 +77,7 @@ def create_model(sess, config, logger):
         model.saver.restore(sess, ckpt.model_checkpoint_path)
     else:
         logger.info('Creating Model with fresh parameters.')
-        sess.run(tf.global_variables_initializer())
+        sess.run(tf.compat.v1.global_variables_initializer())
     return model
 
 def load_model(sess, config, logger):
@@ -202,7 +202,7 @@ def trainable_initial_state(batch_size, state_size, dtype, initializers=None):
     for name, size, init in zip(names, flat_state_size, flat_initializer):
         shape_with_batch_dim = [1] + tensor_shape.as_shape(size).as_list()
-        initial_state_variable = tf.get_variable(
+        initial_state_variable = tf.compat.v1.get_variable(
             name, shape=shape_with_batch_dim, dtype=dtype, initializer=init)

         initial_state_variable_dims = initial_state_variable.get_shape().ndims
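
For reference, outside the patch itself: every hunk above applies the same mechanical substitution. Graph-mode symbols that newer TensorFlow releases flag as deprecated (tf.get_variable, tf.variable_scope, tf.Session, tf.train.Saver, tf.global_variables_initializer, tf.make_template, ...) are reached through the tf.compat.v1 namespace, and tf.contrib.distributions, removed together with the rest of tf.contrib in TF 2.0, is replaced by tensorflow_probability (tfp.distributions). A minimal standalone sketch of the pattern follows; the scope name 'demo', the variable 'w', and the toy distribution parameters are illustrative only and do not come from this repository.

    import tensorflow as tf
    import tensorflow_probability as tfp

    # Only needed when running under TF 2.x, where eager execution is the default;
    # on TF 1.15 the compat.v1 aliases work without it.
    tf.compat.v1.disable_v2_behavior()

    # Before: tf.variable_scope / tf.get_variable (emit deprecation warnings).
    # After:  the same graph-mode calls reached through tf.compat.v1.
    with tf.compat.v1.variable_scope('demo'):
        w = tf.compat.v1.get_variable('w', shape=[2, 2])

    # Before: tf.contrib.distributions.MultivariateNormalDiag
    # After:  tensorflow_probability
    dist = tfp.distributions.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 1.])

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        print(sess.run(w))               # variable created via compat.v1.get_variable
        print(sess.run(dist.sample()))   # one draw from the TFP distribution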