From 4910f1d09f64be8ce8b7c872f56b982e14ecba70 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 17:33:26 +0800
Subject: [PATCH 01/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index fe743f1..e045224 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -32,7 +32,7 @@ def loss(logits,seq_len,label):
     return loss
 
 def train_step(loss,global_step = None):
-    opt = tf.train.AdamOptimizer(FLAGS.step_rate).minimize(loss,global_step=global_step)
+    opt = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss,global_step=global_step)
 #    opt = tf.train.GradientDescentOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.RMSPropOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.MomentumOptimizer(FLAGS.step_rate,0.9).minimize(loss)
@@ -124,7 +124,7 @@ def __init__(self):
             self.log_dir = '/media/haotianteng/Linux_ex/GVM_model'
             self.sequence_len = 300
             self.batch_size = 750
-            self.step_rate = 1e-3
+            self.learning_rate = 1e-3
             self.max_steps = 20000
             self.k_mer = 1
             self.model_name = 'test'

From fafcd92926f8fc0b3719387c9ac2c0b72bad4c8b Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 17:35:42 +0800
Subject: [PATCH 02/15] Added gradient visual

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index e045224..7462a04 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -13,6 +13,7 @@
 #from rnn import rnn_layers
 from rnn import rnn_layers_one_direction
 import time,os
+from summary import variable_summaries
 
 def save_model():
     copy_tree(os.path.dirname(os.path.abspath(__file__)),FLAGS.log_dir+FLAGS.model_name+'/model')
@@ -90,7 +91,8 @@ def train():
         batch_x,seq_len,batch_y = train_ds.next_batch(FLAGS.batch_size)
         indxs,values,shape = batch_y
         feed_dict = {x:batch_x,seq_length:seq_len/ratio,y_indexs:indxs,y_values:values,y_shape:shape,training:True}
-        loss_val,_ = sess.run([ctc_loss,opt],feed_dict = feed_dict)
+        loss_val,gradients = sess.run([ctc_loss,opt],feed_dict = feed_dict)
+        variable_summaries(gradients)
         if i%10 ==0:
             global_step_val = tf.train.global_step(sess,global_step)
             valid_x,valid_len,valid_y = train_ds.next_batch(FLAGS.batch_size)
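Note on patch 02: `opt` is the `Operation` returned by `minimize()`, so the `gradients` value fetched from `sess.run` is `None` rather than gradient tensors, and calling `variable_summaries` inside the step loop adds fresh summary ops to the graph on every iteration. Summary ops are normally attached once, while the graph is built; a minimal sketch of that graph-time alternative, reusing the names defined in chiron_rcnn_train.py (the exact placement is illustrative):

    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)  # list of (gradient, variable) pairs
    for g, v in grads_and_vars:
        if g is not None:  # gradient is None for variables the loss does not touch
            variable_summaries(g)
    opt = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

Patches 13 and 15 below rework the code along these lines.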
From ed615044dc80f7293e925b1520f2a80568748737 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 17:37:35 +0800
Subject: [PATCH 03/15] Update README.md

---
 Chiron+weight_visual/README.md | 176 +--------------------------------
 1 file changed, 2 insertions(+), 174 deletions(-)

diff --git a/Chiron+weight_visual/README.md b/Chiron+weight_visual/README.md
index 193205f..585ae84 100644
--- a/Chiron+weight_visual/README.md
+++ b/Chiron+weight_visual/README.md
@@ -1,174 +1,2 @@
-# Chiron
-## A basecaller for Oxford Nanopore Technologies' sequencers
-Using a deep learning CNN+RNN+CTC structure to establish end-to-end basecalling for the nanopore sequencer.
-Built with **TensorFlow** and python 2.7.
-
-If you found Chiron useful, please consider to cite:
-> Teng, H., et al. (2017). Chiron: Translating nanopore raw signal directly into nucleotide sequence using deep learning.
-[bioRxiv 179531](https://www.biorxiv.org/content/early/2017/09/12/179531)
-
----
-
-## Install
-### Install using `pip` (recommended)
-If you currently have TensorFlow installed on your system, we would advise you to create a virtual environment to install Chiron into, this way there is no clash of versions etc.
-
-If you would like to do this, the best options would be [`virtualenv`](https://virtualenv.pypa.io/en/stable/installation/), the more user-friendly [`virtualenvwrapper`](https://virtualenvwrapper.readthedocs.io/en/latest/install.html), or through [anaconda](https://docs.continuum.io/anaconda/install/). After installing one of these and activating the virtual environment you will be installing Chiron into, continue with the rest of the installation instructions as normal.
-
-To install with `pip`:
-
-```
-pip install chiron
-```
-This will install Chiron, the CPU-only distribution of TensorFlow (and it's dependencies), and [`h5py`](https://github.com/h5py/h5py) (required for reading in `.fast5` files).
-
-**Note**: If you are after the GPU version, follow the steps in the following section.
-
-### Install from GitHub
-This is currently the best install method if you are wanting to run Chiron on in GPU mode (`pip install` version is coming).
-```
-git clone https://github.com/haotianteng/chiron.git
-cd chiron
-```
-You will also need to install dependencies.
-
-For CPU-version:
-```
-pip install tensorflow==1.0.1
-pip install h5py
-```
-For GPU-version(Nvidia GPU required):
-```
-pip install tensorflow-gpu==1.0.1
-pip install h5py
-```
-
-For alternate/detailed installation instructions for TensorFlow, see their [fantastic documentation](https://www.tensorflow.org/).
-
-## Basecall
-### If installed from `pip`:
-An example call to Chiron to run basecalling is:
-```
-chiron call -i <input_fast5_folder> -o <output_folder>
-```
-
-### If installed from GitHub:
-
-All Chiron functionality can be run from **entry.py** in the Chiron folder. (You might like to also add the path to Chiron into your path for ease of running).
-
-```
-python chiron/entry.py call -i <input_fast5_folder> -o <output_folder>
-```
-
-### Test run
-
-We provide 5 sample fast5 files (courtesy of [nanonet](https://github.com/nanoporetech/nanonet)) in the GitHub repository which you can run a test on. These are located in `chiron/example_data/`. From inside the Chiron repository:
-```
-python chiron/entry.py call -i chiron/example_folder/ -o <output_folder>
-```
-
-### Output
-`chiron call` will create five folders in `<output_folder>` called `raw`, `result`, `segments`, `meta`, and `reference`.
-
-* `result`: fastq/fasta files with the same name as the fast5 file they contain the basecalling result for. To create a single, merged version of these fasta files, try something like `paste --delimiter=\\n --serial result/*.fasta > merged.fasta`
-* `raw`: Contains a file for each fast5 file with it's raw signal. This file format is an list of integers. i.e `544 554 556 571 563 472 467 487 482 513 517 521 495 504 500 520 492 506 ...`
-* `segments`: Contains the segments basecalled from each fast5 file.
-* `meta`: Contains the meta information for each read (read length, basecalling rate etc.). Each file has the same name as it's fast5 file.
-* `reference`: Contains the reference sequence (if any).
-
-### Output format
-With -e flag to output fastq file(default) with quality score or fasta file.
-Example:
-chiron call -i <input_fast5_folder> -o <output_folder> -e fastq
-
-chiron call -i <input_fast5_folder> -o <output_folder> -e fasta
-
-## Training
-The default DNA model trained on R9.4 protocol with a mix of Lambda and E.coli dataset, if the basecalling result is not satisfying, you can train a model on your own training data set.
-
-#### Hardware request:
-Recommend training on GPU with TensorFlow - usually 8GB RAM (GPU) is required.
-
-#### Prepare the training data set.
-Using raw.py script to extract the signal and label from the re-squiggled fast5 file.
-(For how to re-squiggle fast5 file, check [here, nanoraw re-squiggle](https://nanoraw.readthedocs.io/en/latest/resquiggle.html#example-commands))
-
-#### If installed from `pip`:
-```
-chiron export -i <input_fast5_folder> -o <output_folder>
-```
-
-or directly use the raw.py script in utils.
-
-```
-python chiron/utils/raw.py --input <input_fast5_folder> --output <output_folder>
-```
-`.signal` file and correspond `.label` file, a typical file format:
-
-`.signal` file format:
-`544 554 556 571 563 472 467 487 482 513 517 521 495 504 500 520 492 506 ...`
-i.e the file must contain only one row/column of raw signal numbers.
-
-`.label` file format:
-```
-70 174 A
-174 184 T
-184 192 A
-192 195 G
-195 204 C
-204 209 A
-209 224 C
-...
-```
-
-Each line represents a DNA base pair in the Pore.
-* 1st column: Start position of the current nucleotide, position related to the signal vector (index count starts from zero).
-* 2nd column: End position of the current nucleotide.
-* 3rd column: Nucleotide, for DNA: A, G, C, or T. Although, there is no reason you could not use other labels.
-
-#### Adjust Chiron parameters
-Go in to `chiron/chiron_rcnn_train.py` and change the hyper parameters in the `FLAGS` class.
-
-```py
-class Flags():
-    def __init__(self):
-        self.home_dir = "/home/haotianteng/UQ/deepBNS/"
-        self.data_dir = self.home_dir + 'data/Lambda_R9.4/raw/'
-        self.log_dir = self.home_dir+'/chiron/log/'
-        self.sequence_len = 200
-        self.batch_size = 100
-        self.step_rate = 1e-3
-        self.max_steps = 2500
-        self.k_mer = 1
-        self.model_name = 'crnn5+5_res_moving_norm'
-        self.retrain = False
-```
-
-`data_dir`: The folder containing your signal and label files.
-`log_dir`: The folder where you want to save the model.
-`sequence_len`: The length of the segment you want to separate the sequence into. Longer length requires larger RAM.
-`batch_size`: The batch size.
-`step_rate`: Learning rate of the optimizer.
-`max_step`: Maximum step of the optimizer.
-`k_mer`: Chiron supports learning based on k-mer instead of a single nucleotide, this should be an odd number, even numbers will cause an error.
-`model_name`: The name of the model. The record will be stored in the directory `log_dir/model_name/`
-`retrain`: If this is a new model, or you want to load the model you trained before. The model will be loaded from `log_dir/model_name/`
-
-### Train
-
-```
-source activate tensorflow
-```
-#### If installed from `pip`:
-```
-chiron train --data_dir <data_dir> --log_dir <log_dir> --model_name <model_name>
-```
-
-or run directly by
-
-```
-python chiron/chiron_rcnn_train.py
-```
+# Deepore
+This modified version of Chiron lets us visualise both the weights and the gradients, to address training problems we have observed in Chiron, such as local minima and bottlenecks during training.
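The point of these summaries is TensorBoard: assuming the event files end up under `FLAGS.log_dir`, running `tensorboard --logdir` against that directory exposes the weight and gradient distributions this fork records.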
From 13db567e8fd43f7a7f199bc34a76e2d721263639 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 17:57:01 +0800
Subject: [PATCH 04/15] Update summary.py

---
 Chiron+weight_visual/chiron/summary.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Chiron+weight_visual/chiron/summary.py b/Chiron+weight_visual/chiron/summary.py
index 56213fa..bc418d6 100644
--- a/Chiron+weight_visual/chiron/summary.py
+++ b/Chiron+weight_visual/chiron/summary.py
@@ -1,3 +1,5 @@
+import tensorflow as tf
+
 def variable_summaries(var):
     """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
     with tf.name_scope('summaries'):

From 4927b2ee594962a4f0f10db51a41610e97bd3cfa Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 18:01:02 +0800
Subject: [PATCH 05/15] Update rnn.py

---
 Chiron+weight_visual/chiron/rnn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chiron+weight_visual/chiron/rnn.py b/Chiron+weight_visual/chiron/rnn.py
index 4230408..997529b 100644
--- a/Chiron+weight_visual/chiron/rnn.py
+++ b/Chiron+weight_visual/chiron/rnn.py
@@ -60,5 +60,5 @@ def rnn_layers_one_direction(x,seq_length,training,hidden_num=200,layer_num = 3,
     lasth_rs = tf.reshape(lasth,[batch_size*max_time,hidden_num],name = 'lasth_rs')
     logits = tf.reshape(tf.nn.bias_add(tf.matmul(lasth_rs,weight_class),bias_class),[batch_size,max_time,class_n],name = "rnn_logits_rs")
     variable_summaries(weight_class)
-    variable_summaries(biases_out)
+    variable_summaries(bias_class)
     return logits

From ba7be0e07a7ce374be8510aea912cc4571d94afe Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 18:03:39 +0800
Subject: [PATCH 06/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 7462a04..11aad84 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -121,13 +121,14 @@ def run(args):
 if __name__ == "__main__":
     class Flags():
         def __init__(self):
-            self.data_dir = '/media/haotianteng/Linux_ex/Nanopore_data/Lambda_R9.4/raw'
-            self.cache_dir = '/media/haotianteng/Linux_ex/Nanopore_data/Lambda_R9.4/cache'
-            self.log_dir = '/media/haotianteng/Linux_ex/GVM_model'
+            self.data_dir = '/home/docker/raw' #human
+            self.data_dir = '/home/docker/ecoli' #ecoli
+            self.cache_dir = '/home/docker/out/cache'
+            self.log_dir = '/home/docker/out/logs'
             self.sequence_len = 300
-            self.batch_size = 750
-            self.learning_rate = 1e-3
-            self.max_steps = 20000
+            self.batch_size = 64
+            self.step_rate = 1e-3
+            self.max_steps = 10000
             self.k_mer = 1
             self.model_name = 'test'
             self.retrain =False
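Two details of this Flags class are easy to miss. Python keeps only the last assignment, so the `#human` data directory is immediately overwritten by the `#ecoli` line below it (patch 10 comments one of them out). Also, after this patch the optimizer from patch 01 still reads `FLAGS.learning_rate` while the class now defines `step_rate`, so building the graph would fail until patch 07 renames the flag (patch 09 then renames both sides back to `step_rate`). A minimal illustration of the mismatch:

    FLAGS = Flags()
    # raises AttributeError: no attribute 'learning_rate'
    opt = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)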
From efcf322939af1af8aea7abeee27740fd4ba355401 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 18:04:10 +0800
Subject: [PATCH 07/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 11aad84..0e9ea9a 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -127,7 +127,7 @@ def __init__(self):
             self.log_dir = '/home/docker/out/logs'
             self.sequence_len = 300
             self.batch_size = 64
-            self.step_rate = 1e-3
+            self.learning_rate = 1e-3
             self.max_steps = 10000
             self.k_mer = 1
             self.model_name = 'test'

From 5ddc98ff071606087bff3845d7730d6371120487 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 18:07:50 +0800
Subject: [PATCH 08/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 0e9ea9a..8936ea6 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -91,8 +91,8 @@ def train():
         batch_x,seq_len,batch_y = train_ds.next_batch(FLAGS.batch_size)
         indxs,values,shape = batch_y
         feed_dict = {x:batch_x,seq_length:seq_len/ratio,y_indexs:indxs,y_values:values,y_shape:shape,training:True}
-        loss_val,gradients = sess.run([ctc_loss,opt],feed_dict = feed_dict)
-        variable_summaries(gradients)
+        loss_val,grad = sess.run([ctc_loss,opt],feed_dict = feed_dict)
+        variable_summaries(grad)
         if i%10 ==0:
             global_step_val = tf.train.global_step(sess,global_step)
             valid_x,valid_len,valid_y = train_ds.next_batch(FLAGS.batch_size)

From 1cc8e94c67b6a94fb6a0a867c03d3012a99087e5 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 18:09:08 +0800
Subject: [PATCH 09/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 8936ea6..5e75cc4 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -33,7 +33,7 @@ def loss(logits,seq_len,label):
     return loss
 
 def train_step(loss,global_step = None):
-    opt = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss,global_step=global_step)
+    opt = tf.train.AdamOptimizer(FLAGS.step_rate).minimize(loss,global_step=global_step)
 #    opt = tf.train.GradientDescentOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.RMSPropOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.MomentumOptimizer(FLAGS.step_rate,0.9).minimize(loss)
@@ -127,7 +127,7 @@ def __init__(self):
             self.log_dir = '/home/docker/out/logs'
             self.sequence_len = 300
             self.batch_size = 64
-            self.learning_rate = 1e-3
+            self.step_rate = 1e-3
             self.max_steps = 10000
             self.k_mer = 1
             self.model_name = 'test'

From 80ee7047788b8b21d0658248b7730b0034158fdf Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 22:48:13 +0800
Subject: [PATCH 10/15] Update chiron_rcnn_train.py

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 5e75cc4..8b0fb1a 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -122,7 +122,7 @@ def run(args):
     class Flags():
         def __init__(self):
             self.data_dir = '/home/docker/raw' #human
-            self.data_dir = '/home/docker/ecoli' #ecoli
+            #self.data_dir = '/home/docker/ecoli' #ecoli
             self.cache_dir = '/home/docker/out/cache'
             self.log_dir = '/home/docker/out/logs'
             self.sequence_len = 300

From 88a8f279305c47549732e1ba05ce3fc6cd2a033a Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 23:48:30 +0800
Subject: [PATCH 11/15] Update README.md

---
 README.md | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index c9a778f..79201b1 100644
--- a/README.md
+++ b/README.md
@@ -29,10 +29,15 @@
 we modified the docker from `https://github.com/anurag/fastai-course-1.git`
 
 ```
-nvidia-docker run -it \
+DATADIR=/data/nanopore
+
+nvidia-docker run \
+    --rm -it \
     --entrypoint /bin/zsh \
-    -v /data/nanopore/new/fast5Dir/:/data \
-    -p 8889:8888 \
+    -v $DATADIR:/data \
+    -p 8890:8888 \
+    --name haruhi \
+    -w /home/docker \
     etheleon/chiron
 ```
 
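On the new flags in patch 11: `--rm` discards the container when the shell exits, `--name haruhi` gives it a stable handle, and `-w /home/docker` sets the directory the shell starts in. With a named container, a second shell can be attached to the same training session via `docker exec -it haruhi /bin/zsh`.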
From 95d55a7ebef05469d1ce28084a2e4979ebfcd1e4 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sat, 4 Nov 2017 23:57:29 +0800
Subject: [PATCH 12/15] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 79201b1..2709c51 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,7 @@ nvidia-docker run \
 To train deepore we need to run chiron_rcnn_train.py
 
 ```
-cd $HOME
+export CUDA_VISIBLE_DEVICES="0"
 python Chiron/chiron/chiron_rcnn_train.py
 ```
 

From 3146c1b6cc57e8b55df2ca6006997b0f205effc9 Mon Sep 17 00:00:00 2001
From: etheleon
Date: Sun, 5 Nov 2017 03:00:02 +0000
Subject: [PATCH 13/15] Debugging

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 8b0fb1a..87dff0e 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -33,10 +33,13 @@ def loss(logits,seq_len,label):
     return loss
 
 def train_step(loss,global_step = None):
-    opt = tf.train.AdamOptimizer(FLAGS.step_rate).minimize(loss,global_step=global_step)
+    opt = tf.train.AdamOptimizer(FLAGS.step_rate)
 #    opt = tf.train.GradientDescentOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.RMSPropOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.MomentumOptimizer(FLAGS.step_rate,0.9).minimize(loss)
+    grad = opt.compute_gradients(loss)
+    tf.summary.scalar('grad',tf.reduce_mean(grad[1]))
+    opt = opt.minimize(loss,global_step=global_step)
     return opt
 def prediction(logits,seq_length,label,top_paths=1):
     """
@@ -91,8 +94,7 @@ def train():
         batch_x,seq_len,batch_y = train_ds.next_batch(FLAGS.batch_size)
         indxs,values,shape = batch_y
         feed_dict = {x:batch_x,seq_length:seq_len/ratio,y_indexs:indxs,y_values:values,y_shape:shape,training:True}
-        loss_val,grad = sess.run([ctc_loss,opt],feed_dict = feed_dict)
-        variable_summaries(grad)
+        loss_val,_ = sess.run([ctc_loss,opt],feed_dict = feed_dict)
         if i%10 ==0:
             global_step_val = tf.train.global_step(sess,global_step)
             valid_x,valid_len,valid_y = train_ds.next_batch(FLAGS.batch_size)

From 6b205003cbabdbb5fb8fd79412cca28924926e26 Mon Sep 17 00:00:00 2001
From: Ang Ming Liang
Date: Sun, 5 Nov 2017 11:38:38 +0800
Subject: [PATCH 14/15] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 2709c51..d0716ec 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,7 @@ nvidia-docker run \
 To train deepore we need to run chiron_rcnn_train.py
 
 ```
-export CUDA_VISIBLE_DEVICES="0"
+export CUDA_VISIBLE_DEVICES="1"
 python Chiron/chiron/chiron_rcnn_train.py
 ```
 
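`CUDA_VISIBLE_DEVICES` masks which GPUs the process can see, so `"1"` pins training to the machine's second GPU, which TensorFlow then addresses as device 0; `nvidia-smi` on the host shows which physical index is idle.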
From 2c638e77bb0b238dde7a4d64cec0e91ff659e279 Mon Sep 17 00:00:00 2001
From: etheleon
Date: Sat, 11 Nov 2017 12:33:15 +0000
Subject: [PATCH 15/15] Update gradient visual

---
 Chiron+weight_visual/chiron/chiron_rcnn_train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chiron+weight_visual/chiron/chiron_rcnn_train.py b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
index 87dff0e..341fb99 100644
--- a/Chiron+weight_visual/chiron/chiron_rcnn_train.py
+++ b/Chiron+weight_visual/chiron/chiron_rcnn_train.py
@@ -38,7 +38,7 @@ def train_step(loss,global_step = None):
 #    opt = tf.train.RMSPropOptimizer(FLAGS.step_rate).minimize(loss)
 #    opt = tf.train.MomentumOptimizer(FLAGS.step_rate,0.9).minimize(loss)
     grad = opt.compute_gradients(loss)
-    tf.summary.scalar('grad',tf.reduce_mean(grad[1]))
+    tf.summary.scalar('grad',tf.reduce_mean(grad[0][0]))
     opt = opt.minimize(loss,global_step=global_step)
     return opt
 def prediction(logits,seq_length,label,top_paths=1):
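A closing note on the gradient scalar: `compute_gradients` returns a list of `(gradient, variable)` pairs, so the `grad[1]` of patch 13 would try to average the second pair rather than a single gradient tensor, and the `grad[0][0]` here covers only the gradient of the first variable. A sketch of summarising every gradient, plus the merge step TensorBoard needs before any of these scalars appear (summary names are illustrative, not part of the patches):

    grad = opt.compute_gradients(loss)  # as in train_step above
    for g, v in grad:
        if g is not None:  # skip variables the loss does not reach
            tf.summary.scalar(v.name.replace(':', '_') + '/grad_mean', tf.reduce_mean(g))
    merged = tf.summary.merge_all()
    # Fetch `merged` in sess.run and pass the result to a
    # tf.summary.FileWriter(FLAGS.log_dir, sess.graph) so the event files
    # that TensorBoard reads are actually written.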