diff --git a/examples/keras_recipes/float8_training_and_inference_with_transformer.py b/examples/keras_recipes/float8_quantization_aware_training_and_inference_with_transformer.py
similarity index 98%
rename from examples/keras_recipes/float8_training_and_inference_with_transformer.py
rename to examples/keras_recipes/float8_quantization_aware_training_and_inference_with_transformer.py
index 13490cb880..27ab23681b 100644
--- a/examples/keras_recipes/float8_training_and_inference_with_transformer.py
+++ b/examples/keras_recipes/float8_quantization_aware_training_and_inference_with_transformer.py
@@ -1,9 +1,9 @@
"""
-Title: Float8 training and inference with a simple Transformer model
+Title: Float8 quantization-aware training and inference with a simple Transformer model
Author: [Hongyu Chiu](https://github.com/james77777778)
Date created: 2024/05/14
Last modified: 2024/05/14
-Description: Train a simple Transformer model with the float8 quantization.
+Description: Train a simple Transformer model with float8 quantization-aware training.
Accelerator: GPU
"""
diff --git a/examples/keras_recipes/ipynb/float8_training_and_inference_with_transformer.ipynb b/examples/keras_recipes/ipynb/float8_quantization_aware_training_and_inference_with_transformer.ipynb
similarity index 99%
rename from examples/keras_recipes/ipynb/float8_training_and_inference_with_transformer.ipynb
rename to examples/keras_recipes/ipynb/float8_quantization_aware_training_and_inference_with_transformer.ipynb
index b1736a91e0..6e2bfd1692 100644
--- a/examples/keras_recipes/ipynb/float8_training_and_inference_with_transformer.ipynb
+++ b/examples/keras_recipes/ipynb/float8_quantization_aware_training_and_inference_with_transformer.ipynb
@@ -6,12 +6,12 @@
"colab_type": "text"
},
"source": [
- "# Float8 training and inference with a simple Transformer model\n",
+ "# Float8 Quantization Aware Training and Inference with a simple Transformer model\n",
"\n",
"**Author:** [Hongyu Chiu](https://github.com/james77777778)
\n",
"**Date created:** 2024/05/14
\n",
"**Last modified:** 2024/05/14
\n",
- "**Description:** Train a simple Transformer model with the float8 quantization."
+ "**Description:** Train a simple Transformer model with float8 quantization-aware training."
]
},
{
diff --git a/examples/keras_recipes/md/float8_training_and_inference_with_transformer.md b/examples/keras_recipes/md/float8_quantization_aware_training_and_inference_with_transformer.md
similarity index 99%
rename from examples/keras_recipes/md/float8_training_and_inference_with_transformer.md
rename to examples/keras_recipes/md/float8_quantization_aware_training_and_inference_with_transformer.md
index 57b7c551cc..ef11e61ab8 100644
--- a/examples/keras_recipes/md/float8_training_and_inference_with_transformer.md
+++ b/examples/keras_recipes/md/float8_quantization_aware_training_and_inference_with_transformer.md
@@ -1,9 +1,9 @@
-# Float8 training and inference with a simple Transformer model
+# Float8 quantization-aware training and inference with a simple Transformer model
**Author:** [Hongyu Chiu](https://github.com/james77777778)
**Date created:** 2024/05/14
**Last modified:** 2024/05/14
-**Description:** Train a simple Transformer model with the float8 quantization.
+**Description:** Train a simple Transformer model with float8 quantization-aware training.
-[**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/float8_training_and_inference_with_transformer.ipynb) •
-[**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/float8_training_and_inference_with_transformer.py)
+[**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/float8_quantization_aware_training_and_inference_with_transformer.ipynb) •
+[**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/float8_quantization_aware_training_and_inference_with_transformer.py)
diff --git a/scripts/examples_master.py b/scripts/examples_master.py
index 6a3c36cbe8..2206f42591 100644
--- a/scripts/examples_master.py
+++ b/scripts/examples_master.py
@@ -958,8 +958,8 @@
"keras_3": True,
},
{
- "path": "float8_training_and_inference_with_transformer",
- "title": "Float8 training and inference with a simple Transformer model",
+ "path": "float8_quantization_aware_training_and_inference_with_transformer",
+ "title": "Float8 Quantization-Aware Training and Inference with a simple Transformer model",
"subcategory": "Keras usage tips",
"keras_3": True,
},
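For reference, the workflow the renamed example covers amounts to quantizing a built model to float8 and then training as usual. The sketch below is illustrative only, not taken from the example: it uses a hypothetical toy model in place of the example's Transformer and assumes Keras 3, where `Model.quantize("float8")` switches supported layers (such as `Dense` and `EinsumDense`) to float8 quantization-aware compute.

```python
# Minimal sketch, assuming Keras 3 with float8 quantization support.
# The model below is a hypothetical stand-in for the example's Transformer.
import keras

model = keras.Sequential(
    [
        keras.Input(shape=(16,)),
        keras.layers.Dense(32, activation="relu"),
        keras.layers.Dense(1, activation="sigmoid"),
    ]
)

# Convert supported layers to float8 quantization-aware training;
# subsequent fit()/predict() calls then run with FP8 compute.
model.quantize("float8")

model.compile(
    optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# model.fit(x_train, y_train, epochs=1)  # train as usual after quantization
```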