@@ -815,55 +815,55 @@ Intel® Neural Compressor validated examples with multiple compression technique
 <td>ResNet50 V1.5</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/resnet50/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/resnet50/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/resnet50/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/resnet50/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>ResNet50 V1.5 MLPerf</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/resnet50/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/resnet50/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/resnet50/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/resnet50/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>VGG16</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/vgg16/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/vgg16/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/vgg16/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/vgg16/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>MobileNet V2</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/mobilenet_v2/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/mobilenet_v2/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/mobilenet_v2/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/mobilenet_v2/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>MobileNet V3 MLPerf</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/mobilenet_v3/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/mobilenet_v3/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>AlexNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/alexnet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/alexnet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/alexnet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/alexnet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>CaffeNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/caffenet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/caffenet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/caffenet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/caffenet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>DenseNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/densenet/quantization/ptq">qlinearops</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/densenet/quantization/ptq">qlinearops</a></td>
 </tr>
 <tr>
 <td>EfficientNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/efficientnet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/efficientnet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/efficientnet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/efficientnet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>FCN</td>
@@ -875,37 +875,37 @@ Intel® Neural Compressor validated examples with multiple compression technique
 <td>GoogleNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/googlenet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/googlenet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/googlenet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/googlenet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>Inception V1</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/inception/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/inception/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/inception/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/inception/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>MNIST</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/mnist/quantization/ptq">qlinearops</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/mnist/quantization/ptq">qlinearops</a></td>
 </tr>
 <tr>
 <td>MobileNet V2 (ONNX Model Zoo)</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/mobilenet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/mobilenet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/mobilenet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/mobilenet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>ResNet50 V1.5 (ONNX Model Zoo)</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>ShuffleNet V2</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/shufflenet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/shufflenet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/shufflenet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/shufflenet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>SqueezeNet</td>
@@ -917,19 +917,19 @@ Intel® Neural Compressor validated examples with multiple compression technique
 <td>VGG16 (ONNX Model Zoo)</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/vgg16/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/vgg16/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/vgg16/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/vgg16/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>ZFNet</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/zfnet/quantization/ptq">qlinearops</a> / <a href="./onnxrt/image_recognition/onnx_model_zoo/zfnet/quantization/ptq">qdq</a></td>
+<td><a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/zfnet/quantization/ptq">qlinearops</a> / <a href="https://github.com/intel/neural-compressor/tree/old_api_examples/examples/onnxrt/image_recognition/onnx_model_zoo/zfnet/quantization/ptq">qdq</a></td>
 </tr>
 <tr>
 <td>ArcFace</td>
 <td>Image Recognition</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/image_recognition/onnx_model_zoo/arcface/quantization/ptq">qlinearops</a></td>
+<td><a href="./onnxrt/body_analysis/onnx_model_zoo/arcface/quantization/ptq">qlinearops</a></td>
 </tr>
 <tr>
 <td>BERT base MRPC</td>
@@ -1109,7 +1109,7 @@ Intel® Neural Compressor validated examples with multiple compression technique
 <td>Emotion FERPlus</td>
 <td>Body Analysis</td>
 <td>Post-Training Static Quantization</td>
-<td><a href="./onnxrt/body_analysis/onnx_model_zoo/emotion_ferplus/quantization/ptq">qlinearops</a></td>
+<td><a href="/onnxrt/body_analysis/onnx_model_zoo/emotion_ferplus/quantization/ptq">qlinearops</a></td>
 </tr>
 <tr>
 <td>Ultra Face</td>