Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
sauvegarde/
save/
19 changes: 19 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Base image with Python 3.6 preinstalled (required by the pinned TF 1.10 / Keras 2.2 stack)
FROM python:3.6-slim

# Refresh the package index and install the toolchain needed to build
# native Python extensions, then clean the apt cache to keep the image small
RUN apt-get update && apt-get install -y \
build-essential \
&& apt-get clean

# Install TensorFlow and Keras at the exact versions the code targets (TF 1.x API)
RUN pip install tensorflow==1.10.1 keras==2.2.2

# Create a directory for saving experiment results
RUN mkdir -p /app/save

# Set the working directory
WORKDIR /app

# Default command: open an interactive terminal
CMD ["bash"]
Binary file not shown.
Binary file added alg/__pycache__/vae_new.cpython-312.pyc
Binary file not shown.
Binary file added alg/__pycache__/vae_new.cpython-36.pyc
Binary file not shown.
10 changes: 10 additions & 0 deletions cleverhans/cleverhans.egg-info/PKG-INFO
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
Metadata-Version: 2.1
Name: cleverhans
Version: 1.0.0
Summary: UNKNOWN
Home-page: https://github.com/tensorflow/cleverhans
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
Provides-Extra: tf_gpu
Provides-Extra: tf
15 changes: 15 additions & 0 deletions cleverhans/cleverhans.egg-info/SOURCES.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
README.md
setup.py
cleverhans/__init__.py
cleverhans/attacks.py
cleverhans/attacks_tf.py
cleverhans/model.py
cleverhans/utils.py
cleverhans/utils_keras.py
cleverhans/utils_mnist.py
cleverhans/utils_tf.py
cleverhans.egg-info/PKG-INFO
cleverhans.egg-info/SOURCES.txt
cleverhans.egg-info/dependency_links.txt
cleverhans.egg-info/requires.txt
cleverhans.egg-info/top_level.txt
1 change: 1 addition & 0 deletions cleverhans/cleverhans.egg-info/dependency_links.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

10 changes: 10 additions & 0 deletions cleverhans/cleverhans.egg-info/requires.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
nose
pycodestyle
scipy
matplotlib

[tf]
tensorflow>=1.0.0

[tf_gpu]
tensorflow-gpu>=1.0.0
1 change: 1 addition & 0 deletions cleverhans/cleverhans.egg-info/top_level.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
cleverhans
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
25 changes: 25 additions & 0 deletions cleverhans/cleverhans/utils_mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,38 @@ def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
assert isinstance(test_start, int)
assert isinstance(test_end, int)

'''old
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
X_train = np.vstack((mnist.train.images, mnist.validation.images))
Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
X_test = mnist.test.images
Y_test = mnist.test.labels

X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]'''

from keras.datasets import fashion_mnist

# Load data from keras
(X_train, Y_train), (X_test, Y_test) = fashion_mnist.load_data()

# Normalize data to [0, 1] range
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0

# Add channel dimension (28x28 -> 28x28x1)
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)

# One-hot encode labels
from keras.utils import to_categorical
Y_train = to_categorical(Y_train, 10)
Y_test = to_categorical(Y_test, 10)

# Apply slicing
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added models/__pycache__/convnet.cpython-312.pyc
Binary file not shown.
Binary file added models/__pycache__/convnet.cpython-36.pyc
Binary file not shown.
Binary file added models/__pycache__/mlp.cpython-312.pyc
Binary file not shown.
Binary file added models/__pycache__/mlp.cpython-36.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
107 changes: 107 additions & 0 deletions test_attacks/correct_indexes.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np
import tensorflow as tf
import os
import pickle
from tensorflow.python.platform import flags
import sys

sys.path.append('../utils/')
sys.path.append('../cleverhans/')
from cleverhans.utils import set_log_level
from model_eval import model_eval

import keras.backend
sys.path.append('load/')
from load_classifier import load_classifier

FLAGS = flags.FLAGS

def extract_correct_indices(data_name, model_name, batch_size=128):
    """Evaluate a classifier on clean test data and save the indices of the
    correctly classified test images.

    :param data_name: dataset identifier ('mnist', 'cifar10' or 'plane_frog').
        NOTE(review): with the patched cleverhans loader, 'mnist' appears to
        load Fashion-MNIST — confirm against cleverhans.utils_mnist.data_mnist.
    :param model_name: name of the classifier resolved by load_classifier.
    :param batch_size: mini-batch size used during evaluation.
    :return: numpy array of test-set indices the model classifies correctly;
        also pickled to correct_indices/<data_name>_<model_name>_correct_indices.pkl.
    """
    # Fix the TF graph-level seed so repeated runs evaluate identically
    tf.set_random_seed(1234)

    # Create a TF session that grows GPU memory on demand instead of
    # reserving it all upfront
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    print("Created TensorFlow session.")
    #set_log_level(logging.DEBUG)

    # Load the requested dataset (train split is loaded but unused here)
    if data_name == 'mnist':
        from cleverhans.utils_mnist import data_mnist
        X_train, Y_train, X_test, Y_test = data_mnist(train_start=0, train_end=60000,
                                                      test_start=0, test_end=10000)
    elif data_name in ['cifar10', 'plane_frog']:
        from import_data_cifar10 import load_data_cifar10
        labels = None
        if data_name == 'plane_frog':
            # 'plane_frog' is a two-class CIFAR-10 subset (classes 0 and 6)
            labels = [0, 6]
        datapath = '../cifar_data/'
        X_train, X_test, Y_train, Y_test = load_data_cifar10(datapath, labels=labels)
    else:
        raise ValueError("Unsupported dataset: {}".format(data_name))

    # Infer input geometry and class count from the loaded data
    img_rows, img_cols, channels = X_test[0].shape
    nb_classes = Y_test.shape[1]

    # Placeholders for images and one-hot labels
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))

    # Load the classifier into this session
    model = load_classifier(sess, model_name, data_name)

    # Bayesian ('bnn') models keep stochastic sampling active at eval time
    # (learning_phase=1); deterministic models run in inference mode (0)
    if 'bnn' not in model_name:
        keras.backend.set_learning_phase(0)
    else:
        keras.backend.set_learning_phase(1)

    # Pre-softmax model outputs (logits)
    preds = model.predict(x, softmax=False)

    # Evaluate accuracy and also retrieve per-example predictions
    eval_params = {'batch_size': batch_size}
    accuracy, y_pred_clean = model_eval(sess, x, y, preds, X_test, Y_test,
                                        args=eval_params, return_pred=True)
    print('Test accuracy on legitimate test examples: {:.2f}%'.format(accuracy * 100))

    # Keep only the indices where predicted class == true class
    correct_prediction = (np.argmax(Y_test, axis=1) == np.argmax(y_pred_clean, axis=1))
    correct_indices = np.where(correct_prediction)[0]
    print('Number of correctly classified images: {}/{}'.format(len(correct_indices), len(X_test)))

    # Persist the indices so attack scripts can restrict themselves to
    # examples the model already gets right on clean data
    output_dir = 'correct_indices'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_file = os.path.join(output_dir, f'{data_name}_{model_name}_correct_indices.pkl')
    with open(output_file, 'wb') as f:
        pickle.dump(correct_indices, f)
    print(f"Correct indices saved to {output_file}")

    return correct_indices


if __name__ == '__main__':
    # Command-line flags; values are parsed lazily on first FLAGS attribute access
    flags.DEFINE_string('data_name', 'mnist', 'Dataset name (e.g., mnist, cifar10)')
    flags.DEFINE_string('model_name', 'bayes_K10_A', 'Model name to evaluate')
    flags.DEFINE_integer('batch_size', 128, 'Batch size for evaluation')

    # Run the extraction with the parsed flag values
    args = FLAGS
    extract_correct_indices(data_name=args.data_name, model_name=args.model_name, batch_size=args.batch_size)
4 changes: 3 additions & 1 deletion test_attacks/detect_attacks_logp.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,9 @@
import os, sys, pickle, argparse
sys.path.append('../utils/')
from model_eval import model_eval
from scipy.misc import logsumexp
#from scipy.misc import logsumexp
from scipy.special import logsumexp

import keras.backend
sys.path.append('load/')
from load_classifier import load_classifier
Expand Down
Binary file not shown.
Binary file not shown.
14 changes: 13 additions & 1 deletion test_attacks/load/load_bayes_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,15 +94,27 @@ def bayes_classifier(x, enc, dec, ll, dimY, dimZ, lowerbound, K = 1, beta=1.0, u
if use_mean: K=1
enc_conv, enc_mlp = enc
fea = enc_conv(x)
N = x.get_shape().as_list()[0]
N = x.get_shape().as_list()[0]
#N = tf.shape(x)[0] # Récupère la taille dynamique du batch

logpxy = []
if no_z:
z_holder = tf.zeros([N, dimZ])
K = 1
else:
z_holder = None

print(f"N: {N}, dimY: {dimY}")
assert N is not None, "N (batch size) is None. Check how it is being computed."
assert dimY is not None, "dimY (number of classes) is None. Ensure it is set correctly."

for i in range(dimY):
y = np.zeros([N, dimY]); y[:, i] = 1; y = tf.constant(np.asarray(y, dtype='f'))

#y = tf.one_hot(indices=i, depth=dimY, on_value=1.0, off_value=0.0)
#y = tf.tile(tf.expand_dims(y, axis=0), [N, 1]) # Répète le vecteur pour chaque exemple du batch


bound = lowerbound(x, fea, y, enc_mlp, dec, ll, K, IS=False, beta=beta,
use_mean=use_mean, fix_samples=fix_samples, seed=seed, z=z_holder)
logpxy.append(tf.expand_dims(bound, 1))
Expand Down
91 changes: 91 additions & 0 deletions test_attacks/mosaique.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os

# Pickled attack results to visualise
path_to_results = 'raw_attack_results/bayes_K10_A_cnn/mnist_fgsm_eps0.10_untargeted.pkl'
output_dir = 'adversarial_images'

# Ensure the output directory exists
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# (adv, true_ys, adv_ys, adv_logits) as pickled by the attack script
with open(path_to_results, 'rb') as f:
    adv, true_ys, adv_ys, adv_logits = pickle.load(f)

# Load the original test images for the side-by-side mosaic.
# NOTE(review): assumes adv[i] was crafted from X_test[i] (same ordering) —
# confirm against the attack script that produced the pickle.
from cleverhans.utils_mnist import data_mnist
_, _, X_test, _ = data_mnist(train_start=0, train_end=60000, test_start=0, test_end=10000)

# Fashion-MNIST class names, indexed by label id
LABEL_NAMES = [
    "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
    "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"
]

# Predicted vs. true class ids
adv_preds = np.argmax(adv_logits, axis=1)
true_labels = np.argmax(true_ys, axis=1)

# Shuffle indices so the mosaic shows a varied sample
indices = np.arange(len(adv))
np.random.shuffle(indices)

# Parse attack metadata from the result FILENAME, not the whole path.
# BUGFIX: the previous code used path_to_results.split('_')[3], which split
# the full path on '_' and returned 'K10' (from the directory name
# 'bayes_K10_A_cnn') instead of the epsilon token 'eps0.10'.
result_name = os.path.basename(path_to_results)            # e.g. mnist_fgsm_eps0.10_untargeted.pkl
attack_type = result_name.split('_')[1]                    # e.g. 'fgsm'
eps_value = result_name.split('_')[2].replace("eps", "")   # e.g. '0.10'
title_text = f"A model's classification with {attack_type.upper()} eps {eps_value} attacks."

# Mosaic layout: top 5x5 grid = adversarial images, bottom 5x5 grid = originals
rows, cols = 5, 5
fig, axes = plt.subplots(rows * 2, cols, figsize=(12, 20))  # double height for the two grids


def _draw_cell(ax, image, title, color):
    """Draw one 28x28 grayscale image with a colored border and a small title."""
    ax.imshow(image.reshape(28, 28), cmap='gray')
    ax.set_title(title, fontsize=8)
    for side in ('top', 'bottom', 'left', 'right'):
        ax.spines[side].set_color(color)
        ax.spines[side].set_linewidth(2)
    # Hide ticks and tick labels so only the image and border are visible
    ax.tick_params(left=False, bottom=False, labelleft=False, labelbottom=False)


# Top grid: adversarial images (red border = model fooled, green = still correct)
for i, ax in zip(indices[:rows * cols], axes[:rows].flatten()):
    true_name = LABEL_NAMES[true_labels[i]]
    pred_name = LABEL_NAMES[adv_preds[i]]
    color = 'red' if adv_preds[i] != true_labels[i] else 'green'
    _draw_cell(ax, adv[i], f"True: {true_name}\nPred: {pred_name}", color)

# Bottom grid: the corresponding clean originals (blue border)
for i, ax in zip(indices[:rows * cols], axes[rows:].flatten()):
    true_name = LABEL_NAMES[true_labels[i]]
    _draw_cell(ax, X_test[i], f"True: {true_name}", 'blue')

# Global title, reserving headroom above the grids
fig.suptitle(title_text, fontsize=16)
plt.tight_layout(rect=[0, 0, 1, 0.96])

# Save the mosaic
output_path = os.path.join(output_dir, "mosaic_results_with_originals.png")
plt.savefig(output_path, dpi=150)
plt.close()

print(f"Mosaïque avec labels sauvegardée dans {output_path}")
Binary file not shown.
44 changes: 44 additions & 0 deletions test_attacks/save_miss_classified.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import pickle
import os
import matplotlib.pyplot as plt
import numpy as np

# Pickled attack results to inspect
path_to_results = 'raw_attack_results/bayes_K10_A_cnn/mnist_fgsm_eps0.10_untargeted.pkl'
output_dir = 'adversarial_images'

# Make sure the output directory exists
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Unpack (adv, true_ys, adv_ys, adv_logits) from the attack run
with open(path_to_results, 'rb') as f:
    adv, true_ys, adv_ys, adv_logits = pickle.load(f)

# Find adversarial examples the model labels differently from ground truth
adv_preds = np.argmax(adv_logits, axis=1)
true_labels = np.argmax(true_ys, axis=1)
misclassified_indices = np.where(adv_preds != true_labels)[0]

print(f"Nombre d'exemples mal classifiés : {len(misclassified_indices)}")

# Save one PNG per misclassified adversarial example
for rank, sample_idx in enumerate(misclassified_indices):
    plt.imshow(adv[sample_idx].reshape(28, 28), cmap='gray')
    plt.title(f"True: {true_labels[sample_idx]}, Predicted: {adv_preds[sample_idx]}")
    plt.axis('off')
    plt.savefig(os.path.join(output_dir, f"misclassified_{rank}.png"))
    plt.close()

# Assemble a simple HTML report referencing the saved images
html_output_path = os.path.join(output_dir, "report.html")

html_parts = ["<html><body>\n", "<h1>Rapport des exemples mal classifiés</h1>\n"]
for rank, sample_idx in enumerate(misclassified_indices):
    image_path = f"misclassified_{rank}.png"
    html_parts.append(f"<div><h3>True: {true_labels[sample_idx]}, Predicted: {adv_preds[sample_idx]}</h3>\n")
    html_parts.append(f"<img src='{image_path}' style='width:150px; height:150px;'/></div><br>\n")
html_parts.append("</body></html>\n")

with open(html_output_path, "w") as f:
    f.write("".join(html_parts))
Binary file added utils/__pycache__/utils.cpython-312.pyc
Binary file not shown.
Binary file added utils/__pycache__/utils.cpython-36.pyc
Binary file not shown.
Binary file added utils/__pycache__/utils_mnist.cpython-36.pyc
Binary file not shown.
Binary file added utils/__pycache__/visualisation.cpython-312.pyc
Binary file not shown.
Binary file not shown.
Loading