Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions deepfake-inceptionv4.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
import os
import torch
import timm
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from PIL import ImageFile, Image
import matplotlib.pyplot as plt

# Tolerate truncated JPEGs instead of raising during decode.
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class SafeImageFolder(datasets.ImageFolder):
    """ImageFolder variant that skips unreadable/corrupted images.

    On a decode failure it advances to the next sample instead of
    crashing the DataLoader worker.
    """

    def __getitem__(self, index):
        # Iterate with a bound instead of recursing: the original
        # recursive retry could raise RecursionError on a long run of
        # corrupted files, and would loop forever (via the modulo wrap)
        # if every file were unreadable.
        n = len(self.imgs)
        for offset in range(n):
            i = (index + offset) % n
            path, target = self.imgs[i]
            try:
                img = Image.open(path)
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                if self.transform is not None:
                    img = self.transform(img)
                return img, target
            except (OSError, IOError):
                print(f"[Warning] Skipping corrupted image at index {i}")
        raise RuntimeError("All images in the dataset are unreadable.")


train_dir = "/kaggle/input/ai-generated-images-vs-real-images/train"
test_dir = "/kaggle/input/ai-generated-images-vs-real-images/test"

img_size = 299  # InceptionV4 default input
batch_size = 32

transform = transforms.Compose([
    transforms.Resize((img_size, img_size)),
    transforms.ToTensor(),
    # Explicit per-channel stats (timm inception_v4 expects inputs in [-1, 1]).
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])


if __name__ == '__main__':
    train_data = SafeImageFolder(train_dir, transform=transform)
    test_data = SafeImageFolder(test_dir, transform=transform)
    print(f"Found {len(train_data)} training images and {len(test_data)} test images.")
    if len(train_data) == 0:
        raise ValueError(f"No images found in {train_dir}. Please check your data path and structure.")
    if len(test_data) == 0:
        raise ValueError(f"No images found in {test_dir}. Please check your data path and structure.")

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=0)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Binary classifier head (real vs. AI-generated) on a pretrained backbone.
    model = timm.create_model("inception_v4", pretrained=True, num_classes=2)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    epochs = 5
    train_loss_hist, test_loss_hist = [], []
    train_acc_hist, test_acc_hist = [], []

    for epoch in range(epochs):
        # ---- training pass ----
        model.train()
        running_loss = 0.0
        running_corrects = 0  # plain int accumulator; no device tensor needed
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            _, preds = torch.max(outputs, 1)
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is per-sample.
            running_loss += loss.item() * images.size(0)
            running_corrects += (preds == labels).sum().item()

        epoch_loss = running_loss / len(train_data)
        epoch_acc = running_corrects / len(train_data)
        train_loss_hist.append(epoch_loss)
        train_acc_hist.append(epoch_acc)

        # ---- validation pass ----
        model.eval()
        val_loss = 0.0
        val_corrects = 0
        with torch.no_grad():
            for images, labels in test_loader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                loss = criterion(outputs, labels)

                _, preds = torch.max(outputs, 1)
                val_loss += loss.item() * images.size(0)
                val_corrects += (preds == labels).sum().item()

        val_loss /= len(test_data)
        val_acc = val_corrects / len(test_data)
        test_loss_hist.append(val_loss)
        test_acc_hist.append(val_acc)

        print(f"Epoch {epoch+1}/{epochs} - "
              f"Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f} - "
              f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")

    torch.save(model.state_dict(), "inceptionv4_deepfake.pth")
    print("Model weights saved as inceptionv4_deepfake.pth")

    plt.figure(figsize=(10,5))
    plt.plot(train_loss_hist, label="Train Loss")
    plt.plot(test_loss_hist, label="Val Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.title("Training & Validation Loss")
    plt.show()

    plt.figure(figsize=(10,5))
    plt.plot(train_acc_hist, label="Train Acc")
    plt.plot(test_acc_hist, label="Val Acc")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.title("Training & Validation Accuracy")
    plt.savefig("training_curves.png")
    plt.show()
    print("training_curves.png saved")
warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (98806617 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (96000000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (107184040 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (96012000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (90671520 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (161087488 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (143040000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n/usr/local/lib/python3.11/dist-packages/PIL/Image.py:3442: DecompressionBombWarning: Image size (121554000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack.\n warnings.warn(\n","output_type":"stream"},{"name":"stdout","text":"Epoch 1/10 - Train Loss: 0.1990, Train Acc: 0.9209 - Val Loss: 0.1253, Val Acc: 0.9542\nEpoch 2/10 - Train Loss: 0.1057, Train Acc: 0.9610 - Val Loss: 0.1006, Val Acc: 0.9629\nEpoch 3/10 - Train Loss: 0.0692, Train Acc: 0.9740 - Val Loss: 0.1122, Val Acc: 0.9586\nEpoch 4/10 - 
Train Loss: 0.0491, Train Acc: 0.9821 - Val Loss: 0.1206, Val Acc: 0.9563\nEpoch 5/10 - Train Loss: 0.0440, Train Acc: 0.9839 - Val Loss: 0.1471, Val Acc: 0.9544\n","output_type":"stream"}],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}