diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 60725c1d..3432fe1a 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -17,21 +17,25 @@ jobs: python-version: ["3.9"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Test + - name: Install dependencies run: | pip install --upgrade pip pip install wheel setuptools + pip install deprecated pip install -e . pip install -r requirements-dev.txt + pip install --upgrade monty + - name: Run tests + run: | coverage run -m unittest discover coverage json - name: Upload coverage artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: coverage_artifact path: coverage.json @@ -40,29 +44,30 @@ jobs: needs: test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Download coverage artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: coverage_artifact - name: Check coverage run: | - pip install monty==4.0.2 + pip install --upgrade monty python scripts/coverage.py benchmark_submission: runs-on: ubuntu-latest steps: - - - uses: actions/checkout@v2 - - name: Set up python 3.9 - uses: actions/setup-python@v2 - with: - python-version: 3.9 - - name: Run submission check - run: | - pip install --upgrade pip - pip install wheel setuptools - pip install -e . - pip install -r requirements-dev.txt - python -m unittest scripts/test_submission.py + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Run submission check + run: | + pip install --upgrade pip + pip install wheel setuptools + pip install deprecated + pip install -e . 
+ pip install -r requirements-dev.txt + pip install --upgrade monty + python -m unittest scripts/test_submission.py diff --git a/.github/workflows/rebuild_website.yml b/.github/workflows/rebuild_website.yml index 44ed187b..c90aa15c 100644 --- a/.github/workflows/rebuild_website.yml +++ b/.github/workflows/rebuild_website.yml @@ -9,9 +9,9 @@ jobs: rebuild_docs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.9 - name: Build docs diff --git a/benchmarks/matbench_v0.1_CrystalX/CrystalX.ipynb b/benchmarks/matbench_v0.1_CrystalX/CrystalX.ipynb new file mode 100644 index 00000000..1c689421 --- /dev/null +++ b/benchmarks/matbench_v0.1_CrystalX/CrystalX.ipynb @@ -0,0 +1,1292 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import shutil\n", + "import time\n", + "import warnings\n", + "from random import sample\n", + "from matbench.bench import MatbenchBenchmark" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from sklearn import metrics\n", + "from torch.optim.lr_scheduler import MultiStepLR\n", + "from torch.utils.data import Dataset, DataLoader\n", + "from tqdm.auto import tqdm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from torch_geometric.nn import EdgeConv\n", + "import torch.nn.functional as F\n", + "\n", + "\n", + "class ConvLayer(nn.Module):\n", + " \"\"\"\n", + " Crystal Graph Convolutional Layer combining edge features with node features.\n", + " \n", + " This layer:\n", + " 1. Transforms edge features through an MLP\n", + " 2. Embeds edge features into nodes via aggregation\n", + " 3. Applies EdgeConv on the combined node+edge features\n", + " 4. 
Uses residual connections and batch normalization for stability\n", + " \"\"\"\n", + " \n", + " def __init__(self, atom_fea_len, nbr_fea_len, edge_hidden_dims=[256, 128], dropout=0.1):\n", + " super(ConvLayer, self).__init__()\n", + " self.atom_fea_len = atom_fea_len\n", + " self.nbr_fea_len = nbr_fea_len\n", + "\n", + " # MLP to transform edge features (nbr_fea)\n", + " self.edge_mlp = nn.Sequential(\n", + " nn.Linear(nbr_fea_len, edge_hidden_dims[0]),\n", + " nn.ReLU(),\n", + " nn.Linear(edge_hidden_dims[0], edge_hidden_dims[1]),\n", + " nn.ReLU(),\n", + " nn.Linear(edge_hidden_dims[1], atom_fea_len), # Output size matches node features\n", + " nn.Dropout(dropout)\n", + " )\n", + "\n", + " # EdgeConv layer to operate on node features\n", + " self.edge_conv = EdgeConv(nn.Sequential(\n", + " nn.Linear(64 * 2, 256), # For concatenated source and target node features\n", + " nn.ReLU(),\n", + " nn.Linear(256, 64), # Back to original feature size\n", + " ),aggr='mean')\n", + " \n", + " # BatchNorm layers\n", + " self.bn_input = nn.BatchNorm1d(atom_fea_len) # BatchNorm for input node features\n", + " self.bn_combined = nn.BatchNorm1d(atom_fea_len) # BatchNorm for combined node and edge features\n", + " self.bn_output = nn.BatchNorm1d(atom_fea_len) # BatchNorm for final output features\n", + "\n", + "\n", + " def forward(self, atom_fea, nbr_fea, edge_index):\n", + " \"\"\"\n", + " Forward pass for EdgeConv with edge features.\n", + "\n", + " Parameters\n", + " ----------\n", + " atom_fea: torch.Tensor, shape (N, atom_fea_len)\n", + " Node (atom) features.\n", + " nbr_fea: torch.Tensor, shape (E, nbr_fea_len)\n", + " Edge features (e.g., bond features).\n", + " edge_index: torch.LongTensor, shape (2, E)\n", + " Edge connectivity in COO format.\n", + "\n", + " Returns\n", + " -------\n", + " atom_out_fea: torch.Tensor, shape (N, atom_fea_len)\n", + " Updated node (atom) features.\n", + " \"\"\"\n", + " # Step 1: Transform edge features\n", + " transformed_nbr_fea = self.edge_mlp(nbr_fea) # Shape: (E, atom_fea_len)\n", + "\n", + " \n", + " atom_fea = self.bn_input(atom_fea) # Normalize node features\n", + " \n", + " \n", + " # Step 2: Embed edge features into node features\n", + " src, tgt = edge_index # Source and target nodes\n", + " #print(f\"src.shape: {src.shape}\")\n", + " #print(f\"tgt.shape: {tgt.shape}\")\n", + " \n", + " transformed_nbr_fea = transformed_nbr_fea.view(-1, transformed_nbr_fea.size(-1))\n", + " #print(f\"transformed_nbr_fea.shape: {transformed_nbr_fea.shape}\")\n", + " # Create empty tensors for node features that will aggregate edge features\n", + " edge_embedded_src = torch.zeros_like(atom_fea) # Shape: (num_nodes, atom_fea_len)\n", + " edge_embedded_tgt = torch.zeros_like(atom_fea) # Shape: (num_nodes, atom_fea_len)\n", + "\n", + " # Aggregate edge features to both source and target nodes\n", + " edge_embedded_src = edge_embedded_src.index_add_(0, src, transformed_nbr_fea) # Source node aggregation\n", + " edge_embedded_tgt = edge_embedded_tgt.index_add_(0, tgt, transformed_nbr_fea) # Target node aggregation\n", + "\n", + " # Step 3: Combine node features with edge features (source + target)\n", + " combined_fea = atom_fea + edge_embedded_src + edge_embedded_tgt # Shape: (num_nodes, atom_fea_len)\n", + "\n", + " # Step 4: Apply EdgeConv on updated features\n", + " atom_out_fea = self.edge_conv((combined_fea, atom_fea), edge_index) # Ensure input is a PairTensor\n", + "\n", + " atom_out_fea = self.bn_output(atom_out_fea)\n", + " \n", + " atom_out_fea = 
atom_out_fea+combined_fea\n", + " \n", + " return atom_out_fea\n", + "\n", + "\n", + "class CrystalX(nn.Module):\n", + " \"\"\"\n", + " Crystal Graph Convolutional Neural Network (CrystalX).\n", + " \n", + " A deep learning model for predicting properties of crystalline materials.\n", + " Uses graph convolutions to learn from crystal structure representations\n", + " where atoms are nodes and bonds are edges with features.\n", + " \n", + " Architecture:\n", + " 1. Embedding layer: Maps raw atom features to learnable space\n", + " 2. Graph convolutions: Learn structural patterns through message passing\n", + " 3. Global pooling: Aggregates atom-level features to crystal-level\n", + " 4. MLP head: Final prediction layers\n", + " \"\"\"\n", + " \n", + " def __init__(self, orig_atom_fea_len, nbr_fea_len, atom_fea_len=64, n_conv=3, \n", + " h_fea_len=256, n_h=2, dense_units=None, num_classes=2, task=\"regression\", \n", + " dropout=0.1):\n", + " \"\"\"\n", + " Initialize CrystalX model.\n", + "\n", + " Args:\n", + " orig_atom_fea_len (int): Original atom feature dimension\n", + " nbr_fea_len (int): Neighbor/bond feature dimension \n", + " atom_fea_len (int): Embedded atom feature dimension [default: 64]\n", + " n_conv (int): Number of convolution layers [default: 3]\n", + " h_fea_len (int): Hidden feature dimension after pooling [default: 256]\n", + " n_h (int): Number of hidden layers (deprecated, use dense_units)\n", + " dense_units (list): FC layer dimensions [default: [128, 64]]\n", + " num_classes (int): Number of output classes for classification [default: 2]\n", + " task (str): 'regression' or 'classification' [default: 'regression']\n", + " dropout (float): Dropout probability [default: 0.1]\n", + " \"\"\"\n", + " super(CrystalX, self).__init__()\n", + " self.num_classes = num_classes\n", + " self.task = task\n", + " self.atom_fea_len = atom_fea_len\n", + " \n", + " # Set default dense units if not provided\n", + " if dense_units is None:\n", + " dense_units = [128, 64]\n", + "\n", + " # Atom Feature Embedding\n", + " # Projects raw atom features to learnable embedding space\n", + " self.embedding = nn.Sequential(\n", + " nn.Linear(orig_atom_fea_len, atom_fea_len),\n", + " nn.ReLU(inplace=True),\n", + " nn.BatchNorm1d(atom_fea_len)\n", + " )\n", + "\n", + " # Graph Convolutional Layers\n", + " # Stack of ConvLayers for hierarchical feature learning\n", + " self.convs = nn.ModuleList([\n", + " ConvLayer(atom_fea_len=atom_fea_len, \n", + " nbr_fea_len=nbr_fea_len, \n", + " dropout=dropout) \n", + " for _ in range(n_conv)\n", + " ])\n", + "\n", + " # Transition from graph features to dense features\n", + " self.conv_to_fc = nn.Sequential(\n", + " nn.Linear(atom_fea_len, h_fea_len),\n", + " nn.ReLU(inplace=True),\n", + " nn.Dropout(dropout)\n", + " )\n", + "\n", + " # Fully Connected Prediction Head\n", + " fc_layers = []\n", + " input_size = h_fea_len\n", + " \n", + " for i, units in enumerate(dense_units):\n", + " fc_layers.extend([\n", + " nn.Linear(input_size, units),\n", + " nn.ReLU(inplace=True),\n", + " nn.Dropout(dropout)\n", + " ])\n", + " input_size = units\n", + " \n", + " self.fc_hidden = nn.Sequential(*fc_layers)\n", + "\n", + " # Output layer based on task type\n", + " if self.task == 'regression':\n", + " self.fc_out = nn.Linear(dense_units[-1], 1)\n", + " elif self.task == 'classification':\n", + " self.fc_out = nn.Linear(dense_units[-1], num_classes)\n", + " else:\n", + " raise ValueError(f\"Unsupported task: {task}. 
Use 'regression' or 'classification'\")\n", + "\n", + " def create_edge_index(self, nbr_fea_idx):\n", + " \"\"\"\n", + " Convert neighbor indices to edge_index format for PyTorch Geometric.\n", + " \n", + " Args:\n", + " nbr_fea_idx (torch.Tensor): Neighbor indices [N, M] where N=atoms, M=max_neighbors\n", + " \n", + " Returns:\n", + " torch.LongTensor: Edge connectivity in COO format [2, E]\n", + " \"\"\"\n", + " N, M = nbr_fea_idx.size()\n", + " \n", + " # Create source node indices (repeated for each neighbor)\n", + " src = torch.arange(N, device=nbr_fea_idx.device, dtype=torch.long).unsqueeze(1).expand(-1, M)\n", + " \n", + " # Target nodes are the neighbor indices\n", + " tgt = nbr_fea_idx\n", + " \n", + " # Stack and reshape to COO format\n", + " edge_index = torch.stack([src.reshape(-1), tgt.reshape(-1)], dim=0)\n", + " \n", + " return edge_index\n", + "\n", + " def forward(self, atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx):\n", + " \"\"\"\n", + " Forward pass of CrystalX.\n", + "\n", + " Args:\n", + " atom_fea (torch.Tensor): Atom features [N, orig_atom_fea_len]\n", + " nbr_fea (torch.Tensor): Neighbor features [N*M, nbr_fea_len] (flattened)\n", + " nbr_fea_idx (torch.LongTensor): Neighbor indices [N, M] \n", + " crystal_atom_idx (list): List of atom indices for each crystal\n", + "\n", + " Returns:\n", + " torch.Tensor: Predictions [batch_size, output_dim]\n", + " \"\"\"\n", + " # Embed atom features to target dimension\n", + " atom_fea = self.embedding(atom_fea) # [N, atom_fea_len]\n", + "\n", + " # Convert neighbor indices to edge connectivity format\n", + " edge_index = self.create_edge_index(nbr_fea_idx) # [2, E]\n", + "\n", + " # Apply graph convolutional layers sequentially\n", + " for i, conv_layer in enumerate(self.convs):\n", + " atom_fea = conv_layer(atom_fea, nbr_fea, edge_index)\n", + "\n", + " # Global pooling: aggregate atom features to crystal-level features\n", + " crys_fea = self.pooling(atom_fea, crystal_atom_idx) # [batch_size, atom_fea_len]\n", + "\n", + " # Transform to dense feature space\n", + " crys_fea = self.conv_to_fc(crys_fea) # [batch_size, h_fea_len]\n", + "\n", + " # Apply fully connected layers\n", + " crys_fea = self.fc_hidden(crys_fea) # [batch_size, dense_units[-1]]\n", + "\n", + " # Final prediction\n", + " out = self.fc_out(crys_fea) # [batch_size, output_dim]\n", + "\n", + " # Apply appropriate activation based on task\n", + " if self.task == 'classification':\n", + " return F.log_softmax(out, dim=-1) # Log probabilities for NLL loss\n", + " else:\n", + " return out # Raw values for regression\n", + "\n", + " def pooling(self, atom_fea, crystal_atom_idx):\n", + " \"\"\"\n", + " Global pooling operation to aggregate atom features within each crystal.\n", + " \n", + " Uses mean pooling to create permutation-invariant crystal representations.\n", + " \n", + " Args:\n", + " atom_fea (torch.Tensor): Atom features after convolution [N, atom_fea_len]\n", + " crystal_atom_idx (list): List of tensors, each containing atom indices for a crystal\n", + " \n", + " Returns:\n", + " torch.Tensor: Crystal-level features [batch_size, atom_fea_len]\n", + " \"\"\"\n", + " # Use list comprehension with mean pooling for each crystal\n", + " # This is more memory efficient than creating large tensors\n", + " crystal_features = []\n", + " \n", + " for idx_map in crystal_atom_idx:\n", + " if len(idx_map) > 0: # Handle empty crystals gracefully\n", + " # Mean pooling over atoms in this crystal\n", + " crystal_fea = torch.mean(atom_fea[idx_map], dim=0, 
keepdim=True)\n", + " else:\n", + " # Handle edge case of empty crystal\n", + " crystal_fea = torch.zeros(1, atom_fea.size(-1), \n", + " device=atom_fea.device, dtype=atom_fea.dtype)\n", + " crystal_features.append(crystal_fea)\n", + " \n", + " # Concatenate all crystal features\n", + " return torch.cat(crystal_features, dim=0)\n", + "\n", + " def count_parameters(self):\n", + " \"\"\"\n", + " Count the total number of trainable parameters.\n", + " \n", + " Returns:\n", + " int: Total number of parameters\n", + " \"\"\"\n", + " return sum(p.numel() for p in self.parameters() if p.requires_grad)\n", + "\n", + " def summary(self):\n", + " \"\"\"\n", + " Print model summary with layer information.\n", + " \"\"\"\n", + " print(f\"CrystalX Model Summary:\")\n", + " print(f\"Task: {self.task}\")\n", + " print(f\"Atom feature length: {self.atom_fea_len}\")\n", + " print(f\"Number of conv layers: {len(self.convs)}\")\n", + " print(f\"Total parameters: {self.count_parameters():,}\")\n", + " print(f\"Model size: {self.count_parameters() * 4 / 1024 / 1024:.2f} MB (float32)\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ATOM_EMBEDDING_DATA = {\n", + " 1: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 3: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 4: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 5: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 6: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 7: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 8: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 9: [0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 10: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 11: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 12: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 13: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 14: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 15: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 16: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 17: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 18: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 19: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n", + " 20: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 21: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 22: [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 23: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 24: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 25: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 26: [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 27: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 28: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 29: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 30: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 31: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 32: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 33: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 34: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 35: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 36: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n", + " 37: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n", + " 38: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n", + " 39: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 40: [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 41: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 42: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 43: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 44: [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 45: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 46: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 47: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 48: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 49: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 50: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 51: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 52: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 53: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 54: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n", + " 55: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n", + " 56: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n", + " 57: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 58: [1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 59: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 60: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 61: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 62: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 63: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 64: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 65: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 66: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 67: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 68: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 69: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 70: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n", + " 71: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 72: [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 73: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 74: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 75: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 76: [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 77: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 78: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 79: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " 80: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 81: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 82: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 83: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 84: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 85: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 86: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 87: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 88: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n", + " 89: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 90: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 91: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n", + " 92: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n", + " 93: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 94: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 95: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 96: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n", + " 97: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 98: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 99: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " 100: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import print_function, division\n", + "\n", + "import csv\n", + "import functools\n", + "import json\n", + "import os\n", + "import random\n", + "import warnings\n", + "\n", + "import numpy as np\n", + "import torch\n", + "from pymatgen.core.structure import Structure\n", + "from torch.utils.data import Dataset, DataLoader\n", + "from torch.utils.data.dataloader import default_collate\n", + "from torch.utils.data.sampler import SubsetRandomSampler\n", + "\n", + "\n", + "def get_train_val_test_loader(dataset, collate_fn=default_collate,\n", + " batch_size=64, train_ratio=None,\n", + " val_ratio=0.1, test_ratio=0, return_test=False,\n", + " num_workers=1, pin_memory=False, **kwargs):\n", + " \"\"\"\n", + " Utility function for dividing a dataset to train, val, test datasets.\n", + "\n", + " !!! The dataset needs to be shuffled before using the function !!!\n", + "\n", + " Parameters\n", + " ----------\n", + " \n", + " dataset: torch.utils.data.Dataset\n", + " The full dataset to be divided.\n", + " collate_fn: torch.utils.data.DataLoader\n", + " batch_size: int\n", + " train_ratio: float\n", + " val_ratio: float\n", + " test_ratio: float\n", + " return_test: bool\n", + " Whether to return the test dataset loader. 
If False, the last test_size\n", + " data will be hidden.\n", + " num_workers: int\n", + " pin_memory: bool\n", + "\n", + " Returns\n", + " -------\n", + " train_loader: torch.utils.data.DataLoader\n", + " DataLoader that random samples the training data.\n", + " val_loader: torch.utils.data.DataLoader\n", + " DataLoader that random samples the validation data.\n", + " (test_loader): torch.utils.data.DataLoader\n", + " DataLoader that random samples the test data, returns if\n", + " return_test=True.\n", + " \"\"\"\n", + " total_size = len(dataset)\n", + " if kwargs['train_size'] is None:\n", + " if train_ratio is None:\n", + " assert val_ratio + test_ratio < 1\n", + " train_ratio = 1 - val_ratio - test_ratio\n", + " print(f'[Warning] train_ratio is None, using 1 - val_ratio - '\n", + " f'test_ratio = {train_ratio} as training data.')\n", + " else:\n", + " assert train_ratio + val_ratio + test_ratio <= 1\n", + " indices = list(range(total_size))\n", + " if kwargs['train_size']:\n", + " train_size = kwargs['train_size']\n", + " else:\n", + " train_size = int(train_ratio * total_size)\n", + " if kwargs['test_size']:\n", + " test_size = kwargs['test_size']\n", + " else:\n", + " test_size = int(test_ratio * total_size)\n", + " if kwargs['val_size']:\n", + " valid_size = kwargs['val_size']\n", + " else:\n", + " valid_size = int(val_ratio * total_size)\n", + " train_sampler = SubsetRandomSampler(indices[:train_size])\n", + " val_sampler = SubsetRandomSampler(\n", + " indices[train_size:])\n", + " \n", + "\n", + " train_loader = DataLoader(dataset, batch_size=batch_size,\n", + " sampler=train_sampler,\n", + " num_workers=num_workers,\n", + " collate_fn=collate_fn, pin_memory=pin_memory)\n", + " val_loader = DataLoader(dataset, batch_size=batch_size,\n", + " sampler=val_sampler,\n", + " num_workers=num_workers,\n", + " collate_fn=collate_fn, pin_memory=pin_memory)\n", + " if return_test:\n", + " test_loader = DataLoader(dataset, batch_size=batch_size,\n", + " sampler=test_sampler,\n", + " num_workers=num_workers,\n", + " collate_fn=collate_fn, pin_memory=pin_memory)\n", + " if return_test:\n", + " return train_loader, val_loader, test_loader\n", + " else:\n", + " return train_loader, val_loader\n", + "\n", + "\n", + "\n", + "def collate_pool(dataset_list):\n", + " \"\"\"\n", + " Collate a list of data and return a batch for predicting crystal\n", + " properties.\n", + "\n", + " Parameters\n", + " ----------\n", + "\n", + " dataset_list: list of tuples for each data point.\n", + " (atom_fea, nbr_fea, nbr_fea_idx, target)\n", + "\n", + " atom_fea: torch.Tensor shape (n_i, atom_fea_len)\n", + " nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)\n", + " nbr_fea_idx: torch.LongTensor shape (n_i, M)\n", + " target: torch.Tensor shape (1, )\n", + " cif_id: str or int\n", + "\n", + " Returns\n", + " -------\n", + " N = sum(n_i); N0 = sum(i)\n", + "\n", + " batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len)\n", + " Atom features from atom type\n", + " batch_nbr_fea: torch.Tensor shape (N, M, nbr_fea_len)\n", + " Bond features of each atom's M neighbors\n", + " batch_nbr_fea_idx: torch.LongTensor shape (N, M)\n", + " Indices of M neighbors of each atom\n", + " crystal_atom_idx: list of torch.LongTensor of length N0\n", + " Mapping from the crystal idx to atom idx\n", + " target: torch.Tensor shape (N, 1)\n", + " Target value for prediction\n", + " batch_cif_ids: list\n", + " \"\"\"\n", + " batch_atom_fea, batch_nbr_fea, batch_nbr_fea_idx = [], [], []\n", + " crystal_atom_idx, batch_target = [], []\n", 
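+ " # Note: each crystal's nbr_fea_idx indexes atoms within that crystal only;\n",
+ " # the base_idx offset added below keeps those indices valid after all\n",
+ " # crystals in the batch are concatenated into a single atom list.\n",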
+ " batch_cif_ids = []\n", + "\n", + " base_idx = 0\n", + " for i, ((atom_fea, nbr_fea, nbr_fea_idx), target, cif_id)\\\n", + " in enumerate(dataset_list):\n", + " n_i = atom_fea.shape[0] # number of atoms for this crystal\n", + " batch_atom_fea.append(atom_fea)\n", + " batch_nbr_fea.append(nbr_fea)\n", + " batch_nbr_fea_idx.append(nbr_fea_idx+base_idx)\n", + " new_idx = torch.LongTensor(np.arange(n_i)+base_idx)\n", + " crystal_atom_idx.append(new_idx)\n", + " batch_target.append(target)\n", + " batch_cif_ids.append(cif_id)\n", + " base_idx += n_i\n", + " return (torch.cat(batch_atom_fea, dim=0),\n", + " torch.cat(batch_nbr_fea, dim=0),\n", + " torch.cat(batch_nbr_fea_idx, dim=0),\n", + " crystal_atom_idx),\\\n", + " torch.stack(batch_target, dim=0),\\\n", + " batch_cif_ids\n", + "\n", + "\n", + "\n", + "class GaussianDistance(object):\n", + " \"\"\"\n", + " Expands the distance by Gaussian basis.\n", + "\n", + " Unit: angstrom\n", + " \"\"\"\n", + " def __init__(self, dmin, dmax, step, var=None):\n", + " \"\"\"\n", + " Parameters\n", + " ----------\n", + "\n", + " dmin: float\n", + " Minimum interatomic distance\n", + " dmax: float\n", + " Maximum interatomic distance\n", + " step: float\n", + " Step size for the Gaussian filter\n", + " \"\"\"\n", + " assert dmin < dmax\n", + " assert dmax - dmin > step\n", + " self.filter = np.arange(dmin, dmax+step, step)\n", + " if var is None:\n", + " var = step\n", + " self.var = var\n", + "\n", + " def expand(self, distances):\n", + " \"\"\"\n", + " Apply Gaussian disntance filter to a numpy distance array\n", + "\n", + " Parameters\n", + " ----------\n", + "\n", + " distance: np.array shape n-d array\n", + " A distance matrix of any shape\n", + "\n", + " Returns\n", + " -------\n", + " expanded_distance: shape (n+1)-d array\n", + " Expanded distance matrix with the last dimension of length\n", + " len(self.filter)\n", + " \"\"\"\n", + " return np.exp(-(distances[..., np.newaxis] - self.filter)**2 /\n", + " self.var**2)\n", + "\n", + "\n", + "class AtomInitializer:\n", + " \"\"\"\n", + " Initialize atom feature vectors using external data dictionary.\n", + " \"\"\"\n", + " \n", + " def __init__(self):\n", + " self.atom_types = set(ATOM_EMBEDDING_DATA.keys())\n", + " self._embedding = {}\n", + " \n", + " # Convert lists to numpy arrays\n", + " for key, value in ATOM_EMBEDDING_DATA.items():\n", + " self._embedding[key] = np.array(value, dtype=float)\n", + " \n", + " def get_atom_fea(self, atom_type):\n", + " assert atom_type in self.atom_types, f\"Atom type {atom_type} not found\"\n", + " return self._embedding[atom_type]\n", + "\n", + " def load_state_dict(self, state_dict):\n", + " self._embedding = state_dict\n", + " self.atom_types = set(self._embedding.keys())\n", + " self._decodedict = {idx: atom_type for atom_type, idx in\n", + " self._embedding.items()}\n", + "\n", + " def state_dict(self):\n", + " return self._embedding\n", + "\n", + " def decode(self, idx):\n", + " if not hasattr(self, '_decodedict'):\n", + " self._decodedict = {idx: atom_type for atom_type, idx in\n", + " self._embedding.items()}\n", + " return self._decodedict[idx]\n", + "\n", + "\n", + "class CIFData(Dataset):\n", + " \"\"\"\n", + " The CIFData dataset is a wrapper for a dataset where the crystal structures\n", + " are stored in the form of CIF files. 
The dataset should have the following\n", + " directory structure:\n", + "\n", + " root_dir\n", + " ├── id_prop.csv\n", + " ├── atom_init.json\n", + " ├── id0.cif\n", + " ├── id1.cif\n", + " ├── ...\n", + "\n", + " id_prop.csv: a CSV file with two columns. The first column recodes a\n", + " unique ID for each crystal, and the second column recodes the value of\n", + " target property.\n", + "\n", + " atom_init.json: a JSON file that stores the initialization vector for each\n", + " element.\n", + "\n", + " ID.cif: a CIF file that recodes the crystal structure, where ID is the\n", + " unique ID for the crystal.\n", + "\n", + " Parameters\n", + " ----------\n", + "\n", + " root_dir: str\n", + " The path to the root directory of the dataset\n", + " max_num_nbr: int\n", + " The maximum number of neighbors while constructing the crystal graph\n", + " radius: float\n", + " The cutoff radius for searching neighbors\n", + " dmin: float\n", + " The minimum distance for constructing GaussianDistance\n", + " step: float\n", + " The step size for constructing GaussianDistance\n", + " random_seed: int\n", + " Random seed for shuffling the dataset\n", + "\n", + " Returns\n", + " -------\n", + "\n", + " atom_fea: torch.Tensor shape (n_i, atom_fea_len)\n", + " nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)\n", + " nbr_fea_idx: torch.LongTensor shape (n_i, M)\n", + " target: torch.Tensor shape (1, )\n", + " cif_id: str or int\n", + " \"\"\"\n", + " def __init__(self, structures, targets, max_num_nbr=12, radius=8, dmin=0, step=0.2,\n", + " random_seed=123):\n", + " self.max_num_nbr, self.radius = max_num_nbr, radius\n", + " cif_ids = structures.index\n", + " assert (structures.index == targets.index).all(), \"Indices do not match or are out of order\"\n", + "\n", + "\n", + " self.id_prop_data = list(zip(structures, targets, cif_ids))\n", + " random.seed(random_seed)\n", + " random.shuffle(self.id_prop_data)\n", + "\n", + " self.ari = AtomInitializer()\n", + " self.gdf = GaussianDistance(dmin=dmin, dmax=self.radius, step=step)\n", + "\n", + " def __len__(self):\n", + " return len(self.id_prop_data)\n", + "\n", + " @functools.lru_cache(maxsize=None) # Cache loaded structures\n", + " def __getitem__(self, idx):\n", + " crystal, target, cif_id = self.id_prop_data[idx]\n", + "\n", + " atom_fea = np.vstack([self.ari.get_atom_fea(crystal[i].specie.number)\n", + " for i in range(len(crystal))])\n", + " atom_fea = torch.Tensor(atom_fea)\n", + " all_nbrs = crystal.get_all_neighbors(self.radius, include_index=True)\n", + " all_nbrs = [sorted(nbrs, key=lambda x: x[1]) for nbrs in all_nbrs]\n", + " nbr_fea_idx, nbr_fea = [], []\n", + " for nbr in all_nbrs:\n", + " if len(nbr) < self.max_num_nbr:\n", + " warnings.warn('not find enough neighbors to build graph. '\n", + " 'If it happens frequently, consider increase '\n", + " 'radius.')\n", + " nbr_fea_idx.append(list(map(lambda x: x[2], nbr)) +\n", + " [0] * (self.max_num_nbr - len(nbr)))\n", + " nbr_fea.append(list(map(lambda x: x[1], nbr)) +\n", + " [self.radius + 1.] 
* (self.max_num_nbr -\n", + " len(nbr)))\n", + " else:\n", + " nbr_fea_idx.append(list(map(lambda x: x[2],\n", + " nbr[:self.max_num_nbr])))\n", + " nbr_fea.append(list(map(lambda x: x[1],\n", + " nbr[:self.max_num_nbr])))\n", + " nbr_fea_idx, nbr_fea = np.array(nbr_fea_idx), np.array(nbr_fea)\n", + " nbr_fea = self.gdf.expand(nbr_fea)\n", + " atom_fea = torch.Tensor(atom_fea)\n", + " nbr_fea = torch.Tensor(nbr_fea)\n", + " nbr_fea_idx = torch.LongTensor(nbr_fea_idx)\n", + " target = torch.Tensor([float(target)])\n", + " return (atom_fea, nbr_fea, nbr_fea_idx), target, cif_id\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class Config:\n", + " def __init__(self):\n", + " # Training parameters\n", + " self.task = 'regression' # 'regression' or 'classification'\n", + " self.epochs = 500\n", + " self.batch_size = 256\n", + " self.lr = 0.001\n", + " self.lr_milestones = [100]\n", + " self.momentum = 0.9\n", + " self.weight_decay = 0\n", + " self.optim = 'Adam' # 'SGD' or 'Adam'\n", + " \n", + " # Data parameters\n", + " self.train_ratio = 0.85\n", + " self.val_ratio = 0.15\n", + " self.test_ratio = 0.0\n", + " self.workers = 0\n", + " \n", + " # Search parameters\n", + " self.search_radius = 20\n", + " self.max_num_nbr = 15\n", + " self.search_step = 0.2\n", + "\n", + " # Model parameters\n", + " self.atom_fea_len = 64\n", + " self.h_fea_len = 256\n", + " self.n_conv = 8\n", + " self.n_h = 2\n", + " \n", + " # System parameters\n", + " self.cuda = torch.cuda.is_available()\n", + " self.pin_memory = True\n", + " \n", + " # Best error tracking\n", + " if self.task == 'regression':\n", + " self.best_mae_error = 1e10\n", + " else:\n", + " self.best_mae_error = 0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = Config()\n", + "print(f\"Device: {'CUDA' if config.cuda else 'CPU'}\")\n", + "print(f\"Task: {config.task}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class Normalizer:\n", + " \"\"\"Optimized normalizer for tensor normalization and denormalization.\"\"\"\n", + " \n", + " def __init__(self, tensor):\n", + " # Use more stable computation and store as scalars for efficiency\n", + " self.mean = torch.mean(tensor).item()\n", + " self.std = torch.std(tensor).item()\n", + " # Avoid division by zero\n", + " if self.std == 0:\n", + " self.std = 1.0\n", + "\n", + " def norm(self, tensor):\n", + " return (tensor - self.mean) / self.std\n", + "\n", + " def denorm(self, normed_tensor):\n", + " return normed_tensor * self.std + self.mean\n", + "\n", + " def state_dict(self):\n", + " return {'mean': self.mean, 'std': self.std}\n", + "\n", + " def load_state_dict(self, state_dict):\n", + " self.mean = state_dict['mean']\n", + " self.std = state_dict['std']\n", + "\n", + "\n", + "def mae(prediction, target):\n", + " \"\"\"Mean Absolute Error calculation\"\"\"\n", + " prediction = prediction.view(-1)\n", + " target = target.view(-1)\n", + " return torch.mean(torch.abs(target - prediction))\n", + "\n", + "\n", + "def class_eval(prediction, target):\n", + " \"\"\"Classification evaluation metrics\"\"\"\n", + " prediction = np.exp(prediction.numpy())\n", + " target = target.numpy()\n", + " pred_label = np.argmax(prediction, axis=1)\n", + " target_label = np.squeeze(target)\n", + " \n", + " if not target_label.shape:\n", + " target_label = np.asarray([target_label])\n", + " \n", + " if 
prediction.shape[1] == 2:\n", + " precision, recall, fscore, _ = metrics.precision_recall_fscore_support(\n", + " target_label, pred_label, average='binary')\n", + " auc_score = metrics.roc_auc_score(target_label, prediction[:, 1])\n", + " accuracy = metrics.accuracy_score(target_label, pred_label)\n", + " else:\n", + " raise NotImplementedError(\"Only binary classification supported\")\n", + " \n", + " return accuracy, precision, recall, fscore, auc_score\n", + "\n", + "\n", + "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n", + " \"\"\"Save model checkpoint\"\"\"\n", + " torch.save(state, filename)\n", + " if is_best:\n", + " shutil.copyfile(filename, 'model_best.pth.tar')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class AverageMeter(object):\n", + " \"\"\"Computes and stores the average and current value\"\"\"\n", + "\n", + " def __init__(self):\n", + " self.reset()\n", + "\n", + " def reset(self):\n", + " self.val = 0\n", + " self.avg = 0\n", + " self.sum = 0\n", + " self.count = 0\n", + "\n", + " def update(self, val, n=1):\n", + " self.val = val\n", + " self.sum += val * n\n", + " self.count += n\n", + " self.avg = self.sum / self.count" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_model_and_optimizer(orig_atom_fea_len, nbr_fea_len, config):\n", + " \"\"\"Create model and optimizer with given configuration\"\"\"\n", + " model = CrystalX(\n", + " orig_atom_fea_len, \n", + " nbr_fea_len,\n", + " atom_fea_len=config.atom_fea_len,\n", + " n_conv=config.n_conv, \n", + " h_fea_len=config.h_fea_len,\n", + " task=config.task\n", + " )\n", + " \n", + " if config.cuda:\n", + " model.cuda()\n", + "\n", + " # Define loss function\n", + " if config.task == 'classification':\n", + " criterion = nn.NLLLoss()\n", + " else:\n", + " criterion = nn.MSELoss()\n", + "\n", + " # Define optimizer\n", + " if config.optim == 'SGD':\n", + " optimizer = optim.SGD(\n", + " model.parameters(), \n", + " config.lr,\n", + " momentum=config.momentum,\n", + " weight_decay=config.weight_decay\n", + " )\n", + " elif config.optim == 'Adam':\n", + " optimizer = optim.Adam(\n", + " model.parameters(), \n", + " config.lr,\n", + " weight_decay=config.weight_decay\n", + " )\n", + " else:\n", + " raise ValueError('Only SGD or Adam optimizers are supported')\n", + " \n", + " return model, criterion, optimizer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def train_epoch(train_loader, model, criterion, optimizer, normalizer, config):\n", + " \"\"\"Train for one epoch and return MAE\"\"\"\n", + " model.train()\n", + " mae_errors = AverageMeter()\n", + " \n", + " for input_data, target, _ in train_loader:\n", + " # Prepare input variables\n", + " if config.cuda:\n", + " input_var = (\n", + " input_data[0].cuda(non_blocking=True),\n", + " input_data[1].cuda(non_blocking=True),\n", + " input_data[2].cuda(non_blocking=True),\n", + " [crys_idx.cuda(non_blocking=True) for crys_idx in input_data[3]]\n", + " )\n", + " target_var = normalizer.norm(target).cuda(non_blocking=True)\n", + " else:\n", + " input_var = input_data\n", + " target_var = normalizer.norm(target)\n", + "\n", + " # Forward pass\n", + " output = model(*input_var)\n", + " loss = criterion(output, target_var)\n", + "\n", + " # Compute MAE\n", + " mae_error = mae(normalizer.denorm(output.data.cpu()), target)\n", + " 
mae_errors.update(mae_error.item(), target.size(0))\n", + "\n", + " # Backward pass\n", + " optimizer.zero_grad()\n", + " loss.backward()\n", + " optimizer.step()\n", + "\n", + " return mae_errors.avg\n", + "\n", + "\n", + "def validate_epoch(val_loader, model, criterion, normalizer, config, test_inputs=None, filename=None, is_test=False):\n", + " \"\"\"Validate and return MAE\"\"\"\n", + " model.eval()\n", + " mae_errors = AverageMeter()\n", + " \n", + " if is_test:\n", + " test_targets = []\n", + " test_preds = []\n", + " test_cif_ids = []\n", + "\n", + " with torch.no_grad():\n", + " for input_data, target, batch_cif_ids in val_loader:\n", + " # Prepare input variables\n", + " if config.cuda:\n", + " input_var = (\n", + " input_data[0].cuda(non_blocking=True),\n", + " input_data[1].cuda(non_blocking=True),\n", + " input_data[2].cuda(non_blocking=True),\n", + " [crys_idx.cuda(non_blocking=True) for crys_idx in input_data[3]]\n", + " )\n", + " target_var = normalizer.norm(target).cuda(non_blocking=True)\n", + " else:\n", + " input_var = input_data\n", + " target_var = normalizer.norm(target)\n", + "\n", + " # Forward pass\n", + " output = model(*input_var)\n", + " \n", + " # Compute MAE\n", + " mae_error = mae(normalizer.denorm(output.data.cpu()), target)\n", + " mae_errors.update(mae_error.item(), target.size(0))\n", + " \n", + " if is_test:\n", + " test_pred = normalizer.denorm(output.data.cpu())\n", + " test_preds.extend(test_pred.view(-1).tolist())\n", + " test_targets.extend(target.view(-1).tolist())\n", + " test_cif_ids.extend(batch_cif_ids)\n", + "\n", + " # Save test results\n", + " if is_test and filename and test_inputs is not None:\n", + " import csv\n", + " indices = test_inputs.index\n", + " id_to_vals = {cid: (tgt, pred) for cid, tgt, pred in zip(test_cif_ids, test_targets, test_preds)}\n", + " \n", + " with open(filename, 'w', newline='') as f:\n", + " writer = csv.writer(f)\n", + " for cif_id in indices:\n", + " target, pred = id_to_vals[cif_id]\n", + " writer.writerow((cif_id, target, pred))\n", + "\n", + " return mae_errors.avg" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def train_fold(task, fold, config):\n", + " \"\"\"Train a single fold with progress bars\"\"\"\n", + " print(f\"\\nTraining {task.dataset_name} - Fold {fold}\")\n", + " \n", + " filename = f\"{task.dataset_name}_fold_{fold}\"\n", + " \n", + " # Prepare data\n", + " train_inputs, train_outputs = task.get_train_and_val_data(fold)\n", + " train_outputs = train_outputs.astype(float)\n", + " dataset = CIFData(train_inputs, train_outputs, max_num_nbr=config.max_num_nbr, radius = config.search_radius)\n", + " \n", + " train_loader, val_loader = get_train_val_test_loader(\n", + " dataset, \n", + " collate_fn=collate_pool, \n", + " batch_size=config.batch_size,\n", + " return_test=False, \n", + " train_ratio=config.train_ratio, \n", + " val_ratio=config.val_ratio,\n", + " test_ratio=config.test_ratio,\n", + " num_workers=config.workers, \n", + " pin_memory=config.pin_memory,\n", + " train_size=None, \n", + " test_size=None, \n", + " val_size=None\n", + " )\n", + "\n", + " # Create normalizer\n", + " if len(dataset) < 500:\n", + " sample_data_list = [dataset[i] for i in range(len(dataset))]\n", + " else:\n", + " sample_data_list = [dataset[i] for i in sample(range(len(dataset)), 500)]\n", + " _, sample_target, _ = collate_pool(sample_data_list)\n", + " normalizer = Normalizer(sample_target)\n", + "\n", + " # Build model\n", + " structures, _, 
_ = dataset[0]\n",
+    "    orig_atom_fea_len = structures[0].shape[-1]\n",
+    "    nbr_fea_len = structures[1].shape[-1]\n",
+    "\n",
+    "    model, criterion, optimizer = create_model_and_optimizer(\n",
+    "        orig_atom_fea_len, nbr_fea_len, config\n",
+    "    )\n",
+    "\n",
+    "    scheduler = MultiStepLR(optimizer, milestones=config.lr_milestones, gamma=0.1)\n",
+    "    best_mae_error = 1e10\n",
+    "\n",
+    "    # Training loop with progress bar\n",
+    "    progress_bar = tqdm(range(config.epochs), desc=\"Training Progress\")\n",
+    "\n",
+    "    for epoch in progress_bar:\n",
+    "        # Train\n",
+    "        train_mae = train_epoch(train_loader, model, criterion, optimizer, normalizer, config)\n",
+    "\n",
+    "        # Validate\n",
+    "        val_mae = validate_epoch(val_loader, model, criterion, normalizer, config)\n",
+    "\n",
+    "        # Update progress bar\n",
+    "        progress_bar.set_postfix({\n",
+    "            'Train MAE': f'{train_mae:.4f}',\n",
+    "            'Val MAE': f'{val_mae:.4f}'\n",
+    "        })\n",
+    "\n",
+    "        # Stop if the validation MAE becomes NaN\n",
+    "        if val_mae != val_mae:\n",
+    "            print('Training failed due to NaN')\n",
+    "            return None\n",
+    "\n",
+    "        scheduler.step()\n",
+    "\n",
+    "        # Save best model\n",
+    "        is_best = val_mae < best_mae_error\n",
+    "        best_mae_error = min(val_mae, best_mae_error)\n",
+    "\n",
+    "        save_checkpoint({\n",
+    "            'epoch': epoch + 1,\n",
+    "            'state_dict': model.state_dict(),\n",
+    "            'best_mae_error': best_mae_error,\n",
+    "            'optimizer': optimizer.state_dict(),\n",
+    "            'normalizer': normalizer.state_dict(),\n",
+    "            'config': vars(config)\n",
+    "        }, is_best)\n",
+    "\n",
+    "    # Test evaluation\n",
+    "    print('Evaluating on test set...')\n",
+    "    best_checkpoint = torch.load('model_best.pth.tar')\n",
+    "    model.load_state_dict(best_checkpoint['state_dict'])\n",
+    "\n",
+    "    test_inputs, test_outputs = task.get_test_data(fold, include_target=True)\n",
+    "    test_outputs = test_outputs.astype(float)\n",
+    "    # Build the test dataset from the held-out test split, not the training data\n",
+    "    test_dataset = CIFData(test_inputs, test_outputs, max_num_nbr=config.max_num_nbr, radius=config.search_radius)\n",
+    "\n",
+    "    test_loader = DataLoader(\n",
+    "        test_dataset,\n",
+    "        collate_fn=collate_pool,\n",
+    "        batch_size=config.batch_size,\n",
+    "        num_workers=config.workers,\n",
+    "        pin_memory=config.cuda\n",
+    "    )\n",
+    "\n",
+    "    test_mae = validate_epoch(\n",
+    "        test_loader, model, criterion, normalizer, config,\n",
+    "        test_inputs=test_inputs, filename=filename, is_test=True\n",
+    "    )\n",
+    "\n",
+    "    print(f\"Final Results - Test MAE: {test_mae:.4f}\")\n",
+    "\n",
+    "    # Read predictions back in the order of the test index\n",
+    "    predictions = []\n",
+    "    import csv\n",
+    "    with open(filename, 'r') as f:\n",
+    "        reader = csv.reader(f)\n",
+    "        for row in reader:\n",
+    "            if len(row) >= 3:\n",
+    "                predictions.append(float(row[2]))\n",
+    "\n",
+    "    return predictions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def main():\n",
+    "    \"\"\"Main training function\"\"\"\n",
+    "    # Load datasets\n",
+    "    mb = MatbenchBenchmark(\n",
+    "        autoload=False,\n",
+    "        subset=[\n",
+    "            \"matbench_dielectric\",\n",
+    "        ]\n",
+    "    )\n",
+    "\n",
+    "    print(\"Starting CrystalX Matbench Evaluation\")\n",
+    "    print(f\"Datasets: {[task.dataset_name for task in mb.tasks]}\")\n",
+    "\n",
+    "    # Process each task\n",
+    "    for task in mb.tasks:\n",
+    "        print(f\"\\nLoading {task.dataset_name}...\")\n",
+    "        task.load()\n",
+    "\n",
+    "        # Process each fold\n",
+    "        for fold in task.folds:\n",
+    "            predictions = train_fold(task, fold, config)\n",
+    "            if predictions is not None:\n",
+    "                task.record(fold, predictions)\n",
+    "            else:\n",
+    "                print(f\"Failed to 
train fold {fold} for {task.dataset_name}\")\n", + " \n", + " # Save results\n", + " print(\"\\nSaving benchmark results...\")\n", + " mb.to_file(\"crystalx_benchmark_results.json.gz\")\n", + " print(\"Benchmark completed successfully!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "main()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/benchmarks/matbench_v0.1_CrystalX/config_example.json b/benchmarks/matbench_v0.1_CrystalX/config_example.json new file mode 100644 index 00000000..6a666209 --- /dev/null +++ b/benchmarks/matbench_v0.1_CrystalX/config_example.json @@ -0,0 +1,29 @@ +{ + "training_parameters": { + "task": "regression", + "epochs": 500, + "batch_size": 256, + "lr": 0.001, + "lr_milestones": [100], + "momentum": 0.9, + "weight_decay": 0, + "optim": "Adam" + }, + "data_parameters": { + "train_ratio": 0.85, + "val_ratio": 0.15, + "test_ratio": 0.0, + "workers": 0 + }, + "search_parameters": { + "search_radius": 20, + "max_num_nbr": 15, + "search_step": 0.2 + }, + "model_parameters": { + "atom_fea_len": 64, + "h_fea_len": 256, + "n_conv": 8, + "n_h": 2 + } +} \ No newline at end of file diff --git a/benchmarks/matbench_v0.1_CrystalX/info.json b/benchmarks/matbench_v0.1_CrystalX/info.json new file mode 100644 index 00000000..d742e5a4 --- /dev/null +++ b/benchmarks/matbench_v0.1_CrystalX/info.json @@ -0,0 +1,15 @@ +{ + "authors": "Shehroz A. Shoaib, Burhan K. SaifAddin", + "algorithm": "CrystalX", + "algorithm_long": "CrystalX is a crystal graph neural network implementation utilizing asymmetric edge convolution for materials property prediction. The model processes crystal structures by constructing graphs where atoms are nodes and bonds are edges, applying specialized convolutional operations that can handle asymmetric relationships between neighboring atoms in the crystal lattice.", + "bibtex_refs": "", + "notes":"", + "requirements": { + "python": [ + "pymatgen==2025", + "torch==2.6", + "tqdm", + "torch-geometric" + ] + } +} diff --git a/benchmarks/matbench_v0.1_CrystalX/results.json.gz b/benchmarks/matbench_v0.1_CrystalX/results.json.gz new file mode 100644 index 00000000..2d89baf3 Binary files /dev/null and b/benchmarks/matbench_v0.1_CrystalX/results.json.gz differ diff --git a/matbench/tests/test_data_ops.py b/matbench/tests/test_data_ops.py index 5d9763b6..db234e7e 100644 --- a/matbench/tests/test_data_ops.py +++ b/matbench/tests/test_data_ops.py @@ -50,7 +50,7 @@ def test_downloads_mbv01(self): test_types = ( [np.bool_, bool] if problem_type == CLF_KEY - else [np.float_, np.float32, np.float64, float] + else [np.float32, np.float64, float] ) self.assertIn(df[metadata["target"]].dtypes, test_types)