diff --git a/milp-evolve/README.md b/milp-evolve/README.md
index f03dded..5b84be0 100644
--- a/milp-evolve/README.md
+++ b/milp-evolve/README.md
@@ -1,22 +1,4 @@
----
-language: English
-license: cdla-2.0
-multilinguality: monolingual
-size_categories:
-  - 100K
@@ -319,4 +325,6 @@
 export TEXT_TYPES="description only"
 python contrast_train_test.py --epochs $EPOCH --dataset $DATASET --eval_epochs $EVAL_EPOCHS --print_iters $PRINT_ITERS --text_types $TEXT_TYPES
 ```
-
\ No newline at end of file
+
+
+
diff --git a/milp-evolve/src/.DS_Store b/milp-evolve/src/.DS_Store
deleted file mode 100644
index b20b54e..0000000
Binary files a/milp-evolve/src/.DS_Store and /dev/null differ
diff --git a/milp-evolve/src/milp_evolve_llm/.DS_Store b/milp-evolve/src/milp_evolve_llm/.DS_Store
deleted file mode 100644
index 5c3476a..0000000
Binary files a/milp-evolve/src/milp_evolve_llm/.DS_Store and /dev/null differ
diff --git a/milp-evolve/src/multi_class_learning/.DS_Store b/milp-evolve/src/multi_class_learning/.DS_Store
deleted file mode 100644
index 44b82d3..0000000
Binary files a/milp-evolve/src/multi_class_learning/.DS_Store and /dev/null differ
diff --git a/milp-evolve/src/multi_class_learning/contrast_class_split.py b/milp-evolve/src/multi_class_learning/contrast_class_split.py
index 1509acd..e9d4941 100644
--- a/milp-evolve/src/multi_class_learning/contrast_class_split.py
+++ b/milp-evolve/src/multi_class_learning/contrast_class_split.py
@@ -1,25 +1,18 @@
+import glob
 import json
 import os
+import pdb
+import sys
+import argparse
 import pickle
 import re
-import argparse
 from collections import defaultdict
 import numpy as np
 
-# First, randomly determine the IDs for trainning and testing
-total_length = 10000 # assume we have 10000 classes, which is above the actual number
-train_ids = np.random.choice(total_length, 8000, replace=False)
-test_ids = [i for i in range(total_length) if i not in train_ids]
 
-def add_data(filename, text):
-    global TRAIN_DATA, TEST_DATA
-    _id = milp_id(filename)
-    if _id in train_ids:
-        TRAIN_DATA[filename].append(text)
-    else:
-        TEST_DATA[filename].append(text)
+############# helper functions
 def milp_id(path):
     x = re.findall("milp_(\d+)-", path)
     if x:
@@ -35,10 +28,31 @@ def milp_id(path):
     z = re.findall("(\d+)_algo", path)
     if z:
         return int(z[0])
+
+    z = re.findall("(\d+)", path)
+    if z:
+        return int(z[0])
+
     raise ValueError("Cannot find the MILP ID for " + path)
 
+def add_data(filename, text, train_ids):
+    global TRAIN_DATA, TEST_DATA
+    _id = milp_id(filename)
+    if _id in train_ids:
+        TRAIN_DATA[filename].append(text)
+    else:
+        TEST_DATA[filename].append(text)
 
-####### helper function
+def _remove_heading_spaces(solve_code):
+    while True:
+        lines = solve_code.split("\n")
+        # Check if all non-empty lines have leading spaces
+        if all(line.startswith("  ") or line == "" or line.startswith("#") for line in lines):
+            # Remove two leading spaces from each line
+            solve_code = "\n".join([line[2:] if line.startswith("  ") else line for line in lines])
+        else:
+            break
+    return solve_code
 
 def parse_code(code_filename):
     if not os.path.exists(code_filename):
@@ -62,30 +76,49 @@ def parse_code(code_filename):
             solve_code = solve_imp[0]
             _code = _remove_heading_spaces(solve_code)
             ans.append(_code)
+    # pdb.set_trace()
     return ans
+#######
+
 
-def _remove_heading_spaces(solve_code):
-    while True:
-        lines = solve_code.split("\n")
-        # Check if all non-empty lines have leading spaces
-        if all(line.startswith("  ") or line == "" or line.startswith("#") for line in lines):
-            # Remove two leading spaces from each line
-            solve_code = "\n".join([line[2:] if line.startswith("  ") else line for line in lines])
-        else:
-            break
-    return solve_code
 
-####### Now, Load the Data #####
+### aggregate data and description
+def build_dataset(parent_data_dir, parent_desc_dir, multimodal_data_file, desc_suffix=""):
+    desc_path_glob = os.path.join(parent_desc_dir, f"*/desc_*{desc_suffix}.txt")
 
-def aggregate_data(multimodal_data_file,
-                   out_dir="save_dir/contrast", out_suffix=""):
+    descs = glob.glob(desc_path_glob)
+
+    count = 0
+    data = []
+    for desc in descs:
+        gz_path = desc.replace(parent_desc_dir, parent_data_dir).replace("desc", "data").replace(desc_suffix, "").replace(".txt", ".pkl.gz")
+
+        if not os.path.exists(gz_path):
+            continue
+
+        count += 1
+        data.append({
+            "id": str(count), "image": gz_path, "text_path": desc,
+            "conversations": [{
+                "from": "human",
+                "value": "<image>\nDescribe the data."
+            }, {
+                "from": "gpt",
+                "value": open(desc, "r").read()
+            }]
+        })
+
+    json.dump(data, open(multimodal_data_file, "w"), indent=2)
+
+
+### split data into train/test/val
+def split_data(multimodal_data_file, parent_data_dir, parent_code_dir, parent_save_dir, train_ids, out_suffix=""):
     global TRAIN_DATA, TEST_DATA
     TRAIN_DATA = defaultdict(list)
    TEST_DATA = defaultdict(list)
 
-    # First, loading the llava description
     multimodal_data = json.load(open(multimodal_data_file, "r"))
-    multimodal_files = [item["milp"] for item in multimodal_data]
+    multimodal_files = [item["image"] for item in multimodal_data]
     multimodal_desc_files = [item["text_path"] for item in multimodal_data]
     # remove files that does not exist
     x = zip(multimodal_files, multimodal_desc_files)
@@ -93,26 +126,58 @@
     multimodal_files, multimodal_desc_files = zip(*x)
 
     for i, (mps_file, desc_file) in enumerate(zip(multimodal_files, multimodal_desc_files)):
-        add_data(mps_file, open(desc_file, "r").read())
+        add_data(mps_file, open(desc_file, "r").read(), train_ids=train_ids)
+
+        class_name = os.path.basename(os.path.dirname(desc_file))
+        code_filename = os.path.join(parent_code_dir, f"{class_name}.py")
 
-        code_filename = re.sub("desc_seed.*.txt", "milp.py", desc_file)
-        if code_filename.endswith(".py"):
+        if os.path.exists(code_filename):
             for component in parse_code(code_filename):
-                add_data(mps_file, component)
+                add_data(mps_file, component, train_ids=train_ids)
+
+    if parent_data_dir:
+        for problem_dir in glob.glob(os.path.join(parent_data_dir, "*")):
+            _id = milp_id(problem_dir)
+            src_codename = glob.glob(os.path.join(parent_code_dir, f"milp_{_id}-*.py"))[0]
+            code_components = parse_code(src_codename)
+
+            for mps_filename in glob.glob(os.path.join(problem_dir, "*.pkl.gz")):
+                for component in code_components:
+                    add_data(mps_filename, component, train_ids=train_ids)
 
     # Finally, dump the data. Let's mainly use the pickle format because of its compression
-    json.dump(TRAIN_DATA, os.path.join(out_dir, open(f"train_{out_suffix}_data.json", "w")), indent=2)
-    json.dump(TEST_DATA, os.path.join(out_dir, open(f"test_{out_suffix}_data.json", "w")), indent=2)
+    json.dump(TRAIN_DATA, open(os.path.join(parent_save_dir, f"train_{out_suffix}data.json"), "w"), indent=2)
+    json.dump(TEST_DATA, open(os.path.join(parent_save_dir, f"test_{out_suffix}data.json"), "w"), indent=2)
 
-    pickle.dump(TRAIN_DATA, os.path.join(out_dir, open(f"train_{out_suffix}_data.pkl.gz", "wb")))
-    pickle.dump(TEST_DATA, os.path.join(out_dir, open(f"test_{out_suffix}_data.pkl.gz", "wb")))
+    pickle.dump(TRAIN_DATA, open(os.path.join(parent_save_dir, f"train_{out_suffix}data.pkl.gz"), "wb"))
+    pickle.dump(TEST_DATA, open(os.path.join(parent_save_dir, f"test_{out_suffix}data.pkl.gz"), "wb"))
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--multimodal_data_file", type=str, default="save_dir/contrast/ours_multimodal.json", help="Multimodal data file")
-    parser.add_argument("--out_dir", type=str, default="save_dir/contrast", help="Output directory")
-    parser.add_argument("--out_suffix", type=str, default="ours_", help="Output suffix")
+    parser.add_argument("--parent_code_dir", type=str, default="milp_code_v1/code")
+    parser.add_argument("--parent_data_dir", type=str, default="save_dir/contrast/data")
+    parser.add_argument("--parent_desc_dir", type=str, default="save_dir/contrast/conv")
+    parser.add_argument("--parent_save_dir", type=str, default="save_dir/contrast")
+    parser.add_argument("--multimodal_data_file", type=str, default="save_dir/contrast/data.json")
+    parser.add_argument("--desc_suffix", type=str, default="")
+    parser.add_argument("--out_suffix", type=str, default="ours")
+
    args = parser.parse_args()
-    aggregate_data(args.multimodal_data_file, args.out_dir, args.out_suffix)
\ No newline at end of file
+    build_dataset(args.parent_data_dir, args.parent_desc_dir, multimodal_data_file=args.multimodal_data_file,
+                  desc_suffix=args.desc_suffix)
+
+    # First, randomly determine the IDs for training and testing
+    total_length = 10000  # assume we have 10000 classes, which is above the actual number
+    train_ids = np.random.choice(total_length, 8000, replace=False)
+    test_ids = [i for i in range(total_length) if i not in train_ids]
+
+    split_data(multimodal_data_file=args.multimodal_data_file,
+               parent_data_dir=args.parent_data_dir,
+               parent_code_dir=args.parent_code_dir,
+               parent_save_dir=args.parent_save_dir,
+               train_ids=train_ids,
+               out_suffix=args.out_suffix)
\ No newline at end of file
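The refactored `contrast_class_split.py` above keys every instance file to a MILP class ID parsed from its path, so all instances of a class land on the same side of the train/test split. Below is a condensed, standalone sketch of that mechanism; the pattern list mirrors the patched `milp_id`, while the example paths and the fixed seed are hypothetical, for illustration only.

```python
import re
import numpy as np

def milp_id(path: str) -> int:
    # Try increasingly generic patterns, as in the patched helper.
    for pattern in (r"milp_(\d+)-", r"(\d+)_algo", r"(\d+)"):
        match = re.findall(pattern, path)
        if match:
            return int(match[0])
    raise ValueError("Cannot find the MILP ID for " + path)

# 8000 of 10000 class IDs go to train, as in the patched __main__ block.
rng = np.random.default_rng(0)  # hypothetical fixed seed
train_ids = set(rng.choice(10000, size=8000, replace=False).tolist())

for path in ["milp_42-easy/data_seed0.pkl.gz", "milp_977-hard/data_seed1.pkl.gz"]:
    print(path, "->", "train" if milp_id(path) in train_ids else "test")
```

Note that `split_data` writes its pickle outputs with a plain `open(..., "wb")` and `pickle.dump`, so despite the `.pkl.gz` names the files are uncompressed; readers should load them with plain `pickle.load`, not `gzip`.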
diff --git a/milp-evolve/src/multi_class_learning/contrast_train_test.py b/milp-evolve/src/multi_class_learning/contrast_train_test.py
index 424d475..89521f5 100644
--- a/milp-evolve/src/multi_class_learning/contrast_train_test.py
+++ b/milp-evolve/src/multi_class_learning/contrast_train_test.py
@@ -52,7 +52,7 @@ def encode_with_diskcache(model_name: str, texts: list):
     ans = torch.tensor(ans)
     return ans.reshape(len(texts), -1)
 
-def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, dataloader,
+def run(mode, epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, dataloader,
         device='cuda', freeze_text_encoder=True, repeats=1, print_iters=10, writer=None):
     global OUT
     # Check if mode is valid
@@ -64,7 +64,7 @@
 
     if not freeze_text_encoder:
         text_encoder.train() if mode == "train" else text_encoder.eval()
-    image_encoder.train() if mode == "train" else image_encoder.eval()
+    milp_encoder.train() if mode == "train" else milp_encoder.eval()
 
     epoch_loss = 0.0
     running_n = 0
@@ -94,20 +94,20 @@
             text_features = text_features.float()
 
         if mode == "train":
-            image_optimizer.zero_grad()
+            milp_optimizer.zero_grad()
 
         assert text_features.shape == (len(text_inputs), 4096)
 
         # Forward pass through the encoders
-        image_features = image_encoder(images)  # [bs, n_out_neurons]
+        milp_features = milp_encoder(images)  # [bs, n_out_neurons]
 
         # Normalize the features
         text_features = F.normalize(text_features, p=2, dim=1)
-        image_features = F.normalize(image_features, p=2, dim=1)
+        milp_features = F.normalize(milp_features, p=2, dim=1)
 
         # Calculate the logits (dot product of text and image features)
-        logits_per_image = image_features @ text_features.T
-        logits_per_text = text_features @ image_features.T
+        logits_per_image = milp_features @ text_features.T
+        logits_per_text = text_features @ milp_features.T
 
         # Labels for contrastive learning
         labels = torch.arange(len(text_inputs)).to(device)
@@ -125,12 +125,12 @@
         # calculate 4-way accuracy
         if len(text_inputs) >= 4:
-            image_features_4way = image_features[:4, :]
+            milp_features_4way = milp_features[:4, :]
             text_features_4way = text_features[:4, :]
-            logits_per_image_4way = image_features_4way @ text_features_4way.T
-            logits_per_text_4way = text_features_4way @ image_features_4way.T
+            logits_per_milp_4way = milp_features_4way @ text_features_4way.T
+            logits_per_text_4way = text_features_4way @ milp_features_4way.T
             labels_4way = torch.arange(4).to(device)
-            acc_4way_i2t = (torch.argmax(logits_per_image_4way, dim=1) == labels_4way).float().mean()
+            acc_4way_i2t = (torch.argmax(logits_per_milp_4way, dim=1) == labels_4way).float().mean()
             acc_4way_t2i = (torch.argmax(logits_per_text_4way, dim=1) == labels_4way).float().mean()
             accs["4way-i2t"].append(acc_4way_i2t.item())
             accs["4way-t2i"].append(acc_4way_t2i.item())
@@ -140,7 +140,7 @@
             loss.backward()
             if not freeze_text_encoder:
                 text_optimizer.step()
-            image_optimizer.step()
+            milp_optimizer.step()
 
         # Accumulate loss
         epoch_loss += loss.item()
@@ -259,22 +259,22 @@
     test_miplib_loader = None
 
 # MAIN #
-image_encoder = MyGNNAttn(emb_size=args.embed_size, n_out_neurons=4096, dropout=args.dropout, max_token_attn=args.max_token_attn,
+milp_encoder = MyGNNAttn(emb_size=args.embed_size, n_out_neurons=4096, dropout=args.dropout, max_token_attn=args.max_token_attn,
                           n_attn_iters=args.n_attn_layers, n_gnn_iters=args.n_gnn_layers, edge_nfeats=1)
 
 if args.load_from and args.load_from != "None.pt":
     state_dict = torch.load(os.path.join(args.log_root, args.load_from))
-    image_encoder.load_state_dict(state_dict)
+    milp_encoder.load_state_dict(state_dict)
 
-image_encoder = image_encoder.cuda()
+milp_encoder = milp_encoder.cuda()
 
 # print the number of trainable parameters, split with comma in thousands
-n_trainable_params = sum(p.numel() for p in image_encoder.parameters() if p.requires_grad)
+n_trainable_params = sum(p.numel() for p in milp_encoder.parameters() if p.requires_grad)
 print(f"Number of trainable parameters: {n_trainable_params:,}")
 
 # Optimizers for the encoders (only used in training mode)
 text_optimizer = None  # optim.Adam(text_encoder.parameters(), lr=0.0001)
-image_optimizer = optim.Adam(image_encoder.parameters(), lr=lr)
+milp_optimizer = optim.Adam(milp_encoder.parameters(), lr=lr)
 
 OUT_FILE = os.path.join(args.log_root, f"{args.dataset}_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_layser_{args.n_attn_layers}_{args.n_gnn_layers}_output.txt")
@@ -297,31 +297,31 @@
 
 if args.dataset == "miplib" and valid_dataloader:
     # let's do zero-shot eval first.
-    run("validation", -1, text_encoder, image_encoder, text_optimizer, image_optimizer, valid_dataloader,
+    run("validation", -1, text_encoder, milp_encoder, text_optimizer, milp_optimizer, valid_dataloader,
         device='cuda', freeze_text_encoder=True, repeats=validation_repeats, print_iters=args.print_iters, writer=writer)
 
 for epoch in range(args.epochs):
-    run("train", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, train_dataloader,
+    run("train", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, train_dataloader,
         device='cuda', freeze_text_encoder=True, print_iters=args.print_iters, writer=writer)
 
-    torch.save(image_encoder.state_dict(),
+    torch.save(milp_encoder.state_dict(),
                os.path.join(args.log_root,
-                            f"{args.dataset}_image_encoder_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_epoch{epoch}.pt"))
+                            f"{args.dataset}_milp_encoder_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_epoch{epoch}.pt"))
 
     if args.save_to:
-        torch.save(image_encoder.state_dict(), os.path.join(args.log_root, args.save_to))
+        torch.save(milp_encoder.state_dict(), os.path.join(args.log_root, args.save_to))
 
     if epoch == args.epochs - 1 or epoch % args.eval_epochs == 0:
         if valid_dataloader:
-            run("validation", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, valid_dataloader,
+            run("validation", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, valid_dataloader,
                 device='cuda', freeze_text_encoder=True, repeats=validation_repeats, print_iters=args.print_iters, writer=writer)
 
         if test_ours_dataloader:
-            run("test_ours", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, test_ours_dataloader,
+            run("test_ours", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, test_ours_dataloader,
                 device='cuda', freeze_text_encoder=True, repeats=1, print_iters=args.print_iters, writer=writer)
 
         if test_miplib_loader:
-            run("test_miplib", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, test_miplib_loader,
+            run("test_miplib", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, test_miplib_loader,
                 device='cuda', freeze_text_encoder=True, repeats=1, print_iters=args.print_iters, writer=writer)
 
 # Close the SummaryWriter
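The `image_encoder` → `milp_encoder` rename above clarifies that the GNN embeds MILP bipartite graphs rather than images, while the training objective remains a CLIP-style symmetric contrastive loss. The loss computation itself falls outside the shown hunks, so the following is only a minimal sketch of the objective implied by the `logits_per_*` matrices and the `arange` labels; shapes are assumed as in the diff, and this is not the repository's exact code.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(milp_features: torch.Tensor, text_features: torch.Tensor) -> torch.Tensor:
    """Symmetric InfoNCE over a batch of aligned (MILP, text) embedding pairs."""
    # L2-normalize each modality, as run() does before taking dot products.
    milp_features = F.normalize(milp_features, p=2, dim=1)
    text_features = F.normalize(text_features, p=2, dim=1)

    # Row i of each logits matrix should peak at column i (the matching pair).
    logits_per_milp = milp_features @ text_features.T
    logits_per_text = text_features @ milp_features.T

    labels = torch.arange(milp_features.size(0), device=milp_features.device)
    return (F.cross_entropy(logits_per_milp, labels)
            + F.cross_entropy(logits_per_text, labels)) / 2

# Toy batch: 4 MILP embeddings against 4 text embeddings of width 4096.
loss = contrastive_loss(torch.randn(4, 4096), torch.randn(4, 4096))
print(f"{loss.item():.4f}")
```

The 4-way accuracy in the diff is a direct readout of the same matrices: an argmax over the first four rows and columns compared against `arange(4)`.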
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/.DS_Store b/milp-evolve/src/multi_class_learning/full_scripts/.DS_Store
deleted file mode 100644
index 3208279..0000000
Binary files a/milp-evolve/src/multi_class_learning/full_scripts/.DS_Store and /dev/null differ
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh b/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh
deleted file mode 100644
index 555e2c6..0000000
--- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-export PARENT_DATA_DIR=save_dir/contrast/data
-export PARENT_INSTANCES_DIR=save_dir/instances/mps
-
-python -u lang_extract_context.py --n_cpus $N_CPUS --parent_data_dir $PARENT_DATA_DIR --parent_instances_dir $PARENT_INSTANCES_DIR
-
-export PARENT_CODE_DIR=milp_code_v1/code
-export PARENT_INSTANCE_DIR=save_dir/instances/mps
-export PARENT_OUTPUT_DIR=save_dir/contrast/conv
-
-python contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh b/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh
deleted file mode 100644
index 5cf4f39..0000000
--- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-export PARENT_CODE_DIR='milp_code_v1/code'
-export PARENT_INSTANCE_DIR='save_dir/instances/mps'
-export PARENT_OUTPUT_DIR='save_dir/contrast/conv'
-
-python -u contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/collect_branching_data.sh b/milp-evolve/src/multi_class_learning/scripts/branching/collect_branching_data.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/branching/collect_branching_data.sh
rename to milp-evolve/src/multi_class_learning/scripts/branching/collect_branching_data.sh
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/test_branching.sh b/milp-evolve/src/multi_class_learning/scripts/branching/test_branching.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/branching/test_branching.sh
rename to milp-evolve/src/multi_class_learning/scripts/branching/test_branching.sh
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/train_branching.sh b/milp-evolve/src/multi_class_learning/scripts/branching/train_branching.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/branching/train_branching.sh
rename to milp-evolve/src/multi_class_learning/scripts/branching/train_branching.sh
diff --git a/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh
new file mode 100644
index 0000000..630fbaf
--- /dev/null
+++ b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh
@@ -0,0 +1,5 @@
+export N_CPUS=60
+export PARENT_DATA_DIR=save_dir/contrast_data  # location to save the milp input features
+export PARENT_INSTANCES_DIR=save_dir/instances/mps/code_v1  # location where the MILP instances are saved
+
+python -u contrast_milp_collect.py --n_cpus $N_CPUS --parent_data_dir $PARENT_DATA_DIR --parent_instances_dir $PARENT_INSTANCES_DIR
\ No newline at end of file
diff --git a/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh
new file mode 100644
index 0000000..9fef4b2
--- /dev/null
+++ b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh
@@ -0,0 +1,5 @@
+export PARENT_CODE_DIR=milp_code_v1/code  # location where the optimization code files are saved
+export PARENT_INSTANCE_DIR=save_dir/instances/mps/code_v1  # location where the MILP instances are saved
+export PARENT_OUTPUT_DIR=save_dir/contrast/conv  # location to save the text descriptions
+
+python contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR
\ No newline at end of file
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh b/milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh
similarity index 99%
rename from milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh
rename to milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh
index 0fb6d5f..742e6f7 100644
--- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh
+++ b/milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh
@@ -3,4 +3,5 @@ export DATASET=ours
 export EVAL_EPOCHS=10
 export PRINT_ITERS=10000
 export TEXT_TYPES="description only"
+
 python train.py --epochs $EPOCH --dataset $DATASET --eval_epochs $EVAL_EPOCHS --print_iters $PRINT_ITERS --text_types $TEXT_TYPES
\ No newline at end of file
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/collect_gap_data.sh b/milp-evolve/src/multi_class_learning/scripts/gap/collect_gap_data.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/gap/collect_gap_data.sh
rename to milp-evolve/src/multi_class_learning/scripts/gap/collect_gap_data.sh
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/test_gap.sh b/milp-evolve/src/multi_class_learning/scripts/gap/test_gap.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/gap/test_gap.sh
rename to milp-evolve/src/multi_class_learning/scripts/gap/test_gap.sh
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/train_gap.sh b/milp-evolve/src/multi_class_learning/scripts/gap/train_gap.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/gap/train_gap.sh
rename to milp-evolve/src/multi_class_learning/scripts/gap/train_gap.sh
diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gen_instances/gen_milp_instances.sh b/milp-evolve/src/multi_class_learning/scripts/gen_instances/gen_milp_instances.sh
similarity index 100%
rename from milp-evolve/src/multi_class_learning/full_scripts/gen_instances/gen_milp_instances.sh
rename to milp-evolve/src/multi_class_learning/scripts/gen_instances/gen_milp_instances.sh