From af966e0eff2f170816d0030dd74e1cd972cb6698 Mon Sep 17 00:00:00 2001 From: siruil <34456232+siruil@users.noreply.github.com> Date: Thu, 6 Mar 2025 11:37:34 -0800 Subject: [PATCH 1/2] Update MILP-Evolve code --- milp-evolve/README.md | 108 +------------ milp-evolve/data/.DS_Store | Bin 6148 -> 0 bytes milp-evolve/data/milp_code/.DS_Store | Bin 10244 -> 0 bytes .../data/milp_code/evolve_tab1/.DS_Store | Bin 10244 -> 0 bytes .../data/milp_code/evolve_tab1/code/.DS_Store | Bin 6148 -> 0 bytes .../data/milp_code/evolve_tab2/.DS_Store | Bin 12292 -> 0 bytes milp-evolve/setup.md | 22 ++- milp-evolve/src/.DS_Store | Bin 10244 -> 0 bytes milp-evolve/src/milp_evolve_llm/.DS_Store | Bin 8196 -> 0 bytes .../src/multi_class_learning/.DS_Store | Bin 6148 -> 0 bytes .../contrast_class_split.py | 145 +++++++++++++----- .../contrast_train_test.py | 50 +++--- .../full_scripts/.DS_Store | Bin 6148 -> 0 bytes .../contrast/collect_contrast_milp.sh | 10 -- .../contrast/collect_contrast_text.sh | 5 - .../branching/collect_branching_data.sh | 0 .../branching/test_branching.sh | 0 .../branching/train_branching.sh | 0 .../scripts/contrast/collect_contrast_milp.sh | 5 + .../scripts/contrast/collect_contrast_text.sh | 5 + .../contrast/train_test_contrast.sh | 1 + .../gap/collect_gap_data.sh | 0 .../{full_scripts => scripts}/gap/test_gap.sh | 0 .../gap/train_gap.sh | 0 .../gen_instances/gen_milp_instances.sh | 0 25 files changed, 159 insertions(+), 192 deletions(-) delete mode 100644 milp-evolve/data/.DS_Store delete mode 100644 milp-evolve/data/milp_code/.DS_Store delete mode 100644 milp-evolve/data/milp_code/evolve_tab1/.DS_Store delete mode 100644 milp-evolve/data/milp_code/evolve_tab1/code/.DS_Store delete mode 100644 milp-evolve/data/milp_code/evolve_tab2/.DS_Store delete mode 100644 milp-evolve/src/.DS_Store delete mode 100644 milp-evolve/src/milp_evolve_llm/.DS_Store delete mode 100644 milp-evolve/src/multi_class_learning/.DS_Store delete mode 100644 milp-evolve/src/multi_class_learning/full_scripts/.DS_Store delete mode 100644 milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh delete mode 100644 milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/branching/collect_branching_data.sh (100%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/branching/test_branching.sh (100%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/branching/train_branching.sh (100%) create mode 100644 milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh create mode 100644 milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/contrast/train_test_contrast.sh (99%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/gap/collect_gap_data.sh (100%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/gap/test_gap.sh (100%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/gap/train_gap.sh (100%) rename milp-evolve/src/multi_class_learning/{full_scripts => scripts}/gen_instances/gen_milp_instances.sh (100%) diff --git a/milp-evolve/README.md b/milp-evolve/README.md index f03dded..04a96e1 100644 --- a/milp-evolve/README.md +++ b/milp-evolve/README.md @@ -1,22 +1,4 @@ ---- -language: English -license: cdla-2.0 -multilinguality: monolingual -size_categories: - - 
diff --git a/milp-evolve/data/milp_code/.DS_Store b/milp-evolve/data/milp_code/.DS_Store
deleted file mode 100644
index 8a27e66a8b84485e02ded4bef2dfc7438e666ac5..0000000000000000000000000000000000000000
Binary files a/milp-evolve/data/milp_code/.DS_Store and /dev/null differ
diff --git a/milp-evolve/data/milp_code/evolve_tab1/code/.DS_Store b/milp-evolve/data/milp_code/evolve_tab1/code/.DS_Store
deleted file mode 100644
index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000
Binary files a/milp-evolve/data/milp_code/evolve_tab1/code/.DS_Store and /dev/null differ
diff --git a/milp-evolve/setup.md b/milp-evolve/setup.md
index 9377797..6c162ce 100644
--- a/milp-evolve/setup.md
+++ b/milp-evolve/setup.md
@@ -275,6 +275,7 @@ CUDA_VISIBLE_DEVICES=0 python -u branching_test.py --n_cpus $N_CPUS \
 - Extract MILP input features: We run the following code to extract the MILP input features. The code is similar to `gap_collect.py`, except here we only solve each MILP instance to the root-node LP relaxation to collect the input features, and we do not need to solve the MILP instance to optimality (that is only required to collect gap data).
 ```script
+export N_CPUS=60
 export PARENT_DATA_DIR=save_dir/contrast_data # location to save the milp input features
 export PARENT_INSTANCES_DIR=save_dir/instances/mps/code_v1 # location where the MILP instances are saved
@@ -296,12 +297,17 @@ python contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance
 We then run the following code to split the multi-modal dataset (MILP and text) into disjoint train and test splits. In particular, `$MULTIMODAL_DATA_FILE` is a json file that contains a list of entries with the format `[{"milp": path to the input features of the milp instance, "text_path": path to the text description of the milp instance}, ...]` to split into train and test sets with disjoint MILP classes. The train/test splits are saved as `{out_dir}/train_{out_suffix}_data.pkl.gz` and `{out_dir}/test_{out_suffix}_data.pkl.gz`, which are used to train and test the language-MILP contrastive model.
-```script
-export MULTIMODAL_DATA_FILE=[json file that provides the associated milp and text paths]
-export OUT_DIR=save_dir/contrast
-export OUT_SUFFIX=ours
-
-python contrast_class_split.py --multimodal_data_file $MULTIMODAL_DATA_FILE --out_dir $OUT_DIR --out_suffix $OUT_SUFFIX
+```
+export PARENT_CODE_DIR=milp_code_v1/code
+export PARENT_DATA_DIR=save_dir/contrast/data
+export PARENT_DESC_DIR=save_dir/contrast/conv
+export PARENT_SAVE_DIR=save_dir/contrast
+export MULTIMODAL_DATA_FILE=save_dir/contrast/data_ours.json
+export OUT_SUFFIX=ours_
+
+python contrast_class_split.py --parent_code_dir $PARENT_CODE_DIR --parent_data_dir $PARENT_DATA_DIR \
+    --parent_desc_dir $PARENT_DESC_DIR --parent_save_dir $PARENT_SAVE_DIR \
+    --multimodal_data_file $MULTIMODAL_DATA_FILE --out_suffix $OUT_SUFFIX
 ```
@@ -319,4 +325,6 @@ export TEXT_TYPES="description only"
 python contrast_train_test.py --epochs $EPOCH --dataset $DATASET --eval_epochs $EVAL_EPOCHS --print_iters $PRINT_ITERS --text_types $TEXT_TYPES
 ```
-
\ No newline at end of file
+
+
+
diff --git a/milp-evolve/src/.DS_Store b/milp-evolve/src/.DS_Store
deleted file mode 100644
index b20b54ed8abfb0c88173eefdec509c0d8094cbac..0000000000000000000000000000000000000000
Binary files a/milp-evolve/src/.DS_Store and /dev/null differ
N{r~%o?A=@ce+N)pt_%PG diff --git a/milp-evolve/src/multi_class_learning/.DS_Store b/milp-evolve/src/multi_class_learning/.DS_Store deleted file mode 100644 index 44b82d322df1213939b5febf835947ce78567f6b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKO;6iE5S=9JIx`AqYG^T_`OWxLC8L$leZw$z@yG_4nLMeWa&u>9_IL$|4Dnj_&g`+gdi%#dBt<~!5 zjSXkR*>W!Y-(u<)!(x*6hU0U&Iujxd7xQ8GEs3Xr`@S!-Vwhy{SOp|e43{6@lPnU` zo|t4&u3~+?;WV9Q;I?M7?&nW!esI{GxB2YLey7d52Z!@{)7ky7_w_XRlU`@yMia_V zf0XPPT)-_REA#v7kF!){SMZVX@8}wpD@E-JP{jmA6e~tzU{nz)Z`6sSX)X)%}%hn*Wg&tR!hhZ8f+2QzPGrbA)s?RY*{?!-Ke zcC-vw2A(od(@R(0|3|C)|EEEAWf`yxREhysANYeFQZjezQgZUH<SH*1|9$}PS$_` diff --git a/milp-evolve/src/multi_class_learning/contrast_class_split.py b/milp-evolve/src/multi_class_learning/contrast_class_split.py index 1509acd..e9d4941 100644 --- a/milp-evolve/src/multi_class_learning/contrast_class_split.py +++ b/milp-evolve/src/multi_class_learning/contrast_class_split.py @@ -1,25 +1,18 @@ +import glob import json import os +import pdb +import sys +import argparse import pickle import re -import argparse from collections import defaultdict import numpy as np -# First, randomly determine the IDs for trainning and testing -total_length = 10000 # assume we have 10000 classes, which is above the actual number -train_ids = np.random.choice(total_length, 8000, replace=False) -test_ids = [i for i in range(total_length) if i not in train_ids] -def add_data(filename, text): - global TRAIN_DATA, TEST_DATA - _id = milp_id(filename) - if _id in train_ids: - TRAIN_DATA[filename].append(text) - else: - TEST_DATA[filename].append(text) +############# helper functions def milp_id(path): x = re.findall("milp_(\d+)-", path) if x: @@ -35,10 +28,31 @@ def milp_id(path): z = re.findall("(\d+)_algo", path) if z: return int(z[0]) + + z = re.findall("(\d+)", path) + if z: + return int(z[0]) + raise ValueError("Cannot find the MILP ID for " + path) +def add_data(filename, text, train_ids): + global TRAIN_DATA, TEST_DATA + _id = milp_id(filename) + if _id in train_ids: + TRAIN_DATA[filename].append(text) + else: + TEST_DATA[filename].append(text) -####### helper function +def _remove_heading_spaces(solve_code): + while True: + lines = solve_code.split("\n") + # Check if all non-empty lines have leading spaces + if all(line.startswith(" ") or line == "" or line.startswith("#") for line in lines): + # Remove two leading spaces from each line + solve_code = "\n".join([line[2:] if line.startswith(" ") else line for line in lines]) + else: + break + return solve_code def parse_code(code_filename): if not os.path.exists(code_filename): @@ -62,30 +76,49 @@ def parse_code(code_filename): solve_code = solve_imp[0] _code = _remove_heading_spaces(solve_code) ans.append(_code) + # pdb.set_trace() return ans +####### + -def _remove_heading_spaces(solve_code): - while True: - lines = solve_code.split("\n") - # Check if all non-empty lines have leading spaces - if all(line.startswith(" ") or line == "" or line.startswith("#") for line in lines): - # Remove two leading spaces from each line - solve_code = "\n".join([line[2:] if line.startswith(" ") else line for line in lines]) - else: - break - return solve_code -####### Now, Load the Data ##### +### aggregate data and description +def build_dataset(parent_data_dir, parent_desc_dir, multimodal_data_file, desc_suffix=""): + desc_path_glob = os.path.join(parent_desc_dir, f"*/desc_*{desc_suffix}.txt") -def aggregate_data(multimodal_data_file, - out_dir="save_dir/contrast", out_suffix=""): 
+ descs = glob.glob(desc_path_glob) + + count = 0 + data = [] + for desc in descs: + gz_path = desc.replace(parent_desc_dir, parent_data_dir).replace("desc", "data").replace(desc_suffix, "").replace(".txt", ".pkl.gz") + + if not os.path.exists(gz_path): + continue + + count += 1 + data.append({ + "id": str(count), "image": gz_path, "text_path": desc, + "conversations": [{ + "from": "human", + "value": "\nDescribe the data." + }, { + "from": "gpt", + "value": open(desc, "r").read() + }] + }) + + json.dump(data, open(multimodal_data_file, "w"), indent=2) + + +### split data into train/test/val +def split_data(multimodal_data_file, parent_data_dir, parent_code_dir, parent_save_dir, train_ids, out_suffix=""): global TRAIN_DATA, TEST_DATA TRAIN_DATA = defaultdict(list) TEST_DATA = defaultdict(list) - # First, loading the llava description multimodal_data = json.load(open(multimodal_data_file, "r")) - multimodal_files = [item["milp"] for item in multimodal_data] + multimodal_files = [item["image"] for item in multimodal_data] multimodal_desc_files = [item["text_path"] for item in multimodal_data] # remove files that does not exist x = zip(multimodal_files, multimodal_desc_files) @@ -93,26 +126,58 @@ def aggregate_data(multimodal_data_file, multimodal_files, multimodal_desc_files = zip(*x) for i, (mps_file, desc_file) in enumerate(zip(multimodal_files, multimodal_desc_files)): - add_data(mps_file, open(desc_file, "r").read()) + add_data(mps_file, open(desc_file, "r").read(), train_ids=train_ids) + + class_name = os.path.basename(os.path.dirname(desc_file)) + code_filename = os.path.join(parent_code_dir, f"{class_name}.py") - code_filename = re.sub("desc_seed.*.txt", "milp.py", desc_file) - if code_filename.endswith(".py"): + if os.path.exists(code_filename): for component in parse_code(code_filename): - add_data(mps_file, component) + add_data(mps_file, component, train_ids=train_ids) + + + if parent_data_dir: + for problem_dir in glob.glob(os.path.join(parent_data_dir, "*")): + _id = milp_id(problem_dir) + src_codename = glob.glob(os.path.join(parent_code_dir, f"milp_{_id}-*.py"))[0] + code_components = parse_code(src_codename) + + for mps_filename in glob.glob(os.path.join(problem_dir, "*.pkl.gz")): + for component in code_components: + add_data(mps_filename, component, train_ids=train_ids) # Finally, dump the data. 
Let's mainly use the pickle format because of its compression - json.dump(TRAIN_DATA, os.path.join(out_dir, open(f"train_{out_suffix}_data.json", "w")), indent=2) - json.dump(TEST_DATA, os.path.join(out_dir, open(f"test_{out_suffix}_data.json", "w")), indent=2) + json.dump(TRAIN_DATA, open(os.path.join(parent_save_dir, f"train_{out_suffix}data.json"), "w"), indent=2) + json.dump(TEST_DATA, open(os.path.join(parent_save_dir, f"test_{out_suffix}data.json"), "w"), indent=2) - pickle.dump(TRAIN_DATA, os.path.join(out_dir, open(f"train_{out_suffix}_data.pkl.gz", "wb"))) - pickle.dump(TEST_DATA, os.path.join(out_dir, open(f"test_{out_suffix}_data.pkl.gz", "wb"))) + pickle.dump(TRAIN_DATA, open(os.path.join(parent_save_dir, f"train_{out_suffix}data.pkl.gz"), "wb")) + pickle.dump(TEST_DATA, open(os.path.join(parent_save_dir, f"test_{out_suffix}data.pkl.gz"), "wb")) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--multimodal_data_file", type=str, default="save_dir/contrast/ours_multimodal.json", help="Multimodal data file") - parser.add_argument("--out_dir", type=str, default="save_dir/contrast", help="Output directory") - parser.add_argument("--out_suffix", type=str, default="ours_", help="Output suffix") + parser.add_argument("--parent_code_dir", type=str, default="milp_code_v1/code") + parser.add_argument("--parent_data_dir", type=str, default="save_dir/contrast/data") + parser.add_argument("--parent_desc_dir", type=str, default="save_dir/contrast/conv") + parser.add_argument("--parent_save_dir", type=str, default="save_dir/contrast") + parser.add_argument("--multimodal_data_file", type=str, default="save_dir/contrast/data.json") + parser.add_argument("--desc_suffix", type=str, default="") + parser.add_argument("--out_suffix", type=str, default="ours") + args = parser.parse_args() - aggregate_data(args.multimodal_data_file, args.out_dir, args.out_suffix) \ No newline at end of file + build_dataset(args.parent_data_dir, args.parent_desc_dir, multimodal_data_file=args.multimodal_data_file, + desc_suffix=args.desc_suffix) + + + # First, randomly determine the IDs for trainning and testing + total_length = 10000 # assume we have 10000 classes, which is above the actual number + train_ids = np.random.choice(total_length, 8000, replace=False) + test_ids = [i for i in range(total_length) if i not in train_ids] + + split_data(multimodal_data_file = args.multimodal_data_file, + parent_data_dir = args.parent_data_dir, + parent_code_dir=args.parent_code_dir, + parent_save_dir=args.parent_save_dir, + train_ids=train_ids, + out_suffix=args.out_suffix) \ No newline at end of file diff --git a/milp-evolve/src/multi_class_learning/contrast_train_test.py b/milp-evolve/src/multi_class_learning/contrast_train_test.py index 424d475..89521f5 100644 --- a/milp-evolve/src/multi_class_learning/contrast_train_test.py +++ b/milp-evolve/src/multi_class_learning/contrast_train_test.py @@ -52,7 +52,7 @@ def encode_with_diskcache(model_name: str, texts: list): ans = torch.tensor(ans) return ans.reshape(len(texts), -1) -def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, dataloader, +def run(mode, epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, dataloader, device='cuda', freeze_text_encoder=True, repeats=1, print_iters=10, writer=None): global OUT # Check if mode is valid @@ -64,7 +64,7 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize if not freeze_text_encoder: text_encoder.train() if mode == "train" 
else text_encoder.eval() - image_encoder.train() if mode == "train" else image_encoder.eval() + milp_encoder.train() if mode == "train" else milp_encoder.eval() epoch_loss = 0.0 running_n = 0 @@ -94,20 +94,20 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize text_features = text_features.float() if mode == "train": - image_optimizer.zero_grad() + milp_optimizer.zero_grad() assert text_features.shape == (len(text_inputs), 4096) # Forward pass through the encoders - image_features = image_encoder(images) # [bs, n_out_neurons] + milp_features = milp_encoder(images) # [bs, n_out_neurons] # Normalize the features text_features = F.normalize(text_features, p=2, dim=1) - image_features = F.normalize(image_features, p=2, dim=1) + milp_features = F.normalize(milp_features, p=2, dim=1) # Calculate the logits (dot product of text and image features) - logits_per_image = image_features @ text_features.T - logits_per_text = text_features @ image_features.T + logits_per_image = milp_features @ text_features.T + logits_per_text = text_features @ milp_features.T # Labels for contrastive learning labels = torch.arange(len(text_inputs)).to(device) @@ -125,12 +125,12 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize # calculate 4-way accuracy if len(text_inputs) >= 4: - image_features_4way = image_features[:4, :] + milp_features_4way = milp_features[:4, :] text_features_4way = text_features[:4, :] - logits_per_image_4way = image_features_4way @ text_features_4way.T - logits_per_text_4way = text_features_4way @ image_features_4way.T + logits_per_milp_4way = milp_features_4way @ text_features_4way.T + logits_per_text_4way = text_features_4way @ milp_features_4way.T labels_4way = torch.arange(4).to(device) - acc_4way_i2t = (torch.argmax(logits_per_image_4way, dim=1) == labels_4way).float().mean() + acc_4way_i2t = (torch.argmax(logits_per_milp_4way, dim=1) == labels_4way).float().mean() acc_4way_t2i = (torch.argmax(logits_per_text_4way, dim=1) == labels_4way).float().mean() accs["4way-i2t"].append(acc_4way_i2t.item()) accs["4way-t2i"].append(acc_4way_t2i.item()) @@ -140,7 +140,7 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize loss.backward() if not freeze_text_encoder: text_optimizer.step() - image_optimizer.step() + milp_optimizer.step() # Accumulate loss epoch_loss += loss.item() @@ -259,22 +259,22 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize test_miplib_loader = None # MAIN # -image_encoder = MyGNNAttn(emb_size=args.embed_size, n_out_neurons=4096, dropout=args.dropout, max_token_attn=args.max_token_attn, +milp_encoder = MyGNNAttn(emb_size=args.embed_size, n_out_neurons=4096, dropout=args.dropout, max_token_attn=args.max_token_attn, n_attn_iters=args.n_attn_layers, n_gnn_iters=args.n_gnn_layers, edge_nfeats=1) if args.load_from and args.load_from != "None.pt": state_dict = torch.load(os.path.join(args.log_root, args.load_from)) - image_encoder.load_state_dict(state_dict) + milp_encoder.load_state_dict(state_dict) -image_encoder = image_encoder.cuda() +milp_encoder = milp_encoder.cuda() # print the number of trainable parameters, split with comma in thousands -n_trainable_params = sum(p.numel() for p in image_encoder.parameters() if p.requires_grad) +n_trainable_params = sum(p.numel() for p in milp_encoder.parameters() if p.requires_grad) print(f"Number of trainable parameters: {n_trainable_params:,}") # Optimizers for the encoders (only used in training mode) 
text_optimizer = None # optim.Adam(text_encoder.parameters(), lr=0.0001) -image_optimizer = optim.Adam(image_encoder.parameters(), lr=lr) +milp_optimizer = optim.Adam(milp_encoder.parameters(), lr=lr) OUT_FILE = os.path.join(args.log_root, f"{args.dataset}_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_layser_{args.n_attn_layers}_{args.n_gnn_layers}_output.txt") @@ -297,31 +297,31 @@ def run(mode, epoch, text_encoder, image_encoder, text_optimizer, image_optimize if args.dataset == "miplib" and valid_dataloader: # let's do zero-shot eval first. - run("validation", -1, text_encoder, image_encoder, text_optimizer, image_optimizer, valid_dataloader, + run("validation", -1, text_encoder, milp_encoder, text_optimizer, milp_optimizer, valid_dataloader, device='cuda', freeze_text_encoder=True, repeats=validation_repeats, print_iters=args.print_iters, writer=writer) for epoch in range(args.epochs): - run("train", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, train_dataloader, + run("train", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, train_dataloader, device='cuda', freeze_text_encoder=True, print_iters=args.print_iters, writer=writer) - torch.save(image_encoder.state_dict(), + torch.save(milp_encoder.state_dict(), os.path.join(args.log_root, - f"{args.dataset}_image_encoder_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_epoch{epoch}.pt")) + f"{args.dataset}_milp_encoder_use_attn_embed_{args.embed_size}_num_milp_{args.num_milp_instance}-{args.num_milp_class}_epoch{epoch}.pt")) if args.save_to: - torch.save(image_encoder.state_dict(), os.path.join(args.log_root, args.save_to)) + torch.save(milp_encoder.state_dict(), os.path.join(args.log_root, args.save_to)) if epoch == args.epochs - 1 or epoch % args.eval_epochs == 0: if valid_dataloader: - run("validation", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, valid_dataloader, + run("validation", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, valid_dataloader, device='cuda', freeze_text_encoder=True, repeats=validation_repeats, print_iters=args.print_iters, writer=writer) if test_ours_dataloader: - run("test_ours", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, test_ours_dataloader, + run("test_ours", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, test_ours_dataloader, device='cuda', freeze_text_encoder=True, repeats=1, print_iters=args.print_iters, writer=writer) if test_miplib_loader: - run("test_miplib", epoch, text_encoder, image_encoder, text_optimizer, image_optimizer, test_miplib_loader, + run("test_miplib", epoch, text_encoder, milp_encoder, text_optimizer, milp_optimizer, test_miplib_loader, device='cuda', freeze_text_encoder=True, repeats=1, print_iters=args.print_iters, writer=writer) # Close the SummaryWriter diff --git a/milp-evolve/src/multi_class_learning/full_scripts/.DS_Store b/milp-evolve/src/multi_class_learning/full_scripts/.DS_Store deleted file mode 100644 index 3208279f5cacfe1da23b182a99e8644d074bac7e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T4blTSVwVp~nTU1*;Z+@Dgf$0V8@)sfj5Xj9FyO|HNKL8;5lV}^D1ppEop&+G3$Xx4MaKVJa%rS-lGRVUsd#ah} zH=5|%CES9B6mt0V{b8a*^u2{oNu1@~?z>o6Y%DFW$Q8LNul;*n_<4{|vVJhWq|uqy zNm%JYcpi_6VQc+Zr+E;kqp?YdqY;K&UBqdmi@u(uQD$;|>yS;^9JboiY42daqjvXt zvyPe`?sPkHaA$yA{XZphrOW^`@J$A2e^A&6J%^=5vvpuc*JtuK2uV<ZdSsm@lgoE%ba?cDf1M>_l+F^^% 
z{}cRWY9IOYDLi5Zn1MgWfM^W-K_829XX}^p=&V)PZm^M1TuBNF>T{O>9MC?}HcsnT bq+^`tu(U|Cklmys@d6- diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh b/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh deleted file mode 100644 index 555e2c6..0000000 --- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_milp.sh +++ /dev/null @@ -1,10 +0,0 @@ -export PARENT_DATA_DIR=save_dir/contrast/data -export PARENT_INSTANCES_DIR=save_dir/instances/mps - -python -u lang_extract_context.py --n_cpus $N_CPUS --parent_data_dir $PARENT_DATA_DIR --parent_instances_dir $PARENT_INSTANCES_DIR - -export PARENT_CODE_DIR=milp_code_v1/code -export PARENT_INSTANCE_DIR=save_dir/instances/mps -export PARENT_OUTPUT_DIR=save_dir/contrast/conv - -python contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh b/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh deleted file mode 100644 index 5cf4f39..0000000 --- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/collect_contrast_text.sh +++ /dev/null @@ -1,5 +0,0 @@ -export PARENT_CODE_DIR='milp_code_v1/code' -export PARENT_INSTANCE_DIR='save_dir/instances/mps' -export PARENT_OUTPUT_DIR='save_dir/contrast/conv' - -python -u contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/collect_branching_data.sh b/milp-evolve/src/multi_class_learning/scripts/branching/collect_branching_data.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/branching/collect_branching_data.sh rename to milp-evolve/src/multi_class_learning/scripts/branching/collect_branching_data.sh diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/test_branching.sh b/milp-evolve/src/multi_class_learning/scripts/branching/test_branching.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/branching/test_branching.sh rename to milp-evolve/src/multi_class_learning/scripts/branching/test_branching.sh diff --git a/milp-evolve/src/multi_class_learning/full_scripts/branching/train_branching.sh b/milp-evolve/src/multi_class_learning/scripts/branching/train_branching.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/branching/train_branching.sh rename to milp-evolve/src/multi_class_learning/scripts/branching/train_branching.sh diff --git a/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh new file mode 100644 index 0000000..630fbaf --- /dev/null +++ b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_milp.sh @@ -0,0 +1,5 @@ +export N_CPUS=60 +export PARENT_DATA_DIR=save_dir/contrast_data # location to save the milp input features +export PARENT_INSTANCES_DIR=save_dir/instances/mps/code_v1 # location where the MILP instances are saved + +python -u contrast_milp_collect.py --n_cpus $N_CPUS --parent_data_dir $PARENT_DATA_DIR --parent_instances_dir $PARENT_INSTANCES_DIR \ No newline at end of file diff --git a/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh 
b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh new file mode 100644 index 0000000..9fef4b2 --- /dev/null +++ b/milp-evolve/src/multi_class_learning/scripts/contrast/collect_contrast_text.sh @@ -0,0 +1,5 @@ +export PARENT_CODE_DIR=milp_code_v1/code # location where the optimization code files are saved +export PARENT_INSTANCE_DIR=save_dir/instances/mps/code_v1 # location where the MILP instances are saved +export PARENT_OUTPUT_DIR=save_dir/contrast/conv # location to save the text descriptions + +python contrast_mps_conv.py --parent_code_dir $PARENT_CODE_DIR --parent_instance_dir $PARENT_INSTANCE_DIR --parent_output_dir $PARENT_OUTPUT_DIR \ No newline at end of file diff --git a/milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh b/milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh similarity index 99% rename from milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh rename to milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh index 0fb6d5f..742e6f7 100644 --- a/milp-evolve/src/multi_class_learning/full_scripts/contrast/train_test_contrast.sh +++ b/milp-evolve/src/multi_class_learning/scripts/contrast/train_test_contrast.sh @@ -3,4 +3,5 @@ export DATASET=ours export EVAL_EPOCHS=10 export PRINT_ITERS=10000 export TEXT_TYPES="description only" + python train.py --epochs $EPOCH --dataset $DATASET --eval_epochs $EVAL_EPOCHS --print_iters $PRINT_ITERS --text_types $TEXT_TYPES \ No newline at end of file diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/collect_gap_data.sh b/milp-evolve/src/multi_class_learning/scripts/gap/collect_gap_data.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/gap/collect_gap_data.sh rename to milp-evolve/src/multi_class_learning/scripts/gap/collect_gap_data.sh diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/test_gap.sh b/milp-evolve/src/multi_class_learning/scripts/gap/test_gap.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/gap/test_gap.sh rename to milp-evolve/src/multi_class_learning/scripts/gap/test_gap.sh diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gap/train_gap.sh b/milp-evolve/src/multi_class_learning/scripts/gap/train_gap.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/gap/train_gap.sh rename to milp-evolve/src/multi_class_learning/scripts/gap/train_gap.sh diff --git a/milp-evolve/src/multi_class_learning/full_scripts/gen_instances/gen_milp_instances.sh b/milp-evolve/src/multi_class_learning/scripts/gen_instances/gen_milp_instances.sh similarity index 100% rename from milp-evolve/src/multi_class_learning/full_scripts/gen_instances/gen_milp_instances.sh rename to milp-evolve/src/multi_class_learning/scripts/gen_instances/gen_milp_instances.sh From 6a534d226072186e4fb148aeafaa42a6d95c38ed Mon Sep 17 00:00:00 2001 From: siruil <34456232+siruil@users.noreply.github.com> Date: Thu, 6 Mar 2025 11:39:00 -0800 Subject: [PATCH 2/2] Update REAMDE --- milp-evolve/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/milp-evolve/README.md b/milp-evolve/README.md index 04a96e1..5b84be0 100644 --- a/milp-evolve/README.md +++ b/milp-evolve/README.md @@ -30,7 +30,7 @@ ## Dataset Description - **Homepage:** [The OptiGuide 
Project](https://www.microsoft.com/en-us/research/project/optiguide-genai-for-supply-chain-optimization/?msockid=1a1ccce4197d663e1c2bdd4318e1678d)
-- **Repository:** [MILP-Evolve](https://github.com/microsoft/MILP-Evolve)
+- **Repository:** [MILP-Evolve](https://github.com/microsoft/OptiGuide/tree/main/milp-evolve)
 - **Dataset:** [Hugging Face](https://huggingface.co/datasets/microsoft/MILP-Evolve)
 - **Paper:** [arXiv](https://arxiv.org/abs/2410.08288), [openreview](https://openreview.net/forum?id=6yENDA7J4G&referrer=%5BAuthor%20Console%5D(%2Fgroup%3Fid%3DICLR.cc%2F2025%2FConference%2FAuthors%23your-submissions))
 - **Leaderboard:** Beibin Li, Ishai Menache, Sirui Li, Janardhan Kulkarni, Cathy Wu