Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions index.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,17 @@
"name": "Meta-Llama-3-70B-Instruct-GGUF",
"architecture": "llama",
"model_type": "instruct"
},
{
"id": "second-state/Llama-3-8B-Instruct-GGUF",
"name": "Llama-3-8B-Instruct-GGUF",
"model_type": "instruct",
"architecture": "llama"
},
{
"id": "second-state/Phi-3-mini-4k-instruct-GGUF",
"name": "Phi-3-mini-4k-instruct",
"model_type": "instruct",
"architecture": "llama"
}
]
153 changes: 153 additions & 0 deletions second-state/Llama-3-8B-Instruct-GGUF.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
{
"id": "second-state/Llama-3-8B-Instruct-GGUF",
"status": "published",
"model_type": "instruct",
"name": "Llama-3-8B-Instruct-GGUF",
"summary": "Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.",
"author": {
"name": "meta-llama",
"url": "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
"description": ""
},
"size": "8B",
"requires": "8GB+ RAM",
"released_at": "2024-04-19T03:47:19Z",
"architecture": "llama",
"files": [
{
"name": "Meta-Llama-3-8B-Instruct-f16.gguf",
"size": "16068890816",
"quantization": "FULL PRECISION",
"tags": [],
"sha256": "1716d233aae1f0277b8717769824a24155a60403e096a5cdd598288d77d46b63",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-f16.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q2_K.gguf",
"size": "3179131104",
"quantization": "Q2_K",
"tags": [],
"sha256": "d68ac41701708ea3d2ae86782512dbdda66feaeb396a170a53554724848141bf",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q2_K.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q3_K_L.gguf",
"size": "4321956064",
"quantization": "Q3_K_L",
"tags": [],
"sha256": "ff72590849813d118aa8f3e73eb6375fb29c2d1e0ccca532c6756902d01800cf",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q3_K_L.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q3_K_M.gguf",
"size": "4018917600",
"quantization": "Q3_K_M",
"tags": [],
"sha256": "2866937b90add3b5b9b11e9b284f63eb5e85ccdb030a90ed424d248571b3918f",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q3_K_M.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q3_K_S.gguf",
"size": "3664498912",
"quantization": "Q3_K_S",
"tags": [],
"sha256": "c442b24bcab9c15df64cb47dfc147d63be081c8fab6c719c3ac48998cd0528b9",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q3_K_S.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q4_0.gguf",
"size": "4661211360",
"quantization": "Q4_0",
"tags": [],
"sha256": "54a161a881652bf4a2bca0fb7bcf5ffcde567c71d7194814a43800757d234acd",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_0.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf",
"size": "4920733920",
"quantization": "Q4_K_M",
"tags": [],
"sha256": "2d4c2369f29c685f740b0f23b4f8a39fa46221eb21429ef1ffa1b14b1970bb17",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q4_K_S.gguf",
"size": "4692668640",
"quantization": "Q4_K_S",
"tags": [],
"sha256": "de5a0eb7dc8f89a5858f4552b6e5133b3281ca172acdc7113cac76850143569c",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_S.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q5_0.gguf",
"size": "5599293664",
"quantization": "Q5_0",
"tags": [],
"sha256": "bc34607cef0f441c17a1a6440ac318a15143cc05dbebf6bbf213ed79bb4f8696",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q5_0.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q5_K_M.gguf",
"size": "5733500128",
"quantization": "Q5_K_M",
"tags": [],
"sha256": "d6b4f889a00afc90e1216bb5cf814b1711ed1870453c7dd8b7f15cc6001a065e",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q5_K_M.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q5_K_S.gguf",
"size": "5599293664",
"quantization": "Q5_K_S",
"tags": [],
"sha256": "5ae59049f7cdb2a75ed96fae719750e4f91c61740575136f1b42fbc074b914d0",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q5_K_S.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q6_K.gguf",
"size": "6596006112",
"quantization": "Q6_K",
"tags": [],
"sha256": "82a953c375815a00f1e36773ccd7a62d9e045bcabd1a6554ad4ddb9d337cb0f2",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q6_K.gguf"
}
},
{
"name": "Meta-Llama-3-8B-Instruct-Q8_0.gguf",
"size": "8540770528",
"quantization": "Q8_0",
"tags": [],
"sha256": "a0cc95c65e87360bfedfac50db3051d92b25f66a678959595a35a7e06556628b",
"download": {
"default": "https://huggingface.co/second-state/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q8_0.gguf"
}
}
],
"prompt_template": "llama-3-chat",
"reverse_prompt": "",
"context_size": 4096,
"vector_size": 0,
"metrics": {}
}
163 changes: 163 additions & 0 deletions second-state/Phi-3-mini-4k-instruct-GGUF.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
{
"id": "second-state/Phi-3-mini-4k-instruct-GGUF",
"status": "published",
"model_type": "instruct",
"name": "Phi-3-mini-4k-instruct",
"summary": "The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Mini version in two variants 4K and 128K which is the context length (in tokens) that it can support.\n\nThe model has undergone a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3 Mini-4K-Instruct showcased a robust and state-of-the-art performance among models with less than 13 billion parameters.",
"author": {
"name": "microsoft",
"url": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
"description": ""
},
"size": "3.82B",
"requires": "8GB+ RAM",
"released_at": "2024-04-23T15:11:30Z",
"architecture": "llama",
"files": [
{
"name": "Phi-3-mini-4k-instruct-f16.gguf",
"size": "7643295840",
"quantization": "FULL PRECISION",
"tags": [],
"sha256": "3151f5fa15470cfdefe94f17add74a0a9a7fdbef640db236487a21c110650203",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-f16.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q2_K.gguf",
"size": "1416202912",
"quantization": "Q2_K",
"tags": [],
"sha256": "d827ee57632ec3680951550e3c8a08b749ba48cff7bcf840d7bf5a68c67d99db",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q2_K.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q3_K_L.gguf",
"size": "2087596192",
"quantization": "Q3_K_L",
"tags": [],
"sha256": "ce340d0ba94846326dd01951c2e3199027028aa021abc2c9ca90e5ffe08cca07",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q3_K_L.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q3_K_M.gguf",
"size": "1955475616",
"quantization": "Q3_K_M",
"tags": [],
"sha256": "2c3dbf76b07a9f1a792cdd3bf29e4204ddb229287e301a01f2873ddfcd2d4f29",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q3_K_M.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q3_K_S.gguf",
"size": "1681797280",
"quantization": "Q3_K_S",
"tags": [],
"sha256": "1d969985fac8f7da02fc464ed52405289e1f9682f29e24494354bd8f0fa612d3",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q3_K_S.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q4_0.gguf",
"size": "2176175776",
"quantization": "Q4_0",
"tags": [],
"sha256": "6f50649e74c67ec154fee88cf89966640196a360a3f3a0c90c548263b7e7cf71",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4_0.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q4_K_M.gguf",
"size": "2393231008",
"quantization": "Q4_K_M",
"tags": [],
"sha256": "07c46c4f7fbc286417be8a61ee1b45094ff30b5be8039e67fb403b102dc8838f",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4_K_M.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q4_K_S.gguf",
"size": "2188758688",
"quantization": "Q4_K_S",
"tags": [],
"sha256": "1f3df621f2fe5da21b4802001e910ac279e0d1409d90fabec0d77bdc9b294ca8",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4_K_S.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q4.gguf",
"size": "2318919040",
"quantization": "Q4",
"tags": [],
"sha256": "4fed7364ee3e0c7cb4fe0880148bfdfcd1b630981efa0802a6b62ee52e7da97e",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q4.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q5_0.gguf",
"size": "2641473184",
"quantization": "Q5_0",
"tags": [],
"sha256": "10dd25f058c9d246c67e77d16549ef13f65dbcc3d6e9300b43fabcbff777a1ef",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q5_0.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q5_K_M.gguf",
"size": "2815274656",
"quantization": "Q5_K_M",
"tags": [],
"sha256": "ed4479317c3d28c3b7fe68f21176cd2a1b8862cc6ee89934a2defec9f502fa50",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q5_K_M.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q5_K_S.gguf",
"size": "2641473184",
"quantization": "Q5_K_S",
"tags": [],
"sha256": "231d5c9a40d3819c5b2186eda00592e56938dac81b0c9b0ad084f4f87ef619be",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q5_K_S.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q6_K.gguf",
"size": "3135851680",
"quantization": "Q6_K",
"tags": [],
"sha256": "8d1050a276cac7381eba4bd790254d66c8441d862370dbba2f490c71139221be",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q6_K.gguf"
}
},
{
"name": "Phi-3-mini-4k-instruct-Q8_0.gguf",
"size": "4061221024",
"quantization": "Q8_0",
"tags": [],
"sha256": "8d2f3732e31c354e169cd81dcde9807a1c73b85b9a0f9b16c19013e7a4bb151c",
"download": {
"default": "https://huggingface.co/second-state/Phi-3-mini-4k-instruct-GGUF/resolve/main/Phi-3-mini-4k-instruct-Q8_0.gguf"
}
}
],
"prompt_template": "phi-3-chat",
"reverse_prompt": "",
"context_size": 4096,
"vector_size": 0,
"metrics": {}
}