diff --git a/gallery/index.yaml b/gallery/index.yaml index e72da407b750..492dddf46eec 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3076,6 +3076,22 @@ - filename: DemyAgent-4B.i1-Q4_K_M.gguf sha256: be619b23510debc492ddba73b6764382a8e0c4e97e5c206e0e2ee86d117c0878 uri: huggingface://mradermacher/DemyAgent-4B-i1-GGUF/DemyAgent-4B.i1-Q4_K_M.gguf +- !!merge <<: *qwen3 + name: "boomerang-qwen3-2.3b" + icon: https://cdn-avatars.huggingface.co/v1/production/uploads/660591cbb8cda932fa1292ba/9eTKbCpP-C5rUHj26HTo_.png + urls: + - https://huggingface.co/Harvard-DCML/boomerang-qwen3-2.3B + - https://huggingface.co/mradermacher/boomerang-qwen3-2.3B-GGUF + description: | Boomerang distillation is a phenomenon in LLMs where we can distill a teacher model into a student and reincorporate teacher layers to create intermediate-sized models with no additional training. This is the student model distilled from Qwen3-4B-Base from our paper. This model was initialized from Qwen3-4B-Base by copying every other layer and the last 2 layers. It was distilled on 2.1B tokens of the deduplicated Pile with cross entropy, KL, and cosine loss to match the activations of Qwen3-4B-Base. + overrides: + parameters: + model: boomerang-qwen3-2.3B.Q4_K_M.gguf + files: + - filename: boomerang-qwen3-2.3B.Q4_K_M.gguf + sha256: 59d4fa743abb74177667b2faa4eb0f5bfd874109e9bc27a84d4ac392e90f96cc + uri: huggingface://mradermacher/boomerang-qwen3-2.3B-GGUF/boomerang-qwen3-2.3B.Q4_K_M.gguf - &gemma3 url: "github:mudler/LocalAI/gallery/gemma.yaml@master" name: "gemma-3-27b-it"