---
# mergekit della_linear merge configuration.
# NOTE(review): source arrived flattened onto one line; block structure below is
# reconstructed from key order — verify nesting against the original config.
merge_method: della_linear
base_model: bunnycore/Qwen2.5-7B-RRP-1M
dtype: float16

parameters:
  epsilon: 0.015  # Fine-grain scaling for precision.
  lambda: 1.6  # Strong emphasis on top-performing models.
  normalize: true  # Stable parameter integration across models.

adaptive_merge_parameters:
  task_weights:
    tinyArc: 1.75  # Logical reasoning.
    tinyHellaswag: 1.65  # Contextual predictions.
    tinyMMLU: 1.8  # Domain knowledge.
    tinyTruthfulQA: 2.0  # Prioritize truthful reasoning.
    tinyTruthfulQA_mc1: 1.85
    tinyWinogrande: 1.9  # Advanced reasoning and predictions.
    IFEval: 2.1  # Instruction-following and multitasking.
    BBH: 1.9  # Complex reasoning.
    MATH: 2.3  # Mathematical reasoning.
    GPQA: 2.2  # Factual QA.
    MUSR: 2.0  # Multi-step reasoning.
    MMLU-PRO: 2.2  # Domain multitask performance.
  smoothing_factor: 0.1  # Smooth blending across benchmarks.

models:
  - model: AXCXEPT/Qwen2.5-Math-7B-Instruct-jp-EZO_OREO
    parameters:
      weight: 1
      density: 1