diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 18dcc6ddfe..335eff7976 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -3,2255 +3,2368 @@
 #include "llama-impl.h"
 
 #include <map>
-
-static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
-    { LLM_ARCH_LLAMA,            "llama"            },
-    { LLM_ARCH_LLAMA4,           "llama4"           },
-    { LLM_ARCH_DECI,             "deci"             },
-    { LLM_ARCH_FALCON,           "falcon"           },
-    { LLM_ARCH_GROK,             "grok"             },
-    { LLM_ARCH_GPT2,             "gpt2"             },
-    { LLM_ARCH_GPTJ,             "gptj"             },
-    { LLM_ARCH_GPTNEOX,          "gptneox"          },
-    { LLM_ARCH_MPT,              "mpt"              },
-    { LLM_ARCH_BAICHUAN,         "baichuan"         },
-    { LLM_ARCH_STARCODER,        "starcoder"        },
-    { LLM_ARCH_REFACT,           "refact"           },
-    { LLM_ARCH_BERT,             "bert"             },
-    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
-    { LLM_ARCH_NOMIC_BERT_MOE,   "nomic-bert-moe"   },
-    { LLM_ARCH_NEO_BERT,         "neo-bert"         },
-    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
-    { LLM_ARCH_BLOOM,            "bloom"            },
-    { LLM_ARCH_STABLELM,         "stablelm"         },
-    { LLM_ARCH_QWEN,             "qwen"             },
-    { LLM_ARCH_QWEN2,            "qwen2"            },
-    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
-    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
-    { LLM_ARCH_QWEN3,            "qwen3"            },
-    { LLM_ARCH_QWEN3MOE,         "qwen3moe"         },
-    { LLM_ARCH_PHI2,             "phi2"             },
-    { LLM_ARCH_PHI3,             "phi3"             },
-    { LLM_ARCH_PHIMOE,           "phimoe"           },
-    { LLM_ARCH_PLAMO,            "plamo"            },
-    { LLM_ARCH_PLAMO2,           "plamo2"           },
-    { LLM_ARCH_CODESHELL,        "codeshell"        },
-    { LLM_ARCH_ORION,            "orion"            },
-    { LLM_ARCH_INTERNLM2,        "internlm2"        },
-    { LLM_ARCH_MINICPM,          "minicpm"          },
-    { LLM_ARCH_MINICPM3,         "minicpm3"         },
-    { LLM_ARCH_GEMMA,            "gemma"            },
-    { LLM_ARCH_GEMMA2,           "gemma2"           },
-    { LLM_ARCH_GEMMA3,           "gemma3"           },
-    { LLM_ARCH_GEMMA3N,          "gemma3n"          },
-    { LLM_ARCH_STARCODER2,       "starcoder2"       },
-    { LLM_ARCH_MAMBA,            "mamba"            },
-    { LLM_ARCH_MAMBA2,           "mamba2"           },
-    { LLM_ARCH_JAMBA,            "jamba"            },
-    { LLM_ARCH_FALCON_H1,        "falcon-h1"        },
-    { LLM_ARCH_XVERSE,           "xverse"           },
-    { LLM_ARCH_COMMAND_R,        "command-r"        },
-    { LLM_ARCH_COHERE2,          "cohere2"          },
-    { LLM_ARCH_DBRX,             "dbrx"             },
-    { LLM_ARCH_OLMO,             "olmo"             },
-    { LLM_ARCH_OLMO2,            "olmo2"            },
-    { LLM_ARCH_OLMOE,            "olmoe"            },
-    { LLM_ARCH_OPENELM,          "openelm"          },
-    { LLM_ARCH_ARCTIC,           "arctic"           },
-    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
-    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
-    { LLM_ARCH_CHATGLM,          "chatglm"          },
-    { LLM_ARCH_GLM4,             "glm4"             },
-    { LLM_ARCH_GLM4_MOE,         "glm4moe"          },
-    { LLM_ARCH_BITNET,           "bitnet"           },
-    { LLM_ARCH_T5,               "t5"               },
-    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
-    { LLM_ARCH_JAIS,             "jais"             },
-    { LLM_ARCH_NEMOTRON,         "nemotron"         },
-    { LLM_ARCH_EXAONE,           "exaone"           },
-    { LLM_ARCH_EXAONE4,          "exaone4"          },
-    { LLM_ARCH_RWKV6,            "rwkv6"            },
-    { LLM_ARCH_RWKV6QWEN2,       "rwkv6qwen2"       },
-    { LLM_ARCH_RWKV7,            "rwkv7"            },
-    { LLM_ARCH_ARWKV7,           "arwkv7"           },
-    { LLM_ARCH_GRANITE,          "granite"          },
-    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
-    { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
-    { LLM_ARCH_CHAMELEON,        "chameleon"        },
-    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
-    { LLM_ARCH_PLM,              "plm"              },
-    { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
-    { LLM_ARCH_DOTS1,            "dots1"            },
-    { LLM_ARCH_ARCEE,            "arcee"            },
-    { LLM_ARCH_ERNIE4_5,         "ernie4_5"         },
-    { LLM_ARCH_ERNIE4_5_MOE,     "ernie4_5-moe"     },
-    { LLM_ARCH_HUNYUAN_MOE,      "hunyuan-moe"      },
-    { LLM_ARCH_HUNYUAN_DENSE,    "hunyuan-dense"    },
-    { LLM_ARCH_SMOLLM3,          "smollm3"          },
-    { LLM_ARCH_OPENAI_MOE,       "gpt-oss"          },
-    { LLM_ARCH_LFM2,             "lfm2"             },
-    { LLM_ARCH_DREAM,            "dream"            },
-    { LLM_ARCH_SMALLTHINKER,     "smallthinker"     },
-    { LLM_ARCH_LLADA,            "llada"            },
-    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
-};
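// The table above is keyed by the llm_arch enum; the reverse direction
// (string -> enum) is a linear scan over the same map. A minimal sketch of
// both lookups, assuming only the map declared above (the file's own lookup
// helpers live elsewhere and may differ in detail):
//
//     static const char * arch_name(llm_arch arch) {
//         auto it = LLM_ARCH_NAMES.find(arch);
//         return it != LLM_ARCH_NAMES.end() ? it->second : "(unknown)";
//     }
//
//     static llm_arch arch_from_string(const std::string & name) {
//         for (const auto & kv : LLM_ARCH_NAMES) {
//             if (name == kv.second) {
//                 return kv.first;
//             }
//         }
//         return LLM_ARCH_UNKNOWN;
//     }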
-
-static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
-    { LLM_KV_GENERAL_TYPE,                 "general.type"                          },
-    { LLM_KV_GENERAL_ARCHITECTURE,         "general.architecture"                  },
-    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version"          },
-    { LLM_KV_GENERAL_ALIGNMENT,            "general.alignment"                     },
-    { LLM_KV_GENERAL_FILE_TYPE,            "general.file_type"                     },
-    { LLM_KV_GENERAL_NAME,                 "general.name"                          },
-    { LLM_KV_GENERAL_AUTHOR,               "general.author"                        },
-    { LLM_KV_GENERAL_VERSION,              "general.version"                       },
-    { LLM_KV_GENERAL_URL,                  "general.url"                           },
-    { LLM_KV_GENERAL_DESCRIPTION,          "general.description"                   },
-    { LLM_KV_GENERAL_LICENSE,              "general.license"                       },
-    { LLM_KV_GENERAL_SOURCE_URL,           "general.source.url"                    },
-    { LLM_KV_GENERAL_SOURCE_HF_REPO,       "general.source.huggingface.repository" },
-
-    { LLM_KV_VOCAB_SIZE,                        "%s.vocab_size"                        },
-    { LLM_KV_CONTEXT_LENGTH,                    "%s.context_length"                    },
-    { LLM_KV_EMBEDDING_LENGTH,                  "%s.embedding_length"                  },
-    { LLM_KV_FEATURES_LENGTH,                   "%s.features_length"                   },
-    { LLM_KV_BLOCK_COUNT,                       "%s.block_count"                       },
-    { LLM_KV_LEADING_DENSE_BLOCK_COUNT,         "%s.leading_dense_block_count"         },
-    { LLM_KV_FEED_FORWARD_LENGTH,               "%s.feed_forward_length"               },
-    { LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        "%s.expert_feed_forward_length"        },
-    { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
-    { LLM_KV_USE_PARALLEL_RESIDUAL,             "%s.use_parallel_residual"             },
-    { LLM_KV_TENSOR_DATA_LAYOUT,                "%s.tensor_data_layout"                },
-    { LLM_KV_EXPERT_COUNT,                      "%s.expert_count"                      },
-    { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
-    { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
-    { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
-    { LLM_KV_EXPERT_WEIGHTS_NORM,               "%s.expert_weights_norm"               },
-    { LLM_KV_EXPERT_GATING_FUNC,                "%s.expert_gating_func"                },
-    { LLM_KV_MOE_EVERY_N_LAYERS,                "%s.moe_every_n_layers"                },
-    { LLM_KV_NEXTN_PREDICT_LAYERS,              "%s.nextn_predict_layers"              },
-    { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
-    { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
-    { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
-    { LLM_KV_ATTN_LOGIT_SOFTCAPPING,            "%s.attn_logit_softcapping"            },
-    { LLM_KV_FINAL_LOGIT_SOFTCAPPING,           "%s.final_logit_softcapping"           },
-    { LLM_KV_SWIN_NORM,                         "%s.swin_norm"                         },
-    { LLM_KV_RESCALE_EVERY_N_LAYERS,            "%s.rescale_every_n_layers"            },
-    { LLM_KV_TIME_MIX_EXTRA_DIM,                "%s.time_mix_extra_dim"                },
-    { LLM_KV_TIME_DECAY_EXTRA_DIM,              "%s.time_decay_extra_dim"              },
-    { LLM_KV_RESIDUAL_SCALE,                    "%s.residual_scale"                    },
-    { LLM_KV_EMBEDDING_SCALE,                   "%s.embedding_scale"                   },
-    { LLM_KV_TOKEN_SHIFT_COUNT,                 "%s.token_shift_count"                 },
-    { LLM_KV_INTERLEAVE_MOE_LAYER_STEP,         "%s.interleave_moe_layer_step"         },
-
-    { LLM_KV_ATTENTION_HEAD_COUNT,                   "%s.attention.head_count"                   },
-    { LLM_KV_ATTENTION_HEAD_COUNT_KV,                "%s.attention.head_count_kv"                },
-    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,               "%s.attention.max_alibi_bias"               },
-    { LLM_KV_ATTENTION_CLAMP_KQV,                    "%s.attention.clamp_kqv"                    },
-    { LLM_KV_ATTENTION_KEY_LENGTH,                   "%s.attention.key_length"                   },
-    { LLM_KV_ATTENTION_VALUE_LENGTH,                 "%s.attention.value_length"                 },
-    { LLM_KV_ATTENTION_LAYERNORM_EPS,                "%s.attention.layer_norm_epsilon"           },
-    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,            "%s.attention.layer_norm_rms_epsilon"       },
-    { LLM_KV_ATTENTION_GROUPNORM_EPS,                "%s.attention.group_norm_epsilon"           },
-    { LLM_KV_ATTENTION_GROUPNORM_GROUPS,             "%s.attention.group_norm_groups"            },
-    { LLM_KV_ATTENTION_CAUSAL,                       "%s.attention.causal"                       },
-    { LLM_KV_ATTENTION_Q_LORA_RANK,                  "%s.attention.q_lora_rank"                  },
-    { LLM_KV_ATTENTION_KV_LORA_RANK,                 "%s.attention.kv_lora_rank"                 },
-    { LLM_KV_ATTENTION_DECAY_LORA_RANK,              "%s.attention.decay_lora_rank"              },
-    { LLM_KV_ATTENTION_ICLR_LORA_RANK,               "%s.attention.iclr_lora_rank"               },
-    { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" },
-    { LLM_KV_ATTENTION_GATE_LORA_RANK,               "%s.attention.gate_lora_rank"               },
-    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,       "%s.attention.relative_buckets_count"       },
-    { LLM_KV_ATTENTION_SLIDING_WINDOW,               "%s.attention.sliding_window"               },
-    { LLM_KV_ATTENTION_SCALE,                        "%s.attention.scale"                        },
-    { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
-    { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
-
-    { LLM_KV_ROPE_DIMENSION_COUNT,      "%s.rope.dimension_count"                 },
-    { LLM_KV_ROPE_DIMENSION_SECTIONS,   "%s.rope.dimension_sections"              },
-    { LLM_KV_ROPE_FREQ_BASE,            "%s.rope.freq_base"                       },
-    { LLM_KV_ROPE_SCALE_LINEAR,         "%s.rope.scale_linear"                    },
-    { LLM_KV_ROPE_SCALING_TYPE,         "%s.rope.scaling.type"                    },
-    { LLM_KV_ROPE_SCALING_FACTOR,       "%s.rope.scaling.factor"                  },
-    { LLM_KV_ROPE_SCALING_ATTN_FACTOR,  "%s.rope.scaling.attn_factor"             },
-    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
-    { LLM_KV_ROPE_SCALING_FINETUNED,    "%s.rope.scaling.finetuned"               },
-    { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier"     },
-
-    { LLM_KV_SPLIT_NO,            "split.no"            },
-    { LLM_KV_SPLIT_COUNT,         "split.count"         },
-    { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },
-
-    { LLM_KV_SSM_CONV_KERNEL,    "%s.ssm.conv_kernel"    },
-    { LLM_KV_SSM_INNER_SIZE,     "%s.ssm.inner_size"     },
-    { LLM_KV_SSM_STATE_SIZE,     "%s.ssm.state_size"     },
-    { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
-    { LLM_KV_SSM_GROUP_COUNT,    "%s.ssm.group_count"    },
-    { LLM_KV_SSM_DT_B_C_RMS,     "%s.ssm.dt_b_c_rms"     },
-
-    { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },
-
-    { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" },
-    { LLM_KV_POSNET_BLOCK_COUNT,      "%s.posnet.block_count"      },
-
-    { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" },
-    { LLM_KV_CONVNEXT_BLOCK_COUNT,      "%s.convnext.block_count"      },
-
-    { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" },
-
-    { LLM_KV_SHORTCONV_L_CACHE, "%s.shortconv.l_cache" },
-
-    { LLM_KV_TOKENIZER_MODEL,                "tokenizer.ggml.model"                    },
-    { LLM_KV_TOKENIZER_PRE,                  "tokenizer.ggml.pre"                      },
-    { LLM_KV_TOKENIZER_LIST,                 "tokenizer.ggml.tokens"                   },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE,           "tokenizer.ggml.token_type"               },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,     "tokenizer.ggml.token_type_count"         },
-    { LLM_KV_TOKENIZER_SCORES,               "tokenizer.ggml.scores"                   },
-    { LLM_KV_TOKENIZER_MERGES,               "tokenizer.ggml.merges"                   },
-    { LLM_KV_TOKENIZER_BOS_ID,               "tokenizer.ggml.bos_token_id"             },
-    { LLM_KV_TOKENIZER_EOS_ID,               "tokenizer.ggml.eos_token_id"             },
-    { LLM_KV_TOKENIZER_EOT_ID,               "tokenizer.ggml.eot_token_id"             },
-    { LLM_KV_TOKENIZER_EOM_ID,               "tokenizer.ggml.eom_token_id"             },
-    { LLM_KV_TOKENIZER_UNK_ID,               "tokenizer.ggml.unknown_token_id"         },
-    { LLM_KV_TOKENIZER_SEP_ID,               "tokenizer.ggml.seperator_token_id"       },
-    { LLM_KV_TOKENIZER_PAD_ID,               "tokenizer.ggml.padding_token_id"         },
-    { LLM_KV_TOKENIZER_CLS_ID,               "tokenizer.ggml.cls_token_id"             },
-    { LLM_KV_TOKENIZER_MASK_ID,              "tokenizer.ggml.mask_token_id"            },
-    { LLM_KV_TOKENIZER_ADD_BOS,              "tokenizer.ggml.add_bos_token"            },
-    { LLM_KV_TOKENIZER_ADD_EOS,              "tokenizer.ggml.add_eos_token"            },
-    { LLM_KV_TOKENIZER_ADD_SEP,              "tokenizer.ggml.add_sep_token"            },
-    { LLM_KV_TOKENIZER_ADD_PREFIX,           "tokenizer.ggml.add_space_prefix"         },
-    { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,      "tokenizer.ggml.remove_extra_whitespaces" },
-    { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap"     },
-    { LLM_KV_TOKENIZER_HF_JSON,              "tokenizer.huggingface.json"              },
-    { LLM_KV_TOKENIZER_RWKV,                 "tokenizer.rwkv.world"                    },
-    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat_template"                 },
-    { LLM_KV_TOKENIZER_FIM_PRE_ID,           "tokenizer.ggml.fim_pre_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SUF_ID,           "tokenizer.ggml.fim_suf_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_MID_ID,           "tokenizer.ggml.fim_mid_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_PAD_ID,           "tokenizer.ggml.fim_pad_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_REP_ID,           "tokenizer.ggml.fim_rep_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SEP_ID,           "tokenizer.ggml.fim_sep_token_id"         },
-
-    { LLM_KV_ADAPTER_TYPE,       "adapter.type"       },
-    { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" },
-
-    // deprecated
-    { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
-    { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
-    { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
-};
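// Keys containing "%s" are per-architecture: the placeholder is filled with
// the matching entry from LLM_ARCH_NAMES, so LLM_KV_CONTEXT_LENGTH resolves
// to "llama.context_length" for LLM_ARCH_LLAMA. llama.cpp performs this
// expansion through its LLM_KV helper; the sketch below is an illustrative
// stand-in, not that helper:
//
//     static std::string kv_name(llm_kv kv, llm_arch arch) {
//         char buf[256];
//         // keys without "%s" (e.g. "general.name") pass through unchanged,
//         // since the extra argument is evaluated and ignored
//         snprintf(buf, sizeof(buf), LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
//         return buf;
//     }
//
//     // kv_name(LLM_KV_CONTEXT_LENGTH, LLM_ARCH_LLAMA) == "llama.context_length"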
-
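// The per-architecture tables below map each tensor enum to a name template;
// "blk.%d" is expanded with the layer index, and the loader appends a
// ".weight" / ".bias" suffix (in llama.cpp this goes through the LLM_TN
// helper; the sketch below is illustrative only). Expert tensors such as
// "blk.%d.ffn_gate.%d" take a second index for the expert id.
//
//     static std::string tensor_name(llm_arch arch, llm_tensor t, int bid, const char * suffix) {
//         char buf[256];
//         snprintf(buf, sizeof(buf), LLM_TENSOR_NAMES.at(arch).at(t), bid);
//         return std::string(buf) + "." + suffix;
//     }
//
//     // tensor_name(LLM_ARCH_LLAMA, LLM_TENSOR_ATTN_Q, 3, "weight") == "blk.3.attn_q.weight"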
-static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
-    {
-        LLM_ARCH_LLAMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_ARCEE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_LLAMA4,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_DECI,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_BAICHUAN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_FALCON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GROK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GPT2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GPTJ,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-    {
-        LLM_ARCH_GPTNEOX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MPT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_ACT,         "blk.%d.ffn.act" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_STARCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_REFACT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-            { LLM_TENSOR_CLS_OUT,         "cls.output" },
-        },
-    },
-    {
-        LLM_ARCH_NOMIC_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_NOMIC_BERT_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_NEO_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
-            { LLM_TENSOR_CLS,             "cls" },
-            { LLM_TENSOR_CLS_OUT,         "cls.output" },
-        },
-    },
-    {
-        LLM_ARCH_JINA_BERT_V2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-        },
-    },
-    {
-        LLM_ARCH_BLOOM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_STABLELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2VL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN3MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_PHI2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PHI3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PHIMOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_PLAMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PLAMO2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-            { LLM_TENSOR_SSM_DT_NORM,     "blk.%d.ssm_dt_norm" },
-            { LLM_TENSOR_SSM_B_NORM,      "blk.%d.ssm_b_norm" },
-            { LLM_TENSOR_SSM_C_NORM,      "blk.%d.ssm_c_norm" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_CODESHELL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ORION,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_INTERNLM2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA3N,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,          "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,            "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,               "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,          "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,               "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,          "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,               "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,             "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,       "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_NORM,             "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,             "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,             "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,               "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,        "blk.%d.post_ffw_norm" },
-            { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" },
-            { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" },
-            { LLM_TENSOR_PER_LAYER_PROJ_NORM,  "per_layer_proj_norm" },
-            { LLM_TENSOR_ALTUP_UNEMBD_PROJ,    "altup_unembd_proj" },
-            { LLM_TENSOR_ALTUP_PROJ,           "altup_proj" },
-            { LLM_TENSOR_PER_LAYER_INP_GATE,   "blk.%d.inp_gate" },
-            { LLM_TENSOR_PER_LAYER_PROJ,       "blk.%d.proj" },
-            { LLM_TENSOR_PER_LAYER_POST_NORM,  "blk.%d.post_norm" },
-            { LLM_TENSOR_ALTUP_CORRECT_COEF,   "blk.%d.altup_correct_coef" },
-            { LLM_TENSOR_ALTUP_CORRECT_SCALE,  "blk.%d.altup_correct_scale" },
-            { LLM_TENSOR_ALTUP_PREDICT_COEF,   "blk.%d.altup_predict_coef" },
-            { LLM_TENSOR_ALTUP_ROUTER,         "blk.%d.altup_router" },
-            { LLM_TENSOR_ALTUP_ROUTER_NORM,    "blk.%d.altup_router_norm" },
-            { LLM_TENSOR_LAUREL_L,             "blk.%d.laurel_l" },
-            { LLM_TENSOR_LAUREL_R,             "blk.%d.laurel_r" },
-            { LLM_TENSOR_LAUREL_POST_NORM,     "blk.%d.laurel_post_norm" },
-        },
-    },
-    {
-        LLM_ARCH_STARCODER2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MAMBA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-        },
-    },
-    {
-        LLM_ARCH_MAMBA2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-        },
-    },
-    {
-        LLM_ARCH_JAMBA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_DT_NORM,     "blk.%d.ssm_dt_norm" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_B_NORM,      "blk.%d.ssm_b_norm" },
-            { LLM_TENSOR_SSM_C_NORM,      "blk.%d.ssm_c_norm" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_FALCON_H1,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_XVERSE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_COMMAND_R,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_COHERE2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_DBRX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OPENELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ARCTIC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM_EXPS,   "blk.%d.ffn_norm_exps" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_K_B,           "blk.%d.attn_k_b" },
-            { LLM_TENSOR_ATTN_V_B,           "blk.%d.attn_v_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
-        },
-    },
-    {
-        LLM_ARCH_PLM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_CHATGLM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GLM4,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GLM4_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_POST_NORM,     "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
-            // NextN/MTP tensors - preserved in the GGUF but unused at inference (they live in the final layer, so the layer number is dynamic)
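-            // e.g. for a model whose last block holds the MTP head, "blk.%d.nextn.eh_proj" expands with that final block index at load time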
-            { LLM_TENSOR_NEXTN_EH_PROJ,      "blk.%d.nextn.eh_proj" },
-            { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" },
-            { LLM_TENSOR_NEXTN_ENORM,        "blk.%d.nextn.enorm" },
-            { LLM_TENSOR_NEXTN_HNORM,        "blk.%d.nextn.hnorm" },
-            { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" },
-            { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" },
-        },
-    },
-    {
-        LLM_ARCH_BITNET,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_SUB_NORM,      "blk.%d.attn_sub_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_SUB_NORM,       "blk.%d.ffn_sub_norm" },
-        },
-    },
-    {
-        LLM_ARCH_T5,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_DEC_OUTPUT_NORM,      "dec.output_norm" },
-            { LLM_TENSOR_DEC_ATTN_NORM,        "dec.blk.%d.attn_norm" },
-            { LLM_TENSOR_DEC_ATTN_Q,           "dec.blk.%d.attn_q" },
-            { LLM_TENSOR_DEC_ATTN_K,           "dec.blk.%d.attn_k" },
-            { LLM_TENSOR_DEC_ATTN_V,           "dec.blk.%d.attn_v" },
-            { LLM_TENSOR_DEC_ATTN_OUT,         "dec.blk.%d.attn_o" },
-            { LLM_TENSOR_DEC_ATTN_REL_B,       "dec.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "dec.blk.%d.cross_attn_norm" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_Q,     "dec.blk.%d.cross_attn_q" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_K,     "dec.blk.%d.cross_attn_k" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_V,     "dec.blk.%d.cross_attn_v" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_OUT,   "dec.blk.%d.cross_attn_o" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
-            { LLM_TENSOR_DEC_FFN_NORM,         "dec.blk.%d.ffn_norm" },
-            { LLM_TENSOR_DEC_FFN_GATE,         "dec.blk.%d.ffn_gate" },
-            { LLM_TENSOR_DEC_FFN_DOWN,         "dec.blk.%d.ffn_down" },
-            { LLM_TENSOR_DEC_FFN_UP,           "dec.blk.%d.ffn_up" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_T5ENCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_JAIS,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_NEMOTRON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_EXAONE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_EXAONE4,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_RWKV6,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
-            { LLM_TENSOR_TIME_MIX_LERP_W,           "blk.%d.time_mix_lerp_w" },
-            { LLM_TENSOR_TIME_MIX_LERP_K,           "blk.%d.time_mix_lerp_k" },
-            { LLM_TENSOR_TIME_MIX_LERP_V,           "blk.%d.time_mix_lerp_v" },
-            { LLM_TENSOR_TIME_MIX_LERP_R,           "blk.%d.time_mix_lerp_r" },
-            { LLM_TENSOR_TIME_MIX_LERP_G,           "blk.%d.time_mix_lerp_g" },
-            { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
-            { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
-            { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
-            { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_R,        "blk.%d.channel_mix_lerp_r" },
-            { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
-            { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
-            { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,    "blk.%d.channel_mix_receptance" },
-        },
-    },
-    {
-        LLM_ARCH_RWKV6QWEN2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
-            { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
-            { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
-            { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_FFN_NORM,                  "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,                  "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,                  "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,                    "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_RWKV7,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_TIME_MIX_W0,               "blk.%d.time_mix_w0" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_A0,               "blk.%d.time_mix_a0" },
-            { LLM_TENSOR_TIME_MIX_A1,               "blk.%d.time_mix_a1" },
-            { LLM_TENSOR_TIME_MIX_A2,               "blk.%d.time_mix_a2" },
-            { LLM_TENSOR_TIME_MIX_V0,               "blk.%d.time_mix_v0" },
-            { LLM_TENSOR_TIME_MIX_V1,               "blk.%d.time_mix_v1" },
-            { LLM_TENSOR_TIME_MIX_V2,               "blk.%d.time_mix_v2" },
-            { LLM_TENSOR_TIME_MIX_G1,               "blk.%d.time_mix_g1" },
-            { LLM_TENSOR_TIME_MIX_G2,               "blk.%d.time_mix_g2" },
-            { LLM_TENSOR_TIME_MIX_K_K,              "blk.%d.time_mix_k_k" },
-            { LLM_TENSOR_TIME_MIX_K_A,              "blk.%d.time_mix_k_a" },
-            { LLM_TENSOR_TIME_MIX_R_K,              "blk.%d.time_mix_r_k" },
-            { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
-            { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
-            { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
-        },
-    },
-    {
-        LLM_ARCH_ARWKV7,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_TIME_MIX_W0,               "blk.%d.time_mix_w0" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_A0,               "blk.%d.time_mix_a0" },
-            { LLM_TENSOR_TIME_MIX_A1,               "blk.%d.time_mix_a1" },
-            { LLM_TENSOR_TIME_MIX_A2,               "blk.%d.time_mix_a2" },
-            { LLM_TENSOR_TIME_MIX_V0,               "blk.%d.time_mix_v0" },
-            { LLM_TENSOR_TIME_MIX_V1,               "blk.%d.time_mix_v1" },
-            { LLM_TENSOR_TIME_MIX_V2,               "blk.%d.time_mix_v2" },
-            { LLM_TENSOR_TIME_MIX_G1,               "blk.%d.time_mix_g1" },
-            { LLM_TENSOR_TIME_MIX_G2,               "blk.%d.time_mix_g2" },
-            { LLM_TENSOR_TIME_MIX_K_K,              "blk.%d.time_mix_k_k" },
-            { LLM_TENSOR_TIME_MIX_K_A,              "blk.%d.time_mix_k_a" },
-            { LLM_TENSOR_TIME_MIX_R_K,              "blk.%d.time_mix_r_k" },
-            { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_FFN_NORM,                  "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,                  "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,                  "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,                    "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE_HYBRID,
-        {
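-            // hybrid stack: recurrent (mamba2-style SSM) and attention blocks interleave,
-            // so each blk.%d only instantiates the subset of mappings matching its layer type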
-            { LLM_TENSOR_TOKEN_EMBD,     "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,    "output_norm" },
-            { LLM_TENSOR_OUTPUT,         "output" },
-            { LLM_TENSOR_ATTN_NORM,      "blk.%d.attn_norm" },
-            // mamba(2) ssm layers
-            { LLM_TENSOR_SSM_IN,         "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,     "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_DT,         "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,          "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,          "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_NORM,       "blk.%d.ssm_norm" },
-            { LLM_TENSOR_SSM_OUT,        "blk.%d.ssm_out" },
-            // attention layers
-            { LLM_TENSOR_ATTN_Q,         "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,         "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,         "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,       "blk.%d.attn_output" },
-            // dense FFN
-            { LLM_TENSOR_FFN_NORM,       "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,       "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,       "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,         "blk.%d.ffn_up" },
-            // moe FFN (ffn_norm is shared with the dense path above; repeating the key here would be ignored by the map initializer)
-            { LLM_TENSOR_FFN_GATE_INP,   "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,  "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,  "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,    "blk.%d.ffn_up_exps" },
-            // shared expert
-            { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,   "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_CHAMELEON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_WAVTOKENIZER_DEC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
-            { LLM_TENSOR_CONV1D,            "conv1d" },
-            { LLM_TENSOR_CONVNEXT_DW,       "convnext.%d.dw" },
-            { LLM_TENSOR_CONVNEXT_NORM,     "convnext.%d.norm" },
-            { LLM_TENSOR_CONVNEXT_PW1,      "convnext.%d.pw1" },
-            { LLM_TENSOR_CONVNEXT_PW2,      "convnext.%d.pw2" },
-            { LLM_TENSOR_CONVNEXT_GAMMA,    "convnext.%d.gamma" },
-            { LLM_TENSOR_OUTPUT_NORM,       "output_norm" },
-            { LLM_TENSOR_OUTPUT,            "output" },
-            { LLM_TENSOR_POS_NET_CONV1,     "posnet.%d.conv1" },
-            { LLM_TENSOR_POS_NET_CONV2,     "posnet.%d.conv2" },
-            { LLM_TENSOR_POS_NET_NORM,      "posnet.%d.norm" },
-            { LLM_TENSOR_POS_NET_NORM1,     "posnet.%d.norm1" },
-            { LLM_TENSOR_POS_NET_NORM2,     "posnet.%d.norm2" },
-            { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
-            { LLM_TENSOR_POS_NET_ATTN_Q,    "posnet.%d.attn_q" },
-            { LLM_TENSOR_POS_NET_ATTN_K,    "posnet.%d.attn_k" },
-            { LLM_TENSOR_POS_NET_ATTN_V,    "posnet.%d.attn_v" },
-            { LLM_TENSOR_POS_NET_ATTN_OUT,  "posnet.%d.attn_output" },
-        },
-    },
-    {
-        LLM_ARCH_BAILINGMOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_DOTS1,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
-        },
-    },
-    {
-        LLM_ARCH_ERNIE4_5,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ERNIE4_5_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
-        },
-    },
-    {
-        LLM_ARCH_HUNYUAN_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_HUNYUAN_DENSE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_SMOLLM3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,     "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,    "output_norm" },
-            { LLM_TENSOR_OUTPUT,         "output" },
-            { LLM_TENSOR_ATTN_NORM,      "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,         "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,         "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,         "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,       "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,       "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,       "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,       "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,         "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OPENAI_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_POST_NORM,     "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_SINKS,         "blk.%d.attn_sinks" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_LFM2,
-        {
-            { LLM_TENSOR_ATTN_NORM,         "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,            "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,            "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,            "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,          "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_K_NORM,       "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_Q_NORM,       "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_FFN_DOWN,          "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE,          "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_NORM,          "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,            "blk.%d.ffn_up" },
-            { LLM_TENSOR_SHORTCONV_CONV,    "blk.%d.shortconv.conv" },
-            { LLM_TENSOR_SHORTCONV_INPROJ,  "blk.%d.shortconv.in_proj" },
-            { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
-            { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
-        },
-    },
-    {
-        LLM_ARCH_SMALLTHINKER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DREAM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_LLADA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_UNKNOWN,
-        {
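-            // fallback mapping for unrecognized architectures: only the token embedding is named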
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-};
-
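-// The entries above are printf-style patterns: per-layer tensors embed a "%d"
-// that is expanded with the block index when the model is loaded (the real
-// lookup goes through the LLM_TN helpers declared in llama-arch.h). A minimal
-// sketch of that expansion, with llm_tensor_name_sketch as a hypothetical
-// stand-in for illustration only:
-//
-//     static std::string llm_tensor_name_sketch(const char * pattern, int bid) {
-//         char buf[256];
-//         snprintf(buf, sizeof(buf), pattern, bid);
-//         return buf; // ("blk.%d.attn_q", 3) -> "blk.3.attn_q"
-//     }
-//
-// The LLM_TENSOR_INFOS table below then pairs each tensor with its layer class
-// (input / output / repeating) and the ggml op that consumes it.
-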
-static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
-    {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_SINKS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SCALE}},
-    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_A1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_A2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_V1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_V2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_G1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_G2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
-    {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
-    {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
-    {LLM_TENSOR_SSM_DT_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_SSM_B_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_SSM_C_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_SSM_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_K_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_K_A,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_R_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_FUSED,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_W0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_A0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_V0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
-    {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    // altup / laurel (gemma 3n)
-    {LLM_TENSOR_PER_LAYER_TOKEN_EMBD,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_PER_LAYER_MODEL_PROJ,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_PER_LAYER_PROJ_NORM,        {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL}},
-    {LLM_TENSOR_ALTUP_PROJ,                 {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ALTUP_UNEMBD_PROJ,          {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_PER_LAYER_INP_GATE,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_PER_LAYER_PROJ,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_PER_LAYER_POST_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ALTUP_CORRECT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ALTUP_CORRECT_SCALE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ALTUP_PREDICT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ALTUP_ROUTER,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ALTUP_ROUTER_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_LAUREL_L,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_LAUREL_R,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    // this tensor is loaded for T5, but never used
-    {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
-    {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_SHORTCONV_CONV,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
-    {LLM_TENSOR_SHORTCONV_INPROJ,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SHORTCONV_OUTPROJ,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    // NextN/MTP tensors are currently ignored (reserved for future MTP support)
-    // These tensors only exist in the last layer(s) and are treated as output tensors
-    {LLM_TENSOR_NEXTN_EH_PROJ,              {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_NEXTN_EMBED_TOKENS,         {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_NEXTN_ENORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_NEXTN_HNORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-};
+#include <mutex>
+
+// Forward declarations for accessor functions
+static const std::map<llm_arch, const char *>& get_llm_arch_names();
+static const std::map<llm_kv, const char *>& get_llm_kv_names();
+static const std::map<llm_tensor, llm_tensor_info>& get_llm_tensor_infos();
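+// The accessors are intended to build their tables on first use (function-local
+// statics), avoiding static-initialization-order issues between translation units.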
+
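+// Resolve an architecture id to its string name; unknown ids map to "unknown".
+// For example: llama_arch_name(LLM_ARCH_LLAMA) returns "llama".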
+const char* llama_arch_name(llm_arch arch) {
+    const auto& names = get_llm_arch_names();
+    auto it = names.find(arch);
+    if (it != names.end()) {
+        return it->second;
+    }
+    return "unknown";
+}
+
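+// Resolve an llm_kv enum value to its GGUF key string; unknown values map to
+// "unknown". For example (illustrative):
+//     llama_get_kv_name(LLM_KV_GENERAL_ARCHITECTURE) -> "general.architecture"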
+const char* llama_get_kv_name(llm_kv kv) {
+    const auto& names = get_llm_kv_names();
+    auto it = names.find(kv);
+    if (it != names.end()) {
+        return it->second;
+    }
+    return "unknown";
+}
+
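+// Resolve an (architecture, tensor) pair to its GGUF tensor name. Per-layer
+// entries are printf-style patterns, e.g. "blk.%d.attn_q", which callers are
+// expected to format with the block index.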
+const char* tensor_name(llm_arch arch, llm_tensor tensor) {
+    // TODO: move this table behind a get_* accessor like the ones above; for
+    // now it is built lazily in place because of its size. std::call_once
+    // keeps first-use initialization thread-safe, and the heap-allocated map
+    // is never freed, so returned pointers stay valid for the process lifetime.
+    static std::once_flag flag;
+    static std::map<llm_arch, std::map<llm_tensor, const char *>>* LLM_TENSOR_NAMES = nullptr;
+
+    std::call_once(flag, []() {
+        LLM_TENSOR_NAMES = new std::map<llm_arch, std::map<llm_tensor, const char *>>{
+      {
+          LLM_ARCH_LLAMA,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
+              { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
+              { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_ARCEE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_LLAMA4,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
+              { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
+              { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_DECI,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
+              { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
+              { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_BAICHUAN,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_FALCON,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_GROK,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
+              { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
+              { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+          },
+      },
+      {
+          LLM_ARCH_GPT2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_POS_EMBD,        "position_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_GPTJ,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+          },
+      },
+      {
+          LLM_ARCH_GPTNEOX,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_MPT,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output"},
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_ACT,         "blk.%d.ffn.act" },
+              { LLM_TENSOR_POS_EMBD,        "position_embd" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+          },
+      },
+      {
+          LLM_ARCH_STARCODER,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_POS_EMBD,        "position_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_REFACT,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_BERT,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+              { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+              { LLM_TENSOR_POS_EMBD,        "position_embd" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_CLS,             "cls" },
+              { LLM_TENSOR_CLS_OUT,         "cls.output" },
+          },
+      },
+      {
+          LLM_ARCH_NOMIC_BERT,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+              { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_NOMIC_BERT_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+              { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_NEO_BERT,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
+              { LLM_TENSOR_CLS,             "cls" },
+              { LLM_TENSOR_CLS_OUT,         "cls.output" },
+          },
+      },
+      {
+          LLM_ARCH_JINA_BERT_V2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+              { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+              { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_CLS,             "cls" },
+          },
+      },
+      {
+          LLM_ARCH_BLOOM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_STABLELM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN2VL,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN2MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN3,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_QWEN3MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_PHI2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_PHI3,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
+              { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_PHIMOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
+              { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_PLAMO,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_PLAMO2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
+              { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+              { LLM_TENSOR_SSM_DT_NORM,     "blk.%d.ssm_dt_norm" },
+              { LLM_TENSOR_SSM_B_NORM,      "blk.%d.ssm_b_norm" },
+              { LLM_TENSOR_SSM_C_NORM,      "blk.%d.ssm_c_norm" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+          },
+      },
+      {
+          LLM_ARCH_CODESHELL,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_ORION,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_INTERNLM2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_MINICPM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
+              { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
+              { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXP,       "blk.%d.ffn_gate.%d" },
+              { LLM_TENSOR_FFN_DOWN_EXP,       "blk.%d.ffn_down.%d" },
+              { LLM_TENSOR_FFN_UP_EXP,         "blk.%d.ffn_up.%d" },
+          },
+      },
+      {
+          LLM_ARCH_MINICPM3,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
+              { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
+              { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
+              { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
+              { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
+              { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_GEMMA,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_GEMMA2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+          },
+      },
+      {
+          LLM_ARCH_GEMMA3,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+          },
+      },
+      {
+          LLM_ARCH_GEMMA3N,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,          "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,            "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,               "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,          "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,               "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,          "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,               "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,             "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_POST_NORM,       "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_NORM,             "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,             "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,             "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,               "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_POST_NORM,        "blk.%d.post_ffw_norm" },
+              { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" },
+              { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" },
+              { LLM_TENSOR_PER_LAYER_PROJ_NORM,  "per_layer_proj_norm" },
+              { LLM_TENSOR_ALTUP_UNEMBD_PROJ,    "altup_unembd_proj" },
+              { LLM_TENSOR_ALTUP_PROJ,           "altup_proj" },
+              { LLM_TENSOR_PER_LAYER_INP_GATE,   "blk.%d.inp_gate" },
+              { LLM_TENSOR_PER_LAYER_PROJ,       "blk.%d.proj" },
+              { LLM_TENSOR_PER_LAYER_POST_NORM,  "blk.%d.post_norm" },
+              { LLM_TENSOR_ALTUP_CORRECT_COEF,   "blk.%d.altup_correct_coef" },
+              { LLM_TENSOR_ALTUP_CORRECT_SCALE,  "blk.%d.altup_correct_scale" },
+              { LLM_TENSOR_ALTUP_PREDICT_COEF,   "blk.%d.altup_predict_coef" },
+              { LLM_TENSOR_ALTUP_ROUTER,         "blk.%d.altup_router" },
+              { LLM_TENSOR_ALTUP_ROUTER_NORM,    "blk.%d.altup_router_norm" },
+              { LLM_TENSOR_LAUREL_L,             "blk.%d.laurel_l" },
+              { LLM_TENSOR_LAUREL_R,             "blk.%d.laurel_r" },
+              { LLM_TENSOR_LAUREL_POST_NORM,     "blk.%d.laurel_post_norm" },
+          },
+      },
+      {
+          LLM_ARCH_STARCODER2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_MAMBA,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
+              { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+          },
+      },
+      {
+          LLM_ARCH_MAMBA2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
+              { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+          },
+      },
+      {
+          LLM_ARCH_JAMBA,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
+              { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_DT_NORM,     "blk.%d.ssm_dt_norm" },
+              { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_B_NORM,      "blk.%d.ssm_b_norm" },
+              { LLM_TENSOR_SSM_C_NORM,      "blk.%d.ssm_c_norm" },
+              { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_FALCON_H1,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
+              { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_XVERSE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_COMMAND_R,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+          },
+      },
+      {
+          LLM_ARCH_COHERE2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_DBRX,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_OLMO,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_OLMO2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_OLMOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_OPENELM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_ARCTIC,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_NORM_EXPS,   "blk.%d.ffn_norm_exps" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_DEEPSEEK,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_DEEPSEEK2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
+              { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
+              { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
+              { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
+              { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
+              { LLM_TENSOR_ATTN_K_B,           "blk.%d.attn_k_b" },
+              { LLM_TENSOR_ATTN_V_B,           "blk.%d.attn_v_b" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+              { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
+          },
+      },
+      {
+          LLM_ARCH_PLM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
+              { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
+              { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_CHATGLM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_GLM4,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+          },
+      },
+      {
+          LLM_ARCH_GLM4_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_POST_NORM,     "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+              { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
+              // NextN/MTP tensors: preserved in the final layer but currently unused (the layer index is dynamic)
+              { LLM_TENSOR_NEXTN_EH_PROJ,      "blk.%d.nextn.eh_proj" },
+              { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" },
+              { LLM_TENSOR_NEXTN_ENORM,        "blk.%d.nextn.enorm" },
+              { LLM_TENSOR_NEXTN_HNORM,        "blk.%d.nextn.hnorm" },
+              { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" },
+              { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" },
+          },
+      },
+      {
+          LLM_ARCH_BITNET,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_SUB_NORM,      "blk.%d.attn_sub_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_SUB_NORM,       "blk.%d.ffn_sub_norm" },
+          },
+      },
+      {
+          LLM_ARCH_T5,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
+              { LLM_TENSOR_OUTPUT,               "output" },
+              { LLM_TENSOR_DEC_OUTPUT_NORM,      "dec.output_norm" },
+              { LLM_TENSOR_DEC_ATTN_NORM,        "dec.blk.%d.attn_norm" },
+              { LLM_TENSOR_DEC_ATTN_Q,           "dec.blk.%d.attn_q" },
+              { LLM_TENSOR_DEC_ATTN_K,           "dec.blk.%d.attn_k" },
+              { LLM_TENSOR_DEC_ATTN_V,           "dec.blk.%d.attn_v" },
+              { LLM_TENSOR_DEC_ATTN_OUT,         "dec.blk.%d.attn_o" },
+              { LLM_TENSOR_DEC_ATTN_REL_B,       "dec.blk.%d.attn_rel_b" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "dec.blk.%d.cross_attn_norm" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_Q,     "dec.blk.%d.cross_attn_q" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_K,     "dec.blk.%d.cross_attn_k" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_V,     "dec.blk.%d.cross_attn_v" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_OUT,   "dec.blk.%d.cross_attn_o" },
+              { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
+              { LLM_TENSOR_DEC_FFN_NORM,         "dec.blk.%d.ffn_norm" },
+              { LLM_TENSOR_DEC_FFN_GATE,         "dec.blk.%d.ffn_gate" },
+              { LLM_TENSOR_DEC_FFN_DOWN,         "dec.blk.%d.ffn_down" },
+              { LLM_TENSOR_DEC_FFN_UP,           "dec.blk.%d.ffn_up" },
+              { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
+              { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
+              { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
+              { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
+              { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
+              { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
+              { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
+              { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
+              { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
+              { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
+              { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_T5ENCODER,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
+              { LLM_TENSOR_OUTPUT,               "output" },
+              { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
+              { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
+              { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
+              { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
+              { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
+              { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
+              { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
+              { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
+              { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
+              { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
+              { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_JAIS,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+          },
+      },
+      {
+          LLM_ARCH_NEMOTRON,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_EXAONE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_EXAONE4,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+          },
+      },
+      {
+          LLM_ARCH_RWKV6,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
+              { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
+              { LLM_TENSOR_OUTPUT,                    "output" },
+              { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
+              { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
+              { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
+              { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
+              { LLM_TENSOR_TIME_MIX_LERP_W,           "blk.%d.time_mix_lerp_w" },
+              { LLM_TENSOR_TIME_MIX_LERP_K,           "blk.%d.time_mix_lerp_k" },
+              { LLM_TENSOR_TIME_MIX_LERP_V,           "blk.%d.time_mix_lerp_v" },
+              { LLM_TENSOR_TIME_MIX_LERP_R,           "blk.%d.time_mix_lerp_r" },
+              { LLM_TENSOR_TIME_MIX_LERP_G,           "blk.%d.time_mix_lerp_g" },
+              { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
+              { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
+              { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
+              { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
+              { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
+              { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
+              { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
+              { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
+              { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
+              { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
+              { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
+              { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
+              { LLM_TENSOR_CHANNEL_MIX_LERP_R,        "blk.%d.channel_mix_lerp_r" },
+              { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
+              { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
+              { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,    "blk.%d.channel_mix_receptance" },
+          },
+      },
+      {
+          LLM_ARCH_RWKV6QWEN2,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
+              { LLM_TENSOR_OUTPUT,                    "output" },
+              { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
+              { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
+              { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
+              { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
+              { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
+              { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
+              { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
+              { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
+              { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
+              { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
+              { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
+              { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
+              { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
+              { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
+              { LLM_TENSOR_FFN_NORM,                  "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,                  "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,                  "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,                    "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_RWKV7,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
+              { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
+              { LLM_TENSOR_OUTPUT,                    "output" },
+              { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
+              { LLM_TENSOR_TIME_MIX_W0,               "blk.%d.time_mix_w0" },
+              { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
+              { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
+              { LLM_TENSOR_TIME_MIX_A0,               "blk.%d.time_mix_a0" },
+              { LLM_TENSOR_TIME_MIX_A1,               "blk.%d.time_mix_a1" },
+              { LLM_TENSOR_TIME_MIX_A2,               "blk.%d.time_mix_a2" },
+              { LLM_TENSOR_TIME_MIX_V0,               "blk.%d.time_mix_v0" },
+              { LLM_TENSOR_TIME_MIX_V1,               "blk.%d.time_mix_v1" },
+              { LLM_TENSOR_TIME_MIX_V2,               "blk.%d.time_mix_v2" },
+              { LLM_TENSOR_TIME_MIX_G1,               "blk.%d.time_mix_g1" },
+              { LLM_TENSOR_TIME_MIX_G2,               "blk.%d.time_mix_g2" },
+              { LLM_TENSOR_TIME_MIX_K_K,              "blk.%d.time_mix_k_k" },
+              { LLM_TENSOR_TIME_MIX_K_A,              "blk.%d.time_mix_k_a" },
+              { LLM_TENSOR_TIME_MIX_R_K,              "blk.%d.time_mix_r_k" },
+              { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
+              { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
+              { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
+              { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
+              { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
+              { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
+              { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
+              { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
+              { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
+          },
+      },
+      {
+          LLM_ARCH_ARWKV7,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
+              { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
+              { LLM_TENSOR_OUTPUT,                    "output" },
+              { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
+              { LLM_TENSOR_TIME_MIX_W0,               "blk.%d.time_mix_w0" },
+              { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
+              { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
+              { LLM_TENSOR_TIME_MIX_A0,               "blk.%d.time_mix_a0" },
+              { LLM_TENSOR_TIME_MIX_A1,               "blk.%d.time_mix_a1" },
+              { LLM_TENSOR_TIME_MIX_A2,               "blk.%d.time_mix_a2" },
+              { LLM_TENSOR_TIME_MIX_V0,               "blk.%d.time_mix_v0" },
+              { LLM_TENSOR_TIME_MIX_V1,               "blk.%d.time_mix_v1" },
+              { LLM_TENSOR_TIME_MIX_V2,               "blk.%d.time_mix_v2" },
+              { LLM_TENSOR_TIME_MIX_G1,               "blk.%d.time_mix_g1" },
+              { LLM_TENSOR_TIME_MIX_G2,               "blk.%d.time_mix_g2" },
+              { LLM_TENSOR_TIME_MIX_K_K,              "blk.%d.time_mix_k_k" },
+              { LLM_TENSOR_TIME_MIX_K_A,              "blk.%d.time_mix_k_a" },
+              { LLM_TENSOR_TIME_MIX_R_K,              "blk.%d.time_mix_r_k" },
+              { LLM_TENSOR_TIME_MIX_LERP_FUSED,       "blk.%d.time_mix_lerp_fused" },
+              { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
+              { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
+              { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
+              { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
+              { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
+              { LLM_TENSOR_FFN_NORM,                  "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,                  "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,                  "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,                    "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_GRANITE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_GRANITE_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_GRANITE_HYBRID,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,     "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,    "output_norm" },
+              { LLM_TENSOR_OUTPUT,         "output" },
+              { LLM_TENSOR_ATTN_NORM,      "blk.%d.attn_norm" },
+              // mamba(2) SSM layers
+              { LLM_TENSOR_SSM_IN,         "blk.%d.ssm_in" },
+              { LLM_TENSOR_SSM_CONV1D,     "blk.%d.ssm_conv1d" },
+              { LLM_TENSOR_SSM_DT,         "blk.%d.ssm_dt" },
+              { LLM_TENSOR_SSM_A,          "blk.%d.ssm_a" },
+              { LLM_TENSOR_SSM_D,          "blk.%d.ssm_d" },
+              { LLM_TENSOR_SSM_NORM,       "blk.%d.ssm_norm" },
+              { LLM_TENSOR_SSM_OUT,        "blk.%d.ssm_out" },
+              // attention layers
+              { LLM_TENSOR_ATTN_Q,         "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,         "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,         "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,       "blk.%d.attn_output" },
+              // dense FFN
+              { LLM_TENSOR_FFN_NORM,       "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,       "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,       "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,         "blk.%d.ffn_up" },
+              // moe FFN (reuses ffn_norm from the dense path above; a duplicate map key would be ignored)
+              { LLM_TENSOR_FFN_GATE_INP,   "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,  "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,  "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,    "blk.%d.ffn_up_exps" },
+              // shared expert
+              { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,   "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_CHAMELEON,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+          },
+      },
+      {
+          LLM_ARCH_WAVTOKENIZER_DEC,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
+              { LLM_TENSOR_CONV1D,            "conv1d" },
+              { LLM_TENSOR_CONVNEXT_DW,       "convnext.%d.dw" },
+              { LLM_TENSOR_CONVNEXT_NORM,     "convnext.%d.norm" },
+              { LLM_TENSOR_CONVNEXT_PW1,      "convnext.%d.pw1" },
+              { LLM_TENSOR_CONVNEXT_PW2,      "convnext.%d.pw2" },
+              { LLM_TENSOR_CONVNEXT_GAMMA,    "convnext.%d.gamma" },
+              { LLM_TENSOR_OUTPUT_NORM,       "output_norm" },
+              { LLM_TENSOR_OUTPUT,            "output" },
+              { LLM_TENSOR_POS_NET_CONV1,     "posnet.%d.conv1" },
+              { LLM_TENSOR_POS_NET_CONV2,     "posnet.%d.conv2" },
+              { LLM_TENSOR_POS_NET_NORM,      "posnet.%d.norm" },
+              { LLM_TENSOR_POS_NET_NORM1,     "posnet.%d.norm1" },
+              { LLM_TENSOR_POS_NET_NORM2,     "posnet.%d.norm2" },
+              { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
+              { LLM_TENSOR_POS_NET_ATTN_Q,    "posnet.%d.attn_q" },
+              { LLM_TENSOR_POS_NET_ATTN_K,    "posnet.%d.attn_k" },
+              { LLM_TENSOR_POS_NET_ATTN_V,    "posnet.%d.attn_v" },
+              { LLM_TENSOR_POS_NET_ATTN_OUT,  "posnet.%d.attn_output" },
+          },
+      },
+      {
+          LLM_ARCH_BAILINGMOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+          },
+      },
+      {
+          LLM_ARCH_DOTS1,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+              { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
+          },
+      },
+      {
+          LLM_ARCH_ERNIE4_5,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_ERNIE4_5_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+              { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
+          },
+      },
+      {
+          LLM_ARCH_HUNYUAN_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE_SHEXP,  "blk.%d.ffn_gate_shexp" },
+              { LLM_TENSOR_FFN_DOWN_SHEXP,  "blk.%d.ffn_down_shexp" },
+              { LLM_TENSOR_FFN_UP_SHEXP,    "blk.%d.ffn_up_shexp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_HUNYUAN_DENSE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_SMOLLM3,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,     "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,    "output_norm" },
+              { LLM_TENSOR_OUTPUT,         "output" },
+              { LLM_TENSOR_ATTN_NORM,      "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,         "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,         "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,         "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,       "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,       "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,       "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,       "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,         "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_OPENAI_MOE,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_POST_NORM,     "blk.%d.post_attention_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_SINKS,         "blk.%d.attn_sinks" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
+          },
+      },
+      {
+          LLM_ARCH_LFM2,
+          {
+              { LLM_TENSOR_ATTN_NORM,         "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,            "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,            "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,            "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,          "blk.%d.attn_output" },
+              { LLM_TENSOR_ATTN_K_NORM,       "blk.%d.attn_k_norm" },
+              { LLM_TENSOR_ATTN_Q_NORM,       "blk.%d.attn_q_norm" },
+              { LLM_TENSOR_FFN_DOWN,          "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_GATE,          "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_NORM,          "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_UP,            "blk.%d.ffn_up" },
+              { LLM_TENSOR_SHORTCONV_CONV,    "blk.%d.shortconv.conv" },
+              { LLM_TENSOR_SHORTCONV_INPROJ,  "blk.%d.shortconv.in_proj" },
+              { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
+              { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
+              { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
+          },
+      },
+      {
+          LLM_ARCH_SMALLTHINKER,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
+              { LLM_TENSOR_OUTPUT,             "output" },
+              { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
+              { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
+              { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
+              { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
+              { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" }
+          },
+      },
+      {
+          LLM_ARCH_DREAM,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_LLADA,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+              { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+              { LLM_TENSOR_OUTPUT,          "output" },
+              { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+              { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+              { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+              { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+              { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+              { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+              { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+              { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+              { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+          },
+      },
+      {
+          LLM_ARCH_UNKNOWN,
+          {
+              { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+          },
+      },
+  };
+
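+  // Note: the "%d" in the templates above is filled in by LLM_TN_IMPL::str()
+  // below, via ::format(tensor_template, bid, xid).
+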
+  static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
+      {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+      {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+      {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+      {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+      {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+      {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+      {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_K_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_V_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ATTN_SINKS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SCALE}},
+      {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_A1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_A2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_V1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_V2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_G1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_G2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
+      {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
+      {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
+      {LLM_TENSOR_SSM_DT_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_SSM_B_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_SSM_C_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_SSM_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_K_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_K_A,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_R_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_LERP_FUSED,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_W0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_A0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_V0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
+      {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+      {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+      {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+      {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+      // altup / laurel (gemma 3n)
+      {LLM_TENSOR_PER_LAYER_TOKEN_EMBD,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_PER_LAYER_MODEL_PROJ,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_PER_LAYER_PROJ_NORM,        {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL}},
+      {LLM_TENSOR_ALTUP_PROJ,                 {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ALTUP_UNEMBD_PROJ,          {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_PER_LAYER_INP_GATE,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_PER_LAYER_PROJ,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_PER_LAYER_POST_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ALTUP_CORRECT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ALTUP_CORRECT_SCALE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_ALTUP_PREDICT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ALTUP_ROUTER,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_ALTUP_ROUTER_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_LAUREL_L,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_LAUREL_R,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      // this tensor is loaded for T5, but never used
+      {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+      {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
+      {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+      {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+      {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+      {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+      {LLM_TENSOR_SHORTCONV_CONV,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
+      {LLM_TENSOR_SHORTCONV_INPROJ,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_SHORTCONV_OUTPROJ,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+      // NextN/MTP tensors are currently ignored (reserved for future MTP support)
+      // These tensors only exist in the last layer(s) and are treated as output tensors
+      {LLM_TENSOR_NEXTN_EH_PROJ,              {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_NEXTN_EMBED_TOKENS,         {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_NEXTN_ENORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
+      {LLM_TENSOR_NEXTN_HNORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+      {LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+      {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+  };
+    });
+
+    auto it = LLM_TENSOR_NAMES->find(arch);
+    if (it != LLM_TENSOR_NAMES->end()) {
+        auto it_tensor = it->second.find(tensor);
+        if (it_tensor != it->second.end()) {
+            return it_tensor->second;
+        }
+    }
+    return "unknown";
+}
+
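+// Usage sketch (illustrative): resolving a per-layer tensor name template.
+//
+//     const char * tmpl = tensor_name(LLM_ARCH_LLAMA, LLM_TENSOR_ATTN_Q);
+//     // tmpl == "blk.%d.attn_q"; formatted with a block id this yields e.g. "blk.3.attn_q"
+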
+// Accessor functions for lazy-initialized static maps
+static const std::map<llm_arch, const char *>& get_llm_arch_names() {
+    static std::once_flag flag;
+    static std::map<llm_arch, const char *>* LLM_ARCH_NAMES = nullptr;
+
+    std::call_once(flag, []() {
+        LLM_ARCH_NAMES = new std::map<llm_arch, const char *>{
+            { LLM_ARCH_LLAMA,            "llama"            },
+            { LLM_ARCH_LLAMA4,           "llama4"           },
+            { LLM_ARCH_DECI,             "deci"             },
+            { LLM_ARCH_FALCON,           "falcon"           },
+            { LLM_ARCH_GROK,             "grok"             },
+            { LLM_ARCH_GPT2,             "gpt2"             },
+            { LLM_ARCH_GPTJ,             "gptj"             },
+            { LLM_ARCH_GPTNEOX,          "gptneox"          },
+            { LLM_ARCH_MPT,              "mpt"              },
+            { LLM_ARCH_BAICHUAN,         "baichuan"         },
+            { LLM_ARCH_STARCODER,        "starcoder"        },
+            { LLM_ARCH_REFACT,           "refact"           },
+            { LLM_ARCH_BERT,             "bert"             },
+            { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
+            { LLM_ARCH_NOMIC_BERT_MOE,   "nomic-bert-moe"   },
+            { LLM_ARCH_NEO_BERT,         "neo-bert"         },
+            { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
+            { LLM_ARCH_BLOOM,            "bloom"            },
+            { LLM_ARCH_STABLELM,         "stablelm"         },
+            { LLM_ARCH_QWEN,             "qwen"             },
+            { LLM_ARCH_QWEN2,            "qwen2"            },
+            { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
+            { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
+            { LLM_ARCH_QWEN3,            "qwen3"            },
+            { LLM_ARCH_QWEN3MOE,         "qwen3moe"         },
+            { LLM_ARCH_PHI2,             "phi2"             },
+            { LLM_ARCH_PHI3,             "phi3"             },
+            { LLM_ARCH_PHIMOE,           "phimoe"           },
+            { LLM_ARCH_PLAMO,            "plamo"            },
+            { LLM_ARCH_PLAMO2,           "plamo2"           },
+            { LLM_ARCH_CODESHELL,        "codeshell"        },
+            { LLM_ARCH_ORION,            "orion"            },
+            { LLM_ARCH_INTERNLM2,        "internlm2"        },
+            { LLM_ARCH_MINICPM,          "minicpm"          },
+            { LLM_ARCH_MINICPM3,         "minicpm3"         },
+            { LLM_ARCH_GEMMA,            "gemma"            },
+            { LLM_ARCH_GEMMA2,           "gemma2"           },
+            { LLM_ARCH_GEMMA3,           "gemma3"           },
+            { LLM_ARCH_GEMMA3N,          "gemma3n"          },
+            { LLM_ARCH_STARCODER2,       "starcoder2"       },
+            { LLM_ARCH_MAMBA,            "mamba"            },
+            { LLM_ARCH_MAMBA2,           "mamba2"           },
+            { LLM_ARCH_JAMBA,            "jamba"            },
+            { LLM_ARCH_FALCON_H1,        "falcon-h1"        },
+            { LLM_ARCH_XVERSE,           "xverse"           },
+            { LLM_ARCH_COMMAND_R,        "command-r"        },
+            { LLM_ARCH_COHERE2,          "cohere2"          },
+            { LLM_ARCH_DBRX,             "dbrx"             },
+            { LLM_ARCH_OLMO,             "olmo"             },
+            { LLM_ARCH_OLMO2,            "olmo2"            },
+            { LLM_ARCH_OLMOE,            "olmoe"            },
+            { LLM_ARCH_OPENELM,          "openelm"          },
+            { LLM_ARCH_ARCTIC,           "arctic"           },
+            { LLM_ARCH_DEEPSEEK,         "deepseek"         },
+            { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
+            { LLM_ARCH_CHATGLM,          "chatglm"          },
+            { LLM_ARCH_GLM4,             "glm4"             },
+            { LLM_ARCH_GLM4_MOE,         "glm4moe"          },
+            { LLM_ARCH_BITNET,           "bitnet"           },
+            { LLM_ARCH_T5,               "t5"               },
+            { LLM_ARCH_T5ENCODER,        "t5encoder"        },
+            { LLM_ARCH_JAIS,             "jais"             },
+            { LLM_ARCH_NEMOTRON,         "nemotron"         },
+            { LLM_ARCH_EXAONE,           "exaone"           },
+            { LLM_ARCH_EXAONE4,          "exaone4"          },
+            { LLM_ARCH_RWKV6,            "rwkv6"            },
+            { LLM_ARCH_RWKV6QWEN2,       "rwkv6qwen2"       },
+            { LLM_ARCH_RWKV7,            "rwkv7"            },
+            { LLM_ARCH_ARWKV7,           "arwkv7"           },
+            { LLM_ARCH_GRANITE,          "granite"          },
+            { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
+            { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
+            { LLM_ARCH_CHAMELEON,        "chameleon"        },
+            { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
+            { LLM_ARCH_PLM,              "plm"              },
+            { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
+            { LLM_ARCH_DOTS1,            "dots1"            },
+            { LLM_ARCH_ARCEE,            "arcee"            },
+            { LLM_ARCH_ERNIE4_5,         "ernie4_5"         },
+            { LLM_ARCH_ERNIE4_5_MOE,     "ernie4_5-moe"     },
+            { LLM_ARCH_HUNYUAN_MOE,      "hunyuan-moe"      },
+            { LLM_ARCH_HUNYUAN_DENSE,    "hunyuan-dense"    },
+            { LLM_ARCH_SMOLLM3,          "smollm3"          },
+            { LLM_ARCH_OPENAI_MOE,       "gpt-oss"          },
+            { LLM_ARCH_LFM2,             "lfm2"             },
+            { LLM_ARCH_DREAM,            "dream"            },
+            { LLM_ARCH_SMALLTHINKER,     "smallthinker"     },
+            { LLM_ARCH_LLADA,            "llada"            },
+            { LLM_ARCH_UNKNOWN,          "(unknown)"        },
+        };
+    });
+    return *LLM_ARCH_NAMES;
+}
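+
+// Note: a plain function-local static ("Meyers singleton") would provide the
+// same thread-safe lazy initialization with less machinery, e.g.:
+//
+//     static const std::map<llm_arch, const char *> names = { /* ... */ };
+//     return names;
+//
+// The call_once + heap allocation used here additionally keeps the map alive
+// for the whole process lifetime, sidestepping static-destruction-order
+// issues at exit.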
+
+static const std::map<llm_kv, const char *>& get_llm_kv_names() {
+    static std::once_flag flag;
+    static std::map<llm_kv, const char *>* LLM_KV_NAMES = nullptr;
+
+    std::call_once(flag, []() {
+        LLM_KV_NAMES = new std::map<llm_kv, const char *>{
+            { LLM_KV_GENERAL_TYPE,                 "general.type"                          },
+            { LLM_KV_GENERAL_ARCHITECTURE,         "general.architecture"                  },
+            { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version"          },
+            { LLM_KV_GENERAL_ALIGNMENT,            "general.alignment"                     },
+            { LLM_KV_GENERAL_FILE_TYPE,            "general.file_type"                     },
+            { LLM_KV_GENERAL_NAME,                 "general.name"                          },
+            { LLM_KV_GENERAL_AUTHOR,               "general.author"                        },
+            { LLM_KV_GENERAL_VERSION,              "general.version"                       },
+            { LLM_KV_GENERAL_URL,                  "general.url"                           },
+            { LLM_KV_GENERAL_DESCRIPTION,          "general.description"                   },
+            { LLM_KV_GENERAL_LICENSE,              "general.license"                       },
+            { LLM_KV_GENERAL_SOURCE_URL,           "general.source.url"                    },
+            { LLM_KV_GENERAL_SOURCE_HF_REPO,       "general.source.huggingface.repository" },
+
+            { LLM_KV_VOCAB_SIZE,                        "%s.vocab_size"                        },
+            { LLM_KV_CONTEXT_LENGTH,                    "%s.context_length"                    },
+            { LLM_KV_EMBEDDING_LENGTH,                  "%s.embedding_length"                  },
+            { LLM_KV_FEATURES_LENGTH,                   "%s.features_length"                   },
+            { LLM_KV_BLOCK_COUNT,                       "%s.block_count"                       },
+            { LLM_KV_LEADING_DENSE_BLOCK_COUNT,         "%s.leading_dense_block_count"         },
+            { LLM_KV_FEED_FORWARD_LENGTH,               "%s.feed_forward_length"               },
+            { LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        "%s.expert_feed_forward_length"        },
+            { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
+            { LLM_KV_USE_PARALLEL_RESIDUAL,             "%s.use_parallel_residual"             },
+            { LLM_KV_TENSOR_DATA_LAYOUT,                "%s.tensor_data_layout"                },
+            { LLM_KV_EXPERT_COUNT,                      "%s.expert_count"                      },
+            { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
+            { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
+            { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
+            { LLM_KV_EXPERT_WEIGHTS_NORM,               "%s.expert_weights_norm"               },
+            { LLM_KV_EXPERT_GATING_FUNC,                "%s.expert_gating_func"                },
+            { LLM_KV_MOE_EVERY_N_LAYERS,                "%s.moe_every_n_layers"                },
+            { LLM_KV_NEXTN_PREDICT_LAYERS,              "%s.nextn_predict_layers"              },
+            { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
+            { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
+            { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
+            { LLM_KV_ATTN_LOGIT_SOFTCAPPING,            "%s.attn_logit_softcapping"            },
+            { LLM_KV_FINAL_LOGIT_SOFTCAPPING,           "%s.final_logit_softcapping"           },
+            { LLM_KV_SWIN_NORM,                         "%s.swin_norm"                         },
+            { LLM_KV_RESCALE_EVERY_N_LAYERS,            "%s.rescale_every_n_layers"            },
+            { LLM_KV_TIME_MIX_EXTRA_DIM,                "%s.time_mix_extra_dim"                },
+            { LLM_KV_TIME_DECAY_EXTRA_DIM,              "%s.time_decay_extra_dim"              },
+            { LLM_KV_RESIDUAL_SCALE,                    "%s.residual_scale"                    },
+            { LLM_KV_EMBEDDING_SCALE,                   "%s.embedding_scale"                   },
+            { LLM_KV_TOKEN_SHIFT_COUNT,                 "%s.token_shift_count"                 },
+            { LLM_KV_INTERLEAVE_MOE_LAYER_STEP,         "%s.interleave_moe_layer_step"         },
+
+            { LLM_KV_ATTENTION_HEAD_COUNT,                   "%s.attention.head_count"                   },
+            { LLM_KV_ATTENTION_HEAD_COUNT_KV,                "%s.attention.head_count_kv"                },
+            { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,               "%s.attention.max_alibi_bias"               },
+            { LLM_KV_ATTENTION_CLAMP_KQV,                    "%s.attention.clamp_kqv"                    },
+            { LLM_KV_ATTENTION_KEY_LENGTH,                   "%s.attention.key_length"                   },
+            { LLM_KV_ATTENTION_VALUE_LENGTH,                 "%s.attention.value_length"                 },
+            { LLM_KV_ATTENTION_LAYERNORM_EPS,                "%s.attention.layer_norm_epsilon"           },
+            { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,            "%s.attention.layer_norm_rms_epsilon"       },
+            { LLM_KV_ATTENTION_GROUPNORM_EPS,                "%s.attention.group_norm_epsilon"           },
+            { LLM_KV_ATTENTION_GROUPNORM_GROUPS,             "%s.attention.group_norm_groups"            },
+            { LLM_KV_ATTENTION_CAUSAL,                       "%s.attention.causal"                       },
+            { LLM_KV_ATTENTION_Q_LORA_RANK,                  "%s.attention.q_lora_rank"                  },
+            { LLM_KV_ATTENTION_KV_LORA_RANK,                 "%s.attention.kv_lora_rank"                 },
+            { LLM_KV_ATTENTION_DECAY_LORA_RANK,              "%s.attention.decay_lora_rank"              },
+            { LLM_KV_ATTENTION_ICLR_LORA_RANK,               "%s.attention.iclr_lora_rank"               },
+            { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" },
+            { LLM_KV_ATTENTION_GATE_LORA_RANK,               "%s.attention.gate_lora_rank"               },
+            { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,       "%s.attention.relative_buckets_count"       },
+            { LLM_KV_ATTENTION_SLIDING_WINDOW,               "%s.attention.sliding_window"               },
+            { LLM_KV_ATTENTION_SCALE,                        "%s.attention.scale"                        },
+            { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
+            { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
+
+            { LLM_KV_ROPE_DIMENSION_COUNT,      "%s.rope.dimension_count"                 },
+            { LLM_KV_ROPE_DIMENSION_SECTIONS,   "%s.rope.dimension_sections"              },
+            { LLM_KV_ROPE_FREQ_BASE,            "%s.rope.freq_base"                       },
+            { LLM_KV_ROPE_SCALE_LINEAR,         "%s.rope.scale_linear"                    },
+            { LLM_KV_ROPE_SCALING_TYPE,         "%s.rope.scaling.type"                    },
+            { LLM_KV_ROPE_SCALING_FACTOR,       "%s.rope.scaling.factor"                  },
+            { LLM_KV_ROPE_SCALING_ATTN_FACTOR,  "%s.rope.scaling.attn_factor"             },
+            { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
+            { LLM_KV_ROPE_SCALING_FINETUNED,    "%s.rope.scaling.finetuned"               },
+            { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier"     },
+
+            { LLM_KV_SPLIT_NO,            "split.no"            },
+            { LLM_KV_SPLIT_COUNT,         "split.count"         },
+            { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },
+
+            { LLM_KV_SSM_CONV_KERNEL,    "%s.ssm.conv_kernel"    },
+            { LLM_KV_SSM_INNER_SIZE,     "%s.ssm.inner_size"     },
+            { LLM_KV_SSM_STATE_SIZE,     "%s.ssm.state_size"     },
+            { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
+            { LLM_KV_SSM_GROUP_COUNT,    "%s.ssm.group_count"    },
+            { LLM_KV_SSM_DT_B_C_RMS,     "%s.ssm.dt_b_c_rms"     },
+
+            { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },
+
+            { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" },
+            { LLM_KV_POSNET_BLOCK_COUNT,      "%s.posnet.block_count"      },
+
+            { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" },
+            { LLM_KV_CONVNEXT_BLOCK_COUNT,      "%s.convnext.block_count"      },
+
+            { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" },
+
+            { LLM_KV_SHORTCONV_L_CACHE, "%s.shortconv.l_cache" },
+
+            { LLM_KV_TOKENIZER_MODEL,                "tokenizer.ggml.model"                    },
+            { LLM_KV_TOKENIZER_PRE,                  "tokenizer.ggml.pre"                      },
+            { LLM_KV_TOKENIZER_LIST,                 "tokenizer.ggml.tokens"                   },
+            { LLM_KV_TOKENIZER_TOKEN_TYPE,           "tokenizer.ggml.token_type"               },
+            { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,     "tokenizer.ggml.token_type_count"         },
+            { LLM_KV_TOKENIZER_SCORES,               "tokenizer.ggml.scores"                   },
+            { LLM_KV_TOKENIZER_MERGES,               "tokenizer.ggml.merges"                   },
+            { LLM_KV_TOKENIZER_BOS_ID,               "tokenizer.ggml.bos_token_id"             },
+            { LLM_KV_TOKENIZER_EOS_ID,               "tokenizer.ggml.eos_token_id"             },
+            { LLM_KV_TOKENIZER_EOT_ID,               "tokenizer.ggml.eot_token_id"             },
+            { LLM_KV_TOKENIZER_EOM_ID,               "tokenizer.ggml.eom_token_id"             },
+            { LLM_KV_TOKENIZER_UNK_ID,               "tokenizer.ggml.unknown_token_id"         },
+            { LLM_KV_TOKENIZER_SEP_ID,               "tokenizer.ggml.seperator_token_id"       },
+            { LLM_KV_TOKENIZER_PAD_ID,               "tokenizer.ggml.padding_token_id"         },
+            { LLM_KV_TOKENIZER_CLS_ID,               "tokenizer.ggml.cls_token_id"             },
+            { LLM_KV_TOKENIZER_MASK_ID,              "tokenizer.ggml.mask_token_id"            },
+            { LLM_KV_TOKENIZER_ADD_BOS,              "tokenizer.ggml.add_bos_token"            },
+            { LLM_KV_TOKENIZER_ADD_EOS,              "tokenizer.ggml.add_eos_token"            },
+            { LLM_KV_TOKENIZER_ADD_SEP,              "tokenizer.ggml.add_sep_token"            },
+            { LLM_KV_TOKENIZER_ADD_PREFIX,           "tokenizer.ggml.add_space_prefix"         },
+            { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,      "tokenizer.ggml.remove_extra_whitespaces" },
+            { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap"     },
+            { LLM_KV_TOKENIZER_HF_JSON,              "tokenizer.huggingface.json"              },
+            { LLM_KV_TOKENIZER_RWKV,                 "tokenizer.rwkv.world"                    },
+            { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat_template"                 },
+            { LLM_KV_TOKENIZER_FIM_PRE_ID,           "tokenizer.ggml.fim_pre_token_id"         },
+            { LLM_KV_TOKENIZER_FIM_SUF_ID,           "tokenizer.ggml.fim_suf_token_id"         },
+            { LLM_KV_TOKENIZER_FIM_MID_ID,           "tokenizer.ggml.fim_mid_token_id"         },
+            { LLM_KV_TOKENIZER_FIM_PAD_ID,           "tokenizer.ggml.fim_pad_token_id"         },
+            { LLM_KV_TOKENIZER_FIM_REP_ID,           "tokenizer.ggml.fim_rep_token_id"         },
+            { LLM_KV_TOKENIZER_FIM_SEP_ID,           "tokenizer.ggml.fim_sep_token_id"         },
+
+            { LLM_KV_ADAPTER_TYPE,       "adapter.type"       },
+            { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" },
+
+            // deprecated
+            { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
+            { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
+            { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
+        };
+    });
+    return *LLM_KV_NAMES;
+}
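+
+// Example (illustrative): LLM_KV::operator() below expands these templates
+// with the architecture name, e.g. "%s.context_length" under LLM_ARCH_LLAMA
+// becomes "llama.context_length".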
+
+static const std::map<llm_arch, std::map<llm_tensor, const char *>>& get_llm_tensor_names() {
+    // The complete per-architecture tensor-name table lives as a local static
+    // inside tensor_name() and cannot be referenced from here. Until that
+    // table is hoisted into this accessor, callers receive an empty map and
+    // should resolve names through tensor_name() instead.
+    static const std::map<llm_arch, std::map<llm_tensor, const char *>> empty_names;
+    return empty_names;
+}
+
+static const std::map<llm_tensor, llm_tensor_info>& get_llm_tensor_infos() {
+    static std::once_flag flag;
+    static std::map<llm_tensor, llm_tensor_info>* LLM_TENSOR_INFOS = nullptr;
+
+    std::call_once(flag, []() {
+        LLM_TENSOR_INFOS = new std::map<llm_tensor, llm_tensor_info>{
+            {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+            {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+            {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+            {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+            {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+            {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            // Add more tensor mappings as needed...
+            {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+        };
+    });
+    return *LLM_TENSOR_INFOS;
+}
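+
+// Each llm_tensor_info pairs the layer class a tensor belongs to (input,
+// repeating, or output) with the ggml op that consumes it; it is looked up
+// through llm_tensor_info_for() below.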
 
 LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}
 
 std::string LLM_KV::operator()(llm_kv kv) const {
-    std::string name = ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
+    const auto& kv_names = get_llm_kv_names();
+    const auto& arch_names = get_llm_arch_names();
+    std::string name = ::format(kv_names.at(kv), arch_names.at(arch));
 
     if (suffix != nullptr) {
         name += ".";
@@ -2262,11 +2375,12 @@
 }
 
 std::string LLM_TN_IMPL::str() const {
-    if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+    const char * tensor_template = tensor_name(arch, tensor);
+    if (strcmp(tensor_template, "unknown") == 0) {
         return "__missing__";
     }
 
-    std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid);
+    std::string name = ::format(tensor_template, bid, xid);
 
     if (suffix != nullptr) {
         name += ".";
@@ -2277,15 +2391,17 @@
 }
 
 const char * llm_arch_name(llm_arch arch) {
-    auto it = LLM_ARCH_NAMES.find(arch);
-    if (it == LLM_ARCH_NAMES.end()) {
+    const auto& names = get_llm_arch_names();
+    auto it = names.find(arch);
+    if (it == names.end()) {
         return "unknown";
     }
     return it->second;
 }
 
 llm_arch llm_arch_from_string(const std::string & name) {
-    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
+    const auto& names = get_llm_arch_names();
+    for (const auto & kv : names) { // NOLINT
         if (kv.second == name) {
             return kv.first;
         }
@@ -2295,7 +2411,8 @@
 }
 
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
-    return LLM_TENSOR_INFOS.at(tensor);
+    const auto& infos = get_llm_tensor_infos();
+    return infos.at(tensor);
 }
 
 bool llm_arch_is_recurrent(const llm_arch & arch) {
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 335eff7976..63529f0a64 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -2352,7 +2352,150 @@
             {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
             {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
             {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-            // Add more tensor mappings as needed...
+            {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_K_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_V_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ATTN_SINKS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SCALE}},
+            {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_A1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_A2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_V1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_V2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_G1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_G2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
+            {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
+            {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
+            {LLM_TENSOR_SSM_DT_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_SSM_B_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_SSM_C_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_SSM_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_K_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_K_A,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_R_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_LERP_FUSED,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_W0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_A0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_V0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
+            {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+            {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+            {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+            {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+            {LLM_TENSOR_PER_LAYER_TOKEN_EMBD,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_PER_LAYER_MODEL_PROJ,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_PER_LAYER_PROJ_NORM,        {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL}},
+            {LLM_TENSOR_ALTUP_PROJ,                 {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ALTUP_UNEMBD_PROJ,          {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_PER_LAYER_INP_GATE,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_PER_LAYER_PROJ,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_PER_LAYER_POST_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ALTUP_CORRECT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ALTUP_CORRECT_SCALE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_ALTUP_PREDICT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ALTUP_ROUTER,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_ALTUP_ROUTER_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_LAUREL_L,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_LAUREL_R,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+            {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
+            {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+            {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+            {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
+            {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+            {LLM_TENSOR_SHORTCONV_CONV,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
+            {LLM_TENSOR_SHORTCONV_INPROJ,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_SHORTCONV_OUTPROJ,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_NEXTN_EH_PROJ,              {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+            {LLM_TENSOR_NEXTN_EMBED_TOKENS,         {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_NEXTN_ENORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
+            {LLM_TENSOR_NEXTN_HNORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+            {LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
             {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
         };
     });
@@ -2364,7 +2507,16 @@
 std::string LLM_KV::operator()(llm_kv kv) const {
     const auto& kv_names = get_llm_kv_names();
     const auto& arch_names = get_llm_arch_names();
-    std::string name = ::format(kv_names.at(kv), arch_names.at(arch));
+
+    auto kv_it = kv_names.find(kv);
+    auto arch_it = arch_names.find(arch);
+
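+    // avoid map::at() throwing on a missing key: fall back to a sentinel name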
+    if (kv_it == kv_names.end() || arch_it == arch_names.end()) {
+        return "unknown";
+    }
+
+    std::string name = ::format(kv_it->second, arch_it->second);
 
     if (suffix != nullptr) {
         name += ".";
@@ -2412,7 +2563,14 @@
 
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
     const auto& infos = get_llm_tensor_infos();
-    return infos.at(tensor);
+    auto it = infos.find(tensor);
+    if (it != infos.end()) {
+        return it->second;
+    }
+
+    // not found: fall back to a static sentinel so the returned reference stays valid
+    static const llm_tensor_info unknown_info = {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_NONE};
+    return unknown_info;
 }
 
 bool llm_arch_is_recurrent(const llm_arch & arch) {
diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
index ed5fd9fe8e..6ae404cffb 100644
--- a/ggml/src/gguf.cpp
+++ b/ggml/src/gguf.cpp
@@ -80,41 +80,50 @@
     static constexpr enum gguf_type value = GGUF_TYPE_FLOAT64;
 };
 
-static const std::map<gguf_type, size_t> GGUF_TYPE_SIZE = {
-    {GGUF_TYPE_UINT8,   sizeof(uint8_t)},
-    {GGUF_TYPE_INT8,    sizeof(int8_t)},
-    {GGUF_TYPE_UINT16,  sizeof(uint16_t)},
-    {GGUF_TYPE_INT16,   sizeof(int16_t)},
-    {GGUF_TYPE_UINT32,  sizeof(uint32_t)},
-    {GGUF_TYPE_INT32,   sizeof(int32_t)},
-    {GGUF_TYPE_FLOAT32, sizeof(float)},
-    {GGUF_TYPE_BOOL,    sizeof(int8_t)},
-    {GGUF_TYPE_STRING,  0}, // undefined
-    {GGUF_TYPE_ARRAY,   0}, // undefined
-    {GGUF_TYPE_UINT64,  sizeof(uint64_t)},
-    {GGUF_TYPE_INT64,   sizeof(int64_t)},
-    {GGUF_TYPE_FLOAT64, sizeof(double)},
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
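+// Construct-on-first-use accessor: the function-local static is initialized on
+// the first call (thread-safe since C++11), avoiding static-init-order issues.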
+static const std::map<gguf_type, size_t> & get_gguf_type_size_map() {
+    static const std::map<gguf_type, size_t> GGUF_TYPE_SIZE = {
+        {GGUF_TYPE_UINT8,   sizeof(uint8_t)},
+        {GGUF_TYPE_INT8,    sizeof(int8_t)},
+        {GGUF_TYPE_UINT16,  sizeof(uint16_t)},
+        {GGUF_TYPE_INT16,   sizeof(int16_t)},
+        {GGUF_TYPE_UINT32,  sizeof(uint32_t)},
+        {GGUF_TYPE_INT32,   sizeof(int32_t)},
+        {GGUF_TYPE_FLOAT32, sizeof(float)},
+        {GGUF_TYPE_BOOL,    sizeof(int8_t)},
+        {GGUF_TYPE_STRING,  0}, // undefined
+        {GGUF_TYPE_ARRAY,   0}, // undefined
+        {GGUF_TYPE_UINT64,  sizeof(uint64_t)},
+        {GGUF_TYPE_INT64,   sizeof(int64_t)},
+        {GGUF_TYPE_FLOAT64, sizeof(double)},
+    };
+    static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+    return GGUF_TYPE_SIZE;
+}
 
-static const std::map<gguf_type, const char *> GGUF_TYPE_NAME = {
-    {GGUF_TYPE_UINT8,   "u8"},
-    {GGUF_TYPE_INT8,    "i8"},
-    {GGUF_TYPE_UINT16,  "u16"},
-    {GGUF_TYPE_INT16,   "i16"},
-    {GGUF_TYPE_UINT32,  "u32"},
-    {GGUF_TYPE_INT32,   "i32"},
-    {GGUF_TYPE_FLOAT32, "f32"},
-    {GGUF_TYPE_BOOL,    "bool"},
-    {GGUF_TYPE_STRING,  "str"},
-    {GGUF_TYPE_ARRAY,   "arr"},
-    {GGUF_TYPE_UINT64,  "u64"},
-    {GGUF_TYPE_INT64,   "i64"},
-    {GGUF_TYPE_FLOAT64, "f64"},
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+static const std::map<gguf_type, const char *> & get_gguf_type_name_map() {
+    static const std::map<gguf_type, const char *> GGUF_TYPE_NAME = {
+        {GGUF_TYPE_UINT8,   "u8"},
+        {GGUF_TYPE_INT8,    "i8"},
+        {GGUF_TYPE_UINT16,  "u16"},
+        {GGUF_TYPE_INT16,   "i16"},
+        {GGUF_TYPE_UINT32,  "u32"},
+        {GGUF_TYPE_INT32,   "i32"},
+        {GGUF_TYPE_FLOAT32, "f32"},
+        {GGUF_TYPE_BOOL,    "bool"},
+        {GGUF_TYPE_STRING,  "str"},
+        {GGUF_TYPE_ARRAY,   "arr"},
+        {GGUF_TYPE_UINT64,  "u64"},
+        {GGUF_TYPE_INT64,   "i64"},
+        {GGUF_TYPE_FLOAT64, "f64"},
+    };
+    static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+    return GGUF_TYPE_NAME;
+}
 
 size_t gguf_type_size(enum gguf_type type) {
+    const auto & GGUF_TYPE_SIZE = get_gguf_type_size_map();
     auto it = GGUF_TYPE_SIZE.find(type);
     return it == GGUF_TYPE_SIZE.end() ? 0 : it->second;
 }
@@ -922,6 +929,7 @@
 }
 
 const char * gguf_type_name(enum gguf_type type) {
+    const auto & GGUF_TYPE_NAME = get_gguf_type_name_map();
     auto it = GGUF_TYPE_NAME.find(type);
     return it == GGUF_TYPE_NAME.end() ? nullptr : it->second;
 }
diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp
index 046f4df897..8f3df17be2 100644
--- a/src/llama-chat.cpp
+++ b/src/llama-chat.cpp
@@ -27,53 +27,58 @@
     return str.substr(start, end - start);
 }
 
-static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
-    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
-    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
-    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
-    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
-    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
-    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
-    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
-    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
-    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
-    { "mistral-v7-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN },
-    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
-    { "phi4",              LLM_CHAT_TEMPLATE_PHI_4             },
-    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
-    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
-    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
-    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
-    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
-    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
-    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
-    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
-    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
-    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
-    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
-    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
-    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
-    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGLM_3         },
-    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGLM_4         },
-    { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE           },
-    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
-    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
-    { "exaone4",           LLM_CHAT_TEMPLATE_EXAONE_4          },
-    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
-    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
-    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
-    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
-    { "yandex",            LLM_CHAT_TEMPLATE_YANDEX            },
-    { "bailing",           LLM_CHAT_TEMPLATE_BAILING           },
-    { "llama4",            LLM_CHAT_TEMPLATE_LLAMA4            },
-    { "smolvlm",           LLM_CHAT_TEMPLATE_SMOLVLM           },
-    { "hunyuan-moe",       LLM_CHAT_TEMPLATE_HUNYUAN_MOE       },
-    { "gpt-oss",           LLM_CHAT_TEMPLATE_OPENAI_MOE        },
-    { "hunyuan-dense",     LLM_CHAT_TEMPLATE_HUNYUAN_DENSE     },
-    { "kimi-k2",           LLM_CHAT_TEMPLATE_KIMI_K2           },
-};
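+// construct-on-first-use accessor: the template table is built on first call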
+static const std::map<std::string, llm_chat_template> & get_llm_chat_templates() {
+    static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
+        { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
+        { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
+        { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
+        { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
+        { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
+        { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
+        { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
+        { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
+        { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
+        { "mistral-v7-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN },
+        { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
+        { "phi4",              LLM_CHAT_TEMPLATE_PHI_4             },
+        { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
+        { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
+        { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
+        { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
+        { "orion",             LLM_CHAT_TEMPLATE_ORION             },
+        { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
+        { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
+        { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
+        { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
+        { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
+        { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
+        { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
+        { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
+        { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGLM_3         },
+        { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGLM_4         },
+        { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE           },
+        { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
+        { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
+        { "exaone4",           LLM_CHAT_TEMPLATE_EXAONE_4          },
+        { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
+        { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
+        { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
+        { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
+        { "yandex",            LLM_CHAT_TEMPLATE_YANDEX            },
+        { "bailing",           LLM_CHAT_TEMPLATE_BAILING           },
+        { "llama4",            LLM_CHAT_TEMPLATE_LLAMA4            },
+        { "smolvlm",           LLM_CHAT_TEMPLATE_SMOLVLM           },
+        { "hunyuan-moe",       LLM_CHAT_TEMPLATE_HUNYUAN_MOE       },
+        { "gpt-oss",           LLM_CHAT_TEMPLATE_OPENAI_MOE        },
+        { "hunyuan-dense",     LLM_CHAT_TEMPLATE_HUNYUAN_DENSE     },
+        { "kimi-k2",           LLM_CHAT_TEMPLATE_KIMI_K2           },
+    };
+    return LLM_CHAT_TEMPLATES;
+}
 
 llm_chat_template llm_chat_template_from_str(const std::string & name) {
+    const auto & LLM_CHAT_TEMPLATES = get_llm_chat_templates();
     return LLM_CHAT_TEMPLATES.at(name);
 }
 
@@ -85,6 +89,7 @@
         // ignore
     }
 #endif
+    const auto & LLM_CHAT_TEMPLATES = get_llm_chat_templates();
     auto chat_template = LLM_CHAT_TEMPLATES.find(tmpl);
     if (chat_template != LLM_CHAT_TEMPLATES.end()) {
         return chat_template->second;
@@ -771,6 +776,7 @@
 // public interface
 
 int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
+    const auto & LLM_CHAT_TEMPLATES = get_llm_chat_templates();
     auto it = LLM_CHAT_TEMPLATES.begin();
     for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
         output[i] = it->first.c_str();
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 46500302e2..65a972fae8 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -129,18 +129,24 @@
     }
 }
 
-static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
-    { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
-    { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
-    { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
-    { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
-};
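+// construct-on-first-use accessor for the rope scaling type names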
+static const std::map<llama_rope_scaling_type, const char *> & get_llama_rope_scaling_types() {
+    static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
+        { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
+        { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
+        { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
+        { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
+    };
+    return LLAMA_ROPE_SCALING_TYPES;
+}
 
 std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type) {
+    const auto & LLAMA_ROPE_SCALING_TYPES = get_llama_rope_scaling_types();
     return LLAMA_ROPE_SCALING_TYPES.at(rope_scaling_type);
 }
 
 static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
+    const auto & LLAMA_ROPE_SCALING_TYPES = get_llama_rope_scaling_types();
     for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
         if (kv.second == name) {
             return (llama_rope_scaling_type) kv.first;
diff --git a/src/unicode-data.cpp b/src/unicode-data.cpp
index 04dcd7fcfb..cd1b7e499b 100644
--- a/src/unicode-data.cpp
+++ b/src/unicode-data.cpp
@@ -7,7 +7,10 @@
 #include <unordered_map>
 #include <unordered_set>
 
-const std::initializer_list<std::pair<uint32_t, uint16_t>> unicode_ranges_flags = {  // start, flags // last=next_start-1
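+// The large Unicode tables below are wrapped in construct-on-first-use
+// accessors so they are built on first call instead of at program startup.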
+const std::initializer_list<std::pair<uint32_t, uint16_t>> & get_unicode_ranges_flags() {
+    static const std::initializer_list<std::pair<uint32_t, uint16_t>> unicode_ranges_flags = {  // start, flags // last=next_start-1
 {0x000000, 0x0080},
 {0x000020, 0x0008},
 {0x000021, 0x0020},
@@ -2281,9 +2282,12 @@
 {0x100000, 0x0080},
 {0x10FFFE, 0x0001},
 {0x110000, 0x0000},
-};
+    };
+    return unicode_ranges_flags;
+}
 
-const std::unordered_set<uint32_t> unicode_set_whitespace = {
+const std::unordered_set<uint32_t> & get_unicode_set_whitespace() {
+    static const std::unordered_set<uint32_t> unicode_set_whitespace = {
 0x000009,
 0x00000A,
 0x00000B,
@@ -2309,10 +2313,13 @@
 0x00202F,
 0x00205F,
 0x003000,
-};
+    };
+    return unicode_set_whitespace;
+}
 
 // list is always in ascending order, to enable binary search
-const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_lowercase = {
+const std::initializer_list<std::pair<uint32_t, uint32_t>> & get_unicode_map_lowercase() {
+    static const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_lowercase = {
 {0x000041, 0x000061},
 {0x000042, 0x000062},
 {0x000043, 0x000063},
@@ -3746,10 +3753,13 @@
 {0x01E91F, 0x01E941},
 {0x01E920, 0x01E942},
 {0x01E921, 0x01E943},
-};
+    };
+    return unicode_map_lowercase;
+}
 
 // list is always in ascending order, to enable binary search
-const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_uppercase = {
+const std::initializer_list<std::pair<uint32_t, uint32_t>> & get_unicode_map_uppercase() {
+    static const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_uppercase = {
 {0x000061, 0x000041},
 {0x000062, 0x000042},
 {0x000063, 0x000043},
@@ -5200,9 +5210,12 @@
 {0x01E941, 0x01E91F},
 {0x01E942, 0x01E920},
 {0x01E943, 0x01E921},
-};
+    };
+    return unicode_map_uppercase;
+}
 
-const std::initializer_list<range_nfd> unicode_ranges_nfd = {  // start, last, nfd
+const std::initializer_list<range_nfd> & get_unicode_ranges_nfd() {
+    static const std::initializer_list<range_nfd> unicode_ranges_nfd = {  // start, last, nfd
 {0x000000, 0x000000, 0x000000},
 {0x0000C0, 0x0000C5, 0x000041},
 {0x0000C7, 0x0000C7, 0x000043},
@@ -7031,4 +7044,6 @@
 {0x02FA1B, 0x02FA1B, 0x009F16},
 {0x02FA1C, 0x02FA1C, 0x009F3B},
 {0x02FA1D, 0x02FA1D, 0x02A600},
-};
+    };
+    return unicode_ranges_nfd;
+}
diff --git a/src/unicode-data.h b/src/unicode-data.h
index f6973ebd2e..5b7bdf78fa 100644
--- a/src/unicode-data.h
+++ b/src/unicode-data.h
@@ -13,8 +13,9 @@
 
 static const uint32_t MAX_CODEPOINTS = 0x110000;
 
-extern const std::initializer_list<std::pair<uint32_t, uint16_t>> unicode_ranges_flags;
-extern const std::unordered_set<uint32_t> unicode_set_whitespace;
-extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_lowercase;
-extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_uppercase;
-extern const std::initializer_list<range_nfd> unicode_ranges_nfd;
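+// construct-on-first-use accessors for the Unicode tables (see unicode-data.cpp)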
+const std::initializer_list<std::pair<uint32_t, uint16_t>> & get_unicode_ranges_flags();
+const std::unordered_set<uint32_t> & get_unicode_set_whitespace();
+const std::initializer_list<std::pair<uint32_t, uint32_t>> & get_unicode_map_lowercase();
+const std::initializer_list<std::pair<uint32_t, uint32_t>> & get_unicode_map_uppercase();
+const std::initializer_list<range_nfd> & get_unicode_ranges_nfd();
diff --git a/src/unicode.cpp b/src/unicode.cpp
index 6ff7075db3..b2e0ef7bf4 100644
--- a/src/unicode.cpp
+++ b/src/unicode.cpp
@@ -124,6 +124,7 @@
 static std::vector<unicode_cpt_flags> unicode_cpt_flags_array() {
     std::vector<unicode_cpt_flags> cpt_flags(MAX_CODEPOINTS, unicode_cpt_flags::UNDEFINED);
 
+    const auto & unicode_ranges_flags = get_unicode_ranges_flags();
     assert (unicode_ranges_flags.begin()[0].first == 0);
     assert (unicode_ranges_flags.begin()[unicode_ranges_flags.size()-1].first == MAX_CODEPOINTS);
     for (size_t i = 1; i < unicode_ranges_flags.size(); ++i) {
@@ -134,18 +135,22 @@
         }
     }
 
+    const auto & unicode_set_whitespace = get_unicode_set_whitespace();
     for (auto cpt : unicode_set_whitespace) {
         cpt_flags[cpt].is_whitespace = true;
     }
 
+    const auto & unicode_map_lowercase = get_unicode_map_lowercase();
     for (auto p : unicode_map_lowercase) {
         cpt_flags[p.second].is_lowercase = true;
     }
 
+    const auto & unicode_map_uppercase = get_unicode_map_uppercase();
     for (auto p : unicode_map_uppercase) {
         cpt_flags[p.second].is_uppercase = true;
     }
 
+    const auto & unicode_ranges_nfd = get_unicode_ranges_nfd();
     for (auto &range : unicode_ranges_nfd) {  // start, last, nfd
         cpt_flags[range.nfd].is_nfd = true;
     }
@@ -788,6 +793,7 @@
     auto comp = [] (const uint32_t cpt, const range_nfd & range) {
         return cpt < range.first;
     };
+    const auto & unicode_ranges_nfd = get_unicode_ranges_nfd();
     std::vector<uint32_t> result(cpts.size());
     for (size_t i = 0; i < cpts.size(); ++i) {
         const uint32_t cpt = cpts[i];
@@ -841,6 +847,7 @@
 
 uint32_t unicode_tolower(uint32_t cpt) {
     // binary search
+    const auto & unicode_map_lowercase = get_unicode_map_lowercase();
     auto it = std::lower_bound(unicode_map_lowercase.begin(), unicode_map_lowercase.end(), cpt,
         [](const std::pair<uint32_t, uint32_t> & pair, uint32_t value) {
             return pair.first < value;
@@ -883,8 +890,9 @@
     return false;
 }
 
-std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
-    // unicode categories
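+// The regex category tables are hoisted out of unicode_regex_split() into
+// construct-on-first-use accessors.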
+static const std::map<std::string, int> & get_k_ucat_enum() {
     static const std::map<std::string, int> k_ucat_enum = {
         { "\\p{N}", unicode_cpt_flags::NUMBER },
         { "\\p{L}", unicode_cpt_flags::LETTER },
@@ -892,7 +898,10 @@
         { "\\p{M}", unicode_cpt_flags::ACCENT_MARK },
         { "\\p{S}", unicode_cpt_flags::SYMBOL },
     };
+    return k_ucat_enum;
+}
 
+static const std::map<int, int> & get_k_ucat_cpt() {
     static const std::map<int, int> k_ucat_cpt = {
         { unicode_cpt_flags::NUMBER,      0xD1 },
         { unicode_cpt_flags::LETTER,      0xD2 },
@@ -900,7 +909,10 @@
         { unicode_cpt_flags::ACCENT_MARK, 0xD4 },
         { unicode_cpt_flags::SYMBOL,      0xD5 },
     };
+    return k_ucat_cpt;
+}
 
+static const std::map<int, std::string> & get_k_ucat_map() {
     static const std::map<int, std::string> k_ucat_map = {
         { unicode_cpt_flags::NUMBER,      "\x30-\x39" }, // 0-9
         { unicode_cpt_flags::LETTER,      "\x41-\x5A\x61-\x7A" }, // A-Za-z
@@ -908,6 +920,14 @@
         { unicode_cpt_flags::ACCENT_MARK, "" }, // no sub-128 codepoints
         { unicode_cpt_flags::SYMBOL,      "\\\x24\\\x2B\x3C-\x3E\x5E\x60\\\x7C" }, // $+<=>^`|
     };
+    return k_ucat_map;
+}
+
+std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
+    // unicode categories
+    const auto & k_ucat_enum = get_k_ucat_enum();
+    const auto & k_ucat_cpt = get_k_ucat_cpt();
+    const auto & k_ucat_map = get_k_ucat_map();
 
     // compute collapsed codepoints only if needed by at least one regex
     bool need_collapse = false;
