@@ -147,6 +147,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_SCALE,            "%s.attention.scale" },
     { LLM_KV_ATTENTION_KEY_LENGTH_MLA,   "%s.attention.key_length_mla" },
     { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" },
+    { LLM_KV_ATTENTION_LAYER_INDICES,    "%s.attention.layer_indices" },
 
     { LLM_KV_ROPE_DIMENSION_COUNT,       "%s.rope.dimension_count" },
     { LLM_KV_ROPE_DIMENSION_SECTIONS,    "%s.rope.dimension_sections" },
@@ -197,6 +198,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_MASK_ID,              "tokenizer.ggml.mask_token_id" },
     { LLM_KV_TOKENIZER_ADD_BOS,              "tokenizer.ggml.add_bos_token" },
     { LLM_KV_TOKENIZER_ADD_EOS,              "tokenizer.ggml.add_eos_token" },
+    { LLM_KV_TOKENIZER_ADD_SEP,              "tokenizer.ggml.add_sep_token" },
     { LLM_KV_TOKENIZER_ADD_PREFIX,           "tokenizer.ggml.add_space_prefix" },
     { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,      "tokenizer.ggml.remove_extra_whitespaces" },
     { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
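For context: the `%s` in each value is substituted with the architecture name when the concrete GGUF key is built, so the new attention entry resolves to a key like `llama.attention.layer_indices`, while the tokenizer keys carry no prefix. A minimal sketch of that substitution, not part of this diff (the helper name and the `llama` prefix are illustrative):

    #include <cstdio>
    #include <string>

    // Illustrative only: expand one of the LLM_KV_NAMES format strings
    // above into a concrete GGUF metadata key for a given architecture.
    static std::string build_kv_key(const char * fmt, const char * arch_name) {
        char buf[256];
        std::snprintf(buf, sizeof(buf), fmt, arch_name);
        return buf;
    }

    int main() {
        // Prints: llama.attention.layer_indices
        std::printf("%s\n", build_kv_key("%s.attention.layer_indices", "llama").c_str());
        return 0;
    }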
@@ -1816,3 +1818,25 @@ llm_arch llm_arch_from_string(const std::string & name) {
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
     return LLM_TENSOR_INFOS.at(tensor);
 }
+
+bool llm_arch_is_recurrent(const llm_arch & arch) {
+    switch (arch) {
+        case LLM_ARCH_MAMBA:
+        case LLM_ARCH_RWKV6:
+        case LLM_ARCH_RWKV6QWEN2:
+        case LLM_ARCH_RWKV7:
+        case LLM_ARCH_ARWKV7:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool llm_arch_is_hybrid(const llm_arch & arch) {
+    // TODO: There are currently no hybrid models! Once there are, this will be
+    //  the place to identify them
+    switch (arch) {
+        default:
+            return false;
+    }
+}
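The two new predicates give callers a single place to branch on architecture class, e.g. to decide whether a model needs recurrent-state handling rather than a plain KV cache. A hedged usage sketch (the surrounding function is hypothetical; only the predicates and enum values come from this diff):

    #include "llama-arch.h"  // assumed header declaring llm_arch and the predicates

    // Sketch: a caller deciding whether recurrent-state handling is required.
    // Hybrid models would need both paths; none exist yet, so
    // llm_arch_is_hybrid() currently always returns false.
    static bool needs_recurrent_state(llm_arch arch) {
        return llm_arch_is_recurrent(arch) || llm_arch_is_hybrid(arch);
    }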