
Commit 1aa872a (parent: 711990c)

remove duplicated code

common_chat_templates_init is already done at end of load_model in server.hpp

File tree

1 file changed (+1, -11 lines)


src/main/cpp/jllama.cpp

Lines changed: 1 addition & 11 deletions
@@ -452,16 +452,6 @@ JNIEXPORT void JNICALL Java_de_kherud_llama_LlamaModel_loadModel(JNIEnv *env, jo
         llama_init_dft.context.reset();
     }
 
-    ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, params.chat_template);
-    try {
-        common_chat_format_example(ctx_server->chat_templates.get(), params.use_jinja);
-    } catch (const std::exception &e) {
-        SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This "
-                "may cause the model to output suboptimal responses\n",
-                __func__);
-        ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, "chatml");
-    }
-
     // print sample chat example to make it clear which template is used
     LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
             common_chat_templates_source(ctx_server->chat_templates.get()),
@@ -860,4 +850,4 @@ JNIEXPORT jbyteArray JNICALL Java_de_kherud_llama_LlamaModel_jsonSchemaToGrammar
     nlohmann::ordered_json c_schema_json = nlohmann::ordered_json::parse(c_schema);
     const std::string c_grammar = json_schema_to_grammar(c_schema_json);
     return parse_jbytes(env, c_grammar);
-}
+}
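For context, a minimal sketch of the initialization flow this commit relies on, assuming (per the commit message) that load_model in server.hpp performs the chat-template setup that the removed block above duplicated. The load_model_templates helper name and the server_context/common_params parameter shapes are illustrative assumptions; common_chat_templates_init, common_chat_format_example, the chatml fallback, and the logging via common_chat_templates_source are taken from the diff itself.

// Sketch only: assumed shape of the template setup that now lives solely in
// load_model (server.hpp), mirroring the block removed from jllama.cpp above.
static void load_model_templates(server_context *ctx_server, const common_params &params) {
    ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, params.chat_template);
    try {
        // Render a sample exchange to verify the model's own template is usable.
        common_chat_format_example(ctx_server->chat_templates.get(), params.use_jinja);
    } catch (const std::exception &) {
        // Unsupported template: fall back to chatml, as the removed JNI copy did.
        ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, "chatml");
    }
}

// After this runs, the JNI loadModel binding only needs to log which template was
// selected, e.g. via common_chat_templates_source(ctx_server->chat_templates.get()).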
