From 8ebe696889b1760a748a9965864c73ed7fe76be2 Mon Sep 17 00:00:00 2001
From: winter <2453101190@qq.com>
Date: Mon, 25 Mar 2024 15:49:41 +0800
Subject: [PATCH 1/2] add fine-tuning model settings: bf16 and fp16

---
 finetune/finetune.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/finetune/finetune.py b/finetune/finetune.py
index 25713d3..7008ff2 100644
--- a/finetune/finetune.py
+++ b/finetune/finetune.py
@@ -171,6 +171,8 @@ if __name__ == "__main__":
         model_path=model_args.model_name_or_path,
         max_length=training_args.model_max_length,
         use_lora=training_args.use_lora,
+        bf16=training_args.bf16,
+        fp16=training_args.fp16
     )
 
     train_dataset = SupervisedDataset(

From fa7287db4508a06302a235ee32f7b90badc4b44f Mon Sep 17 00:00:00 2001
From: xuhaifeng
Date: Wed, 17 Apr 2024 18:23:16 +0800
Subject: [PATCH 2/2] fix ollama

---
 README-en.md | 6 +++++-
 README.md    | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/README-en.md b/README-en.md
index d02e6eb..23cd075 100644
--- a/README-en.md
+++ b/README-en.md
@@ -194,7 +194,11 @@ We have supported inference with [llama.cpp](https://github.com/ggerganov/llama.
 More parameters adjustment [see this](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
 
 **ollama**
-Solving [this issue](https://github.com/ollama/ollama/issues/2383)
+1. [install ollama](https://github.com/ollama/ollama)
+2. In command line:
+```
+ollama run modelbest/minicpm-2b-dpo
+```
 
 **fastllm**
 1. install [fastllm](https://github.com/ztxz16/fastllm)

diff --git a/README.md b/README.md
index 017f2e9..b3a368e 100644
--- a/README.md
+++ b/README.md
@@ -201,7 +201,11 @@ MiniCPM支持[llama.cpp](https://github.com/ggerganov/llama.cpp/) 、[ollama](ht
 更多参数调整[详见](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)
 
 **ollama**
-正在解决[这个问题](https://github.com/ollama/ollama/issues/2383)
+1. [安装ollama](https://github.com/ollama/ollama)
+2. 在命令行运行:
+```
+ollama run modelbest/minicpm-2b-dpo
+```
 
 **fastllm**
 1. [编译安装fastllm](https://github.com/ztxz16/fastllm)
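
Editor's note on [PATCH 1/2]: the hunk forwards `training_args.bf16` and `training_args.fp16` into the model-loading call, which implies the callee maps those flags to a torch dtype. Below is a minimal sketch of how such a loader might do that with Hugging Face `transformers`; the helper name `load_model_and_tokenizer` and its body are assumptions for illustration (the callee's name is not visible in the hunk), not the repository's actual implementation.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical loader matching the keyword arguments seen in the hunk.
def load_model_and_tokenizer(model_path, max_length, use_lora=False,
                             bf16=False, fp16=False):
    # Map the two boolean flags to a torch dtype; bf16 takes priority
    # if both are set, and full fp32 precision is the fallback.
    if bf16:
        dtype = torch.bfloat16
    elif fp16:
        dtype = torch.float16
    else:
        dtype = torch.float32
    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=dtype, trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, model_max_length=max_length, trust_remote_code=True
    )
    # LoRA wiring is omitted here; see finetune/finetune.py for the real logic.
    return model, tokenizer
```

Loading directly in bf16/fp16 halves the memory footprint of the initial fp32 weights, which is presumably why the flags are exposed at load time rather than only in the training arguments.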
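Editor's note on [PATCH 2/2]: once `ollama run modelbest/minicpm-2b-dpo` has pulled the model, it can also be queried programmatically through ollama's documented local REST API (default `http://localhost:11434`). A short sketch, assuming the ollama server is running and the `modelbest/minicpm-2b-dpo` tag from the README step is available:

```python
import json
import urllib.request

# Send a single (non-streaming) generation request to the local ollama server.
payload = {
    "model": "modelbest/minicpm-2b-dpo",  # tag pulled by `ollama run` above
    "prompt": "Introduce MiniCPM in one sentence.",
    "stream": False,  # return one JSON object instead of a token stream
}
req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["response"])
```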