From de1b467e20749568bd0e9df92e3bc915179230b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=88=98=E4=B8=B9?=
Date: Thu, 27 Jun 2024 21:14:00 +0800
Subject: [PATCH 1/3] Add a data-processing notebook dedicated to mlx
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 finetune/data_processing.ipynb | 66 ++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 finetune/data_processing.ipynb

diff --git a/finetune/data_processing.ipynb b/finetune/data_processing.ipynb
new file mode 100644
index 0000000..7e2b7df
--- /dev/null
+++ b/finetune/data_processing.ipynb
@@ -0,0 +1,66 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. Prepare the dataset\n",
+    "\n",
+    "Convert the dataset into a more general format\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Convert each record to the input/prompt/output JSONL format used by mlx_finetune.py\n",
+    "import os\n",
+    "import shutil\n",
+    "import json\n",
+    "\n",
+    "input_dir = \"data/AdvertiseGen\"\n",
+    "output_dir = \"data/mlx_AdvertiseGen\"\n",
+    "if os.path.exists(output_dir):\n",
+    "    shutil.rmtree(output_dir)\n",
+    "os.makedirs(output_dir, exist_ok=True)\n",
+    "\n",
+    "for fn in [\"train.json\", \"dev.json\"]:\n",
+    "    data_out_list = []\n",
+    "    with open(os.path.join(input_dir, fn), \"r\") as f, open(os.path.join(output_dir, fn), \"w\") as fo:\n",
+    "        for line in f:\n",
+    "            if len(line.strip()) > 0:\n",
+    "                data = json.loads(line)\n",
+    "                data_out = {'input': data['content'], 'prompt': '\\n请为以下关键词生成一条广告语。', 'output': data['summary']}\n",
+    "                data_out_list.append(data_out)\n",
+    "\n",
+    "        for d in data_out_list:\n",
+    "            json_str = json.dumps(d, ensure_ascii=False)  # convert the dict to a JSON string\n",
+    "            fo.write(json_str + '\\n')  # write the string followed by a newline\n",
+    "\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 6a48f3595059589ddc6aacdf4178893d93bc947a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=88=98=E4=B8=B9?=
Date: Thu, 27 Jun 2024 21:15:07 +0800
Subject: [PATCH 2/3] Fix a small issue in the MiniCPM README
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index c5d8b96..1c7c5cd 100644
--- a/README.md
+++ b/README.md
@@ -64,7 +64,7 @@ MiniCPM 是面壁智能与清华大学自然语言处理实验室共同开源的
 |-------------|------------|-----------|-----------|
 |[Transformers](#Huggingface模型)|[Transformers](#transformer_finetune)|[MLC部署](#MLC)|[GPTQ](#gptq)|
 |[vLLM](#vllm-推理)|[mlx_finetune](#mlx)|[llama.cpp](#llama.cpp)|[AWQ](#awq)|
-|[llama.cpp](#llama.cpp)|[llama_factory](https://github.com/OpenBMB/MiniCPM/tree/main/finetune/llama_factory_example/README.md)||[困惑度测试](#quantize_test)|
+|[llama.cpp](#llama.cpp)|[llama_factory](./finetune/llama_factory_example/README.md)||[困惑度测试](#quantize_test)|
 |[ollama](#ollama)||||
 |[fastllm](#fastllm)||||
 |[mlx_lm](#mlx_lm)||||
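For reference, here is a minimal sketch (not part of the patch series) of the per-record
conversion that the notebook in PATCH 1/3 performs. The sample record below is
hypothetical, illustrative data, not taken from the real AdvertiseGen files; only the
content/summary source keys, the fixed prompt string, and the input/prompt/output
layout come from the patch itself.

    import json

    # Hypothetical AdvertiseGen-style record (illustrative, not real data)
    raw = {"content": "类型#裙*颜色#蓝色", "summary": "一条清新自然的蓝色连衣裙。"}

    # The notebook rewrites each such line into the layout that
    # mlx_finetune.py consumes after PATCH 3/3
    converted = {
        "input": raw["content"],
        "prompt": "\n请为以下关键词生成一条广告语。",
        "output": raw["summary"],
    }
    print(json.dumps(converted, ensure_ascii=False))

From 80e506289a7a28b57b80be1bbb35d75267c62927 Mon Sep 17 00:00:00 2001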
From: =?UTF-8?q?=E5=88=98=E4=B8=B9?=
Date: Thu, 27 Jun 2024 21:17:26 +0800
Subject: [PATCH 3/3] Fix two bugs: the code defined two generate functions,
 and the <用户> chat format was not applied when encoding training batches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 finetune/mlx_finetune.py | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/finetune/mlx_finetune.py b/finetune/mlx_finetune.py
index 651df31..c5c442f 100644
--- a/finetune/mlx_finetune.py
+++ b/finetune/mlx_finetune.py
@@ -7,7 +7,8 @@ Using Model with https://huggingface.co/mlx-community/MiniCPM-2B-sft-bf16-llama-
 Use this Code with command:
 
 train:
-python mlx_finetune.py --model MiniCPM-2B-sft-bf16-llama-format-mlx --data data/AdvertiseGen --train --seed 2024 --iters 500
+First prepare the data by running data_processing.ipynb, then:
+python mlx_finetune.py --model MiniCPM-2B-sft-bf16-llama-format-mlx --data data/mlx_AdvertiseGen --train --seed 2024 --iters 500
 
 输出结果如下：
 
@@ -19,7 +20,7 @@ Iter 2: Val loss 4.001, Val took 1061.649s
 训练结束之后，文件夹下会有 adapters.npz 文件，用于后续的测试。接着，运行测试命令
 
 test:
-python mlx_finetune.py --model MiniCPM-2B-sft-bf16-llama-format-mlx --data data/AdvertiseGen --test --seed 2024
+python mlx_finetune.py --model MiniCPM-2B-sft-bf16-llama-format-mlx --data data/mlx_AdvertiseGen --test --seed 2024
 
 输出结果如下：
 
@@ -318,7 +319,7 @@ def build_parser():
     parser = argparse.ArgumentParser(description="LoRA or QLoRA finetuning.")
     parser.add_argument(
         "--model",
-        default="mlx_model",
+        default="/Users/liudan/Downloads/模型/llamaformat_minicpm",
         help="The path to the local model directory or Hugging Face repo.",
     )
     # Generation args
@@ -336,8 +337,7 @@ def build_parser():
         "--prompt",
         "-p",
         type=str,
-        help="The prompt for generation",
-        default=None,
+        help="The prompt for generation"
     )
 
     # Training args
@@ -349,7 +349,7 @@ def build_parser():
     parser.add_argument(
         "--data",
         type=str,
-        default="data/",
+        default="data/mlx_AdvertiseGen",
         help="Directory with {train, valid, test}.json files",
     )
     parser.add_argument(
@@ -424,9 +424,10 @@ class ConversationDataset:
 
     def __getitem__(self, idx: int):
         entry = self._data[idx]
-        content = entry.get("content", "")
-        summary = entry.get("summary", "")
-        return content, summary
+        content = entry.get("input", "")
+        summary = entry.get("output", "")
+        prompt = entry.get("prompt", "")
+        return prompt, content, summary
 
     def __len__(self):
         return len(self._data)
@@ -479,7 +480,9 @@ def iterate_batches(dset, tokenizer, batch_size, train=False):
     # Collect batches from dataset
     for i in range(0, len(indices) - batch_size + 1, batch_size):
         # Encode batch
-        batch = [tokenizer.encode(dset[indices[i + j]]) for j in range(batch_size)]
+        batch_samples = [dset[indices[i + j]] for j in range(batch_size)]
+        batch_format_text = ['<用户>{}<AI>{}'.format(s[1] + s[0], s[2]) for s in batch_samples]
+        batch = [tokenizer.encode(t) + [tokenizer.eos_token_id] for t in batch_format_text]
         lengths = [len(x) for x in batch]
         # Check if any sequence is longer than 2048 tokens
         if max(lengths) > 2048:
@@ -645,7 +648,7 @@ def train(model, train_set, val_set, optimizer, loss, tokenizer, args):
             print(f"Iter {it + 1}: Saved adapter weights to {args.adapter_file}.")
 
 
-def generate(model, prompt, tokenizer, args):
+def generate_string(model, prompt, tokenizer, args):
     print(prompt, end="", flush=True)
 
     prompt = mx.array(tokenizer.encode(prompt))
@@ -736,4 +739,4 @@ if __name__ == "__main__":
 
     if args.prompt is not None:
         print("Generating")
-        generate(model, args.prompt, tokenizer, args)
+        generate_string(model, args.prompt, tokenizer, args)
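For a concrete sense of what the encoding change in PATCH 3/3 does, here is a minimal,
self-contained sketch of how one converted record becomes a training string. It is
illustrative only: the sample record is hypothetical, format_sample is a stand-in for
the inline list comprehension in iterate_batches, and the real code additionally
appends tokenizer.eos_token_id after encoding the string.

    def format_sample(prompt: str, content: str, summary: str) -> str:
        # Mirrors the formatting added in iterate_batches(): the user turn is the
        # keywords followed by the instruction, wrapped in MiniCPM's
        # <用户>...<AI>... chat markers, with the reference ad copy as the reply.
        return "<用户>{}<AI>{}".format(content + prompt, summary)

    # Hypothetical record in the converted input/prompt/output layout
    sample = {
        "input": "类型#裙*颜色#蓝色",
        "prompt": "\n请为以下关键词生成一条广告语。",
        "output": "一条清新自然的蓝色连衣裙。",
    }
    print(format_sample(sample["prompt"], sample["input"], sample["output"]))
    # -> <用户>类型#裙*颜色#蓝色
    #    请为以下关键词生成一条广告语。<AI>一条清新自然的蓝色连衣裙。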