From e6924d83b0bb9e2e63017b1e77a4310518fdba72 Mon Sep 17 00:00:00 2001
From: root <403644786@qq.com>
Date: Mon, 29 Jul 2024 10:21:24 +0800
Subject: [PATCH] Add the llama_factory GitHub address
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README-en.md | 4 ++--
 README.md    | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README-en.md b/README-en.md
index 61461ce..55c6c35 100644
--- a/README-en.md
+++ b/README-en.md
@@ -64,7 +64,7 @@ We release all model parameters for research and limited commercial use.
 |-------------|------------|-----------|-----------|
 |[Transformers](#Huggingface)|[Transformers](#6)|[MLC](#MLC)|[GPTQ](#gptq)|
 |[vLLM](#vLLM)|[mlx_finetune](#mlx_finetune)|[llama.cpp](#llama.cpp)|[AWQ](#awq)|
-|[llama.cpp](#llama.cpp)|[llama_factory](./finetune/llama_factory_example/README.md)||[bnb](#bnb)|
+|[llama.cpp](#llama.cpp)|[LLaMA-Factory](./finetune/llama_factory_example/README.md)||[bnb](#bnb)|
 |[ollama](#ollama)|||[quantize_test](#quantize_test)|
 |[fastllm](#fastllm)||||
 |[mlx_lm](#mlx)||||
@@ -416,7 +416,7 @@ python bnb_quantize.py

 ## Community
-
+- [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory.git): [MiniCPM fine-tuning one-click solution](https://www.bilibili.com/video/BV1x1421t7cm/?spm_id_from=333.337.search-card.all.click&vd_source=2cab904f7c47d748c26d0dd8f747f77f)
 - [ChatLLM](https://github.com/foldl/chatllm.cpp): [Run MiniCPM on CPU](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16/discussions/2#65c59c4f27b8c11e43fc8796)
diff --git a/README.md b/README.md
index 7502eed..b784189 100644
--- a/README.md
+++ b/README.md
@@ -64,7 +64,7 @@ MiniCPM 是面壁智能与清华大学自然语言处理实验室共同开源的
 |-------------|------------|-----------|-----------|
 |[Transformers](#Huggingface模型)|[Transformers](#transformer_finetune)|[MLC部署](#MLC)|[GPTQ](#gptq)|
 |[vLLM](#vllm-推理)|[mlx_finetune](#mlx)|[llama.cpp](#llama.cpp)|[AWQ](#awq)|
-|[llama.cpp](#llama.cpp)|[llama_factory](./finetune/llama_factory_example/README.md)||[bnb](#bnb)|
+|[llama.cpp](#llama.cpp)|[LLaMA-Factory](./finetune/llama_factory_example/README.md)||[bnb](#bnb)|
 |[ollama](#ollama)|||[量化测试](#quantize_test)|
 |[fastllm](#fastllm)||||
 |[mlx_lm](#mlx_lm)||||
@@ -424,7 +424,7 @@ python bnb_quantize.py

 ## 开源社区
-
+- [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory.git):[MiniCPM微调一键式解决方案](https://www.bilibili.com/video/BV1x1421t7cm/?spm_id_from=333.337.search-card.all.click&vd_source=2cab904f7c47d748c26d0dd8f747f77f)
 - [ChatLLM框架](https://github.com/foldl/chatllm.cpp):[在CPU上跑MiniCPM](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16/discussions/2#65c59c4f27b8c11e43fc8796)