mirror of https://github.com/RYDE-WORK/MiniCPM.git

Fix: flash_attn and cpu inference

parent c8ad2ec0d3
commit 5977a63783
configuration_minicpm.py
@@ -174,6 +174,11 @@ class MiniCPMConfig(PretrainedConfig):
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
         )
+        try:
+            import flash_attn
+            self._attn_implementation = "flash_attention_2"
+        except:
+            pass
 
     def _rope_scaling_validation(self):
         """
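Taken on its own, the probe added to MiniCPMConfig.__init__ reduces to the pattern below. A minimal sketch; pick_attn_implementation is a hypothetical helper, not code from the repo:

# Hypothetical helper illustrating the probe above: prefer
# FlashAttention-2 when the flash_attn package imports cleanly,
# otherwise keep the default implementation.
def pick_attn_implementation(default: str = "eager") -> str:
    try:
        import flash_attn  # noqa: F401  (presence check only)
    except ImportError:
        return default
    return "flash_attention_2"

Note that the committed code uses a bare except:, which also swallows unrelated failures such as an OSError from a broken flash_attn build; catching ImportError, as in the sketch, is the narrower choice.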
modeling_minicpm.py
@@ -51,10 +51,11 @@ from transformers.utils.import_utils import is_torch_fx_available
 from .configuration_minicpm import MiniCPMConfig
 import re
 
-
-if is_flash_attn_2_available():
+try:
     from flash_attn import flash_attn_func, flash_attn_varlen_func
     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+except:
+    pass
 
 
 # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
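The module-level imports get the same guard. is_flash_attn_2_available() consults package metadata, so the import itself can still fail, for example on an ABI mismatch with the installed torch; wrapping it in try/except keeps the model importable either way. The pattern in isolation (has_flash_attn is a hypothetical helper):

# Guarded optional import: bind None sentinels on failure so call
# sites can feature-test at runtime instead of crashing at import.
try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
except ImportError:
    flash_attn_func = None
    flash_attn_varlen_func = None

def has_flash_attn() -> bool:  # hypothetical helper
    return flash_attn_func is not None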
@@ -125,7 +126,7 @@ ALL_LAYERNORM_LAYERS.append(MiniCPMRMSNorm)
 
 
 class MiniCPMRotaryEmbedding(nn.Module):
-    def __init__(self, dim, max_position_embeddings=2048, base=10000, device="cuda"):
+    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()
 
         self.dim = dim
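This default change is the CPU half of the fix: with device="cuda" hardcoded, the rotary embedding allocates its inverse-frequency buffer on CUDA at construction time, which raises on a GPU-less machine; with device=None, tensors land on the default device and later follow the module's .to()/.cpu() moves. A standalone illustration, with make_inv_freq as a hypothetical stand-in for the buffer setup:

import torch

# device=None lets torch.arange allocate on the default device, so the
# same code runs on CPU-only machines; device="cuda" would raise there.
def make_inv_freq(dim: int, base: float = 10000.0, device=None) -> torch.Tensor:
    return 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim))

inv_freq = make_inv_freq(64)  # works without a GPU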
@@ -763,7 +764,6 @@ class MiniCPMDecoderLayer(nn.Module):
     def __init__(self, config: MiniCPMConfig, layer_idx: int):
         super().__init__()
         self.hidden_size = config.hidden_size
 
         self.self_attn = MINICPM_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
 
         self.mlp = MiniCPMMLP(config)
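For context, MINICPM_ATTENTION_CLASSES is a dispatch table keyed by the _attn_implementation string that the configuration change above now sets. A sketch of the shape of that pattern; the stand-in class names here are illustrative, not the repo's:

import torch.nn as nn

# Illustrative stand-ins for the eager / flash-attn / SDPA variants.
class EagerAttention(nn.Module): ...
class FlashAttention2(nn.Module): ...
class SdpaAttention(nn.Module): ...

# Keys mirror the _attn_implementation values a config can carry.
ATTENTION_CLASSES = {
    "eager": EagerAttention,
    "flash_attention_2": FlashAttention2,
    "sdpa": SdpaAttention,
}

With this shape, the config probe from the first hunk and the lookup in MiniCPMDecoderLayer.__init__ compose: the config flips the key to "flash_attention_2" when flash_attn imports, and the decoder layer instantiates the matching class.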