mirror of
https://github.com/RYDE-WORK/ktransformers.git
synced 2026-01-19 21:03:18 +08:00
[fix] f16 dequantize device ignored
This commit is contained in:
parent
cbc47d0b68
commit
29f4151ebc
@@ -681,7 +681,7 @@ def dequantize_f16_gpu(data, device):
     res = torch.from_numpy(data)
     res_gpu = torch.empty_like(res, device=device)
     res_gpu.copy_(res)
-    return res
+    return res_gpu


 GGML_DEQUANTIZE = {
     "F32": dequantize_f32,
Loading…
x
Reference in New Issue
Block a user