
Commit b85ea58

Merge branch 'master' into concedo_experimental

# Conflicts:
#	README.md

2 parents: ef3b8dc + 0bc2cdf

File tree: 11 files changed, +166 -50 lines changed


CMakeLists.txt (-5)

@@ -167,11 +167,6 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
     if (MSVC)
         # TODO: arm msvc?
     else()
-        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
-            # Apple M1, M2, etc.
-            # Raspberry Pi 3, 4, Zero 2 (64-bit)
-            add_compile_options(-mcpu=native)
-        endif()
         if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
             # Raspberry Pi 1, Zero
             add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access)

convert.py (+36 -5)

@@ -136,7 +136,7 @@ def find_n_mult(n_ff: int, n_embd: int) -> int:
         calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
         if calc_ff == n_ff:
             return n_mult
-    return 1
+    raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
 
 @dataclass
 class Params:
@@ -321,6 +321,10 @@ def astype(self, data_type: DataType) -> 'Tensor': ...
     @abstractmethod
     def permute(self, n_head: int) -> 'Tensor': ...
     @abstractmethod
+    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
+    @abstractmethod
+    def part(self, n_part: int) -> 'UnquantizedTensor': ...
+    @abstractmethod
     def to_ggml(self) -> 'GGMLCompatibleTensor': ...
 
 
@@ -345,6 +349,14 @@ def astype(self, data_type: DataType) -> Tensor:
     def to_ggml(self) -> 'UnquantizedTensor':
         return self
 
+    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
+        r = self.ndarray.shape[0] // 3
+        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))
+
+    def part(self, n_part: int) -> 'UnquantizedTensor':
+        r = self.ndarray.shape[0] // 3
+        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
+
     def permute(self, n_head: int) -> 'UnquantizedTensor':
         return UnquantizedTensor(permute(self.ndarray, n_head))
 
@@ -642,6 +654,19 @@ def load() -> Tensor:
         return lazy_tensor.load().permute(n_head)
     return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
 
+def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
+    def load() -> Tensor:
+        return lazy_tensor.load().permute_part(n_part, n_head)
+    s = lazy_tensor.shape.copy()
+    s[0] = s[0] // 3
+    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
+
+def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
+    def load() -> Tensor:
+        return lazy_tensor.load().part(n_part)
+    s = lazy_tensor.shape.copy()
+    s[0] = s[0] // 3
+    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)
 
 def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
     out: LazyModel = {}
@@ -650,11 +675,17 @@ def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
     out["output.weight"] = model["lm_head.weight"]
 
     for i in itertools.count():
-        if f"model.layers.{i}.self_attn.q_proj.weight" not in model:
+        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
+            out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
+            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
+            out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
+        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
+            out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
+            out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
+            out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
+        else:
             break
-        out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
-        out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
-        out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
+
         out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]
 
         out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]

examples/embd-input/embd-input-lib.cpp (+6 -3)

@@ -210,9 +210,12 @@ llama_token sampling_id(struct MyModel* mymodel) {
 const char * sampling(struct MyModel * mymodel) {
     llama_context * ctx = mymodel->ctx;
     int id = sampling_id(mymodel);
-    std::string ret;
-    if (id == llama_token_eos()) ret = "</s>";
-    else ret = llama_token_to_str(ctx, id);
+    static std::string ret;
+    if (id == llama_token_eos()) {
+        ret = "</s>";
+    } else {
+        ret = llama_token_to_str(ctx, id);
+    }
     eval_id(mymodel, id);
     return ret.c_str();
 }
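Why this change matters: sampling() returns ret.c_str(). With a function-local std::string the buffer is destroyed when the function returns, so the caller receives a dangling pointer; making ret static keeps the buffer alive across the call, at the cost of being overwritten by the next call and not being thread-safe. A self-contained sketch of the difference (hypothetical names, not the llama.cpp API):

```cpp
#include <cstdio>
#include <string>

// BAD: returns a pointer into a std::string that is destroyed on return,
// so dereferencing the result is undefined behavior.
static const char * token_text_dangling(int id) {
    std::string ret = (id == 0) ? "</s>" : "token";
    return ret.c_str();
}

// OK-ish: the static string outlives the call, so the pointer stays valid,
// but it is overwritten by the next call and is not thread-safe.
static const char * token_text_static(int id) {
    static std::string ret;
    ret = (id == 0) ? "</s>" : "token";
    return ret.c_str();
}

int main() {
    std::printf("%s\n", token_text_static(0));   // prints "</s>"
    const char * p = token_text_dangling(0);     // compiles, but p is already dangling
    (void) p;                                    // do not dereference it
    return 0;
}
```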

examples/embd-input/embd-input.h (+1 -3)

@@ -5,7 +5,6 @@
 #include "llama.h"
 #include "build-info.h"
 
-
 extern "C" {
 
 typedef struct MyModel {
@@ -14,14 +13,13 @@ typedef struct MyModel {
     int n_past = 0;
 } MyModel;
 
-
 struct MyModel* create_mymodel(int argc, char ** argv);
 
 bool eval_float(void* model, float* input, int N);
 bool eval_tokens(void* model, std::vector<llama_token> tokens);
 bool eval_id(struct MyModel* mymodel, int id);
 bool eval_string(struct MyModel* mymodel, const char* str);
-const char* sampling(struct MyModel* mymodel);
+const char * sampling(struct MyModel* mymodel);
 llama_token sampling_id(struct MyModel* mymodel);
 void free_mymodel(struct MyModel* mymodel);

examples/train-text-from-scratch/train-text-from-scratch.cpp (+2 -1)

@@ -2671,7 +2671,8 @@ struct train_params {
     const char * fn_checkpoint_out;
     const char * fn_model_out;
 
-    int seed;
+    uint32_t seed;
+
     int n_ctx;
     int n_embd;
     int n_mult;

ggml-cuda.cu (+46 -17)

@@ -214,6 +214,11 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
 static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
 #endif
 
+struct ggml_tensor_extra_gpu {
+    void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
+    cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
+};
+
 static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
@@ -1995,7 +2000,6 @@ inline void ggml_cuda_op_add(
     } else {
         GGML_ASSERT(false);
     }
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2027,7 +2031,6 @@ inline void ggml_cuda_op_mul(
 
         // compute
         mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
-        CUDA_CHECK(cudaGetLastError());
     }
 
     (void) dst;
@@ -2048,7 +2051,6 @@ inline void ggml_cuda_op_silu(
 
     // compute
     silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2071,7 +2073,6 @@ inline void ggml_cuda_op_rms_norm(
 
     // compute
    rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
    (void) src1;
    (void) dst;
@@ -2150,7 +2151,6 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
             GGML_ASSERT(false);
             break;
     }
-    CUDA_CHECK(cudaGetLastError());
 
 #ifdef GGML_CUDA_DMMV_F16
     if (src1_convert_f16) {
@@ -2230,7 +2230,6 @@ inline void ggml_cuda_op_rope(
 
     // compute
     rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) dst;
     (void) src0_ddq_i;
@@ -2254,7 +2253,6 @@ inline void ggml_cuda_op_diag_mask_inf(
 
     // compute
     diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_past, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) dst;
     (void) src0_ddq_i;
@@ -2276,7 +2274,6 @@ inline void ggml_cuda_op_soft_max(
 
     // compute
     soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2372,10 +2369,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
     size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
     size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
 
-    // if multiple GPUs are used they need to wait for the main GPU to finish
+    // if multiple devices are used they need to wait for the main device
+    // here an event is recorded that signifies that the main device has finished calculating the input data
     if (split && g_device_count > 1) {
         CUDA_CHECK(cudaSetDevice(g_main_device));
-        CUDA_CHECK(cudaDeviceSynchronize());
+        CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
     }
 
     for (int id = 0; id < g_device_count; ++id) {
@@ -2401,6 +2399,12 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         int64_t row_diff = row_high - row_low;
 
         cudaSetDevice(id);
+        cudaStream_t cudaStream_main = g_cudaStreams_main[id];
+
+        // wait for main GPU data if necessary
+        if (split && id != g_main_device) {
+            CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
+        }
 
         if (src0_on_device && src0_is_contiguous) {
             if (src0_is_f32) {
@@ -2476,8 +2480,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                 }
                 const int64_t i11 = i13*ne12 + i12;
 
-                cudaStream_t cudaStream_main = g_cudaStreams_main[id];
-
                 // for split tensors the data begins at i0 == i0_offset_low
                 char  * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
                 float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
@@ -2537,6 +2539,7 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
 
                 // do the computation
                 op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
+                CUDA_CHECK(cudaGetLastError());
 
                 // copy dst to host or other device if necessary
                 if (!dst_on_device) {
@@ -2566,6 +2569,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                         CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
                     }
                 }
+
+                // signify to main device that other device is done
+                if (split && g_device_count > 1 && id != g_main_device) {
+                    CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
+                }
             }
         }
     }
@@ -2577,7 +2585,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         }
 
         CUDA_CHECK(cudaSetDevice(id));
-        CUDA_CHECK(cudaDeviceSynchronize());
 
         if (src0_asq[id] > 0) {
             ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
@@ -2592,6 +2599,21 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
             ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
         }
     }
+
+    // main device waits for all other devices to be finished
+    if (split && g_device_count > 1) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        for (int id = 0; id < g_device_count; ++id) {
+            if (id != g_main_device) {
+                CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
+            }
+        }
+    }
+
+    if (dst->backend == GGML_BACKEND_CPU) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        CUDA_CHECK(cudaDeviceSynchronize());
+    }
 }
 
 void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -2831,6 +2853,10 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
         cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);
 
         extra->data_device[id] = buf;
+
+        if (backend == GGML_BACKEND_GPU_SPLIT) {
+            CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
+        }
     }
 
     tensor->extra = extra;
@@ -2844,12 +2870,15 @@ void ggml_cuda_free_data(struct ggml_tensor * tensor) {
     ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
 
     for (int id = 0; id < g_device_count; ++id) {
-        if (extra->data_device[id] == nullptr) {
-            continue;
+        if (extra->data_device[id] != nullptr) {
+            CUDA_CHECK(cudaSetDevice(id));
+            CUDA_CHECK(cudaFree(extra->data_device[id]));
         }
 
-        CUDA_CHECK(cudaSetDevice(id));
-        CUDA_CHECK(cudaFree(extra->data_device[id]));
+        if (extra->events[id] != nullptr) {
+            CUDA_CHECK(cudaSetDevice(id));
+            CUDA_CHECK(cudaEventDestroy(extra->events[id]));
+        }
     }
 
     delete extra;
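The thrust of the ggml-cuda.cu changes: the per-op CUDA_CHECK(cudaGetLastError()) calls are consolidated into one check after the op dispatch, and the blocking cudaDeviceSynchronize() calls used for multi-GPU coordination are replaced by cudaEvent_t records plus cudaStreamWaitEvent() waits, so cross-device ordering is enforced on the GPU timeline and the host only blocks when the result has to land in CPU memory (dst->backend == GGML_BACKEND_CPU). A minimal two-stream sketch of that event pattern, using the standard CUDA runtime API rather than the ggml code itself (kernel, stream, and variable names are made up):

```cpp
// build: nvcc -o event_sync event_sync.cu
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

__global__ void fill(float * x, int n, float v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = v;
}

__global__ void add_one(float * x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}

int main() {
    const int n = 1 << 20;
    float * x;
    CUDA_CHECK(cudaMalloc(&x, n * sizeof(float)));

    cudaStream_t producer, consumer;
    CUDA_CHECK(cudaStreamCreate(&producer));
    CUDA_CHECK(cudaStreamCreate(&consumer));

    // Timing is disabled because the event is only used for ordering,
    // the same flag used for ggml_tensor_extra_gpu::events in this commit.
    cudaEvent_t ready;
    CUDA_CHECK(cudaEventCreateWithFlags(&ready, cudaEventDisableTiming));

    // Producer stream writes the data and records "ready".
    fill<<<(n + 255) / 256, 256, 0, producer>>>(x, n, 1.0f);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(ready, producer));

    // Consumer stream waits on the event (on the GPU, without blocking the host)
    // before touching the same buffer.
    CUDA_CHECK(cudaStreamWaitEvent(consumer, ready, 0));
    add_one<<<(n + 255) / 256, 256, 0, consumer>>>(x, n);
    CUDA_CHECK(cudaGetLastError());

    // Only synchronize the host once the result is actually needed on the CPU.
    float first = 0.0f;
    CUDA_CHECK(cudaMemcpyAsync(&first, x, sizeof(float), cudaMemcpyDeviceToHost, consumer));
    CUDA_CHECK(cudaStreamSynchronize(consumer));
    printf("x[0] = %f (expected 2.0)\n", first);

    CUDA_CHECK(cudaEventDestroy(ready));
    CUDA_CHECK(cudaStreamDestroy(producer));
    CUDA_CHECK(cudaStreamDestroy(consumer));
    CUDA_CHECK(cudaFree(x));
    return 0;
}
```

The design point is that an event wait only orders the two streams involved, whereas cudaDeviceSynchronize() stalls the host and every stream on the device.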

ggml-cuda.h (-4)

@@ -8,10 +8,6 @@ extern "C" {
 
 #define GGML_CUDA_MAX_DEVICES 16
 
-struct ggml_tensor_extra_gpu {
-    void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
-};
-
 void ggml_init_cublas(void);
 void ggml_cuda_set_tensor_split(const float * tensor_split);
 
ggml-metal.m (+3 -1)

@@ -202,7 +202,9 @@ @implementation GGMLMetalClass
 
 void ggml_metal_free(struct ggml_metal_context * ctx) {
     fprintf(stderr, "%s: deallocating\n", __func__);
-
+    for (int i = 0; i < ctx->n_buffers; ++i) {
+        [ctx->buffers[i].metal release];
+    }
     free(ctx);
 }
 