
Commit 6ced18f
minor : spaces
ggerganov authored and iboB committed Oct 23, 2023
1 parent 933b132 commit 6ced18f
Showing 3 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion src/ggml-alloc.c
@@ -183,7 +183,7 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor)
     alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
 }
 
-void ggml_allocr_set_tensor_external_data(struct ggml_allocr* alloc, struct ggml_tensor* tensor, void* data, size_t data_offset) {
+void ggml_allocr_set_tensor_external_data(struct ggml_allocr * alloc, struct ggml_tensor * tensor, void * data, size_t data_offset) {
     GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources
     GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
     GGML_ASSERT(data_offset == 0); // not supported yet
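For context, a minimal sketch of how ggml_allocr_set_tensor_external_data might be called, inferred only from the signature and asserts visible in the hunk above; it is not part of this commit. The surrounding setup (ggml_init with no_alloc, ggml_allocr_new, the buffer sizes and alignment) is an assumption based on the standard ggml allocator API of this period.

// Sketch (assumption): create a tensor without letting ggml allocate its data,
// then point it at an externally managed buffer via the function touched above.
#include "ggml.h"
#include "ggml-alloc.h"
#include <stdlib.h>

int main(void) {
    // no_alloc: only tensor metadata lives in the context; data comes from elsewhere.
    struct ggml_init_params params = {
        /* .mem_size   = */ 16 * 1024 * 1024,  // illustrative size
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true,
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);

    // Allocator over some arena (sizes and alignment are placeholders).
    void * arena = malloc(1024 * 1024);
    struct ggml_allocr * alloc = ggml_allocr_new(arena, 1024 * 1024, /* alignment = */ 32);

    // Externally owned buffer for the tensor's data. Per the asserts in the hunk:
    // the tensor must not be a view, must not already have data, and data_offset
    // must currently be 0.
    void * external = malloc(ggml_nbytes(t));
    ggml_allocr_set_tensor_external_data(alloc, t, external, 0);

    ggml_allocr_free(alloc);
    ggml_free(ctx);
    free(external);
    free(arena);
    return 0;
}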
2 changes: 1 addition & 1 deletion src/ggml-backend.c
@@ -365,7 +365,7 @@ ggml_backend_t ggml_backend_cpu_init(void) {
 
     ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
 
-    *cpu_backend = (struct ggml_backend){
+    *cpu_backend = (struct ggml_backend) {
         /* .interface = */ cpu_backend_i,
         /* .context = */ ctx,
     };
4 changes: 2 additions & 2 deletions src/ggml-cuda.cu
@@ -7539,15 +7539,15 @@ static const char * ggml_backend_cuda_name(ggml_backend_t backend) {
 static void ggml_backend_cuda_free(ggml_backend_t backend) {
     for (int id = 0; id < GGML_CUDA_MAX_DEVICES; ++id) {
         for (int is = 0; is < MAX_STREAMS; ++is) {
-            auto& stream = g_cudaStreams[id][is];
+            auto & stream = g_cudaStreams[id][is];
             if (!stream) break;
             if (!g_cublas_initialized_as_plugin) {
                 cudaStreamDestroy(stream);
             }
             stream = nullptr;
         }
 
-        auto& cublasHandle = g_cublas_handles[id];
+        auto & cublasHandle = g_cublas_handles[id];
         if (!cublasHandle) continue;
         if (!g_cublas_initialized_as_plugin) {
             cublasDestroy(cublasHandle);
