#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#endif

#include "ggml-backend.h"
#include "ggml-backend-impl.h"
#include "ggml-alloc.h"
#include "ggml-impl.h"

#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>

#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

// backend buffer type

const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name(buft);
}

ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    if (size == 0) {
        // return a dummy buffer for zero-sized allocations
        return ggml_backend_buffer_init(buft, {}, NULL, 0);
    }

    return buft->iface.alloc_buffer(buft, size);
}

size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_alignment(buft);
}

size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
    // get_max_size is optional, defaults to SIZE_MAX
    if (buft->iface.get_max_size) {
        return buft->iface.get_max_size(buft);
    }
    return SIZE_MAX;
}

size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
    // get_alloc_size is optional, defaults to ggml_nbytes
    if (buft->iface.get_alloc_size) {
        size_t size = buft->iface.get_alloc_size(buft, tensor);
        assert(size >= ggml_nbytes(tensor));
        return size;
    }
    return ggml_nbytes(tensor);
}

bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
    if (buft->iface.is_host) {
        return buft->iface.is_host(buft);
    }
    return false;
}

ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
    return buft->device;
}
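// Illustrative usage of the buffer-type API above (a sketch, not part of this file's call
// paths). It assumes a host buffer type such as the one returned by
// ggml_backend_cpu_buffer_type() and a tensor `t` created in a ggml_context elsewhere:
//
//   ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
//   size_t                     need = ggml_backend_buft_get_alloc_size(buft, t);
//   ggml_backend_buffer_t      buf  = ggml_backend_buft_alloc_buffer(buft, need);
//   ggml_backend_tensor_alloc(buf, t, ggml_backend_buffer_get_base(buf));
//   ...
//   ggml_backend_buffer_free(buf);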

// backend buffer

ggml_backend_buffer_t ggml_backend_buffer_init(
        ggml_backend_buffer_type_t   buft,
        struct ggml_backend_buffer_i iface,
        void *                       context,
        size_t                       size) {
    ggml_backend_buffer_t buffer = new ggml_backend_buffer {
        /* .iface   = */ iface,
        /* .buft    = */ buft,
        /* .context = */ context,
        /* .size    = */ size,
        /* .usage   = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };

    return buffer;
}
|
|
|
const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { |
|
return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer)); |
|
} |
|
|
|
void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { |
|
if (buffer == NULL) { |
|
return; |
|
} |
|
|
|
if (buffer->iface.free_buffer != NULL) { |
|
buffer->iface.free_buffer(buffer); |
|
} |
|
delete buffer; |
|
} |
|
|
|
size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { |
|
return buffer->size; |
|
} |
|
|
|
void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { |
|
|
|
if (buffer->size == 0) { |
|
return NULL; |
|
} |
|
|
|
void * base = buffer->iface.get_base(buffer); |
|
|
|
GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); |
|
|
|
return base; |
|
} |
|
|
|
void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { |
|
|
|
if (buffer->iface.init_tensor) { |
|
buffer->iface.init_tensor(buffer, tensor); |
|
} |
|
} |
|
|
|
void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
|
|
|
if (buffer->size == 0) { |
|
return; |
|
} |
|
|
|
buffer->iface.clear(buffer, value); |
|
} |
|
|
|
size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { |
|
return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer)); |
|
} |
|
|
|
size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) { |
|
return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer)); |
|
} |
|
|
|
size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { |
|
return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor); |
|
} |
|
|
|
bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { |
|
return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer)); |
|
} |
|
|
|
void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { |
|
buffer->usage = usage; |
|
|
|
|
|
if (ggml_backend_buffer_is_multi_buffer(buffer)) { |
|
ggml_backend_multi_buffer_set_usage(buffer, usage); |
|
} |
|
} |
|
|
|
enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) { |
|
return buffer->usage; |
|
} |
|
|
|
ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) { |
|
return buffer->buft; |
|
} |
|
|
|
void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { |
|
if (buffer->iface.reset) { |
|
buffer->iface.reset(buffer); |
|
} |
|
} |
|
|
|
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) { |
|
ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer; |
|
if (dst_buf->iface.cpy_tensor) { |
|
return dst_buf->iface.cpy_tensor(dst_buf, src, dst); |
|
} |
|
return false; |
|
} |
|
|
|
|
|
|
|
ggml_guid_t ggml_backend_guid(ggml_backend_t backend) { |
|
if (backend == NULL) { |
|
return NULL; |
|
} |
|
return backend->guid; |
|
} |
|
|
|
const char * ggml_backend_name(ggml_backend_t backend) { |
|
if (backend == NULL) { |
|
return "NULL"; |
|
} |
|
return backend->iface.get_name(backend); |
|
} |
|
|
|
void ggml_backend_free(ggml_backend_t backend) { |
|
if (backend == NULL) { |
|
return; |
|
} |
|
|
|
backend->iface.free(backend); |
|
} |
|
|
|
ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) { |
|
return ggml_backend_dev_buffer_type(backend->device); |
|
} |
|
|
|
ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) { |
|
return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size); |
|
} |
|
|
|
size_t ggml_backend_get_alignment(ggml_backend_t backend) { |
|
return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend)); |
|
} |
|
|
|
size_t ggml_backend_get_max_size(ggml_backend_t backend) { |
|
return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend)); |
|
} |
|
|
|
void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
|
GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
|
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
|
|
|
if (backend->iface.set_tensor_async == NULL) { |
|
ggml_backend_tensor_set(tensor, data, offset, size); |
|
} else { |
|
backend->iface.set_tensor_async(backend, tensor, data, offset, size); |
|
} |
|
} |
|
|
|
void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
|
GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
|
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
|
|
|
if (backend->iface.get_tensor_async == NULL) { |
|
ggml_backend_tensor_get(tensor, data, offset, size); |
|
} else { |
|
backend->iface.get_tensor_async(backend, tensor, data, offset, size); |
|
} |
|
} |
|
|
|
void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
|
GGML_ASSERT(tensor); |
|
ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
|
|
|
if (size == 0) { |
|
return; |
|
} |
|
|
|
GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
|
GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
|
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
|
|
|
buf->iface.set_tensor(buf, tensor, data, offset, size); |
|
} |
|
|
|
void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
|
GGML_ASSERT(tensor); |
|
ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
|
|
|
if (size == 0) { |
|
return; |
|
} |
|
|
|
GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
|
GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
|
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
|
|
|
buf->iface.get_tensor(buf, tensor, data, offset, size); |
|
} |
|
|
|
void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
|
ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
|
|
|
if (size == 0) { |
|
return; |
|
} |
|
|
|
GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
|
GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
|
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
|
GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer"); |
|
|
|
buf->iface.memset_tensor(buf, tensor, value, offset, size); |
|
} |
|
|
|
void ggml_backend_synchronize(ggml_backend_t backend) { |
|
if (backend->iface.synchronize == NULL) { |
|
return; |
|
} |
|
|
|
backend->iface.synchronize(backend); |
|
} |
|
|
|
ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
|
GGML_ASSERT(backend->iface.graph_plan_create != NULL); |
|
|
|
return backend->iface.graph_plan_create(backend, cgraph); |
|
} |
|
|
|
void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
|
GGML_ASSERT(backend->iface.graph_plan_free != NULL); |
|
|
|
backend->iface.graph_plan_free(backend, plan); |
|
} |
|
|
|
enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
|
GGML_ASSERT(backend->iface.graph_plan_compute != NULL); |
|
|
|
return backend->iface.graph_plan_compute(backend, plan); |
|
} |
|
|
|
enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
|
enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph); |
|
ggml_backend_synchronize(backend); |
|
return err; |
|
} |
|
|
|
enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
|
return backend->iface.graph_compute(backend, cgraph); |
|
} |
|
|
|
bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
|
return ggml_backend_dev_supports_op(backend->device, op); |
|
} |
|
|
|
bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) { |
|
return ggml_backend_dev_supports_buft(backend->device, buft); |
|
} |
|
|
|
bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
|
return ggml_backend_dev_offload_op(backend->device, op); |
|
} |
|
|
|
ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) { |
|
return backend->device; |
|
} |
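// A minimal end-to-end sketch of the backend API above (illustrative only; `dev`, `t`,
// `data`, `out`, `result` and `gf` are assumed to be set up elsewhere, e.g. a device
// obtained from the registry and a graph built in a ggml_context):
//
//   ggml_backend_t        backend = ggml_backend_dev_init(dev, NULL);
//   ggml_backend_buffer_t buf     = ggml_backend_alloc_buffer(backend, buf_size);
//   ggml_backend_tensor_set(t, data, 0, ggml_nbytes(t));        // upload weights/inputs
//   ggml_backend_graph_compute(backend, gf);                    // synchronous compute
//   ggml_backend_tensor_get(out, result, 0, ggml_nbytes(out));  // download results
//   ggml_backend_buffer_free(buf);
//   ggml_backend_free(backend);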
|
|
|
|
|
|
|
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { |
|
if (a->type != b->type) { |
|
return false; |
|
} |
|
for (int i = 0; i < GGML_MAX_DIMS; i++) { |
|
if (a->ne[i] != b->ne[i]) { |
|
return false; |
|
} |
|
if (a->nb[i] != b->nb[i]) { |
|
return false; |
|
} |
|
} |
|
return true; |
|
} |
|
|
|
void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { |
|
GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
|
|
|
if (src == dst) { |
|
return; |
|
} |
|
|
|
if (ggml_backend_buffer_is_host(src->buffer)) { |
|
ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); |
|
} else if (ggml_backend_buffer_is_host(dst->buffer)) { |
|
ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); |
|
} else if (!ggml_backend_buffer_copy_tensor(src, dst)) { |
|
#ifndef NDEBUG |
|
GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer)); |
|
#endif |
|
size_t nbytes = ggml_nbytes(src); |
|
void * data = malloc(nbytes); |
|
ggml_backend_tensor_get(src, data, 0, nbytes); |
|
ggml_backend_tensor_set(dst, data, 0, nbytes); |
|
free(data); |
|
} |
|
} |
|
|
|
void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) { |
|
GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
|
|
|
if (src == dst) { |
|
return; |
|
} |
|
|
|
if (backend_dst->iface.cpy_tensor_async != NULL) { |
|
if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) { |
|
return; |
|
} |
|
} |
|
|
|
|
|
|
|
ggml_backend_synchronize(backend_src); |
|
ggml_backend_synchronize(backend_dst); |
|
ggml_backend_tensor_copy(src, dst); |
|
} |
|
|
|
|
|
|
|
ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) { |
|
|
|
if (device == NULL || device->iface.event_new == NULL) { |
|
return NULL; |
|
} |
|
return device->iface.event_new(device); |
|
} |
|
|
|
void ggml_backend_event_free(ggml_backend_event_t event) { |
|
if (event == NULL) { |
|
return; |
|
} |
|
event->device->iface.event_free(event->device, event); |
|
} |
|
|
|
void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) { |
|
GGML_ASSERT(backend->iface.event_record != NULL); |
|
|
|
backend->iface.event_record(backend, event); |
|
} |
|
|
|
void ggml_backend_event_synchronize(ggml_backend_event_t event) { |
|
GGML_ASSERT(event->device->iface.event_synchronize); |
|
|
|
event->device->iface.event_synchronize(event->device, event); |
|
} |
|
|
|
void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { |
|
GGML_ASSERT(backend->iface.event_wait != NULL); |
|
|
|
backend->iface.event_wait(backend, event); |
|
} |
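// Events are the synchronization primitive used for cross-backend async work: the
// producing backend records an event after submitting its work, and the consuming
// backend waits on it instead of doing a full ggml_backend_synchronize(). A sketch of
// the intended pattern (names illustrative):
//
//   ggml_backend_event_record(event, backend_src);   // after submitting work on src
//   ggml_backend_event_wait(backend_dst, event);     // dst queues a wait, no host stall
//   ggml_backend_event_synchronize(event);           // or block the host until done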
|
|
|
|
|
|
|
const char * ggml_backend_dev_name(ggml_backend_dev_t device) { |
|
return device->iface.get_name(device); |
|
} |
|
|
|
const char * ggml_backend_dev_description(ggml_backend_dev_t device) { |
|
return device->iface.get_description(device); |
|
} |
|
|
|
void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) { |
|
device->iface.get_memory(device, free, total); |
|
} |
|
|
|
enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) { |
|
return device->iface.get_type(device); |
|
} |
|
|
|
void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) { |
|
memset(props, 0, sizeof(*props)); |
|
device->iface.get_props(device, props); |
|
} |
|
|
|
ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) { |
|
return device->reg; |
|
} |
|
|
|
ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) { |
|
return device->iface.init_backend(device, params); |
|
} |
|
|
|
ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) { |
|
return device->iface.get_buffer_type(device); |
|
} |
|
|
|
ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) { |
|
if (device->iface.get_host_buffer_type == NULL) { |
|
return NULL; |
|
} |
|
|
|
return device->iface.get_host_buffer_type(device); |
|
} |
|
|
|
ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) { |
|
return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size); |
|
} |
|
|
|
bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
|
return device->iface.supports_op(device, op); |
|
} |
|
|
|
bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) { |
|
return device->iface.supports_buft(device, buft); |
|
} |
|
|
|
bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
|
if (device->iface.offload_op != NULL) { |
|
return device->iface.offload_op(device, op); |
|
} |
|
|
|
return false; |
|
} |
|
|
|
|
|
|
|
const char * ggml_backend_reg_name(ggml_backend_reg_t reg) { |
|
return reg->iface.get_name(reg); |
|
} |
|
|
|
size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) { |
|
return reg->iface.get_device_count(reg); |
|
} |
|
|
|
ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) { |
|
return reg->iface.get_device(reg, index); |
|
} |
|
|
|
void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { |
|
if (!reg->iface.get_proc_address) { |
|
return NULL; |
|
} |
|
return reg->iface.get_proc_address(reg, name); |
|
} |
|
// multi-buffer buffer
// a dummy buffer that wraps several buffers so they can be handled as one allocation

struct ggml_backend_multi_buffer_context {
    ggml_backend_buffer_t * buffers;
    size_t n_buffers;
};

static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_free(ctx->buffers[i]);
    }

    free(ctx->buffers);
    free(ctx);
}

static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_clear(ctx->buffers[i], value);
    }
}

// only free_buffer and clear are implemented; the remaining callbacks stay NULL
static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
    /* .free_buffer   = */ ggml_backend_multi_buffer_free_buffer,
    /* .get_base      = */ NULL,
    /* .init_tensor   = */ NULL,
    /* .memset_tensor = */ NULL,
    /* .set_tensor    = */ NULL,
    /* .get_tensor    = */ NULL,
    /* .cpy_tensor    = */ NULL,
    /* .clear         = */ ggml_backend_multi_buffer_clear,
    /* .reset         = */ NULL,
};

ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
    ctx->n_buffers = n_buffers;
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));

    GGML_ASSERT(ctx->buffers != NULL);

    size_t total_size = 0;
    for (size_t i = 0; i < n_buffers; i++) {
        ctx->buffers[i] = buffers[i];
        total_size += ggml_backend_buffer_get_size(buffers[i]);
    }

    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
}

bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
    return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer;
}

void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
    }
}
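// Note (context from ggml-alloc, stated here as an assumption rather than from this
// file): multi-buffers are typically produced when a context's tensors do not fit in a
// single allocation of the chosen buffer type, e.g. ggml_backend_alloc_ctx_tensors_from_buft()
// splitting by get_max_size; the wrapper lets callers free, clear and tag usage on the
// group through a single ggml_backend_buffer_t handle.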

// creates a copy of the tensor with the same memory layout
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        dup->nb[i] = tensor->nb[i];
    }
    return dup;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

// backend scheduler

#ifndef GGML_SCHED_MAX_BACKENDS
#define GGML_SCHED_MAX_BACKENDS 16
#endif

#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
#endif

#ifndef GGML_SCHED_MAX_COPIES
#define GGML_SCHED_MAX_COPIES 4
#endif

struct ggml_backend_sched_split {
    int backend_id;
    int i_start;
    int i_end;
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_inputs;
    // graph view of this split
    struct ggml_cgraph graph;
};

struct ggml_backend_sched {
    bool is_reset; // true if the scheduler has been reset since the last graph split
    bool is_alloc;

    int n_backends;

    ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
    ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
    ggml_gallocr_t galloc;

    // hash map of the nodes in the graph
    struct ggml_hash_set  hash_set;
    int                 * hv_tensor_backend_ids; // [hash_set.size]
    struct ggml_tensor ** hv_tensor_copies;      // [hash_set.size][n_backends][n_copies]

    int * node_backend_ids; // [graph_size]
    int * leaf_backend_ids; // [graph_size]

    int * prev_node_backend_ids; // [graph_size]
    int * prev_leaf_backend_ids; // [graph_size]

    // copy of the graph with modified inputs
    struct ggml_cgraph graph;

    // graph splits
    struct ggml_backend_sched_split * splits;
    int n_splits;
    int splits_capacity;

    // pipeline parallelism support
    int n_copies;
    int cur_copy;
    ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
    struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_graph_inputs;

    struct ggml_context * ctx;

    ggml_backend_sched_eval_callback callback_eval;
    void * callback_eval_user_data;

    char * context_buffer;
    size_t context_buffer_size;

    int debug;
};

#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
#define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
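// How the hash-indexed tables above are laid out: tensor_backend_id(t) is the backend
// currently assigned to tensor t (-1 if unassigned), and hv_tensor_copies is a flat 3D
// array indexed as [hash_id][backend_id][copy_id] holding the per-backend (and, with
// pipeline parallelism, per-copy) duplicates created for split inputs. For example,
// with n_backends = 4 and n_copies = 2, tensor_id_copy(id, b, c) reads slot
// id*8 + b*2 + c of that array.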
|
|
|
|
|
static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) { |
|
for (int i = 0; i < sched->n_backends; i++) { |
|
if (sched->backends[i] == backend) { |
|
return i; |
|
} |
|
} |
|
return -1; |
|
} |
|
|
|
static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) { |
|
ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
|
if (buffer == NULL) { |
|
return -1; |
|
} |
|
|
|
|
|
for (int i = 0; i < sched->n_backends; i++) { |
|
if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) && |
|
ggml_backend_supports_op(sched->backends[i], op)) { |
|
return i; |
|
} |
|
} |
|
|
|
#ifndef NDEBUG |
|
GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n", |
|
__func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name); |
|
#endif |
|
|
|
return -1; |
|
} |
|
|
|
#if 0 |
|
#define GGML_SCHED_MAX_SPLITS_DEBUG 4096 |
|
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; |
|
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__) |
|
#define GET_CAUSE(node) causes[hash_id(node)] |
|
#else |
|
#define SET_CAUSE(node, ...) |
|
#define GET_CAUSE(node) "" |
|
#endif |
|
// returns the backend that should be used for the node based on the current locations
static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
    // assign pre-allocated nodes to their backend
    int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
    if (cur_backend_id != -1) {
        SET_CAUSE(tensor, "1.dst");
        return cur_backend_id;
    }

    // view_src
    if (tensor->view_src != NULL) {
        cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
        if (cur_backend_id != -1) {
            SET_CAUSE(tensor, "1.vsrc");
            return cur_backend_id;
        }
    }

    if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
        // since the tensor is pre-allocated, it cannot be moved to another backend
        ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
        GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op));
    }

    // graph input
    if (tensor->flags & GGML_TENSOR_FLAG_INPUT) {
        cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
        SET_CAUSE(tensor, "1.inp");
        return cur_backend_id;
    }

    // operations with weights are preferably run on the same backend as the weights
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        const struct ggml_tensor * src = tensor->src[i];
        if (src == NULL) {
            continue;
        }
        // skip ROPE, since the rope freqs tensor is too small to choose a backend based on it
        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
            int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
            // check if a backend with higher priority wants to offload the op
            if (src_backend_id == sched->n_backends - 1) {
                for (int b = 0; b < src_backend_id; b++) {
                    if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                        SET_CAUSE(tensor, "1.off");
                        return b;
                    }
                }
            }
            SET_CAUSE(tensor, "1.wgt%d", i);
            return src_backend_id;
        }
    }

    return -1;
}
|
|
|
static char * fmt_size(size_t size) { |
|
static char buffer[128]; |
|
if (size >= 1024*1024) { |
|
snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024); |
|
} else { |
|
snprintf(buffer, sizeof(buffer), "%zuK", size/1024); |
|
} |
|
return buffer; |
|
} |
|
|
|
static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
|
int cur_split = 0; |
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) { |
|
ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id]; |
|
GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend), |
|
sched->splits[cur_split].n_inputs); |
|
for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) { |
|
GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, |
|
fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j]))); |
|
} |
|
GGML_LOG_DEBUG("\n"); |
|
cur_split++; |
|
} |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
if (sched->debug > 1) { |
|
ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node); |
|
GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name, |
|
fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node)); |
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src); |
|
GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name, |
|
fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src)); |
|
} |
|
GGML_LOG_DEBUG("\n"); |
|
} |
|
} |
|
} |
|
|
|
static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) { |
|
ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer; |
|
ggml_backend_buffer_type_t buft = NULL; |
|
|
|
if (buf) { |
|
|
|
buft = buf->buft; |
|
} else { |
|
|
|
int tensor_backend_id = tensor_backend_id(t); |
|
if (tensor_backend_id == -1 && t->view_src) { |
|
tensor_backend_id = tensor_backend_id(t->view_src); |
|
} |
|
if (tensor_backend_id != -1) { |
|
buft = sched->bufts[tensor_backend_id]; |
|
} |
|
} |
|
|
|
return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft); |
|
} |
|
|
|
static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) { |
|
if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) { |
|
*node_backend_id = cur_backend_id; |
|
SET_CAUSE(node, "2.sup"); |
|
} |
|
} |
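// Reading aid for ggml_backend_sched_split_graph below (an approximate summary, not
// authoritative documentation of the pass numbering): it assigns a backend to every node
// and slices the graph into contiguous splits that each run on a single backend. Roughly:
//   1. seed assignments from pre-allocated buffers, graph inputs and weights
//      (ggml_backend_sched_backend_id_from_cur);
//   2. expand those assignments forward and backward to unassigned neighbours, first
//      skipping the CPU backend so accelerator regions grow before falling back to it;
//   3. where possible, move nodes to a higher-priority backend with a compatible buffer
//      type, otherwise pick the backend that already holds most of a node's sources;
//   4. propagate assignments to remaining sources and view sources;
//   5. walk the nodes in order, cutting a new split whenever the backend changes (or an
//      input cannot be used in place by the current backend) and recording the split
//      inputs that must be copied between backends.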
|
|
|
|
|
static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
|
|
|
sched->n_splits = 0; |
|
sched->n_graph_inputs = 0; |
|
sched->is_reset = false; |
|
|
|
struct ggml_init_params params = { |
|
sched->context_buffer_size, |
|
sched->context_buffer, |
|
true |
|
}; |
|
|
|
ggml_free(sched->ctx); |
|
|
|
sched->ctx = ggml_init(params); |
|
if (sched->ctx == NULL) { |
|
GGML_ABORT("%s: failed to initialize context\n", __func__); |
|
} |
|
|
|
|
|
for (int i = 0; i < graph->n_leafs; i++) { |
|
struct ggml_tensor * leaf = graph->leafs[i]; |
|
int * leaf_backend_id = &tensor_backend_id(leaf); |
|
|
|
if (*leaf_backend_id == -1) { |
|
*leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf); |
|
} |
|
} |
|
|
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
int * node_backend_id = &tensor_backend_id(node); |
|
|
|
if (*node_backend_id == -1) { |
|
*node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node); |
|
|
|
#if 0 |
|
|
|
if (node->op == GGML_OP_NONE) { |
|
continue; |
|
} |
|
|
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
int * src_backend_id = &tensor_backend_id(src); |
|
if (*src_backend_id == -1) { |
|
*src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src); |
|
} |
|
} |
|
#endif |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{ |
|
int cur_backend_id = -1; |
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
int * node_backend_id = &tensor_backend_id(node); |
|
if (*node_backend_id != -1) { |
|
if (*node_backend_id == sched->n_backends - 1) { |
|
|
|
cur_backend_id = -1; |
|
} else { |
|
cur_backend_id = *node_backend_id; |
|
} |
|
} else if (cur_backend_id != -1) { |
|
ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
|
} |
|
} |
|
} |
|
|
|
{ |
|
int cur_backend_id = -1; |
|
for (int i = graph->n_nodes - 1; i >= 0; i--) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
int * node_backend_id = &tensor_backend_id(node); |
|
if (*node_backend_id != -1) { |
|
if (*node_backend_id == sched->n_backends - 1) { |
|
|
|
cur_backend_id = -1; |
|
} else { |
|
cur_backend_id = *node_backend_id; |
|
} |
|
} else if (cur_backend_id != -1) { |
|
ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
|
} |
|
} |
|
} |
|
|
|
{ |
|
int cur_backend_id = -1; |
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
int * node_backend_id = &tensor_backend_id(node); |
|
if (*node_backend_id != -1) { |
|
cur_backend_id = *node_backend_id; |
|
} else if (cur_backend_id != -1) { |
|
ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
|
} |
|
} |
|
} |
|
|
|
{ |
|
int cur_backend_id = -1; |
|
for (int i = graph->n_nodes - 1; i >= 0; i--) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
int * node_backend_id = &tensor_backend_id(node); |
|
if (*node_backend_id != -1) { |
|
cur_backend_id = *node_backend_id; |
|
} else if (cur_backend_id != -1) { |
|
ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
|
} |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
int * node_backend_id = &tensor_backend_id(node); |
|
if (*node_backend_id == -1) { |
|
|
|
int n_supported_best = -1; |
|
for (int b = 0; b < sched->n_backends; b++) { |
|
if (ggml_backend_supports_op(sched->backends[b], node)) { |
|
int n_supported = 0; |
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) { |
|
n_supported++; |
|
} |
|
} |
|
if (n_supported > n_supported_best) { |
|
n_supported_best = n_supported; |
|
*node_backend_id = b; |
|
SET_CAUSE(node, "3.best"); |
|
} |
|
} |
|
} |
|
} else { |
|
|
|
for (int b = 0; b < *node_backend_id; b++) { |
|
if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) { |
|
bool supported = true; |
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
if (!ggml_backend_sched_buffer_supported(sched, src, b)) { |
|
supported = false; |
|
break; |
|
} |
|
} |
|
if (supported) { |
|
*node_backend_id = b; |
|
SET_CAUSE(node, "3.upg"); |
|
break; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
|
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
int * cur_backend_id = &tensor_backend_id(node); |
|
if (node->view_src != NULL && *cur_backend_id == -1) { |
|
*cur_backend_id = tensor_backend_id(node->view_src); |
|
SET_CAUSE(node, "4.vsrc"); |
|
} |
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
int * src_backend_id = &tensor_backend_id(src); |
|
if (*src_backend_id == -1) { |
|
if (src->view_src != NULL) { |
|
|
|
*src_backend_id = tensor_backend_id(src->view_src); |
|
SET_CAUSE(src, "4.vsrc"); |
|
} else { |
|
*src_backend_id = *cur_backend_id; |
|
SET_CAUSE(src, "4.cur"); |
|
} |
|
} |
|
} |
|
} |
|
|
|
|
|
{ |
|
int i_split = 0; |
|
struct ggml_backend_sched_split * split = &sched->splits[0]; |
|
|
|
int i = 0; |
|
for (; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
if (!ggml_is_view_op(node->op)) { |
|
split->backend_id = tensor_backend_id(node); |
|
break; |
|
} |
|
} |
|
split->i_start = 0; |
|
split->n_inputs = 0; |
|
int cur_backend_id = split->backend_id; |
|
for (; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
|
|
if (ggml_is_view_op(node->op)) { |
|
continue; |
|
} |
|
|
|
const int node_backend_id = tensor_backend_id(node); |
|
|
|
assert(node_backend_id != -1); |
|
|
|
|
|
bool need_new_split = false; |
|
if (node_backend_id == cur_backend_id && split->n_inputs > 0) { |
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
|
|
|
|
if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { |
|
int src_backend_id = tensor_backend_id(src); |
|
if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
|
need_new_split = true; |
|
break; |
|
} |
|
} |
|
|
|
|
|
if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) { |
|
const size_t id = hash_id(src); |
|
int src_backend_id = sched->hv_tensor_backend_ids[id]; |
|
bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id); |
|
if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) { |
|
need_new_split = true; |
|
break; |
|
} |
|
} |
|
} |
|
} |
|
|
|
if (node_backend_id != cur_backend_id || need_new_split) { |
|
split->i_end = i; |
|
i_split++; |
|
if (i_split >= sched->splits_capacity) { |
|
sched->splits_capacity *= 2; |
|
sched->splits = (ggml_backend_sched_split *) |
|
realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split)); |
|
GGML_ASSERT(sched->splits != NULL); |
|
} |
|
split = &sched->splits[i_split]; |
|
split->backend_id = node_backend_id; |
|
split->i_start = i; |
|
split->n_inputs = 0; |
|
cur_backend_id = node_backend_id; |
|
} |
|
|
|
|
|
for (int j = 0; j < GGML_MAX_SRC; j++) { |
|
struct ggml_tensor * src = node->src[j]; |
|
if (src == NULL) { |
|
continue; |
|
} |
|
|
|
size_t src_id = hash_id(src); |
|
const int src_backend_id = sched->hv_tensor_backend_ids[src_id]; |
|
assert(src_backend_id != -1); |
|
|
|
if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) { |
|
if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) { |
|
ggml_backend_t backend = sched->backends[src_backend_id]; |
|
for (int c = 0; c < sched->n_copies; c++) { |
|
struct ggml_tensor * tensor_copy; |
|
if (c == sched->cur_copy) { |
|
tensor_copy = src; |
|
} else { |
|
tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
|
ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
|
} |
|
if (sched->n_copies > 1) { |
|
ggml_set_input(tensor_copy); |
|
ggml_set_output(tensor_copy); |
|
} |
|
tensor_id_copy(src_id, src_backend_id, c) = tensor_copy; |
|
SET_CAUSE(tensor_copy, "4.cpy"); |
|
} |
|
int n_graph_inputs = sched->n_graph_inputs++; |
|
GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
|
sched->graph_inputs[n_graph_inputs] = src; |
|
} |
|
} |
|
|
|
if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
|
|
|
if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) { |
|
ggml_backend_t backend = sched->backends[cur_backend_id]; |
|
for (int c = 0; c < sched->n_copies; c++) { |
|
struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
|
ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
|
if (sched->n_copies > 1) { |
|
ggml_set_input(tensor_copy); |
|
ggml_set_output(tensor_copy); |
|
} |
|
tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy; |
|
SET_CAUSE(tensor_copy, "4.cpy"); |
|
} |
|
int n_inputs = split->n_inputs++; |
|
GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
|
split->inputs[n_inputs] = src; |
|
} |
|
node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy); |
|
} |
|
} |
|
} |
|
split->i_end = graph->n_nodes; |
|
sched->n_splits = i_split + 1; |
|
} |
|
|
|
if (sched->debug) { |
|
ggml_backend_sched_print_assignments(sched, graph); |
|
} |
|
|
|
|
|
{ |
|
int * tmp = sched->node_backend_ids; |
|
sched->node_backend_ids = sched->prev_node_backend_ids; |
|
sched->prev_node_backend_ids = tmp; |
|
|
|
tmp = sched->leaf_backend_ids; |
|
sched->leaf_backend_ids = sched->prev_leaf_backend_ids; |
|
sched->prev_leaf_backend_ids = tmp; |
|
} |
|
|
|
int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies; |
|
if (sched->graph.size < graph_size) { |
|
sched->graph.size = graph_size; |
|
sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *)); |
|
sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *)); |
|
GGML_ASSERT(sched->graph.nodes != NULL); |
|
GGML_ASSERT(sched->graph.leafs != NULL); |
|
} |
|
sched->graph.n_nodes = 0; |
|
sched->graph.n_leafs = 0; |
|
|
|
struct ggml_cgraph * graph_copy = &sched->graph; |
|
|
|
for (int i = 0; i < sched->n_splits; i++) { |
|
struct ggml_backend_sched_split * split = &sched->splits[i]; |
|
split->graph = ggml_graph_view(graph, split->i_start, split->i_end); |
|
|
|
|
|
for (int j = 0; j < split->n_inputs; j++) { |
|
assert(graph_copy->size > (graph_copy->n_nodes + 1)); |
|
|
|
struct ggml_tensor * input = split->inputs[j]; |
|
const size_t input_id = hash_id(input); |
|
struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy); |
|
|
|
|
|
struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input); |
|
input_dep->src[0] = input; |
|
sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id]; |
|
graph_copy->nodes[graph_copy->n_nodes++] = input_dep; |
|
|
|
|
|
sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id; |
|
graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; |
|
} |
|
|
|
for (int j = split->i_start; j < split->i_end; j++) { |
|
assert(graph_copy->size > graph_copy->n_nodes); |
|
sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]); |
|
graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j]; |
|
} |
|
} |
|
|
|
if (sched->n_copies > 1) { |
|
|
|
for (int i = 0; i < sched->n_graph_inputs; i++) { |
|
struct ggml_tensor * input = sched->graph_inputs[i]; |
|
size_t id = hash_id(input); |
|
int backend_id = tensor_backend_id(input); |
|
for (int c = 0; c < sched->n_copies; c++) { |
|
struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
|
sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
|
assert(graph_copy->size > graph_copy->n_leafs); |
|
graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
|
} |
|
} |
|
|
|
for (int i = 0; i < sched->n_splits; i++) { |
|
struct ggml_backend_sched_split * split = &sched->splits[i]; |
|
int backend_id = split->backend_id; |
|
for (int j = 0; j < split->n_inputs; j++) { |
|
struct ggml_tensor * input = split->inputs[j]; |
|
size_t id = hash_id(input); |
|
for (int c = 0; c < sched->n_copies; c++) { |
|
struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
|
sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
|
assert(graph_copy->size > graph_copy->n_leafs); |
|
graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
|
} |
|
} |
|
} |
|
} |
|
|
|
|
|
for (int i = 0; i < graph->n_leafs; i++) { |
|
struct ggml_tensor * leaf = graph->leafs[i]; |
|
sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf); |
|
assert(graph_copy->size > graph_copy->n_leafs); |
|
graph_copy->leafs[graph_copy->n_leafs++] = leaf; |
|
} |
|
} |
|
|
|
static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { |
|
bool backend_ids_changed = false; |
|
for (int i = 0; i < sched->graph.n_nodes; i++) { |
|
if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] && |
|
sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) { |
|
backend_ids_changed = true; |
|
break; |
|
} |
|
} |
|
if (!backend_ids_changed) { |
|
for (int i = 0; i < sched->graph.n_leafs; i++) { |
|
if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] && |
|
sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) { |
|
backend_ids_changed = true; |
|
break; |
|
} |
|
} |
|
} |
|
|
|
|
|
if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
|
|
|
ggml_backend_sched_synchronize(sched); |
|
#ifndef NDEBUG |
|
GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); |
|
#endif |
|
ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids); |
|
if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
|
GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__); |
|
return false; |
|
} |
|
} |
|
|
|
return true; |
|
} |
|
|
|
static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) { |
|
struct ggml_backend_sched_split * splits = sched->splits; |
|
|
|
for (int i = 0; i < sched->n_splits; i++) { |
|
struct ggml_backend_sched_split * split = &splits[i]; |
|
int split_backend_id = split->backend_id; |
|
ggml_backend_t split_backend = sched->backends[split_backend_id]; |
|
|
|
|
|
for (int j = 0; j < split->n_inputs; j++) { |
|
ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[j]); |
|
struct ggml_tensor * input = split->inputs[j]; |
|
struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy); |
|
|
|
if (input->flags & GGML_TENSOR_FLAG_INPUT) { |
|
|
|
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
|
ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
|
} else { |
|
ggml_backend_synchronize(split_backend); |
|
} |
|
ggml_backend_tensor_copy(input, input_cpy); |
|
} else { |
|
|
|
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
|
ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); |
|
} else { |
|
ggml_backend_synchronize(split_backend); |
|
} |
|
|
|
|
|
if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) { |
|
ggml_backend_synchronize(input_backend); |
|
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
|
ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
|
} else { |
|
ggml_backend_synchronize(split_backend); |
|
} |
|
ggml_backend_tensor_copy(input, input_cpy); |
|
} |
|
} |
|
} |
|
|
|
if (!sched->callback_eval) { |
|
enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph); |
|
if (ec != GGML_STATUS_SUCCESS) { |
|
return ec; |
|
} |
|
} else { |
|
|
|
for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { |
|
struct ggml_tensor * t = split->graph.nodes[j0]; |
|
|
|
|
|
bool need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
|
|
|
int j1 = j0; |
|
|
|
|
|
while (!need && j1 < split->graph.n_nodes - 1) { |
|
t = split->graph.nodes[++j1]; |
|
need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
|
} |
|
|
|
struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); |
|
|
|
enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv); |
|
if (ec != GGML_STATUS_SUCCESS) { |
|
return ec; |
|
} |
|
|
|
|
|
ggml_backend_synchronize(split_backend); |
|
|
|
if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) { |
|
break; |
|
} |
|
|
|
j0 = j1; |
|
} |
|
} |
|
|
|
|
|
if (split->n_inputs > 0) { |
|
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
|
ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend); |
|
} |
|
} |
|
} |
|
|
|
sched->cur_copy = (sched->cur_copy + 1) % sched->n_copies; |
|
|
|
return GGML_STATUS_SUCCESS; |
|
} |
|
|
|
ggml_backend_sched_t ggml_backend_sched_new(
        ggml_backend_t * backends,
        ggml_backend_buffer_type_t * bufts,
        int n_backends,
        size_t graph_size,
        bool parallel) {
    GGML_ASSERT(n_backends > 0);
    GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
    GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);

    struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched));

    const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG");
    sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0;
    sched->n_backends = n_backends;
    sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1;

    // initialize hash table
    sched->hash_set              = ggml_hash_set_new(graph_size);
    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
    sched->hv_tensor_copies      = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));

    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));

    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
    sched->context_buffer = (char *) malloc(sched->context_buffer_size);

    const int initial_splits_capacity = 16;
    sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
    sched->splits_capacity = initial_splits_capacity;

    for (int b = 0; b < n_backends; b++) {
        sched->backends[b] = backends[b];
        sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
        GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));

        if (sched->n_copies > 1) {
            for (int c = 0; c < sched->n_copies; c++) {
                sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
            }
        }
    }

    sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);

    ggml_backend_sched_reset(sched);

    return sched;
}
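// Typical scheduler usage (a sketch only; assumes two already-initialized backends with
// the CPU backend last, and a caller-defined build_graph() that constructs the cgraph):
//
//   ggml_backend_t backends[2] = { backend_gpu, backend_cpu };
//   ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 2, GGML_DEFAULT_GRAPH_SIZE, false);
//
//   ggml_backend_sched_reserve(sched, build_graph(/* worst case */));  // optional: pre-allocate
//   for (...) {
//       struct ggml_cgraph * gf = build_graph(...);
//       ggml_backend_sched_graph_compute(sched, gf);                   // splits, allocates, runs
//       ggml_backend_sched_reset(sched);                               // before building the next graph
//   }
//   ggml_backend_sched_free(sched);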
|
|
|
void ggml_backend_sched_free(ggml_backend_sched_t sched) { |
|
if (sched == NULL) { |
|
return; |
|
} |
|
for (int b = 0; b < sched->n_backends; b++) { |
|
for (int c = 0; c < sched->n_copies; c++) { |
|
ggml_backend_event_free(sched->events[b][c]); |
|
} |
|
} |
|
ggml_gallocr_free(sched->galloc); |
|
ggml_free(sched->ctx); |
|
ggml_hash_set_free(&sched->hash_set); |
|
free(sched->splits); |
|
free(sched->hv_tensor_backend_ids); |
|
free(sched->hv_tensor_copies); |
|
free(sched->node_backend_ids); |
|
free(sched->leaf_backend_ids); |
|
free(sched->prev_node_backend_ids); |
|
free(sched->prev_leaf_backend_ids); |
|
free(sched->context_buffer); |
|
free(sched->graph.nodes); |
|
free(sched->graph.leafs); |
|
free(sched); |
|
} |
|
|
|
void ggml_backend_sched_reset(ggml_backend_sched_t sched) { |
|
|
|
if (!sched->is_reset) { |
|
ggml_hash_set_reset(&sched->hash_set); |
|
memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); |
|
memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); |
|
sched->is_reset = true; |
|
} |
|
sched->is_alloc = false; |
|
} |
|
|
|
bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { |
|
GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); |
|
|
|
ggml_backend_sched_split_graph(sched, measure_graph); |
|
|
|
ggml_backend_sched_synchronize(sched); |
|
|
|
if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) { |
|
return false; |
|
} |
|
|
|
ggml_backend_sched_reset(sched); |
|
|
|
return true; |
|
} |
|
|
|
bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
|
GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs); |
|
|
|
ggml_backend_sched_split_graph(sched, graph); |
|
|
|
|
|
if (!ggml_backend_sched_alloc_splits(sched)) { |
|
return false; |
|
} |
|
|
|
sched->is_alloc = true; |
|
|
|
return true; |
|
} |
|
|
|
enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
|
enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph); |
|
ggml_backend_sched_synchronize(sched); |
|
return err; |
|
} |
|
|
|
enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
|
if (!sched->is_reset && !sched->is_alloc) { |
|
ggml_backend_sched_reset(sched); |
|
} |
|
|
|
if (!sched->is_alloc) { |
|
if (!ggml_backend_sched_alloc_graph(sched, graph)) { |
|
return GGML_STATUS_ALLOC_FAILED; |
|
} |
|
} |
|
|
|
return ggml_backend_sched_compute_splits(sched); |
|
} |
|
|
|
void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { |
|
for (int i = 0; i < sched->n_backends; i++) { |
|
ggml_backend_synchronize(sched->backends[i]); |
|
} |
|
} |
|
|
|
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { |
|
sched->callback_eval = callback; |
|
sched->callback_eval_user_data = user_data; |
|
} |
|
|
|
int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { |
|
return sched->n_splits; |
|
} |
|
|
|
int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) { |
|
return sched->n_copies; |
|
} |
|
|
|
int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) { |
|
return sched->n_backends; |
|
} |
|
|
|
ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) { |
|
GGML_ASSERT(i >= 0 && i < sched->n_backends); |
|
return sched->backends[i]; |
|
} |
|
|
|
size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) { |
|
int backend_index = ggml_backend_sched_backend_id(sched, backend); |
|
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
|
|
|
return ggml_gallocr_get_buffer_size(sched->galloc, backend_index); |
|
} |
|
|
|
void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { |
|
int backend_index = ggml_backend_sched_backend_id(sched, backend); |
|
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
|
tensor_backend_id(node) = backend_index; |
|
SET_CAUSE(node, "usr"); |
|
sched->is_reset = false; |
|
} |
|
|
|
ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) { |
|
int backend_index = tensor_backend_id(node); |
|
if (backend_index == -1) { |
|
return NULL; |
|
} |
|
return sched->backends[backend_index]; |
|
} |
|
|
|
|
|
|
|
void ggml_backend_view_init(struct ggml_tensor * tensor) { |
|
GGML_ASSERT(tensor->buffer == NULL); |
|
GGML_ASSERT(tensor->view_src != NULL); |
|
GGML_ASSERT(tensor->view_src->buffer != NULL); |
|
GGML_ASSERT(tensor->view_src->data != NULL); |
|
|
|
tensor->buffer = tensor->view_src->buffer; |
|
tensor->data = (char *)tensor->view_src->data + tensor->view_offs; |
|
ggml_backend_buffer_init_tensor(tensor->buffer, tensor); |
|
} |
|
|
|
void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) { |
|
GGML_ASSERT(tensor->buffer == NULL); |
|
GGML_ASSERT(tensor->data == NULL); |
|
GGML_ASSERT(tensor->view_src == NULL); |
|
GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer)); |
|
GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <= |
|
(char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer)); |
|
|
|
tensor->buffer = buffer; |
|
tensor->data = addr; |
|
ggml_backend_buffer_init_tensor(buffer, tensor); |
|
} |
|
|
|
static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, |
|
struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) { |
|
|
|
GGML_ASSERT(src != NULL); |
|
GGML_ASSERT(src->data && "graph must be allocated"); |
|
|
|
size_t id = ggml_hash_insert(&hash_set, src); |
|
if (id == GGML_HASHSET_ALREADY_EXISTS) { |
|
return node_copies[ggml_hash_find(&hash_set, src)]; |
|
} |
|
|
|
struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src); |
|
if (src->view_src != NULL) { |
|
dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src); |
|
dst->view_offs = src->view_offs; |
|
} |
|
dst->op = src->op; |
|
memcpy(dst->op_params, src->op_params, sizeof(dst->op_params)); |
|
ggml_set_name(dst, src->name); |
|
|
|
|
|
for (int i = 0; i < GGML_MAX_SRC; i++) { |
|
struct ggml_tensor * s = src->src[i]; |
|
if (s == NULL) { |
|
continue; |
|
} |
|
dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s); |
|
} |
|
|
|
node_copies[id] = dst; |
|
return dst; |
|
} |
|
|
|
static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) { |
|
size_t id = ggml_hash_find(hash_set, src); |
|
if (node_init[id]) { |
|
return; |
|
} |
|
node_init[id] = true; |
|
|
|
struct ggml_tensor * dst = node_copies[id]; |
|
if (dst->view_src != NULL) { |
|
graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src); |
|
ggml_backend_view_init(dst); |
|
} |
|
else { |
|
ggml_backend_tensor_copy(src, dst); |
|
} |
|
|
|
|
|
for (int i = 0; i < GGML_MAX_SRC; i++) { |
|
struct ggml_tensor * s = src->src[i]; |
|
if (s == NULL) { |
|
continue; |
|
} |
|
graph_copy_init_tensor(hash_set, node_copies, node_init, s); |
|
} |
|
} |
|
|
|
struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) { |
|
struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size); |
|
struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); |
|
bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0])); |
|
|
|
struct ggml_init_params params = { |
|
ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false), |
|
NULL, |
|
true |
|
}; |
|
|
|
struct ggml_context * ctx_allocated = ggml_init(params); |
|
struct ggml_context * ctx_unallocated = ggml_init(params); |
|
|
|
if (ctx_allocated == NULL || ctx_unallocated == NULL) { |
|
GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__); |
|
ggml_hash_set_free(&hash_set); |
|
free(node_copies); |
|
free(node_init); |
|
ggml_free(ctx_allocated); |
|
ggml_free(ctx_unallocated); |
|
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
|
} |
|
|
|
|
|
    // dup all graph nodes into the two contexts
    for (int i = 0; i < graph->n_nodes; i++) {
|
struct ggml_tensor * node = graph->nodes[i]; |
|
graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node); |
|
} |
|
|
|
|
|
    // allocate the duplicated (non-view) tensors on the target backend
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
|
if (buffer == NULL) { |
|
GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__); |
|
ggml_hash_set_free(&hash_set); |
|
free(node_copies); |
|
free(node_init); |
|
ggml_free(ctx_allocated); |
|
ggml_free(ctx_unallocated); |
|
        return {
            /* .buffer          = */ NULL,
            /* .ctx_allocated   = */ NULL,
            /* .ctx_unallocated = */ NULL,
            /* .graph           = */ NULL,
        };
|
} |
|
|
|
|
|
|
|
|
|
    // copy the tensor data and initialize the views
    for (int i = 0; i < graph->n_nodes; i++) {
|
struct ggml_tensor * node = graph->nodes[i]; |
|
graph_copy_init_tensor(&hash_set, node_copies, node_init, node); |
|
} |
|
|
|
|
|
    // build the copied graph from the duplicated nodes
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
|
for (int i = 0; i < graph->n_nodes; i++) { |
|
struct ggml_tensor * node = graph->nodes[i]; |
|
struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)]; |
|
graph_copy->nodes[i] = node_copy; |
|
} |
|
graph_copy->n_nodes = graph->n_nodes; |
|
|
|
ggml_hash_set_free(&hash_set); |
|
free(node_copies); |
|
free(node_init); |
|
|
|
    return {
        /* .buffer          = */ buffer,
        /* .ctx_allocated   = */ ctx_allocated,
        /* .ctx_unallocated = */ ctx_unallocated,
        /* .graph           = */ graph_copy,
    };
|
} |
|
|
|
void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { |
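    // release the buffer and the two contexts created by ggml_backend_graph_copy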
|
ggml_backend_buffer_free(copy.buffer); |
|
ggml_free(copy.ctx_allocated); |
|
ggml_free(copy.ctx_unallocated); |
|
} |
|
|
|
bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) { |
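    // run `graph` node-by-node on backend1 and a copy of it on backend2, invoking `callback`
    // after each non-view node so the caller can compare the two results; returns false only
    // if the graph copy could not be allocated on backend2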
|
struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); |
|
if (copy.buffer == NULL) { |
|
return false; |
|
} |
|
|
|
struct ggml_cgraph * g1 = graph; |
|
struct ggml_cgraph * g2 = copy.graph; |
|
|
|
assert(g1->n_nodes == g2->n_nodes); |
|
|
|
for (int i = 0; i < g1->n_nodes; i++) { |
|
|
|
struct ggml_tensor * t1 = g1->nodes[i]; |
|
struct ggml_tensor * t2 = g2->nodes[i]; |
|
|
|
assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); |
|
|
|
struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); |
|
struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); |
|
|
|
ggml_backend_graph_compute(backend1, &g1v); |
|
ggml_backend_graph_compute(backend2, &g2v); |
|
|
|
if (ggml_is_view_op(t1->op)) { |
|
continue; |
|
} |
|
|
|
|
|
if (!callback(i, t1, t2, user_data)) { |
|
break; |
|
} |
|
} |
|
|
|
ggml_backend_graph_copy_free(copy); |
|
|
|
return true; |
|
} |
|
|
|
|
|
|
|
// CPU backend - buffer
static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
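    // return the usable base address of the buffer, padded up to TENSOR_ALIGNMENT in case
    // the underlying allocation is not already aligned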
|
uintptr_t data = (uintptr_t)buffer->context; |
|
|
|
|
|
if (data % TENSOR_ALIGNMENT != 0) { |
|
data = GGML_PAD(data, TENSOR_ALIGNMENT); |
|
} |
|
|
|
return (void *)data; |
|
} |
|
|
|
static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { |
|
ggml_aligned_free(buffer->context, buffer->size); |
|
} |
|
|
|
static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
|
memset((char *)tensor->data + offset, value, size); |
|
|
|
GGML_UNUSED(buffer); |
|
} |
|
|
|
static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
|
memcpy((char *)tensor->data + offset, data, size); |
|
|
|
GGML_UNUSED(buffer); |
|
} |
|
|
|
static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
|
memcpy(data, (const char *)tensor->data + offset, size); |
|
|
|
GGML_UNUSED(buffer); |
|
} |
|
|
|
static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { |
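    // a copy into a CPU buffer is a plain memcpy as long as the source buffer is also
    // host-accessible; otherwise report the copy as unsupported so the caller can fall back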
|
if (ggml_backend_buffer_is_host(src->buffer)) { |
|
memcpy(dst->data, src->data, ggml_nbytes(src)); |
|
return true; |
|
} |
|
return false; |
|
|
|
GGML_UNUSED(buffer); |
|
} |
|
|
|
static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
|
memset(buffer->context, value, buffer->size); |
|
} |
|
|
|
static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
    /* .free_buffer   = */ ggml_backend_cpu_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};
|
|
|
static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
    /* .free_buffer   = */ NULL, // the memory is not owned by the buffer, so it is not freed here
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};
|
|
|
|
|
|
|
|
|
|
|
// CPU backend - buffer type
static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
|
return "CPU"; |
|
|
|
GGML_UNUSED(buft); |
|
} |
|
|
|
static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { |
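    // allocate an aligned host buffer; returns NULL on failure so callers can handle
    // out-of-memory instead of aborting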
|
void * data = ggml_aligned_malloc(size); |
|
|
|
if (data == NULL) { |
|
GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size); |
|
return NULL; |
|
} |
|
|
|
return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size); |
|
} |
|
|
|
static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { |
|
return TENSOR_ALIGNMENT; |
|
|
|
GGML_UNUSED(buft); |
|
} |
|
|
|
static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) { |
|
return true; |
|
|
|
GGML_UNUSED(buft); |
|
} |
|
|
|
ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { |
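    // the default CPU buffer type is a process-wide singleton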
|
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL,
        /* .context = */ NULL,
    };
|
|
|
return &ggml_backend_cpu_buffer_type; |
|
} |
|
|
|
static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) { |
|
return "CPU_Mapped"; |
|
|
|
GGML_UNUSED(buft); |
|
} |
|
|
|
static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) { |
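    // buffer type for buffers that wrap externally managed memory (e.g. memory-mapped model
    // data); it reuses the allocation helpers of the regular CPU buffer type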
|
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_from_ptr_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ NULL,
        /* .context = */ NULL,
    };
|
|
|
return &ggml_backend_cpu_buffer_type; |
|
} |
|
|
|
ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) { |
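    // wrap externally managed memory in a CPU backend buffer; the buffer does not take
    // ownership (free_buffer is NULL), so the caller must keep `ptr` alive and free it.
    //
    // illustrative usage sketch:
    //   void * mem = ggml_aligned_malloc(n);
    //   ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(mem, n);
    //   ... place tensors in buf ...
    //   ggml_backend_buffer_free(buf); // does not free mem
    //   ggml_aligned_free(mem, n);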
|
GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned"); |
|
return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size); |
|
} |
|
|