mirror of https://git.adityakumar.xyz/llama.cpp.git (synced 2024-11-08 23:19:43 +00:00)
#pragma once

struct ggml_context;
struct ggml_tensor;
struct ggml_cgraph;

#ifdef __cplusplus
extern "C" {
#endif

struct ggml_mpi_context;

// initialize / shut down the MPI backend (call once per process)
void ggml_mpi_backend_init(void);
void ggml_mpi_backend_free(void);

// create / destroy an MPI context for a model
struct ggml_mpi_context * ggml_mpi_init(void);
void ggml_mpi_free(struct ggml_mpi_context * ctx);

// rank of the calling process in the MPI communicator
int ggml_mpi_rank(struct ggml_mpi_context * ctx);

// synchronize the evaluation parameters across ranks before each evaluation
void ggml_mpi_eval_init(
        struct ggml_mpi_context * ctx_mpi,
                            int * n_tokens,
                            int * n_past,
                            int * n_threads);

// prepare the compute graph before evaluation on this rank
void ggml_mpi_graph_compute_pre(
        struct ggml_mpi_context * ctx_mpi,
             struct ggml_cgraph * gf,
                            int   n_layers);

// finalize the compute graph after evaluation (exchange results between ranks)
void ggml_mpi_graph_compute_post(
        struct ggml_mpi_context * ctx_mpi,
             struct ggml_cgraph * gf,
                            int   n_layers);

#ifdef __cplusplus
}
#endif
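
// --- usage sketch (not part of the original header) --------------------------
// A minimal, hypothetical example of the intended call order: per-process
// backend init, a per-model MPI context, parameter synchronization, then the
// pre/post hooks wrapped around the regular ggml graph computation. The graph
// construction, the actual compute call, and `n_layers` are stubbed out or
// assumed to come from the model; `example_main` is an illustrative name, not
// the llama.cpp integration. Guarded with #if 0 so it never affects a build.
#if 0
#include <stdio.h>
#include "ggml-mpi.h"

int example_main(void) {
    ggml_mpi_backend_init();                           // once per process

    struct ggml_mpi_context * ctx_mpi = ggml_mpi_init();

    int n_tokens  = 8;                                 // synchronized across
    int n_past    = 0;                                 // ranks by
    int n_threads = 4;                                 // ggml_mpi_eval_init
    ggml_mpi_eval_init(ctx_mpi, &n_tokens, &n_past, &n_threads);

    // struct ggml_cgraph * gf = /* build the model graph here */;
    // const int n_layers = /* number of transformer layers */;
    // ggml_mpi_graph_compute_pre (ctx_mpi, gf, n_layers);
    // /* run the regular ggml graph computation on this rank */
    // ggml_mpi_graph_compute_post(ctx_mpi, gf, n_layers);

    printf("rank %d done\n", ggml_mpi_rank(ctx_mpi));

    ggml_mpi_free(ctx_mpi);
    ggml_mpi_backend_free();
    return 0;
}
#endif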