commit 5656d10599
* MPI support, first cut
* fix warnings, update README
* fixes
* wrap includes
* PR comments
* Update CMakeLists.txt
* Add GH workflow, fix test
* Add info to README
* mpi : trying to move more MPI stuff into ggml-mpi (WIP) (#2099)
* mpi : add names for layer inputs + prep ggml_mpi_graph_compute()
* mpi : move all MPI logic into ggml-mpi (not tested yet)
* mpi : various fixes - communication now works but results are wrong
* mpi : fix output tensor after MPI compute (still not working)
* mpi : fix inference
* mpi : minor
* Add OpenMPI to GH action
* [mpi] continue-on-error: true
* mpi : fix after master merge
* [mpi] Link MPI C++ libraries to fix OpenMPI
* tests : fix new llama_backend API
* [mpi] use MPI_INT32_T
* mpi : factor out recv / send in functions and reuse
* mpi : extend API to allow usage with outer backends (e.g. Metal)

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
39 lines
911 B
C
#pragma once

// forward declarations from ggml.h
struct ggml_context;
struct ggml_tensor;
struct ggml_cgraph;

#ifdef __cplusplus
extern "C" {
#endif

struct ggml_mpi_context;

// process-wide MPI setup / teardown (wraps MPI_Init / MPI_Finalize)
void ggml_mpi_backend_init(void);
void ggml_mpi_backend_free(void);

// create / destroy a context holding this process's rank and communicator size
struct ggml_mpi_context * ggml_mpi_init(void);
void ggml_mpi_free(struct ggml_mpi_context * ctx);

// rank of the calling process; 0 is the root
int ggml_mpi_rank(struct ggml_mpi_context * ctx);

// synchronize the evaluation parameters before each eval:
// the values passed by rank 0 overwrite those of the other ranks
void ggml_mpi_eval_init(
        struct ggml_mpi_context * ctx_mpi,
                            int * n_tokens,
                            int * n_past,
                            int * n_threads);

// before ggml_graph_compute(): restrict the graph to this rank's slice of the
// layers and receive the input activations from the neighboring rank
void ggml_mpi_graph_compute_pre(
        struct ggml_mpi_context * ctx_mpi,
             struct ggml_cgraph * gf,
                            int   n_layers);

// after ggml_graph_compute(): send this rank's output to the next rank in the pipeline
void ggml_mpi_graph_compute_post(
        struct ggml_mpi_context * ctx_mpi,
             struct ggml_cgraph * gf,
                            int   n_layers);

#ifdef __cplusplus
}
#endif
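
For reference, here is a minimal sketch of how these calls are meant to fit around a graph evaluation, modeled on the LLAMA_MPI code path in llama.cpp. It illustrates the intended call order, not a drop-in program: build_graph() is a hypothetical stand-in for the caller's own graph construction, the constants are placeholders, and ggml_graph_compute() is used with the (ctx, graph) signature contemporary with this commit.

#include "ggml.h"
#include "ggml-mpi.h"

// hypothetical helper: builds the model's forward graph for one eval
struct ggml_cgraph * build_graph(struct ggml_context * ctx0, int n_tokens, int n_past);

// one evaluation step, executed identically on every MPI rank
static void eval_step(struct ggml_mpi_context * ctx_mpi, struct ggml_context * ctx0, int n_layers) {
    int n_tokens  = 8;   // placeholder values; rank 0's values
    int n_past    = 0;   // overwrite these on the other ranks
    int n_threads = 4;
    ggml_mpi_eval_init(ctx_mpi, &n_tokens, &n_past, &n_threads);

    struct ggml_cgraph * gf = build_graph(ctx0, n_tokens, n_past);

    ggml_mpi_graph_compute_pre (ctx_mpi, gf, n_layers);  // claim this rank's layer slice
    ggml_graph_compute         (ctx0,    gf);            // or an outer backend, e.g. Metal
    ggml_mpi_graph_compute_post(ctx_mpi, gf, n_layers);  // pass activations along the pipeline

    if (ggml_mpi_rank(ctx_mpi) == 0) {
        // only rank 0 reads back the final output
    }
}

int main(void) {
    ggml_mpi_backend_init();                             // once per process, before anything else
    struct ggml_mpi_context * ctx_mpi = ggml_mpi_init();

    struct ggml_context * ctx0 = ggml_init((struct ggml_init_params) {
        .mem_size   = 16*1024*1024,                      // placeholder arena size
        .mem_buffer = NULL,
        .no_alloc   = false,
    });

    eval_step(ctx_mpi, ctx0, /*n_layers =*/ 32);         // layer count is model-dependent

    ggml_free(ctx0);
    ggml_mpi_free(ctx_mpi);
    ggml_mpi_backend_free();                             // after this, no MPI calls are valid
    return 0;
}

In llama.cpp itself every rank runs the same binary; ranks other than 0 sit in an eval loop and are driven by the parameters rank 0 broadcasts through ggml_mpi_eval_init().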