refactor: remove some useless code
leejet committed Apr 14, 2024
1 parent afea457 commit ec82d52
Showing 10 changed files with 23 additions and 126 deletions.
clip.hpp: 24 changes (0 additions, 24 deletions)
@@ -939,22 +939,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public GGMLModule {
return "clip";
}

size_t get_params_mem_size() {
size_t params_mem_size = text_model.get_params_mem_size();
if (version == VERSION_XL) {
params_mem_size += text_model2.get_params_mem_size();
}
return params_mem_size;
}

size_t get_params_num() {
size_t params_num = text_model.get_params_num();
if (version == VERSION_XL) {
params_num += text_model2.get_params_num();
}
return params_num;
}

void set_clip_skip(int clip_skip) {
text_model.set_clip_skip(clip_skip);
if (version == VERSION_XL) {
@@ -1385,14 +1369,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLModule {
return "clip_vision";
}

size_t get_params_mem_size() {
return vision_model.get_params_mem_size();
}

size_t get_params_num() {
return vision_model.get_params_num();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
vision_model.get_param_tensors(tensors, prefix + "transformer");
}
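The two deletions in this file are the first instances of a pattern the commit removes everywhere: each GGMLModule subclass forwarded get_params_num() and get_params_mem_size() to its inner GGMLBlock, and the base class used those answers to size its parameter buffer. As the ggml_extend.hpp hunks below show, the base class now derives both values itself, so the identical forwarding overrides in control.hpp, esrgan.hpp, lora.hpp, pmid.hpp, tae.hpp, unet.hpp, and vae.hpp disappear the same way. A condensed standalone sketch of the shape of the change, using stand-in types rather than the repo's real classes:

#include <cstddef>

struct Block {  // stand-in for a GGMLBlock subtree
    size_t get_params_num() { return 42; }
    size_t get_params_mem_size() { return 42 * 4096; }
};

// Before: two pure-virtual hooks that every module implemented as a
// one-line forward to its block.
struct ModuleBefore {
    Block model;
    size_t get_params_num() { return model.get_params_num(); }
    size_t get_params_mem_size() { return model.get_params_mem_size(); }
};

// After: no hooks at all; the base class counts tensors in its own
// params_ctx (ggml_tensor_num) and reads the real allocation size from
// the backend buffer (get_params_buffer_size).
struct ModuleAfter {
    Block model;
};

int main() { ModuleBefore b; ModuleAfter a; (void)b; (void)a; return 0; }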
control.hpp: 8 changes (0 additions, 8 deletions)
@@ -369,14 +369,6 @@ struct ControlNet : public GGMLModule {
return "control_net";
}

size_t get_params_mem_size() {
return control_net.get_params_mem_size();
}

size_t get_params_num() {
return control_net.get_params_num();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
control_net.get_param_tensors(tensors, prefix);
}
esrgan.hpp: 8 changes (0 additions, 8 deletions)
@@ -152,14 +152,6 @@ struct ESRGAN : public GGMLModule {
return "esrgan";
}

size_t get_params_mem_size() {
return rrdb_net.get_params_mem_size();
}

size_t get_params_num() {
return rrdb_net.get_params_num();
}

bool load_from_file(const std::string& file_path) {
LOG_INFO("loading esrgan from '%s'", file_path.c_str());

ggml_extend.hpp: 53 changes (17 additions, 36 deletions)
@@ -752,18 +752,15 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_timestep_embedding(
return ggml_timestep_embedding(ctx, timesteps, dim, max_period);
}

// struct GGMLComputeGraph {
// virtual void init(struct ggml_context* ctx, ggml_type wtype) = 0;
// virtual std::string get_desc() = 0;
// virtual size_t get_params_mem_size() = 0;
// virtual size_t get_params_num() = 0;
// virtual struct ggml_cgraph* get_ggml_cgraph() = 0;
// };

/*
#define MAX_PARAMS_TENSOR_NUM 10240
#define MAX_GRAPH_SIZE 10240
*/

__STATIC_INLINE__ size_t ggml_tensor_num(ggml_context * ctx) {
size_t num = 0;
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
num++;
}
return num;
}

/* SDXL with LoRA requires more space */
#define MAX_PARAMS_TENSOR_NUM 15360
#define MAX_GRAPH_SIZE 15360
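ggml_tensor_num can replace the per-module counting because a ggml_context already tracks every tensor created in it as an internal linked list; the parameter count is just the length of that list. A minimal standalone sketch of that walk (the same loop as the helper above), assuming the bundled ggml's ggml.h is on the include path:

#include "ggml.h"
#include <cstdio>

int main() {
    struct ggml_init_params params;
    params.mem_size   = 16 * 1024 * 1024;
    params.mem_buffer = NULL;
    params.no_alloc   = true;  // metadata only; no tensor data is allocated

    struct ggml_context* ctx = ggml_init(params);
    ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 10);
    ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);

    size_t num = 0;
    for (struct ggml_tensor* t = ggml_get_first_tensor(ctx); t != NULL;
         t = ggml_get_next_tensor(ctx, t)) {
        num++;
    }
    printf("tensors in context: %zu\n", num);  // prints 2

    ggml_free(ctx);
    return 0;
}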
@@ -854,8 +851,6 @@ struct GGMLModule {
}

public:
virtual size_t get_params_mem_size() = 0;
virtual size_t get_params_num() = 0;
virtual std::string get_desc() = 0;

GGMLModule(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32)
@@ -876,7 +871,7 @@ struct GGMLModule {
}

bool alloc_params_buffer() {
size_t num_tensors = get_params_num();
size_t num_tensors = ggml_tensor_num(params_ctx);
params_buffer = ggml_backend_alloc_ctx_tensors(params_ctx, backend);
if (params_buffer == NULL) {
LOG_ERROR("%s alloc params backend buffer failed", get_desc().c_str());
@@ -898,6 +893,13 @@ struct GGMLModule {
}
}

size_t get_params_buffer_size() {
if (params_buffer != NULL) {
return ggml_backend_buffer_get_size(params_buffer);
}
return 0;
}

void free_compute_buffer() {
if (compute_allocr != NULL) {
ggml_gallocr_free(compute_allocr);
@@ -968,19 +970,6 @@ struct GGMLModule {
};

class GGMLBlock {
private:
static char temp_buffer[1024 * 1024 * 10];
ggml_context* get_temp_ctx() {
struct ggml_init_params params;
params.mem_size = sizeof(temp_buffer);
params.mem_buffer = temp_buffer;
params.no_alloc = true;

ggml_context* temp_ctx = ggml_init(params);
GGML_ASSERT(temp_ctx != NULL);
return temp_ctx;
}

protected:
typedef std::unordered_map<std::string, struct ggml_tensor*> ParameterMap;
typedef std::unordered_map<std::string, std::shared_ptr<GGMLBlock>> GGMLBlockMap;
@@ -1003,14 +992,6 @@ class GGMLBlock {
init_params(ctx, wtype);
}

std::tuple<size_t, size_t> get_params_info(ggml_type wtype) {
ggml_context* temp_ctx = get_temp_ctx();
init(temp_ctx, wtype);
size_t num_tensors = get_params_num();
size_t mem_size = get_params_mem_size();
return {num_tensors, mem_size};
}

size_t get_params_num() {
size_t num_tensors = params.size();
for (auto& pair : blocks) {
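Two additions above work together: alloc_params_buffer() no longer needs a subclass to predict its tensor count, and the new get_params_buffer_size() reports memory by asking the allocated backend buffer rather than summing per-tensor sizes. That is also what makes GGMLBlock's static 10 MB temp_buffer and get_params_info() (a dry-run init used only to precompute sizes) removable. A standalone sketch of the new measurement path, assuming ggml's allocator and backend headers are available:

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include <cstdio>

int main() {
    struct ggml_init_params params;
    params.mem_size   = 1024 * ggml_tensor_overhead();
    params.mem_buffer = NULL;
    params.no_alloc   = true;  // tensor data will live in the backend buffer

    struct ggml_context* ctx = ggml_init(params);
    ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 768);

    ggml_backend_t backend = ggml_backend_cpu_init();
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    // What get_params_buffer_size() now returns: the size actually
    // allocated, including any backend alignment or padding.
    printf("params buffer: %zu bytes\n", ggml_backend_buffer_get_size(buf));

    ggml_backend_buffer_free(buf);
    ggml_backend_free(backend);
    ggml_free(ctx);
    return 0;
}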
lora.hpp: 8 changes (0 additions, 8 deletions)
@@ -27,14 +27,6 @@ struct LoraModel : public GGMLModule {
return "lora";
}

size_t get_params_num() {
return LORA_GRAPH_SIZE;
}

size_t get_params_mem_size() {
return model_loader.get_params_mem_size(NULL);
}

bool load_from_file(bool filter_tensor = false) {
LOG_INFO("loading LoRA from '%s'", file_path.c_str());

pmid.hpp: 10 changes (0 additions, 10 deletions)
@@ -186,16 +186,6 @@ struct PhotoMakerIDEncoder : public GGMLModule {
return "pmid";
}

size_t get_params_mem_size() {
size_t params_mem_size = id_encoder.get_params_mem_size();
return params_mem_size;
}

size_t get_params_num() {
size_t params_num = id_encoder.get_params_num();
return params_num;
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
id_encoder.get_param_tensors(tensors, prefix);
}
stable-diffusion.cpp: 14 changes (6 additions, 8 deletions)
@@ -42,8 +42,6 @@ const char* sampling_methods_str[] = {
"LCM",
};

char GGMLBlock::temp_buffer[1024 * 1024 * 10];

/*================================================== Helper Functions ================================================*/

void calculate_alphas_cumprod(float* alphas_cumprod,
@@ -353,27 +351,27 @@ class StableDiffusionGGML {
// first_stage_model->test();
// return false;
} else {
size_t clip_params_mem_size = cond_stage_model->get_params_mem_size();
size_t unet_params_mem_size = diffusion_model->get_params_mem_size();
size_t clip_params_mem_size = cond_stage_model->get_params_buffer_size();
size_t unet_params_mem_size = diffusion_model->get_params_buffer_size();
size_t vae_params_mem_size = 0;
if (!use_tiny_autoencoder) {
vae_params_mem_size = first_stage_model->get_params_mem_size();
vae_params_mem_size = first_stage_model->get_params_buffer_size();
} else {
if (!tae_first_stage->load_from_file(taesd_path)) {
return false;
}
vae_params_mem_size = tae_first_stage->get_params_mem_size();
vae_params_mem_size = tae_first_stage->get_params_buffer_size();
}
size_t control_net_params_mem_size = 0;
if (control_net) {
if (!control_net->load_from_file(control_net_path)) {
return false;
}
control_net_params_mem_size = control_net->get_params_mem_size();
control_net_params_mem_size = control_net->get_params_buffer_size();
}
size_t pmid_params_mem_size = 0;
if (stacked_id) {
pmid_params_mem_size = pmid_model->get_params_mem_size();
pmid_params_mem_size = pmid_model->get_params_buffer_size();
}

size_t total_params_ram_size = 0;
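With the virtual accessors gone, these call sites can only report sizes after each load_from_file() succeeds, since the backend buffer (and hence its size) exists only once alloc_params_buffer() has run. A condensed sketch of the reporting pattern; Module is a hypothetical stand-in for the GGMLModule-derived classes, and the byte counts are made-up placeholders, not values from the repo:

#include <cstdio>
#include <vector>

struct Module {                 // hypothetical stand-in for GGMLModule
    const char* name;
    size_t buffer_bytes;        // what get_params_buffer_size() returns
    size_t get_params_buffer_size() const { return buffer_bytes; }
};

int main() {
    std::vector<Module> modules = {
        {"clip", (size_t)235 << 20},   // placeholder sizes, not real values
        {"unet", (size_t)1640 << 20},
        {"vae", (size_t)95 << 20},
    };
    size_t total = 0;
    for (const Module& m : modules) {
        size_t sz = m.get_params_buffer_size();  // valid only after loading
        total += sz;
        printf("%-5s %8.2f MB\n", m.name, sz / 1024.0 / 1024.0);
    }
    printf("total %8.2f MB\n", total / 1024.0 / 1024.0);
    return 0;
}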
tae.hpp: 8 changes (0 additions, 8 deletions)
@@ -200,14 +200,6 @@ struct TinyAutoEncoder : public GGMLModule {
return "taesd";
}

size_t get_params_mem_size() {
return taesd.get_params_mem_size();
}

size_t get_params_num() {
return taesd.get_params_num();
}

bool load_from_file(const std::string& file_path) {
LOG_INFO("loading taesd from '%s'", file_path.c_str());
alloc_params_buffer();
unet.hpp: 8 changes (0 additions, 8 deletions)
@@ -543,14 +543,6 @@ struct UNetModel : public GGMLModule {
return "unet";
}

size_t get_params_mem_size() {
return unet.get_params_mem_size();
}

size_t get_params_num() {
return unet.get_params_num();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
unet.get_param_tensors(tensors, prefix);
}
vae.hpp: 8 changes (0 additions, 8 deletions)
@@ -526,14 +526,6 @@ struct AutoEncoderKL : public GGMLModule {
return "vae";
}

size_t get_params_mem_size() {
return ae.get_params_mem_size();
}

size_t get_params_num() {
return ae.get_params_num();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
ae.get_param_tensors(tensors, prefix);
}
