
Commit d6bc452

Committed Jan 26, 2025
lora: small clean up
1 parent: cd692d4

File tree: 1 file changed

lora.hpp (+2, -5)

@@ -342,10 +342,7 @@ struct LoraModel : public GGMLRunner {
                     scale_value = alpha / dim;
                 }
             } else if (lora_tensors.find(fk + ".lokr_w1") != lora_tensors.end() || lora_tensors.find(fk + ".lokr_w1_a") != lora_tensors.end()) {
-                // LOG_WARN("LoKr is not supported yet");
-                // break;
                 std::string alpha_name = fk + ".alpha";
-                ;
 
                 ggml_tensor* lokr_w1 = NULL;
                 ggml_tensor* lokr_w2 = NULL;
@@ -377,7 +374,7 @@ struct LoraModel : public GGMLRunner {
                 // scale != 1 only when using Low rank form (?)
                 int64_t dim = down->ne[ggml_n_dims(down) - 1];
                 if (lora_tensors.find(alpha_name) != lora_tensors.end()) {
-                    float alpha = ggml_backend_tensor_get_f32(to_f32(compute_ctx, lora_tensors[alpha_name]));
+                    float alpha = ggml_backend_tensor_get_f32(lora_tensors[alpha_name]);
                     scale_value = alpha / dim;
                 }
             }
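
Note on the hunk above: the old code inserted a to_f32 conversion node into the compute graph just to read one alpha scalar. Reading the tensor directly and converting on the host side makes that graph op unnecessary. A minimal sketch of such a host-side scalar read, assuming a backend tensor holding a single f32 or f16 value (illustration only; read_scalar_f32 is a hypothetical stand-in, not the repository's actual ggml_backend_tensor_get_f32 helper):

#include "ggml.h"
#include "ggml-backend.h"

// Illustration: fetch element 0 of a backend tensor as a host float,
// converting from f16 when needed, without adding nodes to the graph.
static float read_scalar_f32(const ggml_tensor* t) {
    if (t->type == GGML_TYPE_F32) {
        float v;
        ggml_backend_tensor_get(t, &v, 0, sizeof(v));
        return v;
    }
    if (t->type == GGML_TYPE_F16) {
        ggml_fp16_t h;
        ggml_backend_tensor_get(t, &h, 0, sizeof(h));
        return ggml_fp16_to_fp32(h);
    }
    return 0.0f;  // other element types omitted in this sketch
}

With a read like this, scale_value = alpha / dim is known before the graph is built, which is presumably why the to_f32(compute_ctx, ...) wrapper could be dropped.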
@@ -408,7 +405,7 @@ struct LoraModel : public GGMLRunner {
 
                 updown = ggml_kronecker(compute_ctx, lokr_w1, lokr_w2);
 
-                // TODO: double check aplhas
+                // TODO: double check alpha implementation, it seems strange to not use them most of the time
                 applied_lora_tensors.insert(alpha_name);
             } else {
                 // LoRA mode
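
For context on the last hunk: LoKr stores the weight delta as a Kronecker product of two small factors, which ggml_kronecker(compute_ctx, lokr_w1, lokr_w2) materializes before the alpha / dim scale is applied. A naive reference version of the same operation, with hypothetical shapes and values chosen purely for illustration:

#include <cstddef>
#include <cstdio>
#include <vector>

// Naive reference Kronecker product: C = A (x) B, all matrices row-major.
// A is ar x ac, B is br x bc, C is (ar*br) x (ac*bc).
static std::vector<float> kronecker(const std::vector<float>& A, int ar, int ac,
                                    const std::vector<float>& B, int br, int bc) {
    std::vector<float> C((size_t)ar * br * ac * bc);
    const int cc = ac * bc;  // number of columns in C
    for (int i = 0; i < ar; i++)
        for (int j = 0; j < ac; j++)
            for (int k = 0; k < br; k++)
                for (int l = 0; l < bc; l++)
                    C[(size_t)(i * br + k) * cc + (j * bc + l)] = A[i * ac + j] * B[k * bc + l];
    return C;
}

int main() {
    // Hypothetical 2x2 factors; the real lokr_w1 / lokr_w2 come from the LoRA file.
    std::vector<float> w1 = {1, 2, 3, 4};
    std::vector<float> w2 = {0, 1, 1, 0};
    float alpha = 8.0f;
    float dim   = 4.0f;                       // as in the diff: scale_value = alpha / dim
    std::vector<float> delta = kronecker(w1, 2, 2, w2, 2, 2);
    for (float& v : delta) v *= alpha / dim;  // apply the scale to the materialized delta
    for (int r = 0; r < 4; r++) {
        for (int c = 0; c < 4; c++) printf("%5.1f ", delta[(size_t)r * 4 + c]);
        printf("\n");
    }
    return 0;
}

Each entry of w1 stamps a scaled copy of w2 into the output, so an (ar x ac) by (br x bc) product yields an (ar*br) x (ac*bc) delta that matches the full weight shape while storing far fewer parameters than the dense matrix.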
