1 parent 322fbe8 commit eded58f
unsloth/models/vision.py
@@ -520,6 +520,12 @@ def from_pretrained(
             f"To enable float32 training, use `float32_mixed_precision = True` during FastLanguageModel.from_pretrained"
         )
         os.environ["UNSLOTH_BFLOAT16_MIXED_PRECISION"] = "1"
+    else:
+        print(
+            f"Unsloth: Using full float32 full finetuning.\n"
+            f"To enable bfloat16 training to reduce VRAM usage by 50% albeit with a slightly higher loss, "\
+            "use `float32_mixed_precision = False` during FastLanguageModel.from_pretrained"
+        )
     else:
         print("Unsloth: Float16 full finetuning uses more memory since we upcast weights to float32.")
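For context, the new message points users at the `float32_mixed_precision` argument of `FastLanguageModel.from_pretrained`. A minimal usage sketch follows; the model name and the `full_finetuning` argument are illustrative assumptions, not part of this commit.

from unsloth import FastLanguageModel

# Illustrative sketch only: the model name and other arguments are placeholders,
# not taken from this commit. `float32_mixed_precision` is the flag the new
# message refers to.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/Llama-3.2-11B-Vision-Instruct",  # hypothetical example model
    full_finetuning = True,                                 # assumed: full finetuning path
    float32_mixed_precision = False,  # False: bfloat16 mixed precision (~50% less VRAM, slightly higher loss)
                                      # True:  full float32 training
)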