update optimizer for lora
parent 58586112c1
commit 3ba0c77053
solver/trainer.py
@@ -2,7 +2,7 @@
 Author: LiangSong(sl12160010@gmail.com)
 Date: 2023-04-24 20:05:21
 LastEditors: LiangSong(sl12160010@gmail.com)
-LastEditTime: 2023-05-06 23:04:14
+LastEditTime: 2023-05-08 22:51:42
 FilePath: /Open-Llama/solver/trainer.py
 Description:
@@ -56,6 +56,9 @@ class Trainer:
 
     def get_optimizer(self):
         no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"]
+        if self.config["train"].get("use_lora", False):
+            optimizer_grouped_parameters = self.raw_model.parameters()
+        else:
         optimizer_grouped_parameters = [
             {
                 "params": [
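Note: the sketch below illustrates the pattern this change follows. When use_lora is enabled, only the injected adapter weights are trainable, so the model's parameters() iterator can be handed to the optimizer directly; otherwise the parameters are split into a weight-decay group and a no-decay group (biases and layer norms). This is a minimal standalone sketch, not the repository's code: the build_optimizer helper, the weight_decay and lr config keys, and the use of torch.optim.AdamW are assumptions for illustration.

# Minimal sketch (assumed names and optimizer, not Open-Llama's implementation)
# of the optimizer setup this commit implies.
import torch
from torch import nn


def build_optimizer(model: nn.Module, config: dict) -> torch.optim.Optimizer:
    train_cfg = config["train"]
    no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"]

    if train_cfg.get("use_lora", False):
        # With LoRA, only the adapter parameters require gradients, so the
        # weight-decay grouping is unnecessary; pass the parameters directly.
        params = model.parameters()
    else:
        # Full fine-tuning: exclude biases and layer-norm weights from decay.
        params = [
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": train_cfg.get("weight_decay", 0.01),
            },
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
    # AdamW accepts either a plain parameter iterator or a list of param groups.
    return torch.optim.AdamW(params, lr=train_cfg.get("lr", 2e-4))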