update comment for ch12

Gen TANG 2 years ago
parent
commit
ab9a5f2066

+ 10 - 0
ch12_rl/README.md

@@ -0,0 +1,10 @@
+
+|Code|Description|
+|---|---|
+|[intuition_model.ipynb](intuition_model.ipynb)| An intuitive way of connecting the large language model with the reward model |
+|[utils.py](utils.py)| Defines the game and the corresponding visualization tools |
+|[value_learning.ipynb](value_learning.ipynb)| Value function learning |
+|[policy_learning.ipynb](policy_learning.ipynb)| Policy learning |
+|[a2c.ipynb](a2c.ipynb)| Baseline and the A2C model |
+|[llm_ppo.ipynb](llm_ppo.ipynb)| Uses the PPO algorithm to optimize a large language model so that the fine-tuned model achieves a higher score |
+|[llm_ppo_correct_dropout.ipynb](llm_ppo_correct_dropout.ipynb)| Same goal as [llm_ppo.ipynb](llm_ppo.ipynb); this notebook focuses on how to use dropout correctly within the PPO algorithm |

+ 66 - 13
ch12_rl/a2c.ipynb

@@ -54,6 +54,9 @@
    ],
    "source": [
     "def get_cum_rewards(r, gamma):\n",
+    "    '''\n",
+    "    计算每一步的游戏得分并返回\n",
+    "    '''\n",
     "    cum_rewards = []\n",
     "    last_cum_reward = 0\n",
     "    for j in reversed(r):\n",
@@ -70,6 +73,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# 一些超参数\n",
     "gamma = 0.9\n",
     "learning_rate = 0.01\n",
     "grad_clip = 1.0\n",
@@ -85,29 +89,52 @@
     "class ActorNet(nn.Module):\n",
     "    \n",
     "    def __init__(self):\n",
+    "        '''\n",
+    "        游戏策略\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.emb = nn.Embedding(2, 4)\n",
     "        self.ln = nn.Linear(4, 2)\n",
     "\n",
     "    def forward(self, x):\n",
-    "        # x: (G)\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,游戏状态,形状为(G),其中G表示游戏步数\n",
+    "        返回\n",
+    "        ----\n",
+    "        out :torch.FloatTensor,logits,形状为(G, 2)\n",
+    "        '''\n",
     "        x = F.relu(self.emb(x))\n",
-    "        x = self.ln(x)\n",
-    "        return x\n",
+    "        out = self.ln(x)\n",
+    "        return out\n",
     "\n",
     "class BaselineNet(nn.Module):\n",
     "    \n",
     "    def __init__(self):\n",
+    "        '''\n",
+    "        基准线\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.emb = nn.Embedding(2, 4)\n",
     "        self.ln = nn.Linear(4, 1)\n",
     "\n",
     "    def forward(self, x):\n",
-    "        # x: (G)\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,游戏状态,形状为(G),其中G表示游戏步数\n",
+    "        返回\n",
+    "        ----\n",
+    "        out :torch.FloatTensor,值函数,形状为(G, 1)\n",
+    "        '''\n",
     "        x = F.relu(self.emb(x))\n",
-    "        x = self.ln(x)\n",
-    "        return x\n",
+    "        out = self.ln(x)\n",
+    "        return out\n",
     "\n",
+    "# 定义游戏状态的数字表示\n",
     "tokenizer = {'w': 0, 'l': 1}"
    ]
   },
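The new docstrings spell out the shape contract: states of shape (G) go in, logits of shape (G, 2) and values of shape (G, 1) come out. A quick illustrative check of that contract with the same layer sizes (not the notebook's classes themselves):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

emb = nn.Embedding(2, 4)          # two states ('w'/'l'), 4-dimensional embedding
actor_head = nn.Linear(4, 2)      # logits over the two actions
value_head = nn.Linear(4, 1)      # scalar value estimate

states = torch.randint(2, (7,))   # G = 7 game steps
h = F.relu(emb(states))
print(actor_head(h).shape, value_head(h).shape)  # torch.Size([7, 2]) torch.Size([7, 1])
```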
@@ -128,8 +155,10 @@
     "        x = torch.tensor([tokenizer[s]])   # (1)\n",
     "        logits = model(x)                  # (1, 2)\n",
     "        probs = F.softmax(logits, dim=-1)  # (1, 2)\n",
+    "        # 利用神经网络得到下一个行动\n",
     "        action = torch.multinomial(probs, 1)\n",
     "        next_s, r = game.step(action)\n",
+    "        # 记录游戏过程,分别是行动、状态和奖励\n",
     "        one_game_action.append(action)\n",
     "        one_game_state.append(s)\n",
     "        one_game_reward.append(r)\n",
@@ -183,15 +212,15 @@
     "    cum_rewards = torch.tensor(cum_rewards)                    # (G)\n",
     "    states = torch.tensor([tokenizer[s] for s in states])      # (G)\n",
     "    actions = torch.concat(actions).squeeze(-1)                # (G)\n",
-    "    # update baseline\n",
+    "    # 更新基准线\n",
     "    baseline_optimizer.zero_grad()\n",
     "    with torch.no_grad():\n",
-    "        # baseline(states): (G, 1)\n",
+    "        # baseline(states)的形状是(G, 1)\n",
     "        advantage = cum_rewards - baseline(states).squeeze(-1)  # (G)\n",
     "    baseline_loss = -advantage * baseline(states)               # (G)\n",
     "    baseline_loss.mean().backward()\n",
     "    baseline_optimizer.step()\n",
-    "    # update actor\n",
+    "    # 更新游戏策略\n",
     "    actor_optimizer.zero_grad()\n",
     "    logits = actor(states)                                     # (G, 2)\n",
     "    # ln(probability)\n",
@@ -199,8 +228,9 @@
     "    actor_loss = -advantage * lnP\n",
     "    actor_loss.mean().backward()\n",
     "    actor_optimizer.step()\n",
-    "    # record actor\n",
+    "    # 记录游戏策略的结果\n",
     "    _a_re = {}\n",
+    "    # 记录基准线的结果\n",
     "    _c_re = {}\n",
     "    for k in tokenizer:\n",
     "        inputs = torch.tensor([tokenizer[k]])\n",
@@ -230,6 +260,7 @@
     }
    ],
    "source": [
+    "# 展示策略\n",
     "fig = plot_action_probs(actor_re)"
    ]
   },
@@ -252,6 +283,7 @@
     }
    ],
    "source": [
+    "# 展示值函数\n",
     "fig = plot_values(baseline_re)"
    ]
   },
@@ -264,13 +296,27 @@
     "class A2C(nn.Module):\n",
     "    \n",
     "    def __init__(self):\n",
+    "        '''\n",
+    "        a2c模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.emb = nn.Embedding(2, 4)\n",
+    "        # 游戏策略头\n",
     "        self.action_ln = nn.Linear(4, 2)\n",
+    "        # 值函数估计头\n",
     "        self.critic_ln = nn.Linear(4, 1)\n",
     "\n",
     "    def forward(self, x):\n",
-    "        # x: (G)\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,游戏状态,形状为(G),其中G表示游戏步数\n",
+    "        返回\n",
+    "        ----\n",
+    "        actions :torch.FloatTensor,游戏策略,形状为(G, 2)\n",
+    "        values :torch.FloatTensor,值函数,形状为(G, 1)\n",
+    "        '''\n",
     "        x = F.relu(self.emb(x))\n",
     "        actions = self.action_ln(x)\n",
     "        values = self.critic_ln(x)\n",
@@ -294,6 +340,7 @@
     }
    ],
    "source": [
+    "# 验证模型是否搭建正确\n",
     "model = A2C()\n",
     "x = torch.randint(2, (10,))\n",
     "logits, values = model(x)\n",
@@ -318,6 +365,7 @@
    ],
    "source": [
     "model = A2C()\n",
+    "# 只使用a2c中的游戏策略头\n",
     "actor = lambda x: model(x)[0]\n",
     "game = Lottery()\n",
     "play_game(actor, game)"
@@ -329,7 +377,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# One-step actor-critic\n",
     "model = A2C()\n",
     "optimizer = optim.AdamW(model.parameters(), lr=learning_rate)\n",
     "actor_re = []\n",
@@ -347,16 +394,20 @@
     "        _, values = model(states)                                # (G, 1)\n",
     "        values = values.squeeze(1)                               # (G)   \n",
     "        vt_next = torch.cat((values[:-1], torch.tensor([0.0])))  # (G)\n",
+    "        # 优势函数\n",
     "        advantage = rewards + gamma * vt_next - values           # (G)\n",
     "    logits, vt = model(states)\n",
     "    vt = vt.squeeze(1)                                           # (G)\n",
     "    lnP = -F.cross_entropy(logits, actions, reduction='none')    # (G)\n",
+    "    # 值函数损失\n",
     "    vf_loss = -advantage * vt\n",
+    "    # 策略损失\n",
     "    pg_loss = -advantage * lnP\n",
+    "    # 定义模型损失\n",
     "    loss = vf_weight * vf_loss.mean() + pg_loss.mean()\n",
     "    loss.backward()\n",
     "    optimizer.step()\n",
-    "    # record actor\n",
+    "    # 记录模型结果\n",
     "    _a_re = {}\n",
     "    _c_re = {}\n",
     "    for k in tokenizer:\n",
@@ -388,6 +439,7 @@
     }
    ],
    "source": [
+    "# 展示游戏策略\n",
     "fig = plot_action_probs(actor_re)"
    ]
   },
@@ -410,6 +462,7 @@
     }
    ],
    "source": [
+    "# 展示值函数\n",
     "fig = plot_values(critic_re)"
    ]
   }

+ 92 - 13
ch12_rl/intuition_model.ipynb

@@ -33,7 +33,7 @@
    "outputs": [],
    "source": [
     "llm = GPT2LMHeadModel.from_pretrained('gpt2')\n",
-    "tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")"
+    "tokenizer = AutoTokenizer.from_pretrained('gpt2')"
    ]
   },
   {
@@ -45,16 +45,35 @@
     "class RewardModel(nn.Module):\n",
     "\n",
     "    def __init__(self, model):\n",
+    "        '''\n",
+    "        评分模型\n",
+    "        参数\n",
+    "        ----\n",
+    "        model :嵌入模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.embedding = model\n",
+    "        # 评分建模头\n",
     "        self.score = nn.Linear(model.embed_dim, 1, bias=False)\n",
     "\n",
     "    def forward(self, x, seq_len=None):\n",
-    "        # x:表示文本,形状(B, T, vs)或者(B, T), seq_len:表示文本长度,形状(B)\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,文本,形状为(B, T)或者(B, T, vs),其中vs表示字典大小\n",
+    "        seq_len :torch.LongTensor,文本的实际长度,形状为(B)\n",
+    "        返回\n",
+    "        ----\n",
+    "        score :torch.FloatTensor,评分,形状为(B, 1)\n",
+    "        '''\n",
+    "        \n",
     "        B = x.shape[0]\n",
     "        T = x.shape[1]\n",
+    "        # 文本的嵌入向量\n",
     "        emb = self.get_last_hidden_state(x)     # (B, T, C)\n",
     "        ind = torch.arange(B, device=x.device)\n",
+    "        # 如果没有传入seq_len,则所有文本的实际长度都等于T\n",
     "        if seq_len == None:\n",
     "            seq_len = torch.tensor([T] * B)\n",
     "        # 获取最后一个词元的特征\n",
@@ -63,14 +82,17 @@
     "        return score\n",
     "    \n",
     "    def get_last_hidden_state(self, x):\n",
+    "        '''\n",
+    "        获取文本的嵌入向量\n",
+    "        '''\n",
+    "        # 普通情况下,x的形状为(B, T)\n",
     "        if len(x.shape) == 2:\n",
-    "            # x shape = (B, T)\n",
     "            emb = self.embedding(x).last_hidden_state  # (B, T, C)\n",
-    "        # 为后面使用gumbel_softmax做准备,直接与embedding的模型参数进行计算\n",
+    "        # 如果使用了gumbel_softmax,则x的形状为(B, T, vs)\n",
+    "        # 这种情况下,需要直接与embedding的模型参数进行计算\n",
     "        else:\n",
-    "            # x shape = (B, T, vs)\n",
     "            w = self.embedding.get_input_embeddings().weight  # (vs, C)\n",
-    "            inputs_embeds = x @ w  # (B, T, C)\n",
+    "            inputs_embeds = x @ w  # (B, T, vs) @ (vs, C) --> (B, T, C)\n",
     "            emb = self.embedding(inputs_embeds=inputs_embeds).last_hidden_state\n",
     "        return emb\n",
     "\n",
@@ -95,6 +117,7 @@
    ],
    "source": [
     "# 验证评分模型计算正确\n",
+    "# x的形状是(B, T),x_hot的形状是(B, T, vs)\n",
     "x = torch.randint(0, tokenizer.vocab_size, (3, 4))\n",
     "x_hot = F.one_hot(x, num_classes=tokenizer.vocab_size).float()\n",
     "(r_model(x) - r_model(x_hot)).abs().max()"
@@ -109,6 +132,13 @@
     "class RLModel(nn.Module):\n",
     "    \n",
     "    def __init__(self, llm, r_model):\n",
+    "        '''\n",
+    "        大语言模型与评分模型的拼接(错误方式)\n",
+    "        参数\n",
+    "        ----\n",
+    "        llm :大语言模型\n",
+    "        r_model :评分模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.llm = llm\n",
     "        self.r_model = r_model\n",
@@ -117,6 +147,13 @@
     "            param.requires_grad = False\n",
     "    \n",
     "    def generate(self, idx, max_new_tokens):\n",
+    "        '''\n",
+    "        利用大语言模型生成文本(反复使用模型进行预测)\n",
+    "        参数\n",
+    "        ----\n",
+    "        idx :torch.LongTensor,背景文本,形状为(1, T)\n",
+    "        max_new_tokens :int,生成文本的最大长度\n",
+    "        '''\n",
     "        model = self.llm\n",
     "        for _ in range(max_new_tokens):\n",
     "            logits = model(input_ids=idx).logits\n",
@@ -128,8 +165,18 @@
     "        return idx\n",
     "    \n",
     "    def forward(self, idx):\n",
+    "        '''\n",
+    "        利用大语言模型生成文本,再使用评分模型对生成文本进行评分\n",
+    "        参数\n",
+    "        ----\n",
+    "        idx :torch.LongTensor,背景文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        reward :torch.FloatTensor,评分,形状为(1, 1)\n",
+    "        '''\n",
     "        # 为了代码简洁,我们设置产生文本的长度\n",
     "        ans = self.generate(idx, 20)\n",
+    "        # 对文本进行评分\n",
     "        reward = self.r_model(ans)\n",
     "        return reward"
    ]
@@ -141,7 +188,7 @@
    "outputs": [],
    "source": [
     "inputs = '1 + 2 = 3, 2 + 1 = 3, 1 + 2 ='\n",
-    "ids = tokenizer(inputs, return_tensors=\"pt\")\n",
+    "ids = tokenizer(inputs, return_tensors='pt')\n",
     "model = RLModel(llm, r_model)"
    ]
   },
@@ -185,6 +232,7 @@
     }
    ],
    "source": [
+    "# 使用第三方库封装好的函数生成文本\n",
     "res = model.llm.generate(\n",
     "    input_ids=ids['input_ids'], max_new_tokens=20,\n",
     "    do_sample=True, top_k=0)[0]\n",
@@ -212,7 +260,7 @@
    ],
    "source": [
     "loss = -1 * model(ids['input_ids'])\n",
-    "# 将报错\n",
+    "# 将报错,因为torch.multinomial不可微\n",
     "loss.backward()"
    ]
   },
@@ -231,11 +279,13 @@
     }
    ],
    "source": [
-    "# 验gumbel_softmax\n",
+    "# 验gumbel_softmax可以近似torch.multinomial\n",
     "logits = torch.randn(1, 5)\n",
     "probs = F.softmax(logits, dim=-1)\n",
+    "# 使用torch.multinomial生成结果\n",
     "y = torch.multinomial(probs, num_samples=10000, replacement=True)\n",
     "print(torch.histogram(y.float(), bins=5).hist)\n",
+    "# 使用gumbel_softmax生成结果\n",
     "gumbel_y = torch.argmax(F.gumbel_softmax(logits.repeat(10000, 1), tau=1, hard=True), dim=-1, keepdim=True)\n",
     "print(torch.histogram(gumbel_y.float(), bins=5).hist)"
    ]
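Beyond matching the sampling distribution, the point of F.gumbel_softmax(..., hard=True) is that it keeps a gradient path back to the logits (a straight-through estimator), which torch.multinomial does not. A small illustration:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(1, 5, requires_grad=True)
one_hot = F.gumbel_softmax(logits, tau=1, hard=True)  # hard one-hot in the forward pass
score = (one_hot * torch.arange(5.0)).sum()           # pretend downstream score
score.backward()
print(logits.grad)                                    # non-zero gradients reach the logits
```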
@@ -249,6 +299,13 @@
     "class RLModelWithGumbel(nn.Module):\n",
     "    \n",
     "    def __init__(self, llm, r_model):\n",
+    "        '''\n",
+    "        大语言模型与评分模型的拼接(没有明显错误的方式,但也不是合适的方式)\n",
+    "        参数\n",
+    "        ----\n",
+    "        llm :大语言模型\n",
+    "        r_model :评分模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.llm = llm\n",
     "        self.r_model = r_model\n",
@@ -257,17 +314,28 @@
     "            param.requires_grad = False\n",
     "    \n",
     "    def generate(self, idx, max_new_tokens):\n",
+    "        '''\n",
+    "        利用大语言模型生成文本(反复使用模型进行预测)\n",
+    "        参数\n",
+    "        ----\n",
+    "        idx :torch.LongTensor,背景文本,形状为(1, T)\n",
+    "        max_new_tokens :int,生成文本的最大长度\n",
+    "        返回\n",
+    "        ----\n",
+    "        idx :torch.LongTensor,背景文本 + 生成文本,形状为(1, T+L),其中L是生成文本的长度\n",
+    "        ans :torch.LongTensor,生成文本,形状为(1, L, vs),其中vs是字典的大小\n",
+    "        '''\n",
     "        model = self.llm\n",
-    "        B, T = idx.shape\n",
     "        ans = None\n",
     "        for _ in range(max_new_tokens):\n",
     "            logits = model(input_ids=idx).logits\n",
     "            logits = logits[:, -1, :]\n",
     "            # 根据概率,随机生成下一个词元\n",
-    "            idx_next_hot = F.gumbel_softmax(logits, tau=1, hard=True)  # (B, vs)\n",
+    "            idx_next_hot = F.gumbel_softmax(logits, tau=1, hard=True)  # (1, vs)\n",
+    "            # torch.argmax不可微,所以idx不可微\n",
     "            idx_next = torch.argmax(idx_next_hot, dim=-1, keepdim=True)\n",
     "            idx = torch.cat((idx, idx_next.long()), dim=1)\n",
-    "            idx_next_hot = idx_next_hot.unsqueeze(1)      # (B, 1, vs)\n",
+    "            idx_next_hot = idx_next_hot.unsqueeze(1)  # (1, 1, vs)\n",
     "            if ans == None:\n",
     "                ans = idx_next_hot\n",
     "            else:\n",
@@ -275,8 +343,18 @@
     "        return idx, ans\n",
     "    \n",
     "    def forward(self, idx):\n",
+    "        '''\n",
+    "        利用大语言模型生成文本,再使用评分模型对生成文本进行评分\n",
+    "        参数\n",
+    "        ----\n",
+    "        idx :torch.LongTensor,背景文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        reward :torch.FloatTensor,评分,形状为(1, 1)\n",
+    "        '''\n",
     "        # 为了代码简洁,我们设置产生文本的长度\n",
     "        _, ans = self.generate(idx, 20)\n",
+    "        # 对生成的文本进行评分\n",
     "        reward = self.r_model(ans)\n",
     "        return reward"
    ]
@@ -306,8 +384,9 @@
     }
    ],
    "source": [
-    "# 验证generate正确\n",
+    "# 验证generate函数是否正确\n",
     "idx, ans = model_gumbel.generate(ids['input_ids'], 20)\n",
+    "# 验证idx和ans的重叠部分是否相同\n",
     "print(idx[:, ids['input_ids'].shape[1]:] == torch.argmax(ans, dim=-1, keepdim=True).squeeze(-1))\n",
     "print(tokenizer.decode(idx[0], skip_special_tokens=True))"
    ]

+ 77 - 9
ch12_rl/llm_ppo.ipynb

@@ -32,6 +32,7 @@
     "from datasets import load_dataset\n",
     "from transformers import pipeline\n",
     "\n",
+    "\n",
     "torch.manual_seed(12046)"
    ]
   },
@@ -43,6 +44,7 @@
    },
    "outputs": [],
    "source": [
+    "# 一些超参数\n",
     "learning_rate = 1e-4\n",
     "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
     "gamma = 1.0\n",
@@ -50,6 +52,7 @@
     "kl_ctl_value = 0.2\n",
     "cliprange = 0.2\n",
     "vf_coef = 0.1\n",
+    "# 经过mini_batch_size步后,更新旧模型\n",
     "mini_batch_size = 20\n",
     "grad_clip = 1.0"
    ]
@@ -75,6 +78,10 @@
    "outputs": [],
    "source": [
     "def prepare_input(data):\n",
+    "    '''\n",
+    "    生成训练数据\n",
+    "    '''\n",
+    "    # 为了使代码容易理解,将前8个词元作为背景文本\n",
     "    data['input_ids'] = [tokenizer.encode(data['text'])[:8]]\n",
     "    return data\n",
     "\n",
@@ -99,9 +106,20 @@
     "    def __init__(self, model):\n",
     "        super().__init__()\n",
     "        self.actor = model\n",
+    "        # 值函数估计头\n",
     "        self.critic = nn.Linear(model.base_model.embed_dim, 1, bias=False)\n",
     "\n",
     "    def forward(self, x):\n",
+    "        '''\n",
+    "        向前传播,为了使代码易懂,该函数只支持单条文本的计算\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        logits :torch.FloatTensor,logits,形状为(1, T, vs)\n",
+    "        values :torch.FloatTensor,值函数,形状为(1, T)\n",
+    "        '''\n",
     "        _res = self.actor(input_ids=x, output_hidden_states=True)\n",
     "        logits = _res.logits\n",
     "        emb = _res.hidden_states[-1]\n",
@@ -109,6 +127,9 @@
     "        return logits, values\n",
     "\n",
     "    def generate(self, idx, max_new_tokens=20):\n",
+    "        '''\n",
+    "        生成文本\n",
+    "        '''\n",
     "        model = self.actor\n",
     "        return model.generate(idx, max_new_tokens=max_new_tokens,\n",
     "                             pad_token_id=tokenizer.eos_token_id)\n",
@@ -136,6 +157,7 @@
     "        modules_to_save=['critic'])\n",
     "    return PeftModel(model, config, adapter_name='lora_ppo')\n",
     "\n",
+    "# 增加LoRA适配器\n",
     "model = init_peft_model(model)"
    ]
   },
@@ -162,14 +184,22 @@
    ],
    "source": [
     "def get_forward_result(model, input_ids, response):\n",
+    "    '''\n",
+    "    记录向前传播的结果,分别是logits,lnp和值函数\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
     "    model.eval()\n",
+    "    # 记录背景文本的长度\n",
     "    _, lens = input_ids.shape\n",
     "    logits, values = model(response)\n",
+    "    # 计算交叉熵的时候,需要注意logits和标签的对应关系\n",
     "    lnp = -F.cross_entropy(logits[:, :-1, :].transpose(-2, -1), response[:, 1:], reduction='none')\n",
+    "    # 只记录针对生成文本的结果,其中L表示生成文本的长度\n",
     "    res = {\n",
-    "        'logits': logits[:, lens-1:-1, :],\n",
-    "        'lnp': lnp[:, lens-1:],\n",
-    "        'values': values[:, lens:]\n",
+    "        # 最后一个位置的logits没有作用\n",
+    "        'logits': logits[:, lens-1:-1, :],  # (1, L, vs)\n",
+    "        'lnp': lnp[:, lens-1:],             # (1, L)\n",
+    "        'values': values[:, lens:]          # (1, L)\n",
     "    }\n",
     "    model.train()\n",
     "    return res\n",
@@ -178,6 +208,7 @@
     "input_ids = example['input_ids']\n",
     "response = model.generate(input_ids)\n",
     "\n",
+    "# 验证get_forward_result计算结果的形状是准确的\n",
     "example_re = get_forward_result(model, input_ids, response)\n",
     "for k, v in example_re.items():\n",
     "    print(k, v.shape)"
@@ -209,15 +240,30 @@
     "class RewardModel(nn.Module):\n",
     "\n",
     "    def __init__(self, tokenizer):\n",
+    "        '''\n",
+    "        评分模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.model = pipeline(\"sentiment-analysis\", model='lvwerra/distilbert-imdb')\n",
     "        self.tokenizer = tokenizer\n",
     "\n",
     "    def forward(self, x):\n",
+    "        '''\n",
+    "        向前传播,为了使代码易懂,该函数只支持单条文本的计算\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        re :torch.FloatTensor,评分,形状为(1)\n",
+    "        '''\n",
     "        re = []\n",
     "        x = [self.tokenizer.decode(i) for i in x]\n",
+    "        # 此处的x等于背景文本+生成文本,因此得到的scores稍有不妥\n",
+    "        # 更准确的做法是只对生成文本进行评分\n",
     "        scores = self.model(x)\n",
     "        for s in scores:\n",
+    "            # 将POSITIVE的概率视为评分\n",
     "            if s['label'] == 'POSITIVE':\n",
     "                re.append(s['score'])\n",
     "            else:\n",
@@ -252,20 +298,25 @@
    ],
    "source": [
     "def compute_rewards(r_model, response, lnp, ref_lnp):\n",
-    "    # scores: (B), lnp: (B, T), ref_lnp: (B, T)\n",
+    "    '''\n",
+    "    定义游戏奖励\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
+    "    # scores的形状为(1), lnp的形状为(1, L), ref_lnp的形状为(1, L)\n",
     "    # r_model:评分模型,response:模型生成的回答\n",
     "    # lnp:新/旧模型的概率对数,ref_lnp:参考模型的概率对数\n",
     "    scores = r_model(response)\n",
     "    rewards = []\n",
     "    for score, lnprob, ref_lnprob in zip(scores, lnp, ref_lnp):\n",
-    "        kl = lnprob - ref_lnprob\n",
+    "        kl = lnprob - ref_lnprob     # (   L)\n",
     "        # kl_ctl_value是调节KL penalty的系数,大于0\n",
-    "        reward = -kl_ctl_value * kl\n",
+    "        reward = -kl_ctl_value * kl  # (   L)\n",
     "        # 游戏奖励等于模型评分 + KL penalty\n",
-    "        reward[-1] += score\n",
+    "        reward[-1] += score          # (   L)\n",
     "        rewards.append(reward)\n",
-    "    return torch.stack(rewards)\n",
+    "    return torch.stack(rewards)      # (1, L)\n",
     "\n",
+    "# 得到参考模型的结果\n",
     "with torch.no_grad():\n",
     "    with model.disable_adapter():\n",
     "        ref_example_re = get_forward_result(model, input_ids, response)\n",
@@ -289,7 +340,7 @@
     "        self.lambda_ = lambda_\n",
     "\n",
     "    def __call__(self, rewards, values):\n",
-    "        # advantages table\n",
+    "        # 优势函数\n",
     "        advantages = []\n",
     "        last_advantage = 0\n",
     "        vt_next = 0\n",
@@ -329,6 +380,14 @@
    ],
    "source": [
     "def compute_loss(old_lnp, lnp, vpred, advantages):\n",
+    "    '''\n",
+    "    定义模型损失\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
+    "    # old_lnp:旧模型的概率对数,形状为(1, L)\n",
+    "    # lnp:新/旧模型的概率对数,形状为(1, L)\n",
+    "    # vpred:值函数,形状为(1, L)\n",
+    "    # advantages:优势函数,形状为(1, L)\n",
     "    # 值函数损失\n",
     "    vf_loss = -advantages * vpred\n",
     "    # 策略损失\n",
@@ -371,6 +430,7 @@
    "source": [
     "def play_game(model, r_model, gae, data):\n",
     "    model.eval()\n",
+    "    # 分别是背景文本,回复,向前传播结果和优势函数\n",
     "    all_input_ids, all_response, all_res, all_advantages = [], [], [], []\n",
     "    for input_ids in data['input_ids']:\n",
     "        all_input_ids.append(input_ids)\n",
@@ -389,6 +449,7 @@
     "    model.train()\n",
     "    return all_input_ids, all_response, all_res, all_advantages\n",
     "\n",
+    "# 背景文本的长度都一样\n",
     "play_game(model, r_model, gae, tokenized[:2])[0]"
    ]
   },
@@ -416,12 +477,18 @@
    ],
    "source": [
     "def estimate_rewards(r_model, model, all_input_ids):\n",
+    "    '''\n",
+    "    预估模型评分\n",
+    "    '''\n",
     "    re = {}\n",
     "    # 将模型切换至评估模式\n",
     "    model.eval()\n",
     "    for input_ids in all_input_ids:\n",
+    "        # 生成文本\n",
     "        response = model.generate(input_ids)\n",
+    "        # 记录评分\n",
     "        re['score'] = re.get('score', 0) + r_model(response).item()\n",
+    "        # 记录参考模型的评分\n",
     "        with model.disable_adapter():\n",
     "            response = model.generate(input_ids)\n",
     "            re['ref_score'] = re.get('ref_score', 0) + r_model(response).item()\n",
@@ -495,6 +562,7 @@
     "        # 梯度裁剪\n",
     "        clip_grad_norm_(model.parameters(), grad_clip)\n",
     "        optimizer.step()\n",
+    "    # 将最后一个批次数据作为测试集\n",
     "    res = estimate_rewards(r_model, model, tokenized[-mini_batch_size:]['input_ids'])\n",
     "    print(f'step {s:>4}: score {res[\"score\"]:.4f}, ref_score {res[\"ref_score\"]:.4f}')"
    ]

+ 82 - 13
ch12_rl/llm_ppo_correct_dropout.ipynb

@@ -43,6 +43,7 @@
    },
    "outputs": [],
    "source": [
+    "# 一些超参数\n",
     "learning_rate = 5e-5\n",
     "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
     "gamma = 1.0\n",
@@ -50,6 +51,7 @@
     "kl_ctl_value = 0.2\n",
     "cliprange = 0.2\n",
     "vf_coef = 0.1\n",
+    "# 经过mini_batch_size步后,更新旧模型\n",
     "mini_batch_size = 20\n",
     "grad_clip = 1.0"
    ]
@@ -99,9 +101,20 @@
     "    def __init__(self, model):\n",
     "        super().__init__()\n",
     "        self.actor = model\n",
+    "        # 值函数估计头\n",
     "        self.critic = nn.Linear(model.base_model.embed_dim, 1, bias=False)\n",
     "\n",
     "    def forward(self, x):\n",
+    "        '''\n",
+    "        向前传播,为了使代码易懂,该函数只支持单条文本的计算\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        logits :torch.FloatTensor,logits,形状为(1, T, vs)\n",
+    "        values :torch.FloatTensor,值函数,形状为(1, T)\n",
+    "        '''\n",
     "        _res = self.actor(input_ids=x, output_hidden_states=True)\n",
     "        logits = _res.logits\n",
     "        emb = _res.hidden_states[-1]\n",
@@ -109,6 +122,9 @@
     "        return logits, values\n",
     "\n",
     "    def generate(self, idx, max_new_tokens=20):\n",
+    "        '''\n",
+    "        生成文本\n",
+    "        '''\n",
     "        model = self.actor\n",
     "        return model.generate(idx, max_new_tokens=max_new_tokens,\n",
     "                             pad_token_id=tokenizer.eos_token_id)\n",
@@ -137,6 +153,7 @@
     "        modules_to_save=['critic'])\n",
     "    return PeftModel(model, config, adapter_name='lora_ppo')\n",
     "\n",
+    "# 增加LoRA适配器\n",
     "model = init_peft_model(model)"
    ]
   },
@@ -163,13 +180,21 @@
    ],
    "source": [
     "def get_forward_result(model, input_ids, response):\n",
+    "    '''\n",
+    "    记录向前传播的结果,分别是logits,lnp和值函数\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
+    "    # 记录背景文本的长度\n",
     "    _, lens = input_ids.shape\n",
     "    logits, values = model(response)\n",
+    "    # 计算交叉熵的时候,需要注意logits和标签的对应关系\n",
     "    lnp = -F.cross_entropy(logits[:, :-1, :].transpose(-2, -1), response[:, 1:], reduction='none')\n",
+    "    # 只记录针对生成文本的结果,其中L表示生成文本的长度\n",
     "    res = {\n",
-    "        'logits': logits[:, lens-1:-1, :],\n",
-    "        'lnp': lnp[:, lens-1:],\n",
-    "        'values': values[:, lens:]\n",
+    "        # 最后一个位置的logits没有作用\n",
+    "        'logits': logits[:, lens-1:-1, :],  # (1, L, vs)\n",
+    "        'lnp': lnp[:, lens-1:],             # (1, L)\n",
+    "        'values': values[:, lens:]          # (1, L)\n",
     "    }\n",
     "    return res\n",
     "\n",
@@ -177,6 +202,7 @@
     "input_ids = example['input_ids']\n",
     "response = model.generate(input_ids)\n",
     "\n",
+    "# 验证get_forward_result计算结果的形状是准确的\n",
     "example_re = get_forward_result(model, input_ids, response)\n",
     "for k, v in example_re.items():\n",
     "    print(k, v.shape)"
@@ -214,12 +240,18 @@
    ],
    "source": [
     "def turn_on_train_mode(model, target):\n",
+    "    '''\n",
+    "    只将模型中的特定组件设置为训练模式\n",
+    "    '''\n",
     "    for name, module in model.named_modules():\n",
     "        if name.split('.')[-1] in target:\n",
     "            module.train()\n",
     "    return model\n",
     "\n",
     "def _test_turn_on_train_mode():\n",
+    "    '''\n",
+    "    测试turn_on_train_mode是否正确\n",
+    "    '''\n",
     "    test_model = A2CLLM(\n",
     "        AutoModelForCausalLM.from_pretrained('lvwerra/gpt2-imdb')).to(device)\n",
     "    config = LoraConfig(\n",
@@ -231,25 +263,25 @@
     "        bias='none',\n",
     "        init_lora_weights=False)\n",
     "    test_model = PeftModel(test_model, config, adapter_name='lora_ppo')\n",
+    "    # 模型处于训练模式,由于随机失活的原因,每次运算的结果都不相同\n",
     "    test_model.train()\n",
     "    v1 = test_model(response)[1]\n",
     "    v2 = test_model(response)[1]\n",
-    "    # 不相等\n",
     "    print(v1 - v2)\n",
     "\n",
     "    test_model.eval()\n",
+    "    # 只将LoRA换至训练模式,由于LoRA里的随机失活,每次运算的结果也不相同\n",
     "    turn_on_train_mode(test_model, ['c_attn'])\n",
     "    v1 = test_model(response)[1]\n",
     "    v2 = test_model(response)[1]\n",
-    "    # 不相等\n",
     "    print(v1 - v2)\n",
     "\n",
     "    test_model.eval()\n",
     "    turn_on_train_mode(test_model, ['c_attn'])\n",
+    "    # 禁用LoRA之后,运算结果会相同\n",
     "    with test_model.disable_adapter():\n",
     "        v1 = test_model(response)[1]\n",
     "        v2 = test_model(response)[1]\n",
-    "        # 相等\n",
     "        print(v1 - v2)\n",
     "\n",
     "_test_turn_on_train_mode()"
@@ -281,15 +313,30 @@
     "class RewardModel(nn.Module):\n",
     "\n",
     "    def __init__(self, tokenizer):\n",
+    "        '''\n",
+    "        评分模型\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.model = pipeline(\"sentiment-analysis\", model='lvwerra/distilbert-imdb')\n",
     "        self.tokenizer = tokenizer\n",
     "\n",
     "    def forward(self, x):\n",
+    "        '''\n",
+    "        向前传播,为了使代码易懂,该函数只支持单条文本的计算\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,文本,形状为(1, T)\n",
+    "        返回\n",
+    "        ----\n",
+    "        re :torch.FloatTensor,评分,形状为(1)\n",
+    "        '''\n",
     "        re = []\n",
     "        x = [self.tokenizer.decode(i) for i in x]\n",
+    "        # 此处的x等于背景文本+生成文本,因此得到的scores稍有不妥\n",
+    "        # 更准确的做法是只对生成文本进行评分\n",
     "        scores = self.model(x)\n",
     "        for s in scores:\n",
+    "            # 将POSITIVE的概率视为评分\n",
     "            if s['label'] == 'POSITIVE':\n",
     "                re.append(s['score'])\n",
     "            else:\n",
@@ -324,20 +371,25 @@
    ],
    "source": [
     "def compute_rewards(r_model, response, lnp, ref_lnp):\n",
-    "    # scores: (B), lnp: (B, T), ref_lnp: (B, T)\n",
+    "    '''\n",
+    "    定义游戏奖励\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
+    "    # scores的形状为(1), lnp的形状为(1, L), ref_lnp的形状为(1, L)\n",
     "    # r_model:评分模型,response:模型生成的回答\n",
     "    # lnp:新/旧模型的概率对数,ref_lnp:参考模型的概率对数\n",
     "    scores = r_model(response)\n",
     "    rewards = []\n",
     "    for score, lnprob, ref_lnprob in zip(scores, lnp, ref_lnp):\n",
-    "        kl = lnprob - ref_lnprob\n",
+    "        kl = lnprob - ref_lnprob     # (   L)\n",
     "        # kl_ctl_value是调节KL penalty的系数,大于0\n",
-    "        reward = -kl_ctl_value * kl\n",
+    "        reward = -kl_ctl_value * kl  # (   L)\n",
     "        # 游戏奖励等于模型评分 + KL penalty\n",
-    "        reward[-1] += score\n",
+    "        reward[-1] += score          # (   L)\n",
     "        rewards.append(reward)\n",
-    "    return torch.stack(rewards)\n",
+    "    return torch.stack(rewards)      # (1, L)\n",
     "\n",
+    "# 得到参考模型的结果\n",
     "with torch.no_grad():\n",
     "    with model.disable_adapter():\n",
     "        ref_example_re = get_forward_result(model, input_ids, response)\n",
@@ -361,7 +413,7 @@
     "        self.lambda_ = lambda_\n",
     "\n",
     "    def __call__(self, rewards, values):\n",
-    "        # advantages table\n",
+    "        # 优势函数\n",
     "        advantages = []\n",
     "        last_advantage = 0\n",
     "        vt_next = 0\n",
@@ -401,6 +453,14 @@
    ],
    "source": [
     "def compute_loss(old_lnp, lnp, vpred, advantages):\n",
+    "    '''\n",
+    "    定义模型损失\n",
+    "    为了使代码易懂,该函数只支持单条文本的计算\n",
+    "    '''\n",
+    "    # old_lnp:旧模型的概率对数,形状为(1, L)\n",
+    "    # lnp:新/旧模型的概率对数,形状为(1, L)\n",
+    "    # vpred:值函数,形状为(1, L)\n",
+    "    # advantages:优势函数,形状为(1, L)\n",
     "    # 值函数损失\n",
     "    vf_loss = -advantages * vpred\n",
     "    # 策略损失\n",
@@ -443,6 +503,7 @@
    "source": [
     "def play_game(model, r_model, gae, data):\n",
     "    model.eval()\n",
+    "    # 分别是背景文本,回复,向前传播结果和优势函数\n",
     "    all_input_ids, all_response, all_res, all_advantages = [], [], [], []\n",
     "    for input_ids in data['input_ids']:\n",
     "        all_input_ids.append(input_ids)\n",
@@ -458,6 +519,7 @@
     "                ref_res = get_forward_result(model, input_ids, response)\n",
     "            rewards = compute_rewards(r_model, response, res['lnp'], ref_res['lnp'])\n",
     "            all_advantages.append(gae(rewards, res['values']))\n",
+    "    # 只将LoRA适配器切换至训练模式\n",
     "    turn_on_train_mode(model, ['c_attn'])\n",
     "    return all_input_ids, all_response, all_res, all_advantages\n",
     "\n",
@@ -488,18 +550,24 @@
    ],
    "source": [
     "def estimate_rewards(r_model, model, all_input_ids):\n",
+    "    '''\n",
+    "    预估模型评分\n",
+    "    '''\n",
     "    re = {}\n",
     "    # 将模型切换至评估模式\n",
     "    model.eval()\n",
     "    for input_ids in all_input_ids:\n",
+    "        # 生成文本\n",
     "        response = model.generate(input_ids)\n",
+    "        # 记录评分\n",
     "        re['score'] = re.get('score', 0) + r_model(response).item()\n",
+    "        # 记录参考模型的评分\n",
     "        with model.disable_adapter():\n",
     "            response = model.generate(input_ids)\n",
     "            re['ref_score'] = re.get('ref_score', 0) + r_model(response).item()\n",
     "    re['score'] /= len(all_input_ids)\n",
     "    re['ref_score'] /= len(all_input_ids)\n",
-    "    # 将模型切换至训练模式\n",
+    "    # 只将LoRA适配器切换至训练模式\n",
     "    turn_on_train_mode(model, ['c_attn'])\n",
     "    return re\n",
     "\n",
@@ -567,6 +635,7 @@
     "        # 梯度裁剪\n",
     "        clip_grad_norm_(model.parameters(), grad_clip)\n",
     "        optimizer.step()\n",
+    "    # 将最后一个批次数据作为测试集\n",
     "    res = estimate_rewards(r_model, model, tokenized[-mini_batch_size:]['input_ids'])\n",
     "    print(f'step {s:>4}: score {res[\"score\"]:.4f}, ref_score {res[\"ref_score\"]:.4f}')"
    ]

+ 24 - 4
ch12_rl/policy_learning.ipynb

@@ -54,6 +54,9 @@
    ],
    "source": [
     "def get_cum_rewards(r, gamma):\n",
+    "    '''\n",
+    "    计算每一步的游戏得分并返回\n",
+    "    '''\n",
     "    cum_rewards = []\n",
     "    last_cum_reward = 0\n",
     "    for j in reversed(r):\n",
@@ -70,6 +73,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# 一些超参数\n",
     "gamma = 0.9\n",
     "learning_rate = 0.01\n",
     "grad_clip = 1.0"
@@ -84,16 +88,28 @@
     "class PolicyNet(nn.Module):\n",
     "    \n",
     "    def __init__(self):\n",
+    "        '''\n",
+    "        策略学习\n",
+    "        '''\n",
     "        super().__init__()\n",
     "        self.emb = nn.Embedding(2, 4)\n",
     "        self.ln = nn.Linear(4, 2)\n",
     "\n",
     "    def forward(self, x):\n",
-    "        # x: (G)\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :torch.LongTensor,游戏状态,形状为(G),其中G表示游戏步数\n",
+    "        返回\n",
+    "        ----\n",
+    "        out :torch.FloatTensor,logits,形状为(G, 2)\n",
+    "        '''\n",
     "        x = F.relu(self.emb(x))\n",
-    "        x = self.ln(x)\n",
-    "        return x\n",
+    "        out = self.ln(x)\n",
+    "        return out\n",
     "\n",
+    "# 定义游戏状态的数字表示\n",
     "tokenizer = {'w': 0, 'l': 1}"
    ]
   },
@@ -114,8 +130,10 @@
     "        x = torch.tensor([tokenizer[s]])   # (1)\n",
     "        logits = model(x)                  # (1, 2)\n",
     "        probs = F.softmax(logits, dim=-1)  # (1, 2)\n",
+    "        # 利用神经网络得到下一个行动\n",
     "        action = torch.multinomial(probs, 1)\n",
     "        next_s, r = game.step(action)\n",
+    "        # 记录游戏过程,分别是行动、状态和奖励\n",
     "        one_game_action.append(action)\n",
     "        one_game_state.append(s)\n",
     "        one_game_reward.append(r)\n",
@@ -166,7 +184,7 @@
     }
    ],
    "source": [
-    "# Reinforce (Monte Carlo Learning)\n",
+    "# Reinforce\n",
     "model = PolicyNet()\n",
     "optimizer = optim.AdamW(model.parameters(), lr=learning_rate)\n",
     "v = []\n",
@@ -181,9 +199,11 @@
     "    optimizer.zero_grad()\n",
     "    logits = model(states)                                     # (G, 2)\n",
     "    lnP = -F.cross_entropy(logits, actions, reduction='none')  # (G)\n",
+    "    # 定义模型损失\n",
     "    loss = -cum_rewards * lnP                                  # (G)\n",
     "    loss.mean().backward()\n",
     "    optimizer.step()\n",
+    "    # 记录每个状态下,模型预估的每个行动的概率\n",
     "    eval_re = {}\n",
     "    for k in tokenizer:\n",
     "        _re = F.softmax(model(torch.tensor([tokenizer[k]])), dim=-1)  # (1, 2)\n",

+ 2 - 0
ch12_rl/utils.py

@@ -12,6 +12,7 @@ import pandas as pd
 class Lottery:
     
     def __init__(self):
+        # Define the two states of the game
         self.params = {
             'w': (1, 1),
             'l': (-1, 1)
@@ -64,6 +65,7 @@ def plot_action_probs(v):
     fig = plt.figure(figsize=(6, 6), dpi=100)
     v = pd.DataFrame(v)
     for k in v:
+        # Plot the probability of choosing to play the lottery
         v[k].apply(lambda x: x[1]).plot(label=k, legend=True)
     legend = plt.legend(shadow=True, loc="best", fontsize=20)
     return fig

+ 40 - 13
ch12_rl/value_learning.ipynb

@@ -44,7 +44,7 @@
     "    one_game_state = []\n",
     "    one_game_reward = []\n",
     "    while not done:\n",
-    "        # 简单定义策略一直玩\n",
+    "        # 游戏策略是一直抽奖\n",
     "        action = 1\n",
     "        next_s, r = game.step(action)\n",
     "        one_game_state.append(s)\n",
@@ -74,6 +74,9 @@
    ],
    "source": [
     "def compute_cum_rewards(r, gamma):\n",
+    "    '''\n",
+    "    计算游戏得分\n",
+    "    '''\n",
     "    cum_rewards = 0\n",
     "    for j in reversed(r):\n",
     "        cum_rewards = j + cum_rewards * gamma\n",
@@ -88,10 +91,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Monte Carlo Learning & # Temporal Difference Learning params\n",
+    "# MC学习和TD学习的超参数\n",
     "gamma = 0.9\n",
     "alpha = 0.01\n",
-    "# NN params\n",
+    "# 神经网络的超参数\n",
     "learning_rate = 0.01\n",
     "lambda_ = 0.95"
    ]
@@ -115,7 +118,8 @@
     }
    ],
    "source": [
-    "# Monte Carlo Learning\n",
+    "# MC学习\n",
+    "# 定义初始的值函数\n",
     "v = [{'w': 0.0, 'l': 0.0}]\n",
     "\n",
     "for s, r in zip(states, rewards):\n",
@@ -125,9 +129,12 @@
     "    for i in range(len(s)):\n",
     "        values = copy.deepcopy(v[-1])\n",
     "        # 此处可以加上首次出现(Exploring Starts)的筛选\n",
+    "        # 游戏得分\n",
     "        G = compute_cum_rewards(r[i:], gamma)\n",
+    "        # 计算优势函数\n",
     "        advantage = G - values[s[i]]\n",
     "        vt = values[s[i]]\n",
+    "        # 迭代更新\n",
     "        values[s[i]] = vt + alpha * advantage\n",
     "        v.append(values)\n",
     "\n",
@@ -154,7 +161,8 @@
     }
    ],
    "source": [
-    "# Temporal Difference Learning\n",
+    "# TD学习\n",
+    "# 定义初始的值函数\n",
     "v = [{'w': 0.0, 'l': 0.0}]\n",
     "\n",
     "for s, r in zip(states, rewards):\n",
@@ -163,9 +171,12 @@
     "    for i in range(len(s)):\n",
     "        values = copy.deepcopy(v[-1])\n",
     "        vt_next = values[s[i + 1]] if i < len(s) - 1 else 0\n",
+    "        # 预估游戏得分\n",
     "        G = r[i] + gamma * vt_next\n",
+    "        # 计算优势函数\n",
     "        advantage = G - values[s[i]]\n",
     "        vt = values[s[i]]\n",
+    "        # 迭代更新\n",
     "        values[s[i]] = vt + alpha * advantage\n",
     "        v.append(values)\n",
     "\n",
@@ -193,11 +204,20 @@
     "                torch.nn.init.eye_(p)\n",
     "\n",
     "    def forward(self, x):\n",
+    "        '''\n",
+    "        向前传播\n",
+    "        参数\n",
+    "        ----\n",
+    "        x :str,只有两个取值,分别是w和l\n",
+    "        返回\n",
+    "        ----\n",
+    "        out :torch.FloatTensor,值函数\n",
+    "        '''\n",
     "        ref = {'w': 0, 'l': 1}\n",
     "        x = torch.tensor(ref[x])\n",
     "        x = F.relu(self.emb(x))\n",
-    "        x = self.ln(x)\n",
-    "        return x"
+    "        out = self.ln(x)\n",
+    "        return out"
    ]
   },
   {
@@ -219,7 +239,7 @@
     }
    ],
    "source": [
-    "# Monte Carlo Learning\n",
+    "# MC学习与神经网络的结合\n",
     "model = VQN()\n",
     "optimizer = optim.AdamW(model.parameters(), lr=learning_rate)\n",
     "v = []\n",
@@ -233,8 +253,10 @@
     "        # 此处可以加上首次出现(Exploring Starts)的筛选\n",
     "        with torch.no_grad():\n",
     "            G = compute_cum_rewards(r[i:], gamma)\n",
+    "            # 利用神经网络预估优势函数\n",
     "            advantage = G - model(s[i])\n",
     "        vt = model(s[i])\n",
+    "        # 定义模型损失\n",
     "        loss += -advantage * vt\n",
     "        # 上面对loss的定义等价于:loss = 0.5 * (G - vt) ** 2\n",
     "    loss /= len(s)\n",
@@ -265,7 +287,7 @@
     }
    ],
    "source": [
-    "# Temporal Difference Learning\n",
+    "# TD学习与神经网络的结合\n",
     "model = VQN()\n",
     "optimizer = optim.AdamW(model.parameters(), lr=learning_rate)\n",
     "v = []\n",
@@ -277,10 +299,12 @@
     "    optimizer.zero_grad()\n",
     "    for i in range(len(s)):\n",
     "        with torch.no_grad():\n",
+    "            # 利用神经网络预估游戏得分和优势函数\n",
     "            vt_next = model(s[i + 1]) if i < len(s) - 1 else 0\n",
     "            G = r[i] + gamma * vt_next\n",
     "            advantage = G - model(s[i])\n",
     "        vt = model(s[i])\n",
+    "        # 定义模型损失\n",
     "        loss += -advantage * vt\n",
     "        # 上面对loss的定义等价于:loss = 0.5 * (G - vt) ** 2\n",
     "    loss /= len(s)\n",
@@ -305,7 +329,7 @@
     "        self.lambda_ = lambda_\n",
     "\n",
     "    def __call__(self, rewards, values):\n",
-    "        # advantages table\n",
+    "        # 优势函数\n",
     "        advantages = []\n",
     "        last_advantage = 0\n",
     "        vt_next = 0\n",
@@ -335,10 +359,11 @@
     }
    ],
    "source": [
+    "# 示例数据\n",
     "s = states[-1]\n",
     "r = rewards[-1]\n",
     "values = [model(i).item() for i in s]\n",
-    "# 验证gamma=1时,gae等同于mc learning\n",
+    "# 验证gamma=1时,gae等同于MC学习\n",
     "mc_advantage = []\n",
     "for i in range(len(r)):\n",
     "    G = compute_cum_rewards(r[i:], gamma)\n",
@@ -365,7 +390,7 @@
     }
    ],
    "source": [
-    "# 验证gamma=0时,gae等同于td learning\n",
+    "# 验证gamma=0时,gae等同于TD学习\n",
     "vt_next = values[:-1] + [0.0]\n",
     "td_advantage = torch.tensor(r) + gamma * torch.tensor(vt_next) - torch.tensor(values)\n",
     "gae = GAE(gamma, 0)\n",
@@ -392,7 +417,7 @@
     }
    ],
    "source": [
-    "# GAE\n",
+    "# 使用GAE进行值函数学习\n",
     "model = VQN()\n",
     "optimizer = optim.AdamW(model.parameters(), lr=learning_rate)\n",
     "v = []\n",
@@ -404,8 +429,10 @@
     "    optimizer.zero_grad()\n",
     "    with torch.no_grad():\n",
     "        values = [model(_state).item() for _state in s]\n",
+    "        # 预估优势函数\n",
     "        advantages = torch.tensor(gae(r, values))\n",
     "    vpred = torch.concat([model(_state) for _state in s], dim=0)\n",
+    "    # 定义模型损失\n",
     "    loss = torch.mean(-advantages * vpred)\n",
     "    loss.backward()\n",
     "    optimizer.step()\n",