{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "8D5tOVWvG6Ss", "outputId": "917998d3-b968-48d9-b7f1-a0f159ff1cd2" }, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torch.optim as optim\n", "from torch.utils.data import DataLoader\n", "from datasets import load_dataset\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", "\n", "\n", "torch.manual_seed(12046)\n", "# 计算设备为V100 16G\n", "# 如果使用CPU,需要非常长的时间,建议减少模型规模来加快速度(比如n_layer)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "GwjXDvhCG6Su", "outputId": "0f0cff6f-c785-4259-d271-91217c170986" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[[ 1.0185, -1.3091, 1.2908, 0.5276],\n", " [-0.2985, 1.6259, 2.0433, -0.6417],\n", " [ 0.8795, -1.0512, 1.1491, 0.6116],\n", " [ 0.2128, -0.5512, 0.0450, 0.5010]]])\n", "tensor([[[ 1.0185, -inf, -inf, -inf],\n", " [-0.2985, 1.6259, -inf, -inf],\n", " [ 0.8795, -1.0512, 1.1491, -inf],\n", " [ 0.2128, -0.5512, 0.0450, 0.5010]]])\n", "tensor([[[1.0000, 0.0000, 0.0000, 0.0000],\n", " [0.1274, 0.8726, 0.0000, 0.0000],\n", " [0.4074, 0.0591, 0.5335, 0.0000],\n", " [0.2743, 0.1278, 0.2319, 0.3659]]])\n" ] } ], "source": [ "# 展示mask在注意力机制中的作用\n", "T = 4\n", "scores = torch.randn(1, T, T)\n", "print(scores)\n", "# 定义上三角矩阵\n", "tril = torch.tril(torch.ones(T, T))\n", "scores = scores.masked_fill(tril == 0, float('-inf'))\n", "print(scores)\n", "# 将scores转换为自回归学习的权重\n", "print(F.softmax(scores, dim=-1))" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "2o_FK0sUWW-t", "outputId": "af65728c-5920-4548-b8f5-5c9a925029b3" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor(1.0026) tensor(1.0010) tensor(4.0152)\n", "tensor(1.0026) tensor(1.0010) tensor(1.0038)\n", "tensor([[0.0921, 0.1476, 0.1698, 0.4256, 0.0489, 0.0599, 0.0172, 0.0388]])\n", "tensor([[0., 0., 0., 1., 0., 0., 0., 0.]])\n" ] } ], "source": [ "# 展示对齐分数的方差放大效应\n", "B, T, head_size = 32, 100, 16\n", "\n", "k = torch.randn(B, T, head_size) # (B, T, H)\n", "q = torch.randn(B, T, head_size) # (B, T, H)\n", "scores = q @ k.transpose(-2, -1) # (B, T, T)\n", "print(k.std(), q.std(), scores.std())\n", "# 将对齐分数归一化\n", "scores = scores / head_size ** 0.5\n", "print(k.std(), q.std(), scores.std())\n", "\n", "# Softmax函数在处理方差较大的数据时,会发生聚集效应(结果过于集中在一个点上)\n", "# 这是为什么需要将对齐分数归一化\n", "x = torch.randn(1, 8)\n", "print(torch.softmax(x, dim=-1))\n", "print(torch.softmax(1000 * x, dim=-1))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "id": "ujPVSgIgG6Sv" }, "outputs": [], "source": [ "def attention(query, key, value, dropout, mask=None):\n", " '''\n", " 注意力机制\n", " 参数\n", " ----\n", " query :torch.FloatTensor,查询向量,形状为(B, T, C)\n", " key :torch.FloatTensor,键向量,形状为(B, T, C)\n", " value :torch.FloatTensor,数值向量,形状为(B, T, C)\n", " dropout :随机失活\n", " mask :torch.FloatTensor,掩码,形状为(T, T)\n", " 返回\n", " ----\n", " out :torch.FloatTensor,根据注意力机制得到的背景向量,形状为(B, T, C)\n", " w_att :torch.FloatTensor,权重向量,形状为(B, T, T)\n", " '''\n", " # query, key, value都有相同的形状\n", " B, T, C = query.shape\n", " # (B, T, C) @ (B, C, T) --> (B, T, T)\n", " scores = query @ key.transpose(-2, -1) / (C ** 0.5)\n", " if mask is not None:\n", " # 如果没有mask,则表示词元可以使用左右两边的背景,也就是双向注意力\n", " # 如果mask是上三角矩阵,则表示自回归模式的单向注意力\n", " # mask的形状是(T, T)\n", " scores = scores.masked_fill(mask == 0, float('-inf'))\n", " w_att = dropout(F.softmax(scores, dim=-1)) # (B, T, T)\n", " out = w_att @ value # (B, T, C)\n", " return out, w_att" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "id": "SbJ2z5U3G6Sw" }, "outputs": [], "source": [ "class MaskedAttention(nn.Module):\n", "\n", " def __init__(self, emb_size, head_size):\n", " '''\n", " 单头单向注意力\n", " 参数\n", " ----\n", " emb_size :int,特征长度\n", " head_size :int,背景向量长度\n", " '''\n", " super().__init__()\n", " self.key = nn.Linear(emb_size, head_size, bias=False)\n", " self.query = nn.Linear(emb_size, head_size, bias=False)\n", " self.value = nn.Linear(emb_size, head_size, bias=False)\n", " # 这个上三角矩阵不参与模型训练\n", " self.register_buffer(\n", " 'tril', torch.tril(torch.ones(sequence_len, sequence_len)))\n", " self.dropout = nn.Dropout(0.4)\n", "\n", " def forward(self, x):\n", " '''\n", " 向前传播\n", " 参数\n", " ----\n", " x :torch.FloatTensor\n", " 文本的特征向量,形状为(B, T, C),其中B表示批量大小,T表示文本长度,C表示特征长度(emb_size)\n", " 返回\n", " ----\n", " out :torch.FloatTensor\n", " 根据注意力机制得到的背景向量,形状为(B, T, H),其中H表示背景向量长度(head_size)\n", " '''\n", " B, T, C = x.shape\n", " q = self.query(x) # (B, T, H)\n", " k = self.key(x) # (B, T, H)\n", " v = self.value(x) # (B, T, H)\n", " mask = self.tril[:T, :T]\n", " out, _ = attention(q, k, v, self.dropout, mask)\n", " return out # (B, T, H)\n", "\n", "class MaskedMultiHeadAttention(nn.Module):\n", "\n", " def __init__(self, emb_size, head_size):\n", " '''\n", " 多头单向注意力\n", " 参数\n", " ----\n", " emb_size :int,特征长度\n", " head_size :int,背景向量长度\n", " '''\n", " super().__init__()\n", " # 确保特征长度是背景向量长度的倍数\n", " assert(emb_size % head_size == 0)\n", " # 定义单头注意力的个数\n", " n_head = emb_size // head_size\n", " heads = [MaskedAttention(emb_size, head_size) for _ in range(n_head)]\n", " self.heads = nn.ModuleList(heads)\n", " # 线性变换\n", " self.proj = nn.Linear(emb_size, emb_size)\n", " # 随机失活\n", " self.dropout = nn.Dropout(0.4)\n", "\n", " def forward(self, x):\n", " '''\n", " 向前传播\n", " 参数\n", " ----\n", " x :torch.FloatTensor\n", " 文本的特征向量,形状为(B, T, C),其中B表示批量大小,T表示文本长度,C表示特征长度(emb_size)\n", " 返回\n", " ----\n", " out :torch.FloatTensor,根据注意力机制得到的背景向量,形状为(B, T, C)\n", " '''\n", " # 将多个单头注意力的结果做张量拼接\n", " out = torch.cat([h(x) for h in self.heads], dim=-1) # (B, T, C)\n", " out = self.dropout(self.proj(out))\n", " return out\n", "\n", "class FeedForward(nn.Module):\n", "\n", " def __init__(self, emb_size):\n", " '''\n", " 多层感知器\n", " '''\n", " super().__init__()\n", " self.l1 = nn.Linear(emb_size, 4 * emb_size)\n", " self.l2 = nn.Linear(4 * emb_size, emb_size)\n", " self.dropout = nn.Dropout(0.4)\n", "\n", " def forward(self, x):\n", " x = F.gelu(self.l1(x))\n", " out = self.dropout(self.l2(x))\n", " return out\n", "\n", "class Block(nn.Module):\n", "\n", " def __init__(self, emb_size, head_size):\n", " '''\n", " 解码块\n", " 参数\n", " ----\n", " emb_size :int,特征长度\n", " head_size :int,单头注意力中的背景向量长度\n", " '''\n", " super().__init__()\n", " self.mha = MaskedMultiHeadAttention(emb_size, head_size)\n", " self.ff = FeedForward(emb_size)\n", " # 层归一化\n", " self.ln1 = nn.LayerNorm(emb_size)\n", " self.ln2 = nn.LayerNorm(emb_size)\n", "\n", " def forward(self, x):\n", " '''\n", " 向前传播\n", " 参数\n", " ----\n", " x :torch.FloatTensor,文本的特征向量,形状为(B, T, C)\n", " 返回\n", " ----\n", " out :torch.FloatTensor,解码块的输出,形状为(B, T, C)\n", " '''\n", " # 残差连接\n", " x = x + self.mha(self.ln1(x)) # (B, T, C)\n", " out = x + self.ff(self.ln2(x)) # (B, T, C)\n", " return out" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "id": "KlDmjSCMG6Sx" }, "outputs": [], "source": [ "# 一些超参数\n", "emb_size = 128\n", "head_size = 8\n", "n_layer = 12\n", "sequence_len = 64\n", "learning_rate = 1e-3\n", "eval_iters = 20\n", "batch_size=500\n", "# 如果有GPU,该脚本将使用GPU进行计算\n", "device = 'cuda' if torch.cuda.is_available() else 'cpu'" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "id": "fwr_fSVpG6Sx" }, "outputs": [], "source": [ "class CharGPT(nn.Module):\n", "\n", " def __init__(self, vs):\n", " '''\n", " 利用GPT-2进行自然语言的自回归学习\n", " 参数\n", " ----\n", " vs :int,字典大小\n", " '''\n", " super().__init__()\n", " # 文字嵌入层\n", " self.token_embedding = nn.Embedding(vs, emb_size)\n", " # 位置嵌入层\n", " self.position_embedding = nn.Embedding(sequence_len, emb_size)\n", " # 解码块\n", " blocks = [Block(emb_size, head_size) for _ in range(n_layer)]\n", " self.blocks = nn.Sequential(*blocks)\n", " self.ln = nn.LayerNorm(emb_size)\n", " # 语言建模头\n", " self.lm_head = nn.Linear(emb_size, vs)\n", "\n", " def forward(self, x):\n", " '''\n", " 向前传播\n", " 参数\n", " ----\n", " x :torch.LongTensor,当前字母在字典中的位置,形状为(B, T)\n", " 返回\n", " ----\n", " logits :torch.FloatTensor,预测结果的logits,形状为(B, T, vs)\n", " '''\n", " B, T = x.shape\n", " # 定义词元的位置,形状为(T)\n", " pos = torch.arange(0, T, dtype=torch.long, device=x.device)\n", " # 词元语义特征\n", " tok_emb = self.token_embedding(x) # (B, T, C)\n", " # 位置特征\n", " pos_emb = self.position_embedding(pos) # ( T, C)\n", " x = tok_emb + pos_emb # (B, T, C)\n", " x = self.blocks(x) # (B, T, C)\n", " x = self.ln(x) # (B, T, C)\n", " logits = self.lm_head(x) # (B, T, vs)\n", " return logits" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "8LkvZyGjG6Sx", "outputId": "05aa4349-4ba4-4516-ee94-a19a08fe65cf" }, "outputs": [ { "data": { "text/plain": [ "98" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "raw_datasets = load_dataset('code_search_net', 'python')\n", "datasets = raw_datasets['train'].filter(lambda x: 'apache/spark' in x['repository_name'])\n", "\n", "class char_tokenizer:\n", "\n", " def __init__(self, data):\n", " # 数据中出现的所有字符构成字典\n", " chars = sorted(list(set(''.join(data))))\n", " # 预留一个位置给结尾的特殊字符\n", " self.char2ind = {s : i + 1 for i, s in enumerate(chars)}\n", " self.char2ind['<|e|>'] = 0\n", " self.ind2char = {i : s for s, i in self.char2ind.items()}\n", "\n", " def encode(self, text):\n", " return [self.char2ind[c] for c in text]\n", "\n", " def decode(self, enc):\n", " if isinstance(enc, int):\n", " return self.ind2char[enc]\n", " return [self.ind2char[i] for i in enc]\n", "\n", "tok = char_tokenizer(datasets['whole_func_string'])\n", "len(tok.char2ind)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "No_AQspwG6Sx", "outputId": "132ff3b8-1399-4982-c7a5-9400eab93eca" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2408290 parameters\n" ] }, { "data": { "text/plain": [ "CharGPT(\n", " (token_embedding): Embedding(98, 128)\n", " (position_embedding): Embedding(64, 128)\n", " (blocks): Sequential(\n", " (0): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (1): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (2): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (3): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (4): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (5): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (6): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (7): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (8): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (9): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (10): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (11): Block(\n", " (mha): MaskedMultiHeadAttention(\n", " (heads): ModuleList(\n", " (0-15): 16 x MaskedAttention(\n", " (key): Linear(in_features=128, out_features=8, bias=False)\n", " (query): Linear(in_features=128, out_features=8, bias=False)\n", " (value): Linear(in_features=128, out_features=8, bias=False)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " )\n", " (proj): Linear(in_features=128, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ff): FeedForward(\n", " (l1): Linear(in_features=128, out_features=512, bias=True)\n", " (l2): Linear(in_features=512, out_features=128, bias=True)\n", " (dropout): Dropout(p=0.4, inplace=False)\n", " )\n", " (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " )\n", " (ln): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (lm_head): Linear(in_features=128, out_features=98, bias=True)\n", ")" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 展示模型结构\n", "model = CharGPT(len(tok.char2ind)).to(device)\n", "# 统计模型的参数个数\n", "print(f'{sum(p.numel() for p in model.parameters())} parameters')\n", "model" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "id": "VOl4s229G6Sy" }, "outputs": [], "source": [ "@torch.no_grad()\n", "def generate_batch(model, idx, max_new_tokens=300):\n", " '''\n", " 利用模型生成文本(反复使用模型进行预测)\n", " 参数\n", " ----\n", " model :CharGPT,生成文本的模型\n", " idx :torch.LongTensor,当前字母在字典中的位置,形状为(1, T)\n", " max_new_tokens :int,生成文本的最大长度\n", " 返回\n", " ----\n", " out :list[int],生成的文本\n", " '''\n", " # 将模型切换至评估模式\n", " model.eval()\n", " for _ in range(max_new_tokens):\n", " # 限制背景长度,否则会报错\n", " context = idx[:, -sequence_len:]\n", " # 在文本生成时,模型的计算效率很低,因为有很多重复计算\n", " logits = model(context)\n", " # 只使用最后一个预测结果\n", " logits = logits[:, -1, :]\n", " probs = F.softmax(logits, dim=-1)\n", " # 根据模型预测的概率,得到最终的预测结果(下一个字母)\n", " # 这一步运算有一定随机性\n", " ix = torch.multinomial(probs, num_samples=1)\n", " idx = torch.cat((idx, ix), dim=1)\n", " if ix.item() == 0:\n", " break\n", " # 将模型切换至训练模式\n", " model.train()\n", " return idx.tolist()[0]" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "HQizE-2mG6Sz", "outputId": "21033dbb-9a8c-4ea2-9692-fcc000227b4c" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "def* O(h/of(\"YP`soE f|dwöR:1'_v?Q9)Nsx/Q=CKf\\M:iKcaI%+Q3m\n" ] } ], "source": [ "# 使用模型来生成文本\n", "begin_text = torch.tensor(tok.encode('def'), device=device).unsqueeze(0)\n", "print(''.join(tok.decode(generate_batch(model, begin_text))))" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "q0Fw0TCyG6Sz", "outputId": "140dc25b-792d-45fa-a8c3-b975461237a7" }, "outputs": [ { "data": { "text/plain": [ "(torch.Size([605913, 64]), torch.Size([605913, 64]))" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "def process(data, sequence_len=sequence_len):\n", " '''\n", " 根据文本生成训练数据\n", " '''\n", " # text是字符串列表\n", " text = data['whole_func_string']\n", " inputs, labels = [], []\n", " for i in text:\n", " enc = tok.encode(i)\n", " # 0对应着文本结束\n", " enc += [0]\n", " # 将文本转换为多个训练数据\n", " for i in range(len(enc) - sequence_len):\n", " inputs.append(enc[i: i + sequence_len])\n", " # 预测标签是下一个字母,因此只需要挪动一个位置即可\n", " labels.append(enc[i + 1: i + 1 + sequence_len])\n", " return {'inputs': inputs, 'labels': labels}\n", "\n", "# 将数据分为训练集和测试集\n", "tokenized = datasets.train_test_split(test_size=0.1, seed=1024, shuffle=True)\n", "# 将文本转换为训练数据,里面包含inputs和labels\n", "tokenized = tokenized.map(process, batched=True, remove_columns=datasets.column_names)\n", "tokenized.set_format(type='torch', device=device)\n", "\n", "tokenized['train']['inputs'].shape, tokenized['train']['labels'].shape" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "-x4i2q1iG6S0", "outputId": "23ec73d3-566d-4bfa-98c0-d3c05be82e70" }, "outputs": [ { "data": { "text/plain": [ "{'inputs': tensor([[ 2, 2, 2, ..., 2, 2, 4],\n", " [81, 80, 88, ..., 2, 2, 10],\n", " [ 4, 37, 84, ..., 2, 2, 2],\n", " ...,\n", " [75, 85, 2, ..., 70, 71, 84],\n", " [ 2, 2, 2, ..., 67, 78, 53],\n", " [87, 84, 67, ..., 89, 2, 38]], device='cuda:0'),\n", " 'labels': tensor([[ 2, 2, 32, ..., 2, 4, 4],\n", " [80, 88, 71, ..., 2, 10, 70],\n", " [37, 84, 71, ..., 2, 2, 2],\n", " ...,\n", " [85, 2, 72, ..., 71, 84, 75],\n", " [ 2, 2, 2, ..., 78, 53, 81],\n", " [84, 67, 86, ..., 2, 38, 53]], device='cuda:0')}" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 构建数据读取器\n", "train_loader = DataLoader(tokenized['train'], batch_size=batch_size, shuffle=True)\n", "test_loader = DataLoader(tokenized['test'], batch_size=batch_size, shuffle=True)\n", "# 获取一个批量的数据\n", "next(iter(test_loader))" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "QD37LTDbG6S0", "outputId": "b51ce4ce-7f4a-41dc-c6a2-f518afaa9ce1" }, "outputs": [ { "data": { "text/plain": [ "{'train': 4.730088233947754, 'test': 4.726046085357666}" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "def estimate_loss(model):\n", " re = {}\n", " # 将模型切换至评估模式\n", " model.eval()\n", " re['train'] = _loss(model, train_loader)\n", " re['test'] = _loss(model, test_loader)\n", " # 将模型切换至训练模式\n", " model.train()\n", " return re\n", "\n", "@torch.no_grad()\n", "def _loss(model, data_loader):\n", " '''\n", " 计算模型在不同数据集下面的评估指标\n", " '''\n", " loss = []\n", " data_iter= iter(data_loader)\n", " # 随机使用多个批量数据来预估模型效果\n", " for k in range(eval_iters):\n", " data = next(data_iter, None)\n", " if data is None:\n", " data_iter = iter(data_loader)\n", " data = next(data_iter, None)\n", " inputs, labels = data['inputs'], data['labels']\n", " logits = model(inputs)\n", " # 根据cross_entropy的定义,需要对logits进行转置运算\n", " # 具体细节请参考cross_entropy的官方文档\n", " logits = logits.transpose(-2, -1)\n", " loss.append(F.cross_entropy(logits, labels).item())\n", " return torch.tensor(loss).mean().item()\n", "\n", "estimate_loss(model)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "id": "TgKhC5TmG6S0" }, "outputs": [], "source": [ "def train_gpt(model, optimizer, data_loader, epochs=10):\n", " lossi = []\n", " for epoch in range(epochs):\n", " for i, data in enumerate(data_loader, 0):\n", " inputs, labels = data['inputs'], data['labels']\n", " optimizer.zero_grad()\n", " logits = model(inputs)\n", " # 根据cross_entropy的定义,需要对logits进行转置运算\n", " # 具体细节请参考cross_entropy的官方文档\n", " logits = logits.transpose(-2, -1)\n", " loss = F.cross_entropy(logits, labels)\n", " lossi.append(loss.item())\n", " loss.backward()\n", " optimizer.step()\n", " # 评估模型,并输出结果\n", " stats = estimate_loss(model)\n", " train_loss = f'train loss {stats['train']:.4f}'\n", " test_loss = f'test loss {stats['test']:.4f}'\n", " print(f'epoch {epoch:>2}: {train_loss}, {test_loss}')\n", " return lossi" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "MCPIFH2dG6S1", "outputId": "68de3a92-b717-4147-ea07-bf456bae2190" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "epoch 0: train loss 0.9037, test loss 1.1066\n", "epoch 1: train loss 0.7246, test loss 1.0086\n", "epoch 2: train loss 0.6448, test loss 0.9719\n", "epoch 3: train loss 0.5838, test loss 0.9607\n", "epoch 4: train loss 0.5468, test loss 0.9672\n", "epoch 5: train loss 0.5156, test loss 0.9663\n", "epoch 6: train loss 0.4891, test loss 0.9596\n", "epoch 7: train loss 0.4687, test loss 0.9652\n", "epoch 8: train loss 0.4517, test loss 0.9709\n", "epoch 9: train loss 0.4347, test loss 0.9761\n" ] } ], "source": [ "l = train_gpt(model, optim.AdamW(model.parameters(), lr=learning_rate), train_loader)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 448 }, "id": "pgRJxHwOG6S1", "outputId": "8787891f-92aa-4eca-a09b-83b8e5fdd3c9" }, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA5R0lEQVR4nO3deXxU9b3/8fcsyUxCMpONZBKSsAiC7AgiAa9bI4hcK22vtdZbcO2lF+6V2kXRavvz/my819veLtfrclvlZy1irYItUhRBoJbIHiSgLLIkhEzCksxk3+b8/ggZCJCQQDInybyej8c8ZM4y8znfBzLvx/d8v99jMQzDEAAAgEmsZhcAAADCG2EEAACYijACAABMRRgBAACmIowAAABTEUYAAICpCCMAAMBUhBEAAGAqu9kFdEQgENCxY8cUGxsri8VidjkAAKADDMNQRUWF0tLSZLW23f/RK8LIsWPHlJGRYXYZAADgEhQWFio9Pb3N/b0ijMTGxkpqvhiXy2VyNQAAoCP8fr8yMjKCv+Nt6RVhpOXWjMvlIowAANDLXGyIBQNYAQCAqQgjAADAVIQRAABgKsIIAAAwFWEEAACYijACAABMRRgBAACmIowAAABTEUYAAICpCCMAAMBUhBEAAGAqwggAADBVr3hQXnf5zV8P6mhZjb4xOUMjPDyADwAAM4R1z8h7u4q1eONhFZysNrsUAADCVliHEevpRxoHDJMLAQAgjIV1GLGc/q9hkEYAADBLWIeRlp4RoggAAOYJ6zDS0jUSoGcEAADThHUYsZ4OI2QRAADME9ZhxKKWAaykEQAAzBLWYcQa1lcPAEDPENY/x2em9tIzAgCAWcI6jLQIBMyuAACA8BXWYYSpvQAAmC+sw4iFqb0AAJgurMNIS88IXSMAAJgnzMNI83/pGQEAwDxhHUZalmAligAAYJ6wDiP0jAAAYL6wDiNnBrCaWwcAAOEsrMPImQGspBEAAMxCGBE9IwAAmCmsw4iCHSOkEQAAzBLWYYSeEQAAzBfWYeR0xwhTewEAMFFYhxErt2kAADBdWIcRS/A2DWEEAACzhHkYaf4vWQQAAPOEdRhhACsAAOYL6zByZgAraQQAALOEdRhp6RnhNg0AAOYJ6zBiYTYNAACmC/MwwpgRAADMFtZhxBp8ai9pBAAAs4R1GGFqLwAA5gvrMHJmACtpBAAAs4R1GOHZNAAAmC+8wwjLwQMAYLqwDiOsMwIAgPnCOoxYgrNpzK0DAIBwFtZhxMqiZwAAmC6sw0jLmBGiCAAA5gnzMNL83wD3aQAAME2nwsgLL7ygsWPHyuVyyeVyKSsrS3/5y1/aPeett97SiBEj5HQ6NWbMGK1cufKyCu5KFtEzAgCA2ToVRtLT0/Xss89q27Zt2rp1q26++Wbdcccd2r179wWP37hxo+6++2498MAD2rFjh2bPnq3Zs2crPz+/S4q/XCwHDwCA+SzGZY7eTEhI0HPPPacHHnjgvH133XWXqqqqtGLFiuC2KVOmaPz48XrxxRc7/B1+v19ut1s+n08ul+tyym3lP9/fq//+6IDunTpIP/nyqC77XAAA0PHf70seM9LU1KSlS5eqqqpKWVlZFzwmNzdX2dnZrbbNmDFDubm57X52XV2d/H5/q1d3sDCbBgAA03U6jOzatUsxMTFyOByaN2+eli1bppEjR17wWK/Xq5SUlFbbUlJS5PV62/2OnJwcud3u4CsjI6OzZXbImRVYu+XjAQBAB3Q6jAwfPlx5eXnatGmTvvOd72ju3Lnas2dPlxa1aNEi+Xy+4KuwsLBLP7/FmWfTkEYAADCLvbMnREZGaujQoZKkiRMnasuWLfrlL3+pl1566bxjPR6PSkpKWm0rKSmRx+Np9zscDoccDkdnS+s0Kz0jAACY7rLXGQkEAqqrq7vgvqysLK1Zs6bVttWrV7c5xiTUzqzAam4dAACEs071jCxatEgzZ85UZmamKioqtGTJEq1bt07vv/++JGnOnDkaMGCAcnJyJEkPP/ywbrjhBv3sZz/TrFmztHTpUm3dulUvv/xy11/JJWAAKwAA5utUGCktLdWcOXNUXFwst9utsWPH6v3339ctt9wiSSooKJDVeqazZerUqVqyZIl+9KMf6fHHH9ewYcO0fPlyjR49umuv4hJZeGovAACm61QY+e1vf9vu/nXr1p237c4779Sdd97ZqaJCxcKiZwAAmC6sn03DAFYAAMwX5mGk+b9M7QUAwDxhHUaCD8ojiwAAYJrwDiPMpgEAwHRhHkYYMwIAgNnCOoycGTMCAADMEtZhpOXZNEztBQDAPGEdRqzWlgGshBEAAMwS1mGEFVgBADBfeIeR0/9tYgQrAACmCeswYre2zKYhjAAAYJbwDiO25stvpGcEAADThHcYOd0z0thEGAEAwCzhHUZsp8NIIGByJQAAhK/wDiOne0YYwAoAgHnCOozYrM2X38BtGgAATBPWYaTlNg09IwAAmCe8w8jp2zQNTYwZAQDALGEdRmyMGQEAwHRhHUYiTq8zQhgBAMA8YR1GWnpGGpjaCwCAacI6jASn9jKbBgAA04R5GGE5eAAAzBbeYSS4AithBAAAs4R3GAk+m4YxIwAAmCXMwwizaQAAMFtYhxGbrWU2DWEEAACzhHUYiWDRMwAATBfWYeTsFVgNg0ACAIAZwjqMtIwZkZhRAwCAWcI7jJweMyJxqwYAALOEdRhpuU0j8eReAADMEtZhJNJ25vLrGwkjAACYIazDiNVqCQaSOsIIAACmCOswIkmR9uYmoGcEAABzhH0YcdjpGQEAwExhH0boGQEAwFxhH0bO9Iw0mVwJAADhKezDCD0jAACYizDS0jPCOiMAAJgi7MOIw26TJNU1EEYAADBD2IeRlnVG6ukZAQDAFGEfRhwRp2/TNDCAFQAAM4R9GKFnBAAAc4V9GHFEMGYEAAAzhX0YoWcEAABzhX0YOTNmhDACAIAZwj6MnOkZYQArAABmCPswElwOnp4RAABMQRixM2YEAAAzdSqM5OTk6JprrlFsbKySk5M1e/Zs7d27t91zFi9eLIvF0urldDovq+iuxLNpAAAwV6fCyPr16zV//nx98sknWr16tRoaGjR9+nRVVVW1e57L5VJxcXHwdeTIkcsquisFl4MnjAAAYAp7Zw5etWpVq/eLFy9WcnKytm3bpuuvv77N8ywWizwez6VV2M3oGQEAwFyXNWbE5/NJkhISEto9rrKyUgMHDlRGRobuuOMO7d69u93j6+rq5Pf7W726S3AAayOzaQAAMMMlh5FAIKCFCxdq2rRpGj16dJvHDR8+XK+88oreffddvf766woEApo6daqOHj3a5jk5OTlyu93BV0ZGxqWWeVGRwTBCzwgAAGa45DAyf/585efna+nSpe0el5WVpTlz5mj8+PG64YYb9M4776h///566aWX2jxn0aJF8vl8wVdhYeGllnlRjBkBAMBcnRoz0mLBggVasWKFNmzYoPT09E6dGxERoQkTJujAgQNtHuNwOORwOC6ltE6jZwQAAHN1qmfEMAwtWLBAy5Yt09q1azV48OBOf2FTU5N27dql1NTUTp/bHaKCD8pjzAgAAGboVM/I/PnztWTJEr377ruKjY2V1+uVJLndbkVFRUmS5syZowEDBignJ0eS9PTTT2vKlCkaOnSoysvL9dxzz+nIkSN68MEHu/hSLk20ozmMVNY1mlwJAADhqVNh5IUXXpAk3Xjjja22v/rqq7r33nslSQUFBbJaz3S4lJWV6aGHHpLX61V8fLwmTpyojRs3auTIkZdXeReJcTQ3QXU9PSMAAJjBYhiGYXYRF+P3++V2u+Xz+eRyubr0s4+WVeu6f/9IDrtVe//vzC79bAAAwllHf7/D/tk0/SKbe0bqGgNq5Pk0AACEHGHEceZOVRW3agAACLmwDyORdqsibBZJUnU9g1gBAAi1sA8j0pnekSpm1AAAEHKEEUkuZ4QkyVdDGAEAINQII5LiolvCSL3JlQAAEH4II5LcUc1hpLy6weRKAAAIP4QRSXHRkZIIIwAAmIEwIimupWekhjACAECoEUZ01piRasaMAAAQaoQRnTVmhJ4RAABCjjAixowAAGAmwogYMwIAgJkII2LMCAAAZiKMiDEjAACYiTAiyR1cgbVBgYBhcjUAAIQXwojO9IwYhlRRy/NpAAAIJcKIJIfdpuhImySpnOfTAAAQUoSR0+J4Pg0AAKYgjJyWENO81kjuwZMmVwIAQHghjJz2pREpkqRPCCMAAIQUYeS0qwfGS5KKy2tNrgQAgPBCGDltQJxTknTMV2NyJQAAhBfCyGmp7ihJzVN7K2oZxAoAQKgQRk7r57DL5bRLkop93KoBACBUCCNnSYtr7h05Vs6tGgAAQoUwcpaWMELPCAAAoUMYOUuKq3kQa4mfMAIAQKgQRs6S4nJIIowAABBKhJGzeII9I3UmVwIAQPggjJyF2zQAAIQeYeQsydymAQAg5AgjZ2npGTlRWa+GpoDJ1QAAEB4II2dJiI5UhM0iSTpewbgRAABCgTByFqvVouTY5t4RL7dqAAAICcLIOVrGjZQSRgAACAnCyDlSYpneCwBAKBFGzsHCZwAAhBZh5BwJ/ZrDyKmqepMrAQAgPBBGzpEQEymJMAIAQKgQRs6REN0cRsqqCSMAAIQCYeQcCf2aw8hJekYAAAgJwsg5Ek/fpikjjAAAEBKEkXPEn75NU17ToKaAYXI1AAD0fYSRc8RHR0iSDEMqZ9wIAADdjjByDrvNKndUcyBhECsAAN2PMHIBiS2DWCsJIwAAdDfCyAW0DGI9QRgBAKDbEUYuICmmeRXWE5U8nwYAgO5GGLmAljByvIIwAgBAd+tUGMnJydE111yj2NhYJScna/bs2dq7d+9Fz3vrrbc0YsQIOZ1OjRkzRitXrrzkgkOhfyw9IwAAhEqnwsj69es1f/58ffLJJ1q9erUaGho0ffp0VVVVtXnOxo0bdffdd+uBBx7Qjh07NHv2bM2ePVv5+fmXXXx3ie/HkvAAAISKxTCMS17Z6/jx40pOTtb69et1/fXXX/CYu+66S1VVVVqxYkVw25QpUzR+/Hi9+OKLHfoev98vt9stn88nl8t1qeV22J92HtO/vrFDU4YkaOm3s7r9+wAA6Is6+vt9WWNGfD6fJCkhIaHNY3Jzc5Wdnd1q24wZM5Sbm9vmOXV1dfL7/a1eodSyzoivpjGk3wsAQDi65DASCAS0cOFCTZs2TaNHj27zOK/Xq5SUlFbbUlJS5PV62zwnJydHbrc7+MrIyLjUMi9JSxjx1zSE9HsBAAhHlxxG5s+fr/z8fC1durQr65EkLVq0SD6fL/gqLCzs8u9oz5meEcIIAADdzX4pJy1YsEArVqzQhg0blJ6e3u6xHo9HJSUlrbaVlJTI4/G0eY7D4ZDD4biU0rpESxiprGtUY1NAdhszoAEA6C6d+pU1DEMLFizQsmXLtHbtWg0ePPii52RlZWnNmjWttq1evVpZWT13YKjLeSaj+WsZNwIAQHfqVBiZP3++Xn/9dS1ZskSxsbHyer3yer2qqakJHjNnzhwtWrQo+P7hhx/WqlWr9LOf/Uyff/65fvKTn2jr1q1asGBB111FF7PbrIpxNAcSbtUAANC9OhVGXnjhBfl8Pt14441KTU0Nvt58883gMQUFBSouLg6+nzp1qpYsWaKXX35Z48aN0x//+EctX7683UGvPQHjRgAACI1OjRnpyJIk69atO2/bnXfeqTvvvLMzX2U6V1SEisprCCMAAHQzRma2wR3FbRoAAEKBMNKGhNNLwp/gYXkAAHQrwkgbPK4oSVKJv9bkSgAA6NsII21Ii3NKkop9hBEAALoTYaQNaXHNPSN7iv0dGrgLAAAuDWGkDdOGJslutehAaaWOltVc/AQAAHBJCCNtcEdFyONuvlVzvJJBrAAAdBfCSDsSY5qfj3Oyst7kSgAA6LsII+1IOj299yQ9IwAAdBvCSDsSY06HkSp6RgAA6C6EkXZwmwYAgO5HGGlHYsttmipu0wAA0F0II+0I3qahZwQAgG5DGGlHYr/m2zQnGMAKAEC3IYy0o39scxg5zsPyAADoNoSRdqSeXvTsZFW9ahuaTK4GAIC+iTDSDndUhKIibJIkLw/MAwCgWxBG2mGxWJR6+um9x3w8nwYAgO5AGLmINHfz03uLy+kZAQCgOxBGLqJl3EgxPSMAAHQLwshFpMY194wcY8wIAADdgjByEWktPSPl9IwAANAdCCMX0dIzUkzPCAAA3YIwchEtPSPH6BkBAKBbEEYuoqVnxF/bqKq6RpOrAQCg7yGMXESMw65Yp10SM2oAAOgOhJEOaFlr5BhrjQAA0OUIIx3QsgorPSMAAHQ9wkgHpNIzAgBAtyGMdEAaq7ACANBtCCMd0DKjpvAUYQQAgK5GGOmAEZ5YSdLuYz4ZhmFyNQAA9C2EkQ4Y7omVM8Iqf22jtheUmV0OAAB9CmGkAyJsVt06yiNJWvNZqcnVAADQtxBGOmh8RpwkaV9JpbmFAADQxxBGOujKlOZxIwdKK0yuBACAvoUw0kHp8dGSmp/eyyBWAAC6DmGkg5JdDklSXWNA5dUNJlcDAEDfQRjpIGeETYn9IiVJx1j8DACALkMY6YRhKTGSpO0F5eYWAgBAH0IY6YSsIUmSpB2sNQIAQJchjHTCkP79JEmFp6pNrgQAgL6DMNIJmQnNM2qOnCSMAADQVQgjnTC4fz9ZLFJpRZ1KK2rNLgcAgD6BMNIJLmeERnhckqQ/5R0zuRoAAPoGwkgnfe3qAZKkD/aUmFwJAAB9A2GkkyZkxkmSispYawQAgK5AGOmktLgoSZLXX6umAMvCAwBwuQgjnZQc65TdalFTwFCJn0GsAABcLsJIJ9msFnncTknSsXJu1QAAcLkII5dgwOlbNUWEEQAALlunw8iGDRt0++23Ky0tTRaLRcuXL2/3+HXr1slisZz38nq9l1qz6dLjmxc/++J4lcmVAADQ+3U6jFRVVWncuHF6/vnnO3Xe3r17VVxcHHwlJyd39qt7jJYZNRsPnDC3EAAA+gB7Z0+YOXOmZs6c2ekvSk5OVlxcXKfP64luHN5fNqtFW4+U6Yvjlbqif4zZJQEA0GuFbMzI+PHjlZqaqltuuUV/+9vf2j22rq5Ofr+/1asnSY+P1viMOEnSn3eyEisAAJej28NIamqqXnzxRb399tt6++23lZGRoRtvvFHbt29v85ycnBy53e7gKyMjo7vL7LRhyc29Ib/4cL8CrDcCAMAlsxiGccm/pBaLRcuWLdPs2bM7dd4NN9ygzMxM/e53v7vg/rq6OtXV1QXf+/1+ZWRkyOfzyeVyXWq5XWrjgRP65m82SZLWfO8GbtUAAHAOv98vt9t90d9vU6b2Tp48WQcOHGhzv8PhkMvlavXqaaYOTdLEgfGSpF1HfSZXAwBA72VKGMnLy1NqaqoZX92lhntiJUkHSitNrgQAgN6r07NpKisrW/VqHDp0SHl5eUpISFBmZqYWLVqkoqIivfbaa5KkX/ziFxo8eLBGjRql2tpa/eY3v9HatWv1wQcfdN1VmGTo6Vsz+0srTK4EAIDeq9NhZOvWrbrpppuC7x955BFJ0ty5c7V48WIVFxeroKAguL++vl7f+973VFRUpOjoaI0dO1Yffvhhq8/orYalNIcRekYAALh0lzWANVQ6OgAm1Ly+Wk3JWSOb1aI9T8+Qw24zuyQAAHqMHj2Ata9IcTmUFONQU8DQm1sKzS4HAIBeiTByGSwWi6YNTZQkLd542NxiAADopQgjl+n704dLkg4er1Kpv9bkagAA6H0II5cpIyFao9Ka74N9cuiUydUAAND7EEa6wJQhzbdqcr/gKb4AAHQWYaQLXDcsSZK0Yd8J9YLJSQAA9CiEkS4wZXCiIu1WFZXXaMN+ekcAAOgMwkgXiIq06fph/SVJL2/4wuRqAADoXQgjXeRfbh4qSfrk4CmVMKsGAIAOI4x0kdED3BqaHKOmgKGX1h80uxwAAHoNwkgXsVktevTWEZKk13IPs+YIAAAdRBjpQtlXJWtQYrQaA4Z2HvWZXQ4AAL0CYaQLWSwWXZ0ZL0l679NjJlcDAEDvQBjpYrePS5MkrdzlVW1Dk8nVAADQ8xFGutiNw/urf6xD9U0BbTnM8vAAAFwMYaSLWSwW3TIyRZL0pzxu1QAAcDGEkW5w8/BkSdKuIgaxAgBwMYSRbjDy9FN895dW6lRVvcnVAADQsxFGukFaXJTGDHCrKWDo6n9brWJfjdklAQDQYxFGusn91w0K/vmZ9z4zrxAAAHo4wkg3uWPcAE0elCBJem9XsQIBw+SKAADomQgj3cRqteiNb09RVIRNhiH9au1+s0sCAKBHIox0I5vVIrvNIkn6xYf7VXCy2uSKAADoeQgj3exfbh4a/PM/vLhRhsHtGgAAzkYY6Wb3Txus6EibJKm0ok7bjpSZXBEAAD0LYaSb2W1WvfiPE4Pv/+l32+gdAQDgLISRELj+yv66b9ogSdLJqnr9as0BcwsCAKAHIYyEyJOzRgb//F8f7lN9Y8DEagAA6DkIIyFitVr01rys4Pt7X91sYjUAAPQchJEQumZQgjwupyRp4xcn9VruYXMLAgCgByCMhNjrD04O/vmpd3frpytZKh4AEN4IIyE2NDlWEwfGB9+/vOGg6hqbTKwIAABzEUZM8NK3JirWaQ++P8LKrACAMEYYMUFSjEO7fjJDowe4JEnT/2uDDp+oMrkqAADMQRgx0Zwpg4J//vpLuWriyb4AgDBEGDHR16/JCPaOlFbU6YrHV2r3MZ/JVQEAEFqEEZP9cd5UDU2OCb6f9/o21TYwoBUAED4IIyZzRti0fP40xTiaB7QWnqrRd17fZnJVAACEDmGkB4hx2JX/f2bosZkjJEkf7T2uQY+9p6NlzLIBAPR9hJEeZN4NVyj7quTg++v+/SOe8AsA6PMIIz3M/JuGtnr/bt4xkyoBACA0CCM9zITMeOV8dUzw/cI38/TUu/kKMO0XANBHEUZ6oLsnZ+rb1w8Jvn8t94g+2FNiYkUAAHQfwkgP9eitIzQna2Dw/Tvbj7IoGgCgTyKM9FA2q0U/uX2UXrl3kiwW6YM9JRr51Cq9s/2oNh08aXZ5AAB0GcJID2a1WnTziBTdO3WQJKmuMaBH/rBTd738id7NKzK3OAAAughhpBd4ctZIjRngbrXt4aV5mvc7FkcDAPR+hJFewGq16K15Wa3GkEjSqt1e5RfxLBsAQO9GGOklnBE2PX3HaH36k+mttv/9rz/WtiNlJlUFAMDlI4z0Mi5nhD57+tZW21buKjapGgAALl+nw8iGDRt0++23Ky0tTRaLRcuXL7/oOevWrdPVV18th8OhoUOHavHixZdQKlpERdr01rys4PvffnxI33l9mz76vFT/tXqfqusbTawOAIDO6XQYqaqq0rhx4/T888936PhDhw5p1qxZuummm5SXl6eFCxfqwQcf1Pvvv9/pYnHGNYMSdPCnt+nWUR5J0l/yvbpv8Rb9cs1+jXzqfWX/fL2KymtMrhIAgIuzGJfxJDaLxaJly5Zp9uzZbR7z6KOP6r333lN+fn5w2ze+8Q2Vl5dr1apVHfoev98vt9stn88nl8t1qeX2SYZh6P3dJZr3+oVn1nx/+pVacPOwEFcFAEDHf7+7fcxIbm6usrOzW22bMWOGcnNz2zynrq5Ofr+/1QsXZrFYdOtoj96al6V5N1xx3v7//GCfHl+2S4Wnqk2oDgCAi+v2MOL1epWSktJqW0pKivx+v2pqLnwbIScnR263O/jKyMjo7jJ7vWsGJeixmSP0wXev119/eJMevG5wcN+STQW688Vc5RWWy+urNbFKAADO1yNn0yxatEg+ny/4KiwsNLukXuPKlFhlJETr8duu0v/58qjgdq+/VrOf/5um5KzRqnyvLuPuHAAAXarbw4jH41FJSesnzpaUlMjlcikqKuqC5zgcDrlcrlYvdI7VatHcqYP0xU9v0/enX9lq37zXt2nwopX6885jJlUHAMAZ3R5GsrKytGbNmlbbVq9eraysrDbOQFeyWS164LohGpgYfd6+f3ljh37x4T6t21uqvd4KE6oDAECyd/aEyspKHThwIPj+0KFDysvLU0JCgjIzM7Vo0SIVFRXptddekyTNmzdP//3f/60f/vCHuv/++7V27Vr94Q9/0Hvvvdd1V4F2RUXa9Od/uU6+6gbNfXWzDh6vCu77xYf7g39etfDvNMLjUuGpakVH2pQY4zCjXABAmOn01N5169bppptuOm/73LlztXjxYt177706fPiw1q1b1+qc7373u9qzZ4/S09P15JNP6t577+3wdzK1t+vt9Vbo27/bqiMnW8+yGeGJ1eene0l2/ni63FERZpQHAOgDOvr7fVnrjIQKYaR71DU26Xe5R/TB7hJtPnzqvP1x0RH66w9vUrGvVu/mFWn+TUMVHdnpzjQAQJgijKBT/t/Gw3p6xR41Bdr+6zD/pis0c3SqhibHyBlhC2F1AIDeiDCCTqttaJJhSG9vP6ofLc9v99hl/zxVEzLjJTWvAtsUMGS39ciZ4gAAk3T095s+dwS19Hbcc22mPvf69fonBW0e+5X/2SiPy6nbxqTqZFWdNuw7rlULr1eKyxmqcgEAfQQ9I+iQsqp6/XLNfi3eeLjNY4b076el356i5FgCCQCA2zToBoGAoT9sLdTijYeV7HJqw77jFzzuiduu0uCkfnp90xEdLavRGw9NUf9YpgkDQLghjCAkBj3WsfVifjTrKn3z2kxm4wBAGCGMICR81Q266+Vcfe6t0Nh0t6YMSdTLGw62eXysw667r83U96cPV6TdqqaAIaul+enDAIC+hTCCkKpvDEiSIu1WVdQ26O1tR/WTP+9p95xIu1X1jQHdO3WQvnRVsq4ZlMCUYQDoQwgj6DF+uvKzdntLznb35EztKCjTj2aNlCvKrjED3PSaAEAvRRhBjxEIGDpRVafkWKd2Fpbrza2FSoiO1H9/dODiJ0t6+VsTNT4zjlk6ANDLEEbQ4/mqG/RfH+7TriKf9pdUyF/b2O7xt4xMUVJMpL4+KUPj0uNktdJjAgA9GWEEvYphGPreWzv1zvaiDh3vsFv1+G1Xqay6Xn/eeUxfvTpd35ycqfh+kd1cKQCgowgj6JW2F5SpX6Rdwz2x2njghIrKa/TyhoPaX1rZofMnDYzXM18ZoxOVdXpi2S7NGO3RD6YPl9VioScFAEKMMII+ZUdBmTISorUq33vR5+a05T++Nlb9Yx1avPGw7rk2U9NHebq4SgDA2Qgj6LPKq+u1cpdXAxOjtWRzgVJinTp4olLr9l54Rdi2PPvVMRo9wK0BcVHaXlCmd/OO6SdfHqV+DptK/XXKSIjupisAgPBAGEFY2nzolEoratUUMPSrNfslSV8cr+rw+V+dMED+2kZ9+FmJfjBjuL45OVM7Css0eXCiYhysHgsAnUEYAU47fKJKq3Z7tavIp7/uO37RWTsXkpkQrX+YmK5hyTFqDBganxFHzwkAXARhBGhDU8DQf36wV5sOntSoNLf+duCE/n5sqsqqG/S7T450+HMemzlCdQ0BNTQFdOtoj46V12jt56V6OHuYUt1RKvHXKtZp53k8AMIWYQToJMMw9OFnpZKkCJtFf91/QjEOu/647aiKymsu+XP/6foh8tc2qKa+ST++fVRw+nFDU0B2q4UVZgH0WYQRoIuU+mv19vYivbDugDITo/UPV6cr/5hff9x2tEs+/wczhispJlIffX5cT8y6Sikup0orapUez20gAL0bYQToYoGAIUOS7fR6JYZhaH9ppTbsO67/+95nkqQ7J6arpKJOG/Z1bmZPWyLtVj1yy5XaerhMh05UKntkinYUlGvL4VP6/YPXauoVSV3yPQDQHQgjQIg1BYxgUGl5X13fqNqGgHL+8pn+vPOYYp0RGuGJ1Zh0t2rqm/RabsfHqFxIqtup4xV1evqO0SqrrlfhqWotzL5SHjfP8QFgPsII0AtU1Dao2FerLYdP6Ylll7aY24XMGJWi1XtKFDCk7KuS9R//ME4Ou1Wv/u2Qbh3t0dDk2FbH7yup0MDEaDnsNtU2NMkZYeuyWgCEL8II0IsVnKxWdUOjUmKd2l5Qpsq6Rk0bmqRbfr5ekXarXv7WJP3jbzep4hKmKUvS3w1L0qdHfaptaFJdY0CS1C/SpnumDNRv/npQY9Lj9N3sYfLVNCjGYdeXrkqRJB0rr1FiTKQibVYG3gK4KMII0AdV1zeqMWDI5YxQdX2jDENyRtj09vajamwy5PXXBhd760pThiTo/mmD9e3fbQtum3pFor59/RCt33dcf8o7ptQ4p5796liNSnMRVABIIowAYauxKaCDJ6qUmRCt7QVl+ssur0oravX+7hJJ0lN/P1L7Syv0xubCbqthWHKMyk/3qqS6naqsa5Qzwqb9JRX6yoR0zZ6QpqHJMazBAvRxhBEA7aptaJLFIjnsNgUChkor6vThZyW6MiVWK3cVa/HGwyGr5Z+uH6JvTM5Uv0ibtheUacqQRMVFRwb3F56q1hfHK3Xj8OTgNsMw1Bgw1BQwGOMC9FCEEQCXpbahSXarRTarRQ+9tk0fflYim9Wiof1jtLekQt+8NlP3TxusVLdTh09W6fFl+dpZWN5l3x8XHaHy6gbFOu3BsTEup11j0+P0WbFfVadnKknS0OQYfWXCAN09OVMup13r9x3X6AFuxTrtioqwyV/TKJvNorKqepbxB0KIMAKgywQCzb0QkXZru8c1BQzVNTYpOtKuhqaADp2o0uZDp+RxOXVFcoyOV9Tp31d9rm1HypSZEK3hnlit3lMSoqtoNiw5RjPHpMoi6fork7Sz0Kei8hpNGhiv0oo6fXlcWnCV3BaNTQFV1TXJHR0R0lqB3o4wAqDXqKxr1Iqdx/TYO7skSbeN8WjlLq9GeGJ1ZUqsNn5xQicq60NWzw9mDNcnB08q94uTagyc+Sdy0sB4DUuJ0fRRHt14ZX9ZLBadqKxTfpFPU69IumhYA8INYQRAr3P4RJX6OezqH+s4b99L67/QSxsO6pV7r1F+kU9XpsTKX9OgiQPjVXCqWier6rTlcJkOHa/S1QPjVNcQ0C/X7Ff2VSnaV1KhgyeqQnYdYwa4VdfYpH0llRrhidVTfz9SH+wp0d8OnFBUpE0zRnlks1rUFDD0nRuukNVqkWEYrWYhlVXVa29JhdxREboqlX/30DsRRgDgNMMwtL2gXHHREfpd7hFNGhSv6Eib3thcqJr6JiW7HJqQEadIu1XvbC9SXmG5HHar/Je4jktnDUyM1pGT1ZKkqAibahqaLnjcq/ddo6sz47W7yKfjlXX647ajckbYNDAhWvdfN1hpcVEXPK+ovEZRETYlnHP7CehuhBEAuETn9lIYhqG9JRU6Vl4jX02DImxW7Trq00sbDgaPcUdFyGa16FRV6G4ntSU51qGy6nplJkTrxuHJ+u3Hh5TmduqfbrhCIzyxemNzgWobAnro+iEam+6W3WrRyl1eDUqK1qg0t46WVcsdFaFY5/ljZMqq6hUXHcFaMugQwggAhMDnXr8GJ/WTw948vbjYV6OdheVauqVQg5P6aV9JhXYW+nTt4AT9/Ovj9dcDx7VgyQ5JzQvHfW/6lTpeUa9TVfX60fJdCpzzL/JVqS4NSozWX/K9ob40RdqsumZwvG4d5dGmQ6e04tNiSVJ6fJRmjx+gLYdPaefRck29Ikk3j0jW7ePSFAgYChiGin21Gu6JVX1jQP0crCcTrggjANCDHTlZpeRYp6Iiz18jZfcxnzwupxJjzoyd2XTwpJ5d9blOVNbpH67OkMft0KNv79LQ5BgdKK087zNapkb3FE/cdpV+vXZ/8NbXkKR++ua1mfL6arVh/3HtK6lUZkK07p82SPH9IjUqzSVXVISSY5sf+phXWK60OKdiHRGy2yx67v29mjIkQdcP6y9DUoSNwcM9EWEEAMJEZV2jKmsb5XE7FQg0PxagZfyI11erhH6R+mhvqXw1DfLXNGhI/34qPFWjH/9ptyTpX28eqoq6Rv3twAntKzk/2PRGIzyxmj7KI4fdqrKqes2eMEAjU12yWi3afOiUJOmaQfGtbjf5qhskS/Mtt6aAIatF592u4/ZU5xBGAADtqmtsks1ikf2cXoWCk9UqKq9R1hWJMgxD7+YdU7LLoalXJMkwDPlrGvX9P+5UhM2in9w+SnabVW9uKZTDbtWhE1XaU+zXtiNlSuwXqZMXGEOTHh8lZ4RNBaeqVX/6QY1mibRbz6uhX6RNVfVNGpzUTwtuGqojJ6uUHh+tX3+0Xx6XU9cMSlBcdIRuG5Mqu9WqX67Zp0+P+jRrbKpK/XX62tXp8ridSoppHjBssVhU3xgIy6nfhBEAgGl81Q2y2Sxy2q2qbQyoqKxGybGO8xaU81U3qK6pSV5frdLjo7X7mE9/3HZUY9PjtPHACa35vDR47NQrErXxi5OhvpQuk5EQpZuHJ+toWY1OVNZp51GfJOnOiemqrGtU/1iHKmsbta+0QmMGxKnUXyu7zaLJgxNV3xhQenyUUt1ODU2OUT+HXXu9FRqU1E/f/8NOJcRE6ltTBurTo+X6yoR0RdgsqqhrlEVSrDNC+UU+DUyMvuCg5LzCciX2i+yW1YkJIwCAXu9EZZ1iHPbg84eq6hpls1pU3xTQH7YUamSaSxMy4hUwDH1y8GQw7GTERwcX0/O4naqobdTTK/a0+uxRaS7ZrBZ9ejoUnM0ZYdXYAXEqKq9RUXlN919oCNisFmXER6m6vkkBo7ltz/Y/91yt28akdul3EkYAADhLbUOTImxW2awXHvfhq2lQY1Og1cDhFvtKKjQgLioYiqwW6VRVvfZ6KzQgPkqZp3sVjpbVaMGS7cFeD+nMbZ+zOSOswWcr9RTbn7yly9eiIYwAAGCyxqaA7DarSitq5a9p1NDkGPlqGhTrsKuqvlERNqucEbbg4NhtR8q0fEeRRqa5dNvoVB2vrNP/23hYHrdTeYXlWr2nRFlDElXir9WkQfE6XlGnhH4ORdqtemNzQbu1eFxOef21F9z3LzcP1femD+/y6yeMAADQh9Q3BlTb2CTXBcZ9nKu2oUkWi1RT36Rj5bUamdb6t/Pj/Se0v7RCo9Lcmjw4obtK7vDvNyvRAADQC0TarR2ekdNyO8lhtyku+vxbL9cNS9J1w5K6tL7LEX7zjAAAQI9CGAEAAKYijAAAAFMRRgAAgKkIIwAAwFSEEQAAYCrCCAAAMNUlhZHnn39egwYNktPp1LXXXqvNmze3eezixYtlsVhavZxO5yUXDAAA+pZOh5E333xTjzzyiH784x9r+/btGjdunGbMmKHS0tI2z3G5XCouLg6+jhw5cllFAwCAvqPTYeTnP/+5HnroId13330aOXKkXnzxRUVHR+uVV15p8xyLxSKPxxN8paSkXFbRAACg7+hUGKmvr9e2bduUnZ195gOsVmVnZys3N7fN8yorKzVw4EBlZGTojjvu0O7du9v9nrq6Ovn9/lYvAADQN3UqjJw4cUJNTU3n9WykpKTI6/Ve8Jzhw4frlVde0bvvvqvXX39dgUBAU6dO1dGjR9v8npycHLnd7uArIyOjM2UCAIBepNtn02RlZWnOnDkaP368brjhBr3zzjvq37+/XnrppTbPWbRokXw+X/BVWFjY3WUCAACTdOqpvUlJSbLZbCopKWm1vaSkRB6Pp0OfERERoQkTJujAgQNtHuNwOORwOILvDcOQJG7XAADQi7T8brf8jrelU2EkMjJSEydO1Jo1azR79mxJUiAQ0Jo1a7RgwYIOfUZTU5N27dql2267rcPfW1FRIUncrgEAoBeqqKiQ2+1uc3+nwogkPfLII5o7d64mTZqkyZMn6xe/+IWqqqp03333SZLmzJmjAQMGKCcnR5L09NNPa8qUKRo6dKjKy8v13HPP6ciRI3rwwQc7/J1paWkqLCxUbGysLBZLZ0tuk9/vV0ZGhgoLC+Vyubrsc/si2qpjaKeOo606hnbqONqqY0LZToZhqKKiQmlpae0e1+kwctddd+n48eN66qmn5PV6NX78eK1atSo4qLWgoEBW65mhKGVlZXrooYfk9XoVHx+viRMnauPGjRo5cmSHv9NqtSo9Pb2zpXaYy+XiL24H0VYdQzt1HG3VMbRTx9FWHROqdmqvR6SFxbjYjZw+zO/3y+12y+fz8Rf3ImirjqGdOo626hjaqeNoq47pie3Es2kAAICpwjqMOBwO/fjHP241cwcXRlt1DO3UcbRVx9BOHUdbdUxPbKewvk0DAADMF9Y9IwAAwHyEEQAAYCrCCAAAMBVhBAAAmCqsw8jzzz+vQYMGyel06tprr9XmzZvNLimkcnJydM011yg2NlbJycmaPXu29u7d2+qY2tpazZ8/X4mJiYqJidHXvva1855NVFBQoFmzZik6OlrJycn6wQ9+oMbGxlBeSkg9++yzslgsWrhwYXAb7dSsqKhI//iP/6jExERFRUVpzJgx2rp1a3C/YRh66qmnlJqaqqioKGVnZ2v//v2tPuPUqVO655575HK5FBcXpwceeECVlZWhvpRu1dTUpCeffFKDBw9WVFSUrrjiCv3bv/1bq+d3hGtbbdiwQbfffrvS0tJksVi0fPnyVvu7ql0+/fRT/d3f/Z2cTqcyMjL0H//xH919aV2qvXZqaGjQo48+qjFjxqhfv35KS0vTnDlzdOzYsVaf0aPayQhTS5cuNSIjI41XXnnF2L17t/HQQw8ZcXFxRklJidmlhcyMGTOMV1991cjPzzfy8vKM2267zcjMzDQqKyuDx8ybN8/IyMgw1qxZY2zdutWYMmWKMXXq1OD+xsZGY/To0UZ2draxY8cOY+XKlUZSUpKxaNEiMy6p223evNkYNGiQMXbsWOPhhx8ObqedDOPUqVPGwIEDjXvvvdfYtGmTcfDgQeP99983Dhw4EDzm2WefNdxut7F8+XJj586dxpe//GVj8ODBRk1NTfCYW2+91Rg3bpzxySefGH/961+NoUOHGnfffbcZl9RtnnnmGSMxMdFYsWKFcejQIeOtt94yYmJijF/+8pfBY8K1rVauXGk88cQTxjvvvGNIMpYtW9Zqf1e0i8/nM1JSUox77rnHyM/PN9544w0jKirKeOmll0J1mZetvXYqLy83srOzjTfffNP4/PPPjdzcXGPy5MnGxIkTW31GT2qnsA0jkydPNubPnx9839TUZKSlpRk5OTkmVmWu0tJSQ5Kxfv16wzCa/0JHREQYb731VvCYzz77zJBk5ObmGobR/D+E1Wo1vF5v8JgXXnjBcLlcRl1dXWgvoJtVVFQYw4YNM1avXm3ccMMNwTBCOzV79NFHjeuuu67N/YFAwPB4PMZzzz0X3FZeXm44HA7jjTfeMAzDMPbs2WNIMrZs2RI85i9/+YthsViMoqKi7is+xGbNmmXcf//9rbZ99atfNe655x7DMGirFuf+yHZVu/zP//yPER8f3+r/vUcffdQYPnx4N19R97hQaDvX5s2bDUnGkSNHDMPoee0Ulrdp6uvrtW3bNmVnZwe3Wa1WZWdnKzc318TKzOXz+SRJCQkJkqRt27apoaGhVTuNGDFCmZmZwXbKzc3VmDFjgs8mkqQZM2bI7/dr9+7dIay++82fP1+zZs1q1R4S7dTiT3/6kyZNmqQ777xTycnJmjBhgv73f/83uP/QoUPyer2t2sntduvaa69t1U5xcXGaNGlS8Jjs7GxZrVZt2rQpdBfTzaZOnao1a9Zo3759kqSdO3fq448/1syZMyXRVm3pqnbJzc3V9ddfr8jIyOAxM2bM0N69e1VWVhaiqwktn88ni8WiuLg4ST2vnTr9oLy+4MSJE2pqamr1wyBJKSkp+vzzz02qylyBQEALFy7UtGnTNHr0aEmS1+tVZGRk8C9vi5SUFHm93uAxF2rHln19xdKlS7V9+3Zt2bLlvH20U7ODBw/qhRde0COPPKLHH39cW7Zs0b/+678qMjJSc+fODV7nhdrh7HZKTk5utd9utyshIaHPtJMkPfbYY/L7/RoxYoRsNpuampr0zDPP6J577pEk2qoNXdUuXq9XgwcPPu8zWvbFx8d3S/1mqa2t1aOPPqq77747+CyantZOYRlGcL758+crPz9fH3/8sdml9DiFhYV6+OGHtXr1ajmdTrPL6bECgYAmTZqkn/70p5KkCRMmKD8/Xy+++KLmzp1rcnU9yx/+8Af9/ve/15IlSzRq1Cjl5eVp4cKFSktLo63QpRoaGvT1r39dhmHohRdeMLucNoXlbZqkpCTZbLbzZjuUlJTI4/GYVJV5FixYoBUrVuijjz5Senp6cLvH41F9fb3Ky8tbHX92O3k8ngu2Y8u+vmDbtm0qLS3V1VdfLbvdLrvdrvXr1+tXv/qV7Ha7UlJSaCdJqampGjlyZKttV111lQoKCiSduc72/r/zeDwqLS1ttb+xsVGnTp3qM+0kST/4wQ/02GOP6Rvf+IbGjBmjb33rW/rud7+rnJwcSbRVW7qqXcLh/0fpTBA5cuSIVq9e3eoJvT2tncIyjERGRmrixIlas2ZNcFsgENCaNWuUlZVlYmWhZRiGFixYoGXLlmnt2rXndcdNnDhRERERrdpp7969KigoCLZTVlaWdu3a1eovdctf+nN/mHqrL33pS9q1a5fy8vKCr0mTJumee+4J/pl2kqZNm3be1PB9+/Zp4MCBkqTBgwfL4/G0aie/369Nmza1aqfy8nJt27YteMzatWsVCAR07bXXhuAqQqO6ulpWa+t/fm02mwKBgCTaqi1d1S5ZWVnasGGDGhoagsesXr1aw4cP7zO3aFqCyP79+/Xhhx8qMTGx1f4e105dPiS2l1i6dKnhcDiMxYsXG3v27DG+/e1vG3Fxca1mO/R13/nOdwy3222sW7fOKC4uDr6qq6uDx8ybN8/IzMw01q5da2zdutXIysoysrKygvtbpqxOnz7dyMvLM1atWmX079+/T01ZvZCzZ9MYBu1kGM2j9e12u/HMM88Y+/fvN37/+98b0dHRxuuvvx485tlnnzXi4uKMd9991/j000+NO+6444LTMidMmGBs2rTJ+Pjjj41hw4b1+umq55o7d64xYMCA4NTed955x0hKSjJ++MMfBo8J17aqqKgwduzYYezYscOQZPz85z83duzYEZwF0hXtUl5ebqSkpBjf+ta3jPz8fGPp0qVGdHR0r5ra21471dfXG1/+8peN9PR0Iy8vr9W/72fPjOlJ7RS2YcQwDOPXv/61kZmZaURGRhqTJ082PvnkE7NLCilJF3y9+uqrwWNqamqMf/7nfzbi4+ON6Oho4ytf+YpRXFzc6nMOHz5szJw504iKijKSkpKM733ve0ZDQ0OIrya0zg0jtFOzP//5z8bo0aMNh8NhjBgxwnj55Zdb7Q8EAsaTTz5ppKSkGA6Hw/jSl75k7N27t9UxJ0+eNO6++24jJibGcLlcxn333WdUVFSE8jK6nd/vNx5++GEjMzPTcDqdxpAhQ4wnnnii1Q9FuLbVRx99dMF/l+bOnWsYRte1y86dO43rrrvOcDgcxoABA4xnn302VJfYJdprp0OHDrX57/tHH30U/Iye1E4WwzhryT8AAIAQC8sxIwAAoOcgjAAAAFMRRgAAgKkIIwAAwFSEEQAAYCrCCAAAMBVhBAAAmIowAgAATEUYAQAApiKMAAAAUxFGAACAqQgjAADAVP8f4Aty07LcoBkAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plt.plot(torch.tensor(l).view(-1, 10).mean(1).numpy())" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "cPqZDUJ8I2AA", "outputId": "a6adc02a-a989-4bc5-ab8b-8840be56238d" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "def _initialModel._to_java_impl():\n", " \"\"\"\n", " Deprecated in 2.3.0. Use :func:`pyspark.sql.types.DataType`, int or :class:`Column` expression in the given key (default param).\n", "\n", " >>> df = spark.range(1, 0).alias('age')).collect()\n", " [Row(name=u'Alice', age=1, name=u'Alice')]\n", " \"\n" ] } ], "source": [ "# 使用模型来生成文本\n", "begin_text = torch.tensor(tok.encode('def '), device=device).unsqueeze(0)\n", "print(''.join(tok.decode(generate_batch(model, begin_text))))" ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "V100", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" } }, "nbformat": 4, "nbformat_minor": 1 }