{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "id": "O2CjOgU3Ryet" }, "outputs": [], "source": [ "import torch" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1yv3LclNR3FH", "outputId": "1b2b1fec-9f85-4a60-b3a0-d0ac93ef93be" }, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 检查是否有GPU\n", "torch.cuda.is_available()" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "C2lR7FW1R50T", "outputId": "a1e8c64c-f84a-4039-a219-d9c36367036d" }, "outputs": [ { "data": { "text/plain": [ "1" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# GPU的个数\n", "torch.cuda.device_count()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "uST41cZWCFG4", "outputId": "2d6789c8-281b-415a-d4b6-6b0eb70f0738" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "False\n", "True\n" ] } ], "source": [ "# 默认情况下,创建的张量存放在内存,使用CPU进行计算\n", "x = torch.randn(2, 3)\n", "print(x.is_cuda)\n", "# 可以使用张量提供的函数,将数据移到GPU\n", "# 当有n个GPU时,相应的设备id是cuda:0, ... cude:n-1\n", "print(x.to('cuda:0').is_cuda)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "S1WER-1XSQP8", "outputId": "442f1e34-5129-414f-c6b6-e218a5ea0da9" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "True\n", "False\n" ] } ], "source": [ "# 在创建张量时,通过指定device来将张量移到GPU\n", "y = torch.randn(2, 3, device='cuda:0')\n", "print(y.is_cuda)\n", "print(y.to('cpu').is_cuda)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 214 }, "id": "Oy9Ov40TSTUN", "outputId": "823b4cba-e105-4e95-d716-bf568bed34a9" }, "outputs": [ { "ename": "RuntimeError", "evalue": "ignored", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# 不支持跨计算核心运算\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!" 
] } ], "source": [ "# 不支持跨计算核心运算\n", "x + y" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "id": "LdEB0ngaSY5t" }, "outputs": [], "source": [ "import time\n", "\n", "def measure_compute_time(device_id, dimension):\n", " \"\"\"\n", " 展示GPU在串行计算和并行计算时的表现\n", " 当dimension比较大时,GPU主要进行并行计算\n", " \"\"\"\n", " start_time = time.time()\n", " x = torch.ones((dimension, dimension), device=device_id)\n", " # for循环时串行计算\n", " for _ in range(10 ** 5):\n", " # x是矩阵,x + x是并行计算\n", " x + x\n", " elapsed_time = time.time() - start_time\n", " print(f'For device {device_id}, compute time = {elapsed_time: .4f}')" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "OP8VdqhmE_qa", "outputId": "118345d3-aa01-4942-a9bf-eadc01fa1c8d" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "For device cpu, compute time = 0.5259\n", "For device cuda:0, compute time = 1.0495\n" ] } ], "source": [ "measure_compute_time('cpu', 1)\n", "measure_compute_time('cuda:0', 1)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "-6glK0TfFyTO", "outputId": "4a5cec04-00fb-4b63-b4f3-ad89b3393e14" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "For device cpu, compute time = 27.2803\n", "For device cuda:0, compute time = 1.1041\n" ] } ], "source": [ "measure_compute_time('cpu', 1000)\n", "measure_compute_time('cuda:0', 1000)" ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "V100", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" } }, "nbformat": 4, "nbformat_minor": 1 }