{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import datetime\n", "\n", "from util.util import importstr\n", "from util.logconf import logging\n", "log = logging.getLogger('nb')" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def run(app, *argv):\n", " argv = list(argv)\n", " argv.insert(0, '--num-workers=4') # <1>\n", " log.info(\"Running: {}({!r}).main()\".format(app, argv))\n", " \n", " app_cls = importstr(*app.rsplit('.', 1)) # <2>\n", " app_cls(argv).main()\n", " \n", " log.info(\"Finished: {}.{!r}).main()\".format(app, argv))" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import os\n", "import shutil\n", "\n", "# clean up any old data that might be around.\n", "# We don't call this by default because it's destructive, \n", "# and would waste a lot of time if it ran when nothing \n", "# on the application side had changed.\n", "def cleanCache():\n", " shutil.rmtree('data-unversioned/cache')\n", " os.mkdir('data-unversioned/cache')\n", "\n", "# cleanCache()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "training_epochs = 20\n", "experiment_epochs = 10\n", "final_epochs = 50\n", "\n", "training_epochs = 2\n", "experiment_epochs = 2\n", "final_epochs = 5\n", "seg_epochs = 10" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Chapter 11" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch11.prepcache.LunaPrepCacheApp')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch11.training.LunaTrainingApp', '--epochs=1')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch11.training.LunaTrainingApp', f'--epochs={experiment_epochs}')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Chapter 12" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.prepcache.LunaPrepCacheApp')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', '--epochs=1', 'unbalanced')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={training_epochs}', '--balanced', 'balanced')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={experiment_epochs}', '--balanced', '--augment-flip', 'flip')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={experiment_epochs}', '--balanced', '--augment-offset', 'offset')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={experiment_epochs}', '--balanced', '--augment-scale', 'scale')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={experiment_epochs}', '--balanced', '--augment-rotate', 'rotate')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={experiment_epochs}', '--balanced', 
'--augment-noise', 'noise')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch12.training.LunaTrainingApp', f'--epochs={training_epochs}', '--balanced', '--augmented', 'fully-augmented')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Chapter 13" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch13.prepcache.LunaPrepCacheApp')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch13.train_cls.LunaTrainingApp', f'--epochs={final_epochs}', '--balanced', '--augmented', 'final-cls')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "run('p2ch13.train_seg.LunaTrainingApp', f'--epochs={seg_epochs}', '--augmented', 'final-seg')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Chapter 14" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run('p2ch14.prepcache.LunaPrepCacheApp')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2020-02-06 14:16:55,904 INFO pid:140770 nb:004:run Running: p2ch14.training.ClassificationTrainingApp(['--num-workers=4', '--epochs=100', 'final-nodule-nonnodule']).main()\n", "2020-02-06 14:16:55,916 INFO pid:140770 p2ch14.training:149:initModel Using CUDA with 1 devices.\n", "2020-02-06 14:16:57,163 INFO pid:140770 p2ch14.training:226:main Starting ClassificationTrainingApp, Namespace(batch_size=24, comment='final-nodule-nonnodule', dataset='LunaDataset', epochs=100, finetune='', finetune_depth=1, malignant=False, model='LunaModel', num_workers=4, tb_prefix='p2ch14')\n", "2020-02-06 14:16:59,809 INFO pid:140770 p2ch14.dsets:303:__init__ : 498201 training samples, 497172 neg, 1029 pos, 1:1 ratio\n", "2020-02-06 14:16:59,958 INFO pid:140770 p2ch14.dsets:303:__init__ : 51535 validation samples, 51382 neg, 153 pos, unbalanced ratio\n", "2020-02-06 14:16:59,959 INFO pid:140770 p2ch14.training:249:main Epoch 1 of 100, 2084/2148 batches of size 24*1\n", "2020-02-06 14:17:00,476 WARNING pid:140770 util.util:233:enumerateWithEstimate E1 Training ----/2084, starting\n", "../torch/csrc/autograd/generated/python_variable_methods.cpp:1299: UserWarning: This overload of add is deprecated:\n", "add(Number alpha, Tensor other)\n", "Consider using one of the following signatures instead:\n", "add(Tensor other, Number alpha)\n", "../torch/csrc/autograd/generated/python_variable_methods.cpp:1334: UserWarning: This overload of add_ is deprecated:\n", "add_(Number alpha, Tensor other)\n", "Consider using one of the following signatures instead:\n", "add_(Tensor other, Number alpha)\n", "2020-02-06 14:17:02,746 INFO pid:140770 util.util:257:enumerateWithEstimate E1 Training 16/2084, done at 2020-02-06 14:19:30, 0:02:28\n", "2020-02-06 14:17:06,108 INFO pid:140770 util.util:257:enumerateWithEstimate E1 Training 64/2084, done at 2020-02-06 14:19:28, 0:02:26\n", "2020-02-06 14:17:19,550 INFO pid:140770 util.util:257:enumerateWithEstimate E1 Training 256/2084, done at 2020-02-06 14:19:27, 0:02:25\n", "2020-02-06 14:18:18,009 INFO pid:140770 util.util:257:enumerateWithEstimate E1 Training 1024/2084, done at 2020-02-06 14:19:37, 0:02:35\n", "2020-02-06 14:19:42,583 WARNING pid:140770 util.util:270:enumerateWithEstimate E1 Training ----/2084, done at 2020-02-06 14:19:42\n", "WARNING: Logging before flag 
parsing goes to stderr.\n", "I0206 14:19:45.540602 140642927564608 training.py:395] E1 ClassificationTrainingApp\n", "I0206 14:19:45.549316 140642927564608 training.py:476] E1 trn 0.5666 loss, 69.8% correct, 0.6987 precision, 0.6953 recall, 0.6970 f1 score\n", "I0206 14:19:45.549755 140642927564608 training.py:489] E1 trn_neg 0.5465 loss, 70.0% correct (17504 of 25000)\n", "I0206 14:19:45.550342 140642927564608 training.py:501] E1 trn_pos 0.5868 loss, 69.5% correct (17382 of 25000)\n", "W0206 14:19:45.555670 140642927564608 util.py:233] E1 Validation ----/2148, starting\n", "I0206 14:19:46.153503 140642927564608 util.py:257] E1 Validation 16/2148, done at 2020-02-06 14:20:43, 0:00:57\n", "I0206 14:19:47.351591 140642927564608 util.py:257] E1 Validation 64/2148, done at 2020-02-06 14:20:40, 0:00:54\n", "I0206 14:19:57.012274 140642927564608 util.py:257] E1 Validation 256/2148, done at 2020-02-06 14:21:20, 0:01:34\n", "I0206 14:20:15.283191 140642927564608 util.py:257] E1 Validation 1024/2148, done at 2020-02-06 14:20:47, 0:01:01\n", "W0206 14:20:49.250460 140642927564608 util.py:270] E1 Validation ----/2148, done at 2020-02-06 14:20:49\n", "I0206 14:20:49.251916 140642927564608 training.py:395] E1 ClassificationTrainingApp\n", "I0206 14:20:49.254173 140642927564608 training.py:476] E1 val 0.3416 loss, 87.7% correct, 0.0173 precision, 0.7255 recall, 0.0338 f1 score\n", "I0206 14:20:49.254622 140642927564608 training.py:489] E1 val_neg 0.3408 loss, 87.7% correct (45087 of 51382)\n", "I0206 14:20:49.255002 140642927564608 training.py:501] E1 val_pos 0.6103 loss, 72.5% correct (111 of 153)\n", "I0206 14:20:49.262402 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.50000.state\n", "I0206 14:20:49.263746 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 14:20:49.265289 140642927564608 training.py:617] SHA1: f55e84905e1110ed0733d828aca32fc676a6c2e1\n", "I0206 14:20:49.265702 140642927564608 training.py:249] Epoch 2 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:20:49.794876 140642927564608 util.py:233] E2 Training ----/2084, starting\n", "I0206 14:20:51.227337 140642927564608 util.py:257] E2 Training 16/2084, done at 2020-02-06 14:23:19, 0:02:29\n", "I0206 14:20:54.667339 140642927564608 util.py:257] E2 Training 64/2084, done at 2020-02-06 14:23:19, 0:02:29\n", "I0206 14:21:14.551048 140642927564608 util.py:257] E2 Training 256/2084, done at 2020-02-06 14:24:09, 0:03:19\n", "I0206 14:22:11.172866 140642927564608 util.py:257] E2 Training 1024/2084, done at 2020-02-06 14:23:35, 0:02:44\n", "W0206 14:23:33.690467 140642927564608 util.py:270] E2 Training ----/2084, done at 2020-02-06 14:23:33\n", "I0206 14:23:33.692124 140642927564608 training.py:395] E2 ClassificationTrainingApp\n", "I0206 14:23:33.693891 140642927564608 training.py:476] E2 trn 0.3548 loss, 84.6% correct, 0.8682 precision, 0.8168 recall, 0.8417 f1 score\n", "I0206 14:23:33.694375 140642927564608 training.py:489] E2 trn_neg 0.3181 loss, 87.6% correct (21900 of 25000)\n", "I0206 14:23:33.694750 140642927564608 training.py:501] E2 trn_pos 0.3916 loss, 81.7% correct (20419 of 25000)\n", "I0206 14:23:33.700130 140642927564608 training.py:249] Epoch 3 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:23:34.232563 140642927564608 util.py:233] E3 Training ----/2084, starting\n", "I0206 14:23:39.293881 140642927564608 util.py:257] E3 
Training 16/2084, done at 2020-02-06 14:36:02, 0:12:27\n", "I0206 14:23:42.758418 140642927564608 util.py:257] E3 Training 64/2084, done at 2020-02-06 14:28:12, 0:04:37\n", "I0206 14:23:56.621428 140642927564608 util.py:257] E3 Training 256/2084, done at 2020-02-06 14:26:35, 0:03:00\n", "I0206 14:25:00.354252 140642927564608 util.py:257] E3 Training 1024/2084, done at 2020-02-06 14:26:29, 0:02:54\n", "W0206 14:26:21.478146 140642927564608 util.py:270] E3 Training ----/2084, done at 2020-02-06 14:26:21\n", "I0206 14:26:21.546961 140642927564608 training.py:395] E3 ClassificationTrainingApp\n", "I0206 14:26:21.548928 140642927564608 training.py:476] E3 trn 0.2726 loss, 88.9% correct, 0.9116 precision, 0.8608 recall, 0.8854 f1 score\n", "I0206 14:26:21.549433 140642927564608 training.py:489] E3 trn_neg 0.2369 loss, 91.7% correct (22913 of 25000)\n", "I0206 14:26:21.549849 140642927564608 training.py:501] E3 trn_pos 0.3083 loss, 86.1% correct (21519 of 25000)\n", "I0206 14:26:21.555298 140642927564608 training.py:249] Epoch 4 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:26:22.092252 140642927564608 util.py:233] E4 Training ----/2084, starting\n", "I0206 14:26:23.441630 140642927564608 util.py:257] E4 Training 16/2084, done at 2020-02-06 14:28:52, 0:02:30\n", "I0206 14:26:26.906479 140642927564608 util.py:257] E4 Training 64/2084, done at 2020-02-06 14:28:52, 0:02:30\n", "I0206 14:26:44.676611 140642927564608 util.py:257] E4 Training 256/2084, done at 2020-02-06 14:29:24, 0:03:02\n", "I0206 14:27:43.303055 140642927564608 util.py:257] E4 Training 1024/2084, done at 2020-02-06 14:29:07, 0:02:44\n", "W0206 14:29:05.213821 140642927564608 util.py:270] E4 Training ----/2084, done at 2020-02-06 14:29:05\n", "I0206 14:29:05.280069 140642927564608 training.py:395] E4 ClassificationTrainingApp\n", "I0206 14:29:05.282149 140642927564608 training.py:476] E4 trn 0.2206 loss, 91.2% correct, 0.9333 precision, 0.8883 recall, 0.9102 f1 score\n", "I0206 14:29:05.282553 140642927564608 training.py:489] E4 trn_neg 0.1924 loss, 93.7% correct (23413 of 25000)\n", "I0206 14:29:05.283011 140642927564608 training.py:501] E4 trn_pos 0.2488 loss, 88.8% correct (22207 of 25000)\n", "I0206 14:29:05.288227 140642927564608 training.py:249] Epoch 5 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:29:05.819149 140642927564608 util.py:233] E5 Training ----/2084, starting\n", "I0206 14:29:07.165994 140642927564608 util.py:257] E5 Training 16/2084, done at 2020-02-06 14:31:37, 0:02:30\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 14:29:10.648557 140642927564608 util.py:257] E5 Training 64/2084, done at 2020-02-06 14:31:37, 0:02:30\n", "I0206 14:29:28.730707 140642927564608 util.py:257] E5 Training 256/2084, done at 2020-02-06 14:32:11, 0:03:05\n", "I0206 14:30:28.488884 140642927564608 util.py:257] E5 Training 1024/2084, done at 2020-02-06 14:31:53, 0:02:47\n", "W0206 14:31:50.613174 140642927564608 util.py:270] E5 Training ----/2084, done at 2020-02-06 14:31:50\n", "I0206 14:31:50.684572 140642927564608 training.py:395] E5 ClassificationTrainingApp\n", "I0206 14:31:50.686475 140642927564608 training.py:476] E5 trn 0.1892 loss, 92.6% correct, 0.9428 precision, 0.9072 recall, 0.9246 f1 score\n", "I0206 14:31:50.686937 140642927564608 training.py:489] E5 trn_neg 0.1659 loss, 94.5% correct (23623 of 25000)\n", "I0206 14:31:50.687500 140642927564608 training.py:501] E5 trn_pos 0.2124 loss, 90.7% correct (22679 of 25000)\n", "W0206 14:31:50.693280 140642927564608 util.py:233] E5 Validation 
----/2148, starting\n", "I0206 14:31:51.231395 140642927564608 util.py:257] E5 Validation 16/2148, done at 2020-02-06 14:32:37, 0:00:46\n", "I0206 14:31:52.187450 140642927564608 util.py:257] E5 Validation 64/2148, done at 2020-02-06 14:32:34, 0:00:43\n", "I0206 14:31:56.558557 140642927564608 util.py:257] E5 Validation 256/2148, done at 2020-02-06 14:32:38, 0:00:47\n", "I0206 14:32:12.556652 140642927564608 util.py:257] E5 Validation 1024/2148, done at 2020-02-06 14:32:36, 0:00:45\n", "W0206 14:32:36.015303 140642927564608 util.py:270] E5 Validation ----/2148, done at 2020-02-06 14:32:36\n", "I0206 14:32:36.016471 140642927564608 training.py:395] E5 ClassificationTrainingApp\n", "I0206 14:32:36.018363 140642927564608 training.py:476] E5 val 0.2718 loss, 90.0% correct, 0.0270 precision, 0.9346 recall, 0.0525 f1 score\n", "I0206 14:32:36.018813 140642927564608 training.py:489] E5 val_neg 0.2721 loss, 90.0% correct (46228 of 51382)\n", "I0206 14:32:36.019345 140642927564608 training.py:501] E5 val_pos 0.1772 loss, 93.5% correct (143 of 153)\n", "I0206 14:32:36.027561 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.250000.state\n", "I0206 14:32:36.029899 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 14:32:36.032553 140642927564608 training.py:617] SHA1: 3b049e90e28fa3eb32c0fd2a2ed33268bb791907\n", "I0206 14:32:36.033156 140642927564608 training.py:249] Epoch 6 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:32:36.560795 140642927564608 util.py:233] E6 Training ----/2084, starting\n", "I0206 14:32:37.903428 140642927564608 util.py:257] E6 Training 16/2084, done at 2020-02-06 14:35:07, 0:02:30\n", "I0206 14:32:41.385310 140642927564608 util.py:257] E6 Training 64/2084, done at 2020-02-06 14:35:07, 0:02:30\n", "I0206 14:32:55.315941 140642927564608 util.py:257] E6 Training 256/2084, done at 2020-02-06 14:35:07, 0:02:30\n", "I0206 14:33:52.148840 140642927564608 util.py:257] E6 Training 1024/2084, done at 2020-02-06 14:35:10, 0:02:33\n", "W0206 14:35:16.482730 140642927564608 util.py:270] E6 Training ----/2084, done at 2020-02-06 14:35:16\n", "I0206 14:35:16.552882 140642927564608 training.py:395] E6 ClassificationTrainingApp\n", "I0206 14:35:16.554864 140642927564608 training.py:476] E6 trn 0.1622 loss, 93.7% correct, 0.9508 precision, 0.9223 recall, 0.9363 f1 score\n", "I0206 14:35:16.555442 140642927564608 training.py:489] E6 trn_neg 0.1436 loss, 95.2% correct (23806 of 25000)\n", "I0206 14:35:16.555926 140642927564608 training.py:501] E6 trn_pos 0.1808 loss, 92.2% correct (23058 of 25000)\n", "I0206 14:35:16.561423 140642927564608 training.py:249] Epoch 7 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:35:17.092241 140642927564608 util.py:233] E7 Training ----/2084, starting\n", "I0206 14:35:18.470813 140642927564608 util.py:257] E7 Training 16/2084, done at 2020-02-06 14:37:48, 0:02:30\n", "I0206 14:35:21.952875 140642927564608 util.py:257] E7 Training 64/2084, done at 2020-02-06 14:37:48, 0:02:30\n", "I0206 14:35:38.437713 140642927564608 util.py:257] E7 Training 256/2084, done at 2020-02-06 14:38:09, 0:02:51\n", "I0206 14:36:35.994328 140642927564608 util.py:257] E7 Training 1024/2084, done at 2020-02-06 14:37:57, 0:02:39\n", "W0206 14:37:56.777386 140642927564608 util.py:270] E7 Training ----/2084, done at 2020-02-06 14:37:56\n", "I0206 14:37:56.849547 140642927564608 
training.py:395] E7 ClassificationTrainingApp\n", "I0206 14:37:56.851840 140642927564608 training.py:476] E7 trn 0.1476 loss, 94.4% correct, 0.9562 precision, 0.9316 recall, 0.9437 f1 score\n", "I0206 14:37:56.852451 140642927564608 training.py:489] E7 trn_neg 0.1327 loss, 95.7% correct (23932 of 25000)\n", "I0206 14:37:56.853471 140642927564608 training.py:501] E7 trn_pos 0.1625 loss, 93.2% correct (23289 of 25000)\n", "I0206 14:37:56.859082 140642927564608 training.py:249] Epoch 8 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:37:57.400369 140642927564608 util.py:233] E8 Training ----/2084, starting\n", "I0206 14:38:02.777909 140642927564608 util.py:257] E8 Training 16/2084, done at 2020-02-06 14:40:37, 0:02:35\n", "I0206 14:38:06.245700 140642927564608 util.py:257] E8 Training 64/2084, done at 2020-02-06 14:40:33, 0:02:31\n", "I0206 14:38:20.111638 140642927564608 util.py:257] E8 Training 256/2084, done at 2020-02-06 14:40:32, 0:02:30\n", "I0206 14:39:18.555486 140642927564608 util.py:257] E8 Training 1024/2084, done at 2020-02-06 14:40:38, 0:02:36\n", "W0206 14:40:40.169072 140642927564608 util.py:270] E8 Training ----/2084, done at 2020-02-06 14:40:40\n", "I0206 14:40:40.238354 140642927564608 training.py:395] E8 ClassificationTrainingApp\n", "I0206 14:40:40.241184 140642927564608 training.py:476] E8 trn 0.1355 loss, 94.6% correct, 0.9575 precision, 0.9341 recall, 0.9457 f1 score\n", "I0206 14:40:40.241743 140642927564608 training.py:489] E8 trn_neg 0.1208 loss, 95.9% correct (23963 of 25000)\n", "I0206 14:40:40.242239 140642927564608 training.py:501] E8 trn_pos 0.1503 loss, 93.4% correct (23353 of 25000)\n", "I0206 14:40:40.247388 140642927564608 training.py:249] Epoch 9 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:40:40.776972 140642927564608 util.py:233] E9 Training ----/2084, starting\n", "I0206 14:40:42.132588 140642927564608 util.py:257] E9 Training 16/2084, done at 2020-02-06 14:43:12, 0:02:30\n", "I0206 14:40:45.614756 140642927564608 util.py:257] E9 Training 64/2084, done at 2020-02-06 14:43:12, 0:02:30\n", "I0206 14:40:59.846740 140642927564608 util.py:257] E9 Training 256/2084, done at 2020-02-06 14:43:14, 0:02:33\n", "I0206 14:41:59.319844 140642927564608 util.py:257] E9 Training 1024/2084, done at 2020-02-06 14:43:20, 0:02:39\n", "W0206 14:43:23.988906 140642927564608 util.py:270] E9 Training ----/2084, done at 2020-02-06 14:43:23\n", "I0206 14:43:24.059353 140642927564608 training.py:395] E9 ClassificationTrainingApp\n", "I0206 14:43:24.063016 140642927564608 training.py:476] E9 trn 0.1214 loss, 95.4% correct, 0.9642 precision, 0.9430 recall, 0.9535 f1 score\n", "I0206 14:43:24.063467 140642927564608 training.py:489] E9 trn_neg 0.1096 loss, 96.5% correct (24125 of 25000)\n", "I0206 14:43:24.063841 140642927564608 training.py:501] E9 trn_pos 0.1333 loss, 94.3% correct (23575 of 25000)\n", "I0206 14:43:24.069060 140642927564608 training.py:249] Epoch 10 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:43:24.603905 140642927564608 util.py:233] E10 Training ----/2084, starting\n", "I0206 14:43:25.958705 140642927564608 util.py:257] E10 Training 16/2084, done at 2020-02-06 14:45:56, 0:02:31\n", "I0206 14:43:29.439533 140642927564608 util.py:257] E10 Training 64/2084, done at 2020-02-06 14:45:55, 0:02:30\n", "I0206 14:43:43.367003 140642927564608 util.py:257] E10 Training 256/2084, done at 2020-02-06 14:45:55, 0:02:30\n", "I0206 14:44:42.649519 140642927564608 util.py:257] E10 Training 1024/2084, done at 2020-02-06 14:46:03, 0:02:38\n", "W0206 14:46:06.300241 
140642927564608 util.py:270] E10 Training ----/2084, done at 2020-02-06 14:46:06\n", "I0206 14:46:06.370073 140642927564608 training.py:395] E10 ClassificationTrainingApp\n", "I0206 14:46:06.374094 140642927564608 training.py:476] E10 trn 0.1134 loss, 95.7% correct, 0.9653 precision, 0.9479 recall, 0.9565 f1 score\n", "I0206 14:46:06.374593 140642927564608 training.py:489] E10 trn_neg 0.1023 loss, 96.6% correct (24149 of 25000)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 14:46:06.375169 140642927564608 training.py:501] E10 trn_pos 0.1245 loss, 94.8% correct (23697 of 25000)\n", "W0206 14:46:06.380532 140642927564608 util.py:233] E10 Validation ----/2148, starting\n", "I0206 14:46:07.429067 140642927564608 util.py:257] E10 Validation 16/2148, done at 2020-02-06 14:46:44, 0:00:37\n", "I0206 14:46:08.885728 140642927564608 util.py:257] E10 Validation 64/2148, done at 2020-02-06 14:47:06, 0:00:59\n", "I0206 14:46:14.010195 140642927564608 util.py:257] E10 Validation 256/2148, done at 2020-02-06 14:47:04, 0:00:57\n", "I0206 14:46:31.858788 140642927564608 util.py:257] E10 Validation 1024/2148, done at 2020-02-06 14:46:58, 0:00:51\n", "W0206 14:46:57.161756 140642927564608 util.py:270] E10 Validation ----/2148, done at 2020-02-06 14:46:57\n", "I0206 14:46:57.163307 140642927564608 training.py:395] E10 ClassificationTrainingApp\n", "I0206 14:46:57.165011 140642927564608 training.py:476] E10 val 0.1550 loss, 94.7% correct, 0.0506 precision, 0.9477 recall, 0.0960 f1 score\n", "I0206 14:46:57.165568 140642927564608 training.py:489] E10 val_neg 0.1550 loss, 94.7% correct (48660 of 51382)\n", "I0206 14:46:57.165938 140642927564608 training.py:501] E10 val_pos 0.1579 loss, 94.8% correct (145 of 153)\n", "I0206 14:46:57.176019 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.500000.state\n", "I0206 14:46:57.177764 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 14:46:57.179794 140642927564608 training.py:617] SHA1: 8bdecc5ed633c57a2b06d43d9e0765ed280246bf\n", "I0206 14:46:57.180255 140642927564608 training.py:249] Epoch 11 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:46:57.712649 140642927564608 util.py:233] E11 Training ----/2084, starting\n", "I0206 14:46:59.055014 140642927564608 util.py:257] E11 Training 16/2084, done at 2020-02-06 14:49:29, 0:02:30\n", "I0206 14:47:02.539172 140642927564608 util.py:257] E11 Training 64/2084, done at 2020-02-06 14:49:29, 0:02:30\n", "I0206 14:47:17.651178 140642927564608 util.py:257] E11 Training 256/2084, done at 2020-02-06 14:49:38, 0:02:40\n", "I0206 14:48:14.944940 140642927564608 util.py:257] E11 Training 1024/2084, done at 2020-02-06 14:49:34, 0:02:36\n", "W0206 14:49:40.648877 140642927564608 util.py:270] E11 Training ----/2084, done at 2020-02-06 14:49:40\n", "I0206 14:49:40.719040 140642927564608 training.py:395] E11 ClassificationTrainingApp\n", "I0206 14:49:40.723572 140642927564608 training.py:476] E11 trn 0.1078 loss, 95.9% correct, 0.9674 precision, 0.9506 recall, 0.9589 f1 score\n", "I0206 14:49:40.724200 140642927564608 training.py:489] E11 trn_neg 0.1010 loss, 96.8% correct (24198 of 25000)\n", "I0206 14:49:40.724698 140642927564608 training.py:501] E11 trn_pos 0.1147 loss, 95.1% correct (23766 of 25000)\n", "I0206 14:49:40.729813 140642927564608 training.py:249] Epoch 12 of 100, 2084/2148 batches of size 24*1\n", 
"W0206 14:49:41.264764 140642927564608 util.py:233] E12 Training ----/2084, starting\n", "I0206 14:49:42.636784 140642927564608 util.py:257] E12 Training 16/2084, done at 2020-02-06 14:52:12, 0:02:30\n", "I0206 14:49:46.120265 140642927564608 util.py:257] E12 Training 64/2084, done at 2020-02-06 14:52:12, 0:02:30\n", "I0206 14:50:00.052690 140642927564608 util.py:257] E12 Training 256/2084, done at 2020-02-06 14:52:12, 0:02:30\n", "I0206 14:50:58.728141 140642927564608 util.py:257] E12 Training 1024/2084, done at 2020-02-06 14:52:18, 0:02:36\n", "W0206 14:52:18.695060 140642927564608 util.py:270] E12 Training ----/2084, done at 2020-02-06 14:52:18\n", "I0206 14:52:18.764346 140642927564608 training.py:395] E12 ClassificationTrainingApp\n", "I0206 14:52:18.769140 140642927564608 training.py:476] E12 trn 0.0999 loss, 96.2% correct, 0.9696 precision, 0.9539 recall, 0.9617 f1 score\n", "I0206 14:52:18.769737 140642927564608 training.py:489] E12 trn_neg 0.0936 loss, 97.0% correct (24253 of 25000)\n", "I0206 14:52:18.770233 140642927564608 training.py:501] E12 trn_pos 0.1063 loss, 95.4% correct (23847 of 25000)\n", "I0206 14:52:18.775807 140642927564608 training.py:249] Epoch 13 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:52:19.312257 140642927564608 util.py:233] E13 Training ----/2084, starting\n", "I0206 14:52:20.738514 140642927564608 util.py:257] E13 Training 16/2084, done at 2020-02-06 14:54:50, 0:02:30\n", "I0206 14:52:24.220701 140642927564608 util.py:257] E13 Training 64/2084, done at 2020-02-06 14:54:50, 0:02:30\n", "I0206 14:52:38.155966 140642927564608 util.py:257] E13 Training 256/2084, done at 2020-02-06 14:54:50, 0:02:30\n", "I0206 14:53:38.136444 140642927564608 util.py:257] E13 Training 1024/2084, done at 2020-02-06 14:54:59, 0:02:39\n", "W0206 14:55:01.614954 140642927564608 util.py:270] E13 Training ----/2084, done at 2020-02-06 14:55:01\n", "I0206 14:55:01.683367 140642927564608 training.py:395] E13 ClassificationTrainingApp\n", "I0206 14:55:01.688533 140642927564608 training.py:476] E13 trn 0.0987 loss, 96.2% correct, 0.9701 precision, 0.9539 recall, 0.9619 f1 score\n", "I0206 14:55:01.689131 140642927564608 training.py:489] E13 trn_neg 0.0942 loss, 97.1% correct (24264 of 25000)\n", "I0206 14:55:01.689621 140642927564608 training.py:501] E13 trn_pos 0.1032 loss, 95.4% correct (23847 of 25000)\n", "I0206 14:55:01.694906 140642927564608 training.py:249] Epoch 14 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:55:02.239997 140642927564608 util.py:233] E14 Training ----/2084, starting\n", "I0206 14:55:03.598491 140642927564608 util.py:257] E14 Training 16/2084, done at 2020-02-06 14:57:33, 0:02:30\n", "I0206 14:55:07.082982 140642927564608 util.py:257] E14 Training 64/2084, done at 2020-02-06 14:57:33, 0:02:30\n", "I0206 14:55:23.027722 140642927564608 util.py:257] E14 Training 256/2084, done at 2020-02-06 14:57:50, 0:02:47\n", "I0206 14:56:20.112264 140642927564608 util.py:257] E14 Training 1024/2084, done at 2020-02-06 14:57:40, 0:02:37\n", "W0206 14:57:44.249034 140642927564608 util.py:270] E14 Training ----/2084, done at 2020-02-06 14:57:44\n", "I0206 14:57:44.317534 140642927564608 training.py:395] E14 ClassificationTrainingApp\n", "I0206 14:57:44.322750 140642927564608 training.py:476] E14 trn 0.0912 loss, 96.6% correct, 0.9724 precision, 0.9583 recall, 0.9653 f1 score\n", "I0206 14:57:44.323221 140642927564608 training.py:489] E14 trn_neg 0.0878 loss, 97.3% correct (24321 of 25000)\n", "I0206 14:57:44.323703 140642927564608 training.py:501] E14 trn_pos 
0.0946 loss, 95.8% correct (23958 of 25000)\n", "I0206 14:57:44.328980 140642927564608 training.py:249] Epoch 15 of 100, 2084/2148 batches of size 24*1\n", "W0206 14:57:44.857648 140642927564608 util.py:233] E15 Training ----/2084, starting\n", "I0206 14:57:46.318654 140642927564608 util.py:257] E15 Training 16/2084, done at 2020-02-06 15:00:16, 0:02:30\n", "I0206 14:57:49.803446 140642927564608 util.py:257] E15 Training 64/2084, done at 2020-02-06 15:00:16, 0:02:31\n", "I0206 14:58:03.737765 140642927564608 util.py:257] E15 Training 256/2084, done at 2020-02-06 15:00:16, 0:02:30\n", "I0206 14:59:02.578582 140642927564608 util.py:257] E15 Training 1024/2084, done at 2020-02-06 15:00:22, 0:02:37\n", "W0206 15:00:25.046120 140642927564608 util.py:270] E15 Training ----/2084, done at 2020-02-06 15:00:25\n", "I0206 15:00:25.115130 140642927564608 training.py:395] E15 ClassificationTrainingApp\n", "I0206 15:00:25.120580 140642927564608 training.py:476] E15 trn 0.0900 loss, 96.6% correct, 0.9721 precision, 0.9590 recall, 0.9655 f1 score\n", "I0206 15:00:25.121092 140642927564608 training.py:489] E15 trn_neg 0.0883 loss, 97.3% correct (24313 of 25000)\n", "I0206 15:00:25.121581 140642927564608 training.py:501] E15 trn_pos 0.0916 loss, 95.9% correct (23974 of 25000)\n", "W0206 15:00:25.126809 140642927564608 util.py:233] E15 Validation ----/2148, starting\n", "I0206 15:00:25.698140 140642927564608 util.py:257] E15 Validation 16/2148, done at 2020-02-06 15:01:14, 0:00:49\n", "I0206 15:00:26.656863 140642927564608 util.py:257] E15 Validation 64/2148, done at 2020-02-06 15:01:09, 0:00:44\n", "I0206 15:00:30.702880 140642927564608 util.py:257] E15 Validation 256/2148, done at 2020-02-06 15:01:10, 0:00:44\n", "I0206 15:00:46.535900 140642927564608 util.py:257] E15 Validation 1024/2148, done at 2020-02-06 15:01:09, 0:00:44\n", "W0206 15:01:10.771654 140642927564608 util.py:270] E15 Validation ----/2148, done at 2020-02-06 15:01:10\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 15:01:10.772899 140642927564608 training.py:395] E15 ClassificationTrainingApp\n", "I0206 15:01:10.774501 140642927564608 training.py:476] E15 val 0.0784 loss, 97.6% correct, 0.1060 precision, 0.9412 recall, 0.1906 f1 score\n", "I0206 15:01:10.774887 140642927564608 training.py:489] E15 val_neg 0.0781 loss, 97.6% correct (50168 of 51382)\n", "I0206 15:01:10.775223 140642927564608 training.py:501] E15 val_pos 0.1849 loss, 94.1% correct (144 of 153)\n", "I0206 15:01:10.786825 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.750000.state\n", "I0206 15:01:10.789188 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 15:01:10.791765 140642927564608 training.py:617] SHA1: 137bf5b033591fc48ac4431c3cbc58df8c4a080b\n", "I0206 15:01:10.792482 140642927564608 training.py:249] Epoch 16 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:01:11.328937 140642927564608 util.py:233] E16 Training ----/2084, starting\n", "I0206 15:01:12.696629 140642927564608 util.py:257] E16 Training 16/2084, done at 2020-02-06 15:03:42, 0:02:30\n", "I0206 15:01:16.180417 140642927564608 util.py:257] E16 Training 64/2084, done at 2020-02-06 15:03:42, 0:02:30\n", "I0206 15:01:30.116844 140642927564608 util.py:257] E16 Training 256/2084, done at 2020-02-06 15:03:42, 0:02:30\n", "I0206 15:02:30.629392 140642927564608 util.py:257] E16 Training 
1024/2084, done at 2020-02-06 15:03:52, 0:02:40\n", "W0206 15:03:52.864069 140642927564608 util.py:270] E16 Training ----/2084, done at 2020-02-06 15:03:52\n", "I0206 15:03:52.932674 140642927564608 training.py:395] E16 ClassificationTrainingApp\n", "I0206 15:03:52.937837 140642927564608 training.py:476] E16 trn 0.0861 loss, 96.8% correct, 0.9732 precision, 0.9626 recall, 0.9678 f1 score\n", "I0206 15:03:52.938444 140642927564608 training.py:489] E16 trn_neg 0.0859 loss, 97.3% correct (24337 of 25000)\n", "I0206 15:03:52.938968 140642927564608 training.py:501] E16 trn_pos 0.0863 loss, 96.3% correct (24064 of 25000)\n", "I0206 15:03:52.944309 140642927564608 training.py:249] Epoch 17 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:03:53.473939 140642927564608 util.py:233] E17 Training ----/2084, starting\n", "I0206 15:03:54.831840 140642927564608 util.py:257] E17 Training 16/2084, done at 2020-02-06 15:06:24, 0:02:30\n", "I0206 15:03:58.300073 140642927564608 util.py:257] E17 Training 64/2084, done at 2020-02-06 15:06:24, 0:02:30\n", "I0206 15:04:14.911618 140642927564608 util.py:257] E17 Training 256/2084, done at 2020-02-06 15:06:46, 0:02:52\n", "I0206 15:05:11.438344 140642927564608 util.py:257] E17 Training 1024/2084, done at 2020-02-06 15:06:31, 0:02:37\n", "W0206 15:06:31.863426 140642927564608 util.py:270] E17 Training ----/2084, done at 2020-02-06 15:06:31\n", "I0206 15:06:31.931170 140642927564608 training.py:395] E17 ClassificationTrainingApp\n", "I0206 15:06:31.936601 140642927564608 training.py:476] E17 trn 0.0818 loss, 96.9% correct, 0.9743 precision, 0.9641 recall, 0.9691 f1 score\n", "I0206 15:06:31.937072 140642927564608 training.py:489] E17 trn_neg 0.0820 loss, 97.5% correct (24363 of 25000)\n", "I0206 15:06:31.937742 140642927564608 training.py:501] E17 trn_pos 0.0817 loss, 96.4% correct (24102 of 25000)\n", "I0206 15:06:31.942771 140642927564608 training.py:249] Epoch 18 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:06:32.473348 140642927564608 util.py:233] E18 Training ----/2084, starting\n", "I0206 15:06:33.887759 140642927564608 util.py:257] E18 Training 16/2084, done at 2020-02-06 15:09:03, 0:02:30\n", "I0206 15:06:40.493861 140642927564608 util.py:257] E18 Training 64/2084, done at 2020-02-06 15:10:50, 0:04:17\n", "I0206 15:06:54.427487 140642927564608 util.py:257] E18 Training 256/2084, done at 2020-02-06 15:09:29, 0:02:56\n", "I0206 15:07:53.972424 140642927564608 util.py:257] E18 Training 1024/2084, done at 2020-02-06 15:09:18, 0:02:45\n", "W0206 15:09:13.834794 140642927564608 util.py:270] E18 Training ----/2084, done at 2020-02-06 15:09:13\n", "I0206 15:09:13.903917 140642927564608 training.py:395] E18 ClassificationTrainingApp\n", "I0206 15:09:13.909251 140642927564608 training.py:476] E18 trn 0.0771 loss, 97.0% correct, 0.9744 precision, 0.9663 recall, 0.9704 f1 score\n", "I0206 15:09:13.909725 140642927564608 training.py:489] E18 trn_neg 0.0782 loss, 97.5% correct (24366 of 25000)\n", "I0206 15:09:13.910206 140642927564608 training.py:501] E18 trn_pos 0.0761 loss, 96.6% correct (24158 of 25000)\n", "I0206 15:09:13.915245 140642927564608 training.py:249] Epoch 19 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:09:14.456347 140642927564608 util.py:233] E19 Training ----/2084, starting\n", "I0206 15:09:15.832561 140642927564608 util.py:257] E19 Training 16/2084, done at 2020-02-06 15:11:45, 0:02:31\n", "I0206 15:09:19.316933 140642927564608 util.py:257] E19 Training 64/2084, done at 2020-02-06 15:11:45, 0:02:31\n", "I0206 15:09:33.264099 
140642927564608 util.py:257] E19 Training 256/2084, done at 2020-02-06 15:11:45, 0:02:31\n", "I0206 15:10:29.009478 140642927564608 util.py:257] E19 Training 1024/2084, done at 2020-02-06 15:11:45, 0:02:31\n", "W0206 15:11:49.118031 140642927564608 util.py:270] E19 Training ----/2084, done at 2020-02-06 15:11:49\n", "I0206 15:11:49.184273 140642927564608 training.py:395] E19 ClassificationTrainingApp\n", "I0206 15:11:49.189837 140642927564608 training.py:476] E19 trn 0.0735 loss, 97.1% correct, 0.9755 precision, 0.9660 recall, 0.9707 f1 score\n", "I0206 15:11:49.190322 140642927564608 training.py:489] E19 trn_neg 0.0734 loss, 97.6% correct (24394 of 25000)\n", "I0206 15:11:49.190931 140642927564608 training.py:501] E19 trn_pos 0.0735 loss, 96.6% correct (24149 of 25000)\n", "I0206 15:11:49.196304 140642927564608 training.py:249] Epoch 20 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:11:49.730784 140642927564608 util.py:233] E20 Training ----/2084, starting\n", "I0206 15:11:51.076792 140642927564608 util.py:257] E20 Training 16/2084, done at 2020-02-06 15:14:21, 0:02:30\n", "I0206 15:11:54.561518 140642927564608 util.py:257] E20 Training 64/2084, done at 2020-02-06 15:14:21, 0:02:30\n", "I0206 15:12:10.745333 140642927564608 util.py:257] E20 Training 256/2084, done at 2020-02-06 15:14:39, 0:02:49\n", "I0206 15:13:08.053777 140642927564608 util.py:257] E20 Training 1024/2084, done at 2020-02-06 15:14:28, 0:02:38\n", "W0206 15:14:27.712789 140642927564608 util.py:270] E20 Training ----/2084, done at 2020-02-06 15:14:27\n", "I0206 15:14:27.781065 140642927564608 training.py:395] E20 ClassificationTrainingApp\n", "I0206 15:14:27.786394 140642927564608 training.py:476] E20 trn 0.0710 loss, 97.3% correct, 0.9771 precision, 0.9682 recall, 0.9726 f1 score\n", "I0206 15:14:27.787028 140642927564608 training.py:489] E20 trn_neg 0.0718 loss, 97.7% correct (24434 of 25000)\n", "I0206 15:14:27.787542 140642927564608 training.py:501] E20 trn_pos 0.0701 loss, 96.8% correct (24204 of 25000)\n", "W0206 15:14:27.792844 140642927564608 util.py:233] E20 Validation ----/2148, starting\n", "I0206 15:14:28.368175 140642927564608 util.py:257] E20 Validation 16/2148, done at 2020-02-06 15:15:18, 0:00:50\n", "I0206 15:14:29.418836 140642927564608 util.py:257] E20 Validation 64/2148, done at 2020-02-06 15:15:15, 0:00:47\n", "I0206 15:14:33.565924 140642927564608 util.py:257] E20 Validation 256/2148, done at 2020-02-06 15:15:14, 0:00:46\n", "I0206 15:14:50.602360 140642927564608 util.py:257] E20 Validation 1024/2148, done at 2020-02-06 15:15:15, 0:00:47\n", "W0206 15:15:17.635368 140642927564608 util.py:270] E20 Validation ----/2148, done at 2020-02-06 15:15:17\n", "I0206 15:15:17.636918 140642927564608 training.py:395] E20 ClassificationTrainingApp\n", "I0206 15:15:17.638709 140642927564608 training.py:476] E20 val 0.0520 loss, 98.5% correct, 0.1574 precision, 0.9412 recall, 0.2697 f1 score\n", "I0206 15:15:17.639129 140642927564608 training.py:489] E20 val_neg 0.0515 loss, 98.5% correct (50611 of 51382)\n", "I0206 15:15:17.639542 140642927564608 training.py:501] E20 val_pos 0.2279 loss, 94.1% correct (144 of 153)\n", "I0206 15:15:17.648515 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.1000000.state\n", "I0206 15:15:17.650082 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n" ] }, { "name": "stderr", "output_type": 
"stream", "text": [ "I0206 15:15:17.651842 140642927564608 training.py:617] SHA1: 7b8cd08571eec2ced9516570ebfee578476f0257\n", "I0206 15:15:17.652386 140642927564608 training.py:249] Epoch 21 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:15:18.186684 140642927564608 util.py:233] E21 Training ----/2084, starting\n", "I0206 15:15:19.545641 140642927564608 util.py:257] E21 Training 16/2084, done at 2020-02-06 15:17:49, 0:02:30\n", "I0206 15:15:23.029853 140642927564608 util.py:257] E21 Training 64/2084, done at 2020-02-06 15:17:49, 0:02:30\n", "I0206 15:15:37.290381 140642927564608 util.py:257] E21 Training 256/2084, done at 2020-02-06 15:17:52, 0:02:33\n", "I0206 15:16:35.153860 140642927564608 util.py:257] E21 Training 1024/2084, done at 2020-02-06 15:17:54, 0:02:35\n", "W0206 15:17:55.859828 140642927564608 util.py:270] E21 Training ----/2084, done at 2020-02-06 15:17:55\n", "I0206 15:17:55.926746 140642927564608 training.py:395] E21 ClassificationTrainingApp\n", "I0206 15:17:55.932224 140642927564608 training.py:476] E21 trn 0.0708 loss, 97.4% correct, 0.9785 precision, 0.9688 recall, 0.9736 f1 score\n", "I0206 15:17:55.932696 140642927564608 training.py:489] E21 trn_neg 0.0752 loss, 97.9% correct (24468 of 25000)\n", "I0206 15:17:55.933143 140642927564608 training.py:501] E21 trn_pos 0.0665 loss, 96.9% correct (24221 of 25000)\n", "I0206 15:17:55.938204 140642927564608 training.py:249] Epoch 22 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:17:56.474834 140642927564608 util.py:233] E22 Training ----/2084, starting\n", "I0206 15:17:57.840744 140642927564608 util.py:257] E22 Training 16/2084, done at 2020-02-06 15:20:27, 0:02:30\n", "I0206 15:18:01.325833 140642927564608 util.py:257] E22 Training 64/2084, done at 2020-02-06 15:20:27, 0:02:31\n", "I0206 15:18:15.858009 140642927564608 util.py:257] E22 Training 256/2084, done at 2020-02-06 15:20:32, 0:02:35\n", "I0206 15:19:13.226121 140642927564608 util.py:257] E22 Training 1024/2084, done at 2020-02-06 15:20:32, 0:02:35\n", "W0206 15:20:32.119459 140642927564608 util.py:270] E22 Training ----/2084, done at 2020-02-06 15:20:32\n", "I0206 15:20:32.185018 140642927564608 training.py:395] E22 ClassificationTrainingApp\n", "I0206 15:20:32.190445 140642927564608 training.py:476] E22 trn 0.0703 loss, 97.3% correct, 0.9760 precision, 0.9696 recall, 0.9728 f1 score\n", "I0206 15:20:32.190910 140642927564608 training.py:489] E22 trn_neg 0.0739 loss, 97.6% correct (24404 of 25000)\n", "I0206 15:20:32.191488 140642927564608 training.py:501] E22 trn_pos 0.0667 loss, 97.0% correct (24240 of 25000)\n", "I0206 15:20:32.196434 140642927564608 training.py:249] Epoch 23 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:20:32.728997 140642927564608 util.py:233] E23 Training ----/2084, starting\n", "I0206 15:20:34.207378 140642927564608 util.py:257] E23 Training 16/2084, done at 2020-02-06 15:23:04, 0:02:30\n", "I0206 15:20:37.693376 140642927564608 util.py:257] E23 Training 64/2084, done at 2020-02-06 15:23:04, 0:02:31\n", "I0206 15:20:51.629726 140642927564608 util.py:257] E23 Training 256/2084, done at 2020-02-06 15:23:04, 0:02:30\n", "I0206 15:21:47.737848 140642927564608 util.py:257] E23 Training 1024/2084, done at 2020-02-06 15:23:04, 0:02:31\n", "W0206 15:23:04.745638 140642927564608 util.py:270] E23 Training ----/2084, done at 2020-02-06 15:23:04\n", "I0206 15:23:04.811769 140642927564608 training.py:395] E23 ClassificationTrainingApp\n", "I0206 15:23:04.817318 140642927564608 training.py:476] E23 trn 0.0687 loss, 97.5% correct, 0.9782 
precision, 0.9712 recall, 0.9747 f1 score\n", "I0206 15:23:04.817929 140642927564608 training.py:489] E23 trn_neg 0.0743 loss, 97.8% correct (24459 of 25000)\n", "I0206 15:23:04.818451 140642927564608 training.py:501] E23 trn_pos 0.0631 loss, 97.1% correct (24279 of 25000)\n", "I0206 15:23:04.823557 140642927564608 training.py:249] Epoch 24 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:23:05.390276 140642927564608 util.py:233] E24 Training ----/2084, starting\n", "I0206 15:23:06.866730 140642927564608 util.py:257] E24 Training 16/2084, done at 2020-02-06 15:25:36, 0:02:31\n", "I0206 15:23:12.496309 140642927564608 util.py:257] E24 Training 64/2084, done at 2020-02-06 15:26:50, 0:03:44\n", "I0206 15:23:26.430289 140642927564608 util.py:257] E24 Training 256/2084, done at 2020-02-06 15:25:54, 0:02:48\n", "I0206 15:24:22.173238 140642927564608 util.py:257] E24 Training 1024/2084, done at 2020-02-06 15:25:41, 0:02:35\n", "W0206 15:25:42.906771 140642927564608 util.py:270] E24 Training ----/2084, done at 2020-02-06 15:25:42\n", "I0206 15:25:42.972372 140642927564608 training.py:395] E24 ClassificationTrainingApp\n", "I0206 15:25:42.977930 140642927564608 training.py:476] E24 trn 0.0652 loss, 97.6% correct, 0.9789 precision, 0.9721 recall, 0.9755 f1 score\n", "I0206 15:25:42.978627 140642927564608 training.py:489] E24 trn_neg 0.0695 loss, 97.9% correct (24477 of 25000)\n", "I0206 15:25:42.979076 140642927564608 training.py:501] E24 trn_pos 0.0609 loss, 97.2% correct (24302 of 25000)\n", "I0206 15:25:42.984478 140642927564608 training.py:249] Epoch 25 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:25:43.518209 140642927564608 util.py:233] E25 Training ----/2084, starting\n", "I0206 15:25:44.962636 140642927564608 util.py:257] E25 Training 16/2084, done at 2020-02-06 15:28:15, 0:02:31\n", "I0206 15:25:48.447070 140642927564608 util.py:257] E25 Training 64/2084, done at 2020-02-06 15:28:15, 0:02:30\n", "I0206 15:26:04.225130 140642927564608 util.py:257] E25 Training 256/2084, done at 2020-02-06 15:28:30, 0:02:46\n", "I0206 15:27:00.590468 140642927564608 util.py:257] E25 Training 1024/2084, done at 2020-02-06 15:28:20, 0:02:35\n", "W0206 15:28:21.322966 140642927564608 util.py:270] E25 Training ----/2084, done at 2020-02-06 15:28:21\n", "I0206 15:28:21.388554 140642927564608 training.py:395] E25 ClassificationTrainingApp\n", "I0206 15:28:21.394115 140642927564608 training.py:476] E25 trn 0.0631 loss, 97.6% correct, 0.9782 precision, 0.9742 recall, 0.9762 f1 score\n", "I0206 15:28:21.394593 140642927564608 training.py:489] E25 trn_neg 0.0689 loss, 97.8% correct (24457 of 25000)\n", "I0206 15:28:21.395120 140642927564608 training.py:501] E25 trn_pos 0.0573 loss, 97.4% correct (24356 of 25000)\n", "W0206 15:28:21.400490 140642927564608 util.py:233] E25 Validation ----/2148, starting\n", "I0206 15:28:21.958443 140642927564608 util.py:257] E25 Validation 16/2148, done at 2020-02-06 15:29:08, 0:00:47\n", "I0206 15:28:22.891452 140642927564608 util.py:257] E25 Validation 64/2148, done at 2020-02-06 15:29:04, 0:00:42\n", "I0206 15:28:26.948331 140642927564608 util.py:257] E25 Validation 256/2148, done at 2020-02-06 15:29:06, 0:00:44\n", "I0206 15:28:43.361787 140642927564608 util.py:257] E25 Validation 1024/2148, done at 2020-02-06 15:29:07, 0:00:45\n", "W0206 15:29:05.461277 140642927564608 util.py:270] E25 Validation ----/2148, done at 2020-02-06 15:29:05\n", "I0206 15:29:05.462596 140642927564608 training.py:395] E25 ClassificationTrainingApp\n", "I0206 15:29:05.464859 140642927564608 
training.py:476] E25 val 0.0848 loss, 97.5% correct, 0.1022 precision, 0.9477 recall, 0.1845 f1 score\n", "I0206 15:29:05.465337 140642927564608 training.py:489] E25 val_neg 0.0846 loss, 97.5% correct (50108 of 51382)\n", "I0206 15:29:05.465837 140642927564608 training.py:501] E25 val_pos 0.1605 loss, 94.8% correct (145 of 153)\n", "I0206 15:29:05.474387 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.1250000.state\n", "I0206 15:29:05.476453 140642927564608 training.py:617] SHA1: e1a29113b59b0560842c70078f0f76fa6c00d6cd\n", "I0206 15:29:05.476998 140642927564608 training.py:249] Epoch 26 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:29:06.031041 140642927564608 util.py:233] E26 Training ----/2084, starting\n", "I0206 15:29:07.391623 140642927564608 util.py:257] E26 Training 16/2084, done at 2020-02-06 15:31:37, 0:02:31\n", "I0206 15:29:10.877000 140642927564608 util.py:257] E26 Training 64/2084, done at 2020-02-06 15:31:37, 0:02:31\n", "I0206 15:29:24.814799 140642927564608 util.py:257] E26 Training 256/2084, done at 2020-02-06 15:31:37, 0:02:31\n", "I0206 15:30:24.978418 140642927564608 util.py:257] E26 Training 1024/2084, done at 2020-02-06 15:31:46, 0:02:39\n", "W0206 15:31:41.908418 140642927564608 util.py:270] E26 Training ----/2084, done at 2020-02-06 15:31:41\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 15:31:41.973445 140642927564608 training.py:395] E26 ClassificationTrainingApp\n", "I0206 15:31:41.979084 140642927564608 training.py:476] E26 trn 0.0619 loss, 97.7% correct, 0.9783 precision, 0.9753 recall, 0.9768 f1 score\n", "I0206 15:31:41.979583 140642927564608 training.py:489] E26 trn_neg 0.0684 loss, 97.8% correct (24460 of 25000)\n", "I0206 15:31:41.980063 140642927564608 training.py:501] E26 trn_pos 0.0554 loss, 97.5% correct (24382 of 25000)\n", "I0206 15:31:41.985476 140642927564608 training.py:249] Epoch 27 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:31:42.525178 140642927564608 util.py:233] E27 Training ----/2084, starting\n", "I0206 15:31:43.874643 140642927564608 util.py:257] E27 Training 16/2084, done at 2020-02-06 15:34:13, 0:02:30\n", "I0206 15:31:47.358914 140642927564608 util.py:257] E27 Training 64/2084, done at 2020-02-06 15:34:13, 0:02:30\n", "I0206 15:32:02.819437 140642927564608 util.py:257] E27 Training 256/2084, done at 2020-02-06 15:34:26, 0:02:43\n", "I0206 15:33:00.792541 140642927564608 util.py:257] E27 Training 1024/2084, done at 2020-02-06 15:34:21, 0:02:38\n", "W0206 15:34:21.203074 140642927564608 util.py:270] E27 Training ----/2084, done at 2020-02-06 15:34:21\n", "I0206 15:34:21.268492 140642927564608 training.py:395] E27 ClassificationTrainingApp\n", "I0206 15:34:21.274059 140642927564608 training.py:476] E27 trn 0.0588 loss, 97.8% correct, 0.9804 precision, 0.9746 recall, 0.9775 f1 score\n", "I0206 15:34:21.274534 140642927564608 training.py:489] E27 trn_neg 0.0642 loss, 98.0% correct (24512 of 25000)\n", "I0206 15:34:21.275241 140642927564608 training.py:501] E27 trn_pos 0.0533 loss, 97.5% correct (24365 of 25000)\n", "I0206 15:34:21.280223 140642927564608 training.py:249] Epoch 28 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:34:21.809926 140642927564608 util.py:233] E28 Training ----/2084, starting\n", "I0206 15:34:23.204879 140642927564608 util.py:257] E28 Training 16/2084, done at 2020-02-06 15:36:53, 0:02:31\n", "I0206 15:34:26.689859 140642927564608 util.py:257] E28 Training 64/2084, done at 2020-02-06 
15:36:53, 0:02:31\n", "I0206 15:34:40.625777 140642927564608 util.py:257] E28 Training 256/2084, done at 2020-02-06 15:36:53, 0:02:30\n", "I0206 15:35:37.159817 140642927564608 util.py:257] E28 Training 1024/2084, done at 2020-02-06 15:36:54, 0:02:32\n", "W0206 15:36:55.906468 140642927564608 util.py:270] E28 Training ----/2084, done at 2020-02-06 15:36:55\n", "I0206 15:36:55.970431 140642927564608 training.py:395] E28 ClassificationTrainingApp\n", "I0206 15:36:55.976146 140642927564608 training.py:476] E28 trn 0.0576 loss, 97.8% correct, 0.9807 precision, 0.9760 recall, 0.9784 f1 score\n", "I0206 15:36:55.976612 140642927564608 training.py:489] E28 trn_neg 0.0653 loss, 98.1% correct (24521 of 25000)\n", "I0206 15:36:55.977098 140642927564608 training.py:501] E28 trn_pos 0.0500 loss, 97.6% correct (24400 of 25000)\n", "I0206 15:36:55.982633 140642927564608 training.py:249] Epoch 29 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:36:56.539322 140642927564608 util.py:233] E29 Training ----/2084, starting\n", "I0206 15:36:57.892982 140642927564608 util.py:257] E29 Training 16/2084, done at 2020-02-06 15:39:28, 0:02:31\n", "I0206 15:37:01.378277 140642927564608 util.py:257] E29 Training 64/2084, done at 2020-02-06 15:39:27, 0:02:31\n", "I0206 15:37:15.314223 140642927564608 util.py:257] E29 Training 256/2084, done at 2020-02-06 15:39:27, 0:02:30\n", "I0206 15:38:12.392914 140642927564608 util.py:257] E29 Training 1024/2084, done at 2020-02-06 15:39:30, 0:02:33\n", "W0206 15:39:29.305689 140642927564608 util.py:270] E29 Training ----/2084, done at 2020-02-06 15:39:29\n", "I0206 15:39:29.368910 140642927564608 training.py:395] E29 ClassificationTrainingApp\n", "I0206 15:39:29.374540 140642927564608 training.py:476] E29 trn 0.0566 loss, 97.9% correct, 0.9813 precision, 0.9767 recall, 0.9790 f1 score\n", "I0206 15:39:29.375046 140642927564608 training.py:489] E29 trn_neg 0.0621 loss, 98.1% correct (24535 of 25000)\n", "I0206 15:39:29.375551 140642927564608 training.py:501] E29 trn_pos 0.0511 loss, 97.7% correct (24418 of 25000)\n", "I0206 15:39:29.380450 140642927564608 training.py:249] Epoch 30 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:39:29.914402 140642927564608 util.py:233] E30 Training ----/2084, starting\n", "I0206 15:39:31.272629 140642927564608 util.py:257] E30 Training 16/2084, done at 2020-02-06 15:42:01, 0:02:31\n", "I0206 15:39:34.757306 140642927564608 util.py:257] E30 Training 64/2084, done at 2020-02-06 15:42:01, 0:02:31\n", "I0206 15:39:48.692937 140642927564608 util.py:257] E30 Training 256/2084, done at 2020-02-06 15:42:01, 0:02:30\n", "I0206 15:40:44.706930 140642927564608 util.py:257] E30 Training 1024/2084, done at 2020-02-06 15:42:01, 0:02:31\n", "W0206 15:42:01.990622 140642927564608 util.py:270] E30 Training ----/2084, done at 2020-02-06 15:42:01\n", "I0206 15:42:02.053372 140642927564608 training.py:395] E30 ClassificationTrainingApp\n", "I0206 15:42:02.058952 140642927564608 training.py:476] E30 trn 0.0532 loss, 98.0% correct, 0.9812 precision, 0.9778 recall, 0.9795 f1 score\n", "I0206 15:42:02.060214 140642927564608 training.py:489] E30 trn_neg 0.0600 loss, 98.1% correct (24531 of 25000)\n", "I0206 15:42:02.060967 140642927564608 training.py:501] E30 trn_pos 0.0464 loss, 97.8% correct (24444 of 25000)\n", "W0206 15:42:02.066256 140642927564608 util.py:233] E30 Validation ----/2148, starting\n", "I0206 15:42:02.664026 140642927564608 util.py:257] E30 Validation 16/2148, done at 2020-02-06 15:42:54, 0:00:52\n", "I0206 15:42:03.768949 140642927564608 
util.py:257] E30 Validation 64/2148, done at 2020-02-06 15:42:52, 0:00:49\n", "I0206 15:42:07.954527 140642927564608 util.py:257] E30 Validation 256/2148, done at 2020-02-06 15:42:49, 0:00:47\n", "I0206 15:42:28.151097 140642927564608 util.py:257] E30 Validation 1024/2148, done at 2020-02-06 15:42:56, 0:00:54\n", "W0206 15:42:54.098073 140642927564608 util.py:270] E30 Validation ----/2148, done at 2020-02-06 15:42:54\n", "I0206 15:42:54.099723 140642927564608 training.py:395] E30 ClassificationTrainingApp\n", "I0206 15:42:54.101296 140642927564608 training.py:476] E30 val 0.0585 loss, 98.4% correct, 0.1480 precision, 0.9412 recall, 0.2558 f1 score\n", "I0206 15:42:54.101779 140642927564608 training.py:489] E30 val_neg 0.0580 loss, 98.4% correct (50553 of 51382)\n", "I0206 15:42:54.102159 140642927564608 training.py:501] E30 val_pos 0.2214 loss, 94.1% correct (144 of 153)\n", "I0206 15:42:54.109718 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.1500000.state\n", "I0206 15:42:54.111334 140642927564608 training.py:617] SHA1: 27e3f2a7506c184291834e961960be5c3f67ecdb\n", "I0206 15:42:54.111760 140642927564608 training.py:249] Epoch 31 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:42:54.656118 140642927564608 util.py:233] E31 Training ----/2084, starting\n", "I0206 15:42:55.996992 140642927564608 util.py:257] E31 Training 16/2084, done at 2020-02-06 15:45:26, 0:02:31\n", "I0206 15:42:59.482344 140642927564608 util.py:257] E31 Training 64/2084, done at 2020-02-06 15:45:26, 0:02:31\n", "I0206 15:43:13.421674 140642927564608 util.py:257] E31 Training 256/2084, done at 2020-02-06 15:45:26, 0:02:31\n", "I0206 15:44:09.155820 140642927564608 util.py:257] E31 Training 1024/2084, done at 2020-02-06 15:45:26, 0:02:30\n", "W0206 15:45:26.451232 140642927564608 util.py:270] E31 Training ----/2084, done at 2020-02-06 15:45:26\n", "I0206 15:45:26.514629 140642927564608 training.py:395] E31 ClassificationTrainingApp\n", "I0206 15:45:26.520435 140642927564608 training.py:476] E31 trn 0.0542 loss, 98.0% correct, 0.9813 precision, 0.9786 recall, 0.9800 f1 score\n", "I0206 15:45:26.520920 140642927564608 training.py:489] E31 trn_neg 0.0620 loss, 98.1% correct (24533 of 25000)\n", "I0206 15:45:26.521442 140642927564608 training.py:501] E31 trn_pos 0.0463 loss, 97.9% correct (24466 of 25000)\n", "I0206 15:45:26.526522 140642927564608 training.py:249] Epoch 32 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:45:27.057990 140642927564608 util.py:233] E32 Training ----/2084, starting\n", "I0206 15:45:28.472446 140642927564608 util.py:257] E32 Training 16/2084, done at 2020-02-06 15:47:58, 0:02:30\n", "I0206 15:45:31.956554 140642927564608 util.py:257] E32 Training 64/2084, done at 2020-02-06 15:47:58, 0:02:30\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 15:45:45.892068 140642927564608 util.py:257] E32 Training 256/2084, done at 2020-02-06 15:47:58, 0:02:30\n", "I0206 15:46:42.011992 140642927564608 util.py:257] E32 Training 1024/2084, done at 2020-02-06 15:47:59, 0:02:31\n", "W0206 15:48:00.972617 140642927564608 util.py:270] E32 Training ----/2084, done at 2020-02-06 15:48:00\n", "I0206 15:48:01.035237 140642927564608 training.py:395] E32 ClassificationTrainingApp\n", "I0206 15:48:01.040879 140642927564608 training.py:476] E32 trn 0.0531 loss, 97.9% correct, 0.9806 precision, 0.9782 recall, 0.9794 f1 score\n", "I0206 15:48:01.041356 140642927564608 training.py:489] E32 trn_neg 0.0591 loss, 
98.1% correct (24516 of 25000)\n", "I0206 15:48:01.042004 140642927564608 training.py:501] E32 trn_pos 0.0471 loss, 97.8% correct (24454 of 25000)\n", "I0206 15:48:01.047167 140642927564608 training.py:249] Epoch 33 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:48:01.592511 140642927564608 util.py:233] E33 Training ----/2084, starting\n", "I0206 15:48:03.147814 140642927564608 util.py:257] E33 Training 16/2084, done at 2020-02-06 15:50:33, 0:02:31\n", "I0206 15:48:06.631737 140642927564608 util.py:257] E33 Training 64/2084, done at 2020-02-06 15:50:33, 0:02:30\n", "I0206 15:48:20.564869 140642927564608 util.py:257] E33 Training 256/2084, done at 2020-02-06 15:50:33, 0:02:30\n", "I0206 15:49:16.304271 140642927564608 util.py:257] E33 Training 1024/2084, done at 2020-02-06 15:50:33, 0:02:30\n", "W0206 15:50:33.839139 140642927564608 util.py:270] E33 Training ----/2084, done at 2020-02-06 15:50:33\n", "I0206 15:50:33.901730 140642927564608 training.py:395] E33 ClassificationTrainingApp\n", "I0206 15:50:33.907574 140642927564608 training.py:476] E33 trn 0.0522 loss, 98.0% correct, 0.9823 precision, 0.9785 recall, 0.9804 f1 score\n", "I0206 15:50:33.908184 140642927564608 training.py:489] E33 trn_neg 0.0592 loss, 98.2% correct (24560 of 25000)\n", "I0206 15:50:33.909336 140642927564608 training.py:501] E33 trn_pos 0.0453 loss, 97.9% correct (24463 of 25000)\n", "I0206 15:50:33.914885 140642927564608 training.py:249] Epoch 34 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:50:34.446976 140642927564608 util.py:233] E34 Training ----/2084, starting\n", "I0206 15:50:35.878371 140642927564608 util.py:257] E34 Training 16/2084, done at 2020-02-06 15:53:06, 0:02:31\n", "I0206 15:50:39.361372 140642927564608 util.py:257] E34 Training 64/2084, done at 2020-02-06 15:53:05, 0:02:30\n", "I0206 15:50:53.296176 140642927564608 util.py:257] E34 Training 256/2084, done at 2020-02-06 15:53:05, 0:02:30\n", "I0206 15:51:49.033519 140642927564608 util.py:257] E34 Training 1024/2084, done at 2020-02-06 15:53:05, 0:02:30\n", "W0206 15:53:06.318003 140642927564608 util.py:270] E34 Training ----/2084, done at 2020-02-06 15:53:06\n", "I0206 15:53:06.380782 140642927564608 training.py:395] E34 ClassificationTrainingApp\n", "I0206 15:53:06.386868 140642927564608 training.py:476] E34 trn 0.0492 loss, 98.1% correct, 0.9824 precision, 0.9798 recall, 0.9811 f1 score\n", "I0206 15:53:06.387334 140642927564608 training.py:489] E34 trn_neg 0.0550 loss, 98.2% correct (24560 of 25000)\n", "I0206 15:53:06.387809 140642927564608 training.py:501] E34 trn_pos 0.0434 loss, 98.0% correct (24495 of 25000)\n", "I0206 15:53:06.392936 140642927564608 training.py:249] Epoch 35 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:53:06.924360 140642927564608 util.py:233] E35 Training ----/2084, starting\n", "I0206 15:53:08.269657 140642927564608 util.py:257] E35 Training 16/2084, done at 2020-02-06 15:55:38, 0:02:30\n", "I0206 15:53:11.755108 140642927564608 util.py:257] E35 Training 64/2084, done at 2020-02-06 15:55:38, 0:02:31\n", "I0206 15:53:25.690394 140642927564608 util.py:257] E35 Training 256/2084, done at 2020-02-06 15:55:38, 0:02:30\n", "I0206 15:54:23.002907 140642927564608 util.py:257] E35 Training 1024/2084, done at 2020-02-06 15:55:41, 0:02:34\n", "W0206 15:55:39.916308 140642927564608 util.py:270] E35 Training ----/2084, done at 2020-02-06 15:55:39\n", "I0206 15:55:39.979928 140642927564608 training.py:395] E35 ClassificationTrainingApp\n", "I0206 15:55:39.986041 140642927564608 training.py:476] E35 trn 0.0537 
loss, 98.1% correct, 0.9824 precision, 0.9787 recall, 0.9805 f1 score\n", "I0206 15:55:39.986512 140642927564608 training.py:489] E35 trn_neg 0.0625 loss, 98.2% correct (24562 of 25000)\n", "I0206 15:55:39.987102 140642927564608 training.py:501] E35 trn_pos 0.0450 loss, 97.9% correct (24467 of 25000)\n", "W0206 15:55:39.992474 140642927564608 util.py:233] E35 Validation ----/2148, starting\n", "I0206 15:55:40.692413 140642927564608 util.py:257] E35 Validation 16/2148, done at 2020-02-06 15:56:50, 0:01:10\n", "I0206 15:55:41.775847 140642927564608 util.py:257] E35 Validation 64/2148, done at 2020-02-06 15:56:33, 0:00:53\n", "I0206 15:55:45.819490 140642927564608 util.py:257] E35 Validation 256/2148, done at 2020-02-06 15:56:27, 0:00:47\n", "I0206 15:56:01.426654 140642927564608 util.py:257] E35 Validation 1024/2148, done at 2020-02-06 15:56:24, 0:00:44\n", "W0206 15:56:23.707417 140642927564608 util.py:270] E35 Validation ----/2148, done at 2020-02-06 15:56:23\n", "I0206 15:56:23.708814 140642927564608 training.py:395] E35 ClassificationTrainingApp\n", "I0206 15:56:23.711288 140642927564608 training.py:476] E35 val 0.0422 loss, 98.9% correct, 0.1992 precision, 0.9346 recall, 0.3284 f1 score\n", "I0206 15:56:23.711703 140642927564608 training.py:489] E35 val_neg 0.0416 loss, 98.9% correct (50807 of 51382)\n", "I0206 15:56:23.712294 140642927564608 training.py:501] E35 val_pos 0.2336 loss, 93.5% correct (143 of 153)\n", "I0206 15:56:23.720847 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.1750000.state\n", "I0206 15:56:23.722790 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 15:56:23.725317 140642927564608 training.py:617] SHA1: db629bb7912917ba3175590f62a8c277220c629a\n", "I0206 15:56:23.725852 140642927564608 training.py:249] Epoch 36 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:56:24.253796 140642927564608 util.py:233] E36 Training ----/2084, starting\n", "I0206 15:56:25.613322 140642927564608 util.py:257] E36 Training 16/2084, done at 2020-02-06 15:58:55, 0:02:31\n", "I0206 15:56:29.097562 140642927564608 util.py:257] E36 Training 64/2084, done at 2020-02-06 15:58:55, 0:02:31\n", "I0206 15:56:43.038787 140642927564608 util.py:257] E36 Training 256/2084, done at 2020-02-06 15:58:55, 0:02:31\n", "I0206 15:57:39.310736 140642927564608 util.py:257] E36 Training 1024/2084, done at 2020-02-06 15:58:56, 0:02:32\n", "W0206 15:58:56.406122 140642927564608 util.py:270] E36 Training ----/2084, done at 2020-02-06 15:58:56\n", "I0206 15:58:56.469916 140642927564608 training.py:395] E36 ClassificationTrainingApp\n", "I0206 15:58:56.476073 140642927564608 training.py:476] E36 trn 0.0495 loss, 98.2% correct, 0.9821 precision, 0.9814 recall, 0.9818 f1 score\n", "I0206 15:58:56.476588 140642927564608 training.py:489] E36 trn_neg 0.0572 loss, 98.2% correct (24553 of 25000)\n", "I0206 15:58:56.477119 140642927564608 training.py:501] E36 trn_pos 0.0419 loss, 98.1% correct (24536 of 25000)\n", "I0206 15:58:56.482503 140642927564608 training.py:249] Epoch 37 of 100, 2084/2148 batches of size 24*1\n", "W0206 15:58:57.014295 140642927564608 util.py:233] E37 Training ----/2084, starting\n", "I0206 15:58:58.424066 140642927564608 util.py:257] E37 Training 16/2084, done at 2020-02-06 16:01:28, 0:02:30\n", "I0206 15:59:01.909656 140642927564608 util.py:257] E37 Training 64/2084, done at 2020-02-06 16:01:28, 
0:02:31\n", "I0206 15:59:15.840789 140642927564608 util.py:257] E37 Training 256/2084, done at 2020-02-06 16:01:28, 0:02:30\n", "I0206 16:00:14.573641 140642927564608 util.py:257] E37 Training 1024/2084, done at 2020-02-06 16:01:34, 0:02:37\n", "W0206 16:01:31.484334 140642927564608 util.py:270] E37 Training ----/2084, done at 2020-02-06 16:01:31\n", "I0206 16:01:31.549990 140642927564608 training.py:395] E37 ClassificationTrainingApp\n", "I0206 16:01:31.556272 140642927564608 training.py:476] E37 trn 0.0473 loss, 98.2% correct, 0.9833 precision, 0.9807 recall, 0.9820 f1 score\n", "I0206 16:01:31.556870 140642927564608 training.py:489] E37 trn_neg 0.0546 loss, 98.3% correct (24583 of 25000)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 16:01:31.557457 140642927564608 training.py:501] E37 trn_pos 0.0401 loss, 98.1% correct (24518 of 25000)\n", "I0206 16:01:31.562527 140642927564608 training.py:249] Epoch 38 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:01:32.092707 140642927564608 util.py:233] E38 Training ----/2084, starting\n", "I0206 16:01:33.444912 140642927564608 util.py:257] E38 Training 16/2084, done at 2020-02-06 16:04:03, 0:02:31\n", "I0206 16:01:36.928963 140642927564608 util.py:257] E38 Training 64/2084, done at 2020-02-06 16:04:03, 0:02:31\n", "I0206 16:01:50.863913 140642927564608 util.py:257] E38 Training 256/2084, done at 2020-02-06 16:04:03, 0:02:30\n", "I0206 16:02:46.597713 140642927564608 util.py:257] E38 Training 1024/2084, done at 2020-02-06 16:04:03, 0:02:30\n", "W0206 16:04:04.853248 140642927564608 util.py:270] E38 Training ----/2084, done at 2020-02-06 16:04:04\n", "I0206 16:04:04.919383 140642927564608 training.py:395] E38 ClassificationTrainingApp\n", "I0206 16:04:04.925765 140642927564608 training.py:476] E38 trn 0.0478 loss, 98.2% correct, 0.9845 precision, 0.9796 recall, 0.9821 f1 score\n", "I0206 16:04:04.926285 140642927564608 training.py:489] E38 trn_neg 0.0552 loss, 98.5% correct (24614 of 25000)\n", "I0206 16:04:04.926761 140642927564608 training.py:501] E38 trn_pos 0.0403 loss, 98.0% correct (24491 of 25000)\n", "I0206 16:04:04.931621 140642927564608 training.py:249] Epoch 39 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:04:05.480839 140642927564608 util.py:233] E39 Training ----/2084, starting\n", "I0206 16:04:06.918951 140642927564608 util.py:257] E39 Training 16/2084, done at 2020-02-06 16:06:36, 0:02:30\n", "I0206 16:04:10.403338 140642927564608 util.py:257] E39 Training 64/2084, done at 2020-02-06 16:06:36, 0:02:30\n", "I0206 16:04:24.339357 140642927564608 util.py:257] E39 Training 256/2084, done at 2020-02-06 16:06:36, 0:02:30\n", "I0206 16:05:20.078970 140642927564608 util.py:257] E39 Training 1024/2084, done at 2020-02-06 16:06:36, 0:02:30\n", "W0206 16:06:38.943922 140642927564608 util.py:270] E39 Training ----/2084, done at 2020-02-06 16:06:38\n", "I0206 16:06:39.007967 140642927564608 training.py:395] E39 ClassificationTrainingApp\n", "I0206 16:06:39.014312 140642927564608 training.py:476] E39 trn 0.0477 loss, 98.2% correct, 0.9834 precision, 0.9816 recall, 0.9825 f1 score\n", "I0206 16:06:39.014932 140642927564608 training.py:489] E39 trn_neg 0.0558 loss, 98.3% correct (24586 of 25000)\n", "I0206 16:06:39.015529 140642927564608 training.py:501] E39 trn_pos 0.0395 loss, 98.2% correct (24539 of 25000)\n", "I0206 16:06:39.020653 140642927564608 training.py:249] Epoch 40 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:06:39.568649 140642927564608 util.py:233] E40 Training ----/2084, starting\n", "I0206 
16:06:40.932077 140642927564608 util.py:257] E40 Training 16/2084, done at 2020-02-06 16:09:10, 0:02:30\n", "I0206 16:06:44.416463 140642927564608 util.py:257] E40 Training 64/2084, done at 2020-02-06 16:09:10, 0:02:30\n", "I0206 16:06:58.352196 140642927564608 util.py:257] E40 Training 256/2084, done at 2020-02-06 16:09:10, 0:02:30\n", "I0206 16:07:54.087799 140642927564608 util.py:257] E40 Training 1024/2084, done at 2020-02-06 16:09:10, 0:02:30\n", "W0206 16:09:11.725785 140642927564608 util.py:270] E40 Training ----/2084, done at 2020-02-06 16:09:11\n", "I0206 16:09:11.792655 140642927564608 training.py:395] E40 ClassificationTrainingApp\n", "I0206 16:09:11.799138 140642927564608 training.py:476] E40 trn 0.0478 loss, 98.2% correct, 0.9836 precision, 0.9807 recall, 0.9822 f1 score\n", "I0206 16:09:11.799648 140642927564608 training.py:489] E40 trn_neg 0.0567 loss, 98.4% correct (24592 of 25000)\n", "I0206 16:09:11.800124 140642927564608 training.py:501] E40 trn_pos 0.0389 loss, 98.1% correct (24517 of 25000)\n", "W0206 16:09:11.805236 140642927564608 util.py:233] E40 Validation ----/2148, starting\n", "I0206 16:09:12.401014 140642927564608 util.py:257] E40 Validation 16/2148, done at 2020-02-06 16:10:05, 0:00:53\n", "I0206 16:09:13.444230 140642927564608 util.py:257] E40 Validation 64/2148, done at 2020-02-06 16:10:00, 0:00:47\n", "I0206 16:09:17.770963 140642927564608 util.py:257] E40 Validation 256/2148, done at 2020-02-06 16:10:00, 0:00:48\n", "I0206 16:09:36.322448 140642927564608 util.py:257] E40 Validation 1024/2148, done at 2020-02-06 16:10:02, 0:00:50\n", "W0206 16:10:03.596500 140642927564608 util.py:270] E40 Validation ----/2148, done at 2020-02-06 16:10:03\n", "I0206 16:10:03.598066 140642927564608 training.py:395] E40 ClassificationTrainingApp\n", "I0206 16:10:03.599720 140642927564608 training.py:476] E40 val 0.0634 loss, 98.1% correct, 0.1275 precision, 0.9412 recall, 0.2246 f1 score\n", "I0206 16:10:03.600150 140642927564608 training.py:489] E40 val_neg 0.0630 loss, 98.1% correct (50397 of 51382)\n", "I0206 16:10:03.600627 140642927564608 training.py:501] E40 val_pos 0.2177 loss, 94.1% correct (144 of 153)\n", "I0206 16:10:03.608407 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.2000000.state\n", "I0206 16:10:03.610111 140642927564608 training.py:617] SHA1: d00fcc827187c9b2ecfd722f28a7a0a7fcc537d3\n", "I0206 16:10:03.611457 140642927564608 training.py:249] Epoch 41 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:10:04.146091 140642927564608 util.py:233] E41 Training ----/2084, starting\n", "I0206 16:10:05.497051 140642927564608 util.py:257] E41 Training 16/2084, done at 2020-02-06 16:12:35, 0:02:30\n", "I0206 16:10:08.981894 140642927564608 util.py:257] E41 Training 64/2084, done at 2020-02-06 16:12:35, 0:02:31\n", "I0206 16:10:22.917566 140642927564608 util.py:257] E41 Training 256/2084, done at 2020-02-06 16:12:35, 0:02:30\n", "I0206 16:11:18.877017 140642927564608 util.py:257] E41 Training 1024/2084, done at 2020-02-06 16:12:35, 0:02:31\n", "W0206 16:12:36.351361 140642927564608 util.py:270] E41 Training ----/2084, done at 2020-02-06 16:12:36\n", "I0206 16:12:36.416750 140642927564608 training.py:395] E41 ClassificationTrainingApp\n", "I0206 16:12:36.423398 140642927564608 training.py:476] E41 trn 0.0454 loss, 98.4% correct, 0.9846 precision, 0.9826 recall, 0.9836 f1 score\n", "I0206 16:12:36.423892 140642927564608 training.py:489] E41 trn_neg 0.0540 loss, 98.5% correct 
(24617 of 25000)\n", "I0206 16:12:36.424460 140642927564608 training.py:501] E41 trn_pos 0.0367 loss, 98.3% correct (24566 of 25000)\n", "I0206 16:12:36.429392 140642927564608 training.py:249] Epoch 42 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:12:36.963735 140642927564608 util.py:233] E42 Training ----/2084, starting\n", "I0206 16:12:38.314126 140642927564608 util.py:257] E42 Training 16/2084, done at 2020-02-06 16:15:08, 0:02:30\n", "I0206 16:12:41.798344 140642927564608 util.py:257] E42 Training 64/2084, done at 2020-02-06 16:15:08, 0:02:30\n", "I0206 16:12:55.736653 140642927564608 util.py:257] E42 Training 256/2084, done at 2020-02-06 16:15:08, 0:02:30\n", "I0206 16:13:51.483577 140642927564608 util.py:257] E42 Training 1024/2084, done at 2020-02-06 16:15:08, 0:02:30\n", "W0206 16:15:08.419517 140642927564608 util.py:270] E42 Training ----/2084, done at 2020-02-06 16:15:08\n", "I0206 16:15:08.484403 140642927564608 training.py:395] E42 ClassificationTrainingApp\n", "I0206 16:15:08.491121 140642927564608 training.py:476] E42 trn 0.0430 loss, 98.4% correct, 0.9845 precision, 0.9827 recall, 0.9836 f1 score\n", "I0206 16:15:08.491597 140642927564608 training.py:489] E42 trn_neg 0.0494 loss, 98.5% correct (24614 of 25000)\n", "I0206 16:15:08.492112 140642927564608 training.py:501] E42 trn_pos 0.0365 loss, 98.3% correct (24568 of 25000)\n", "I0206 16:15:08.497528 140642927564608 training.py:249] Epoch 43 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:15:09.028102 140642927564608 util.py:233] E43 Training ----/2084, starting\n", "I0206 16:15:10.405563 140642927564608 util.py:257] E43 Training 16/2084, done at 2020-02-06 16:17:40, 0:02:30\n", "I0206 16:15:13.889479 140642927564608 util.py:257] E43 Training 64/2084, done at 2020-02-06 16:17:40, 0:02:30\n", "I0206 16:15:27.824491 140642927564608 util.py:257] E43 Training 256/2084, done at 2020-02-06 16:17:40, 0:02:30\n", "I0206 16:16:23.572499 140642927564608 util.py:257] E43 Training 1024/2084, done at 2020-02-06 16:17:40, 0:02:30\n", "W0206 16:17:40.965630 140642927564608 util.py:270] E43 Training ----/2084, done at 2020-02-06 16:17:40\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 16:17:41.030321 140642927564608 training.py:395] E43 ClassificationTrainingApp\n", "I0206 16:17:41.037077 140642927564608 training.py:476] E43 trn 0.0442 loss, 98.4% correct, 0.9848 precision, 0.9822 recall, 0.9835 f1 score\n", "I0206 16:17:41.037555 140642927564608 training.py:489] E43 trn_neg 0.0520 loss, 98.5% correct (24621 of 25000)\n", "I0206 16:17:41.037979 140642927564608 training.py:501] E43 trn_pos 0.0365 loss, 98.2% correct (24554 of 25000)\n", "I0206 16:17:41.043254 140642927564608 training.py:249] Epoch 44 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:17:41.571367 140642927564608 util.py:233] E44 Training ----/2084, starting\n", "I0206 16:17:43.064770 140642927564608 util.py:257] E44 Training 16/2084, done at 2020-02-06 16:20:13, 0:02:31\n", "I0206 16:17:46.549664 140642927564608 util.py:257] E44 Training 64/2084, done at 2020-02-06 16:20:13, 0:02:31\n", "I0206 16:18:00.486612 140642927564608 util.py:257] E44 Training 256/2084, done at 2020-02-06 16:20:13, 0:02:30\n", "I0206 16:18:56.233889 140642927564608 util.py:257] E44 Training 1024/2084, done at 2020-02-06 16:20:13, 0:02:30\n", "W0206 16:20:13.964460 140642927564608 util.py:270] E44 Training ----/2084, done at 2020-02-06 16:20:13\n", "I0206 16:20:14.029644 140642927564608 training.py:395] E44 ClassificationTrainingApp\n", "I0206 16:20:14.036406 
140642927564608 training.py:476] E44 trn 0.0432 loss, 98.3% correct, 0.9832 precision, 0.9825 recall, 0.9829 f1 score\n", "I0206 16:20:14.036875 140642927564608 training.py:489] E44 trn_neg 0.0501 loss, 98.3% correct (24581 of 25000)\n", "I0206 16:20:14.037374 140642927564608 training.py:501] E44 trn_pos 0.0363 loss, 98.2% correct (24562 of 25000)\n", "I0206 16:20:14.042807 140642927564608 training.py:249] Epoch 45 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:20:14.574131 140642927564608 util.py:233] E45 Training ----/2084, starting\n", "I0206 16:20:16.060374 140642927564608 util.py:257] E45 Training 16/2084, done at 2020-02-06 16:22:46, 0:02:31\n", "I0206 16:20:19.545327 140642927564608 util.py:257] E45 Training 64/2084, done at 2020-02-06 16:22:46, 0:02:31\n", "I0206 16:20:33.482677 140642927564608 util.py:257] E45 Training 256/2084, done at 2020-02-06 16:22:46, 0:02:31\n", "I0206 16:21:29.228503 140642927564608 util.py:257] E45 Training 1024/2084, done at 2020-02-06 16:22:46, 0:02:30\n", "W0206 16:22:46.716002 140642927564608 util.py:270] E45 Training ----/2084, done at 2020-02-06 16:22:46\n", "I0206 16:22:46.780592 140642927564608 training.py:395] E45 ClassificationTrainingApp\n", "I0206 16:22:46.787407 140642927564608 training.py:476] E45 trn 0.0419 loss, 98.4% correct, 0.9842 precision, 0.9839 recall, 0.9840 f1 score\n", "I0206 16:22:46.787916 140642927564608 training.py:489] E45 trn_neg 0.0500 loss, 98.4% correct (24604 of 25000)\n", "I0206 16:22:46.788396 140642927564608 training.py:501] E45 trn_pos 0.0338 loss, 98.4% correct (24598 of 25000)\n", "W0206 16:22:46.793792 140642927564608 util.py:233] E45 Validation ----/2148, starting\n", "I0206 16:22:47.350058 140642927564608 util.py:257] E45 Validation 16/2148, done at 2020-02-06 16:23:34, 0:00:47\n", "I0206 16:22:48.296263 140642927564608 util.py:257] E45 Validation 64/2148, done at 2020-02-06 16:23:30, 0:00:43\n", "I0206 16:22:52.595401 140642927564608 util.py:257] E45 Validation 256/2148, done at 2020-02-06 16:23:33, 0:00:46\n", "I0206 16:23:08.288334 140642927564608 util.py:257] E45 Validation 1024/2148, done at 2020-02-06 16:23:31, 0:00:44\n", "W0206 16:23:32.587522 140642927564608 util.py:270] E45 Validation ----/2148, done at 2020-02-06 16:23:32\n", "I0206 16:23:32.588803 140642927564608 training.py:395] E45 ClassificationTrainingApp\n", "I0206 16:23:32.591001 140642927564608 training.py:476] E45 val 0.0613 loss, 98.3% correct, 0.1411 precision, 0.9477 recall, 0.2456 f1 score\n", "I0206 16:23:32.591587 140642927564608 training.py:489] E45 val_neg 0.0610 loss, 98.3% correct (50499 of 51382)\n", "I0206 16:23:32.591921 140642927564608 training.py:501] E45 val_pos 0.1802 loss, 94.8% correct (145 of 153)\n", "I0206 16:23:32.599515 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.2250000.state\n", "I0206 16:23:32.601161 140642927564608 training.py:617] SHA1: 576e71a428bfe4a1b93d596466dce65fef5b72f4\n", "I0206 16:23:32.601602 140642927564608 training.py:249] Epoch 46 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:23:33.129938 140642927564608 util.py:233] E46 Training ----/2084, starting\n", "I0206 16:23:34.473319 140642927564608 util.py:257] E46 Training 16/2084, done at 2020-02-06 16:26:04, 0:02:30\n", "I0206 16:23:37.959309 140642927564608 util.py:257] E46 Training 64/2084, done at 2020-02-06 16:26:04, 0:02:31\n", "I0206 16:23:51.898109 140642927564608 util.py:257] E46 Training 256/2084, done at 2020-02-06 16:26:04, 0:02:31\n", 
"I0206 16:24:47.654457 140642927564608 util.py:257] E46 Training 1024/2084, done at 2020-02-06 16:26:04, 0:02:31\n", "W0206 16:26:04.598875 140642927564608 util.py:270] E46 Training ----/2084, done at 2020-02-06 16:26:04\n", "I0206 16:26:04.664536 140642927564608 training.py:395] E46 ClassificationTrainingApp\n", "I0206 16:26:04.671407 140642927564608 training.py:476] E46 trn 0.0445 loss, 98.4% correct, 0.9841 precision, 0.9834 recall, 0.9838 f1 score\n", "I0206 16:26:04.671890 140642927564608 training.py:489] E46 trn_neg 0.0531 loss, 98.4% correct (24604 of 25000)\n", "I0206 16:26:04.672534 140642927564608 training.py:501] E46 trn_pos 0.0359 loss, 98.3% correct (24584 of 25000)\n", "I0206 16:26:04.677443 140642927564608 training.py:249] Epoch 47 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:26:05.210582 140642927564608 util.py:233] E47 Training ----/2084, starting\n", "I0206 16:26:06.657774 140642927564608 util.py:257] E47 Training 16/2084, done at 2020-02-06 16:28:36, 0:02:31\n", "I0206 16:26:10.142689 140642927564608 util.py:257] E47 Training 64/2084, done at 2020-02-06 16:28:36, 0:02:31\n", "I0206 16:26:24.078581 140642927564608 util.py:257] E47 Training 256/2084, done at 2020-02-06 16:28:36, 0:02:30\n", "I0206 16:27:19.852995 140642927564608 util.py:257] E47 Training 1024/2084, done at 2020-02-06 16:28:36, 0:02:31\n", "W0206 16:28:36.803803 140642927564608 util.py:270] E47 Training ----/2084, done at 2020-02-06 16:28:36\n", "I0206 16:28:36.870685 140642927564608 training.py:395] E47 ClassificationTrainingApp\n", "I0206 16:28:36.877494 140642927564608 training.py:476] E47 trn 0.0441 loss, 98.4% correct, 0.9837 precision, 0.9838 recall, 0.9837 f1 score\n", "I0206 16:28:36.878013 140642927564608 training.py:489] E47 trn_neg 0.0536 loss, 98.4% correct (24593 of 25000)\n", "I0206 16:28:36.878828 140642927564608 training.py:501] E47 trn_pos 0.0345 loss, 98.4% correct (24594 of 25000)\n", "I0206 16:28:36.884503 140642927564608 training.py:249] Epoch 48 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:28:37.437121 140642927564608 util.py:233] E48 Training ----/2084, starting\n", "I0206 16:28:38.928605 140642927564608 util.py:257] E48 Training 16/2084, done at 2020-02-06 16:31:08, 0:02:30\n", "I0206 16:28:42.415360 140642927564608 util.py:257] E48 Training 64/2084, done at 2020-02-06 16:31:09, 0:02:31\n", "I0206 16:28:56.359844 140642927564608 util.py:257] E48 Training 256/2084, done at 2020-02-06 16:31:09, 0:02:31\n", "I0206 16:29:52.137563 140642927564608 util.py:257] E48 Training 1024/2084, done at 2020-02-06 16:31:09, 0:02:31\n", "W0206 16:31:09.853976 140642927564608 util.py:270] E48 Training ----/2084, done at 2020-02-06 16:31:09\n", "I0206 16:31:09.920932 140642927564608 training.py:395] E48 ClassificationTrainingApp\n", "I0206 16:31:09.928071 140642927564608 training.py:476] E48 trn 0.0414 loss, 98.5% correct, 0.9850 precision, 0.9855 recall, 0.9852 f1 score\n", "I0206 16:31:09.928748 140642927564608 training.py:489] E48 trn_neg 0.0504 loss, 98.5% correct (24625 of 25000)\n", "I0206 16:31:09.929077 140642927564608 training.py:501] E48 trn_pos 0.0324 loss, 98.5% correct (24637 of 25000)\n", "I0206 16:31:09.934358 140642927564608 training.py:249] Epoch 49 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:31:10.507389 140642927564608 util.py:233] E49 Training ----/2084, starting\n", "I0206 16:31:11.974009 140642927564608 util.py:257] E49 Training 16/2084, done at 2020-02-06 16:33:42, 0:02:30\n", "I0206 16:31:15.459766 140642927564608 util.py:257] E49 Training 64/2084, done 
at 2020-02-06 16:33:42, 0:02:31\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 16:31:29.401004 140642927564608 util.py:257] E49 Training 256/2084, done at 2020-02-06 16:33:42, 0:02:31\n", "I0206 16:32:25.171614 140642927564608 util.py:257] E49 Training 1024/2084, done at 2020-02-06 16:33:42, 0:02:31\n", "W0206 16:33:42.154011 140642927564608 util.py:270] E49 Training ----/2084, done at 2020-02-06 16:33:42\n", "I0206 16:33:42.219898 140642927564608 training.py:395] E49 ClassificationTrainingApp\n", "I0206 16:33:42.226999 140642927564608 training.py:476] E49 trn 0.0404 loss, 98.5% correct, 0.9843 precision, 0.9857 recall, 0.9850 f1 score\n", "I0206 16:33:42.227586 140642927564608 training.py:489] E49 trn_neg 0.0492 loss, 98.4% correct (24606 of 25000)\n", "I0206 16:33:42.228185 140642927564608 training.py:501] E49 trn_pos 0.0315 loss, 98.6% correct (24642 of 25000)\n", "I0206 16:33:42.233313 140642927564608 training.py:249] Epoch 50 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:33:42.776868 140642927564608 util.py:233] E50 Training ----/2084, starting\n", "I0206 16:33:44.139658 140642927564608 util.py:257] E50 Training 16/2084, done at 2020-02-06 16:36:14, 0:02:31\n", "I0206 16:33:47.625366 140642927564608 util.py:257] E50 Training 64/2084, done at 2020-02-06 16:36:14, 0:02:31\n", "I0206 16:34:01.571563 140642927564608 util.py:257] E50 Training 256/2084, done at 2020-02-06 16:36:14, 0:02:31\n", "I0206 16:34:57.344727 140642927564608 util.py:257] E50 Training 1024/2084, done at 2020-02-06 16:36:14, 0:02:31\n", "W0206 16:36:14.320061 140642927564608 util.py:270] E50 Training ----/2084, done at 2020-02-06 16:36:14\n", "I0206 16:36:14.393631 140642927564608 training.py:395] E50 ClassificationTrainingApp\n", "I0206 16:36:14.401016 140642927564608 training.py:476] E50 trn 0.0386 loss, 98.6% correct, 0.9854 precision, 0.9856 recall, 0.9855 f1 score\n", "I0206 16:36:14.401659 140642927564608 training.py:489] E50 trn_neg 0.0458 loss, 98.5% correct (24636 of 25000)\n", "I0206 16:36:14.402059 140642927564608 training.py:501] E50 trn_pos 0.0313 loss, 98.6% correct (24641 of 25000)\n", "W0206 16:36:14.409385 140642927564608 util.py:233] E50 Validation ----/2148, starting\n", "I0206 16:36:15.004642 140642927564608 util.py:257] E50 Validation 16/2148, done at 2020-02-06 16:37:05, 0:00:51\n", "I0206 16:36:16.039097 140642927564608 util.py:257] E50 Validation 64/2148, done at 2020-02-06 16:37:01, 0:00:47\n", "I0206 16:36:20.182445 140642927564608 util.py:257] E50 Validation 256/2148, done at 2020-02-06 16:37:01, 0:00:46\n", "I0206 16:36:38.698946 140642927564608 util.py:257] E50 Validation 1024/2148, done at 2020-02-06 16:37:05, 0:00:50\n", "W0206 16:37:03.138122 140642927564608 util.py:270] E50 Validation ----/2148, done at 2020-02-06 16:37:03\n", "I0206 16:37:03.139303 140642927564608 training.py:395] E50 ClassificationTrainingApp\n", "I0206 16:37:03.140866 140642927564608 training.py:476] E50 val 0.0453 loss, 98.8% correct, 0.1908 precision, 0.9477 recall, 0.3176 f1 score\n", "I0206 16:37:03.141287 140642927564608 training.py:489] E50 val_neg 0.0448 loss, 98.8% correct (50767 of 51382)\n", "I0206 16:37:03.141943 140642927564608 training.py:501] E50 val_pos 0.2204 loss, 94.8% correct (145 of 153)\n", "I0206 16:37:03.150197 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.2500000.state\n", "I0206 16:37:03.151963 140642927564608 training.py:617] SHA1: 
ada0d2f5cc44d93343fda6c5aa142dd26bf43702\n", "I0206 16:37:03.152366 140642927564608 training.py:249] Epoch 51 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:37:03.693057 140642927564608 util.py:233] E51 Training ----/2084, starting\n", "I0206 16:37:05.056564 140642927564608 util.py:257] E51 Training 16/2084, done at 2020-02-06 16:39:35, 0:02:30\n", "I0206 16:37:08.542978 140642927564608 util.py:257] E51 Training 64/2084, done at 2020-02-06 16:39:35, 0:02:31\n", "I0206 16:37:22.485014 140642927564608 util.py:257] E51 Training 256/2084, done at 2020-02-06 16:39:35, 0:02:31\n", "I0206 16:38:18.249008 140642927564608 util.py:257] E51 Training 1024/2084, done at 2020-02-06 16:39:35, 0:02:31\n", "W0206 16:39:35.221870 140642927564608 util.py:270] E51 Training ----/2084, done at 2020-02-06 16:39:35\n", "I0206 16:39:35.291424 140642927564608 training.py:395] E51 ClassificationTrainingApp\n", "I0206 16:39:35.299116 140642927564608 training.py:476] E51 trn 0.0408 loss, 98.6% correct, 0.9860 precision, 0.9858 recall, 0.9859 f1 score\n", "I0206 16:39:35.299736 140642927564608 training.py:489] E51 trn_neg 0.0496 loss, 98.6% correct (24650 of 25000)\n", "I0206 16:39:35.300187 140642927564608 training.py:501] E51 trn_pos 0.0319 loss, 98.6% correct (24644 of 25000)\n", "I0206 16:39:35.304971 140642927564608 training.py:249] Epoch 52 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:39:35.893383 140642927564608 util.py:233] E52 Training ----/2084, starting\n", "I0206 16:39:37.340690 140642927564608 util.py:257] E52 Training 16/2084, done at 2020-02-06 16:42:07, 0:02:30\n", "I0206 16:39:40.825155 140642927564608 util.py:257] E52 Training 64/2084, done at 2020-02-06 16:42:07, 0:02:30\n", "I0206 16:39:54.767701 140642927564608 util.py:257] E52 Training 256/2084, done at 2020-02-06 16:42:07, 0:02:31\n", "I0206 16:40:50.533667 140642927564608 util.py:257] E52 Training 1024/2084, done at 2020-02-06 16:42:07, 0:02:31\n", "W0206 16:42:07.513623 140642927564608 util.py:270] E52 Training ----/2084, done at 2020-02-06 16:42:07\n", "I0206 16:42:07.581821 140642927564608 training.py:395] E52 ClassificationTrainingApp\n", "I0206 16:42:07.589236 140642927564608 training.py:476] E52 trn 0.0372 loss, 98.6% correct, 0.9859 precision, 0.9866 recall, 0.9862 f1 score\n", "I0206 16:42:07.589723 140642927564608 training.py:489] E52 trn_neg 0.0443 loss, 98.6% correct (24646 of 25000)\n", "I0206 16:42:07.590244 140642927564608 training.py:501] E52 trn_pos 0.0301 loss, 98.7% correct (24666 of 25000)\n", "I0206 16:42:07.595247 140642927564608 training.py:249] Epoch 53 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:42:08.154516 140642927564608 util.py:233] E53 Training ----/2084, starting\n", "I0206 16:42:10.031376 140642927564608 util.py:257] E53 Training 16/2084, done at 2020-02-06 16:44:40, 0:02:31\n", "I0206 16:42:13.517731 140642927564608 util.py:257] E53 Training 64/2084, done at 2020-02-06 16:44:40, 0:02:31\n", "I0206 16:42:27.457237 140642927564608 util.py:257] E53 Training 256/2084, done at 2020-02-06 16:44:40, 0:02:31\n", "I0206 16:43:23.216253 140642927564608 util.py:257] E53 Training 1024/2084, done at 2020-02-06 16:44:40, 0:02:31\n", "W0206 16:44:40.532951 140642927564608 util.py:270] E53 Training ----/2084, done at 2020-02-06 16:44:40\n", "I0206 16:44:40.600579 140642927564608 training.py:395] E53 ClassificationTrainingApp\n", "I0206 16:44:40.608226 140642927564608 training.py:476] E53 trn 0.0383 loss, 98.6% correct, 0.9864 precision, 0.9858 recall, 0.9861 f1 score\n", "I0206 16:44:40.608702 
140642927564608 training.py:489] E53 trn_neg 0.0464 loss, 98.6% correct (24661 of 25000)\n", "I0206 16:44:40.609254 140642927564608 training.py:501] E53 trn_pos 0.0302 loss, 98.6% correct (24646 of 25000)\n", "I0206 16:44:40.614285 140642927564608 training.py:249] Epoch 54 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:44:41.186750 140642927564608 util.py:233] E54 Training ----/2084, starting\n", "I0206 16:44:42.584903 140642927564608 util.py:257] E54 Training 16/2084, done at 2020-02-06 16:47:12, 0:02:30\n", "I0206 16:44:46.070747 140642927564608 util.py:257] E54 Training 64/2084, done at 2020-02-06 16:47:12, 0:02:30\n", "I0206 16:45:00.010965 140642927564608 util.py:257] E54 Training 256/2084, done at 2020-02-06 16:47:12, 0:02:30\n", "I0206 16:45:55.781852 140642927564608 util.py:257] E54 Training 1024/2084, done at 2020-02-06 16:47:12, 0:02:31\n", "W0206 16:47:12.759092 140642927564608 util.py:270] E54 Training ----/2084, done at 2020-02-06 16:47:12\n", "I0206 16:47:12.825463 140642927564608 training.py:395] E54 ClassificationTrainingApp\n", "I0206 16:47:12.832871 140642927564608 training.py:476] E54 trn 0.0388 loss, 98.6% correct, 0.9846 precision, 0.9869 recall, 0.9858 f1 score\n", "I0206 16:47:12.833450 140642927564608 training.py:489] E54 trn_neg 0.0470 loss, 98.5% correct (24614 of 25000)\n", "I0206 16:47:12.833965 140642927564608 training.py:501] E54 trn_pos 0.0307 loss, 98.7% correct (24673 of 25000)\n", "I0206 16:47:12.838894 140642927564608 training.py:249] Epoch 55 of 100, 2084/2148 batches of size 24*1\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "W0206 16:47:13.388124 140642927564608 util.py:233] E55 Training ----/2084, starting\n", "I0206 16:47:14.886085 140642927564608 util.py:257] E55 Training 16/2084, done at 2020-02-06 16:49:44, 0:02:31\n", "I0206 16:47:18.371454 140642927564608 util.py:257] E55 Training 64/2084, done at 2020-02-06 16:49:44, 0:02:31\n", "I0206 16:47:32.315498 140642927564608 util.py:257] E55 Training 256/2084, done at 2020-02-06 16:49:44, 0:02:31\n", "I0206 16:48:28.078366 140642927564608 util.py:257] E55 Training 1024/2084, done at 2020-02-06 16:49:44, 0:02:31\n", "W0206 16:49:45.046478 140642927564608 util.py:270] E55 Training ----/2084, done at 2020-02-06 16:49:45\n", "I0206 16:49:45.111718 140642927564608 training.py:395] E55 ClassificationTrainingApp\n", "I0206 16:49:45.119357 140642927564608 training.py:476] E55 trn 0.0362 loss, 98.7% correct, 0.9868 precision, 0.9867 recall, 0.9868 f1 score\n", "I0206 16:49:45.119850 140642927564608 training.py:489] E55 trn_neg 0.0446 loss, 98.7% correct (24670 of 25000)\n", "I0206 16:49:45.120397 140642927564608 training.py:501] E55 trn_pos 0.0278 loss, 98.7% correct (24668 of 25000)\n", "W0206 16:49:45.125757 140642927564608 util.py:233] E55 Validation ----/2148, starting\n", "I0206 16:49:45.697918 140642927564608 util.py:257] E55 Validation 16/2148, done at 2020-02-06 16:50:33, 0:00:47\n", "I0206 16:49:46.642819 140642927564608 util.py:257] E55 Validation 64/2148, done at 2020-02-06 16:50:28, 0:00:43\n", "I0206 16:49:50.500817 140642927564608 util.py:257] E55 Validation 256/2148, done at 2020-02-06 16:50:28, 0:00:43\n", "I0206 16:50:06.188533 140642927564608 util.py:257] E55 Validation 1024/2148, done at 2020-02-06 16:50:29, 0:00:43\n", "W0206 16:50:29.310073 140642927564608 util.py:270] E55 Validation ----/2148, done at 2020-02-06 16:50:29\n", "I0206 16:50:29.311220 140642927564608 training.py:395] E55 ClassificationTrainingApp\n", "I0206 16:50:29.312874 140642927564608 
training.py:476] E55 val 0.0493 loss, 98.7% correct, 0.1747 precision, 0.9477 recall, 0.2950 f1 score\n", "I0206 16:50:29.313348 140642927564608 training.py:489] E55 val_neg 0.0488 loss, 98.7% correct (50697 of 51382)\n", "I0206 16:50:29.315028 140642927564608 training.py:501] E55 val_pos 0.2279 loss, 94.8% correct (145 of 153)\n", "I0206 16:50:29.322606 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.2750000.state\n", "I0206 16:50:29.324363 140642927564608 training.py:617] SHA1: c057a9a52131194953ae06a45b9ba0f9a3f44a94\n", "I0206 16:50:29.324950 140642927564608 training.py:249] Epoch 56 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:50:29.876778 140642927564608 util.py:233] E56 Training ----/2084, starting\n", "I0206 16:50:31.266508 140642927564608 util.py:257] E56 Training 16/2084, done at 2020-02-06 16:53:01, 0:02:31\n", "I0206 16:50:34.753538 140642927564608 util.py:257] E56 Training 64/2084, done at 2020-02-06 16:53:01, 0:02:31\n", "I0206 16:50:48.697777 140642927564608 util.py:257] E56 Training 256/2084, done at 2020-02-06 16:53:01, 0:02:31\n", "I0206 16:51:44.472215 140642927564608 util.py:257] E56 Training 1024/2084, done at 2020-02-06 16:53:01, 0:02:31\n", "W0206 16:53:01.464626 140642927564608 util.py:270] E56 Training ----/2084, done at 2020-02-06 16:53:01\n", "I0206 16:53:01.539079 140642927564608 training.py:395] E56 ClassificationTrainingApp\n", "I0206 16:53:01.546908 140642927564608 training.py:476] E56 trn 0.0389 loss, 98.6% correct, 0.9849 precision, 0.9864 recall, 0.9857 f1 score\n", "I0206 16:53:01.547535 140642927564608 training.py:489] E56 trn_neg 0.0483 loss, 98.5% correct (24623 of 25000)\n", "I0206 16:53:01.547955 140642927564608 training.py:501] E56 trn_pos 0.0295 loss, 98.6% correct (24661 of 25000)\n", "I0206 16:53:01.553537 140642927564608 training.py:249] Epoch 57 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:53:02.131755 140642927564608 util.py:233] E57 Training ----/2084, starting\n", "I0206 16:53:03.618706 140642927564608 util.py:257] E57 Training 16/2084, done at 2020-02-06 16:55:33, 0:02:31\n", "I0206 16:53:07.105749 140642927564608 util.py:257] E57 Training 64/2084, done at 2020-02-06 16:55:33, 0:02:31\n", "I0206 16:53:21.051006 140642927564608 util.py:257] E57 Training 256/2084, done at 2020-02-06 16:55:33, 0:02:31\n", "I0206 16:54:17.020503 140642927564608 util.py:257] E57 Training 1024/2084, done at 2020-02-06 16:55:34, 0:02:31\n", "W0206 16:55:33.996428 140642927564608 util.py:270] E57 Training ----/2084, done at 2020-02-06 16:55:33\n", "I0206 16:55:34.063117 140642927564608 training.py:395] E57 ClassificationTrainingApp\n", "I0206 16:55:34.071614 140642927564608 training.py:476] E57 trn 0.0372 loss, 98.6% correct, 0.9854 precision, 0.9874 recall, 0.9864 f1 score\n", "I0206 16:55:34.072209 140642927564608 training.py:489] E57 trn_neg 0.0462 loss, 98.5% correct (24634 of 25000)\n", "I0206 16:55:34.074154 140642927564608 training.py:501] E57 trn_pos 0.0282 loss, 98.7% correct (24686 of 25000)\n", "I0206 16:55:34.080282 140642927564608 training.py:249] Epoch 58 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:55:34.626259 140642927564608 util.py:233] E58 Training ----/2084, starting\n", "I0206 16:55:36.057002 140642927564608 util.py:257] E58 Training 16/2084, done at 2020-02-06 16:58:06, 0:02:31\n", "I0206 16:55:39.543339 140642927564608 util.py:257] E58 Training 64/2084, done at 2020-02-06 16:58:06, 0:02:31\n", "I0206 16:55:53.484439 
140642927564608 util.py:257] E58 Training 256/2084, done at 2020-02-06 16:58:06, 0:02:31\n", "I0206 16:56:49.264161 140642927564608 util.py:257] E58 Training 1024/2084, done at 2020-02-06 16:58:06, 0:02:31\n", "W0206 16:58:06.224854 140642927564608 util.py:270] E58 Training ----/2084, done at 2020-02-06 16:58:06\n", "I0206 16:58:06.291784 140642927564608 training.py:395] E58 ClassificationTrainingApp\n", "I0206 16:58:06.299770 140642927564608 training.py:476] E58 trn 0.0342 loss, 98.7% correct, 0.9872 precision, 0.9870 recall, 0.9871 f1 score\n", "I0206 16:58:06.300365 140642927564608 training.py:489] E58 trn_neg 0.0414 loss, 98.7% correct (24679 of 25000)\n", "I0206 16:58:06.300839 140642927564608 training.py:501] E58 trn_pos 0.0271 loss, 98.7% correct (24674 of 25000)\n", "I0206 16:58:06.305950 140642927564608 training.py:249] Epoch 59 of 100, 2084/2148 batches of size 24*1\n", "W0206 16:58:06.868083 140642927564608 util.py:233] E59 Training ----/2084, starting\n", "I0206 16:58:08.360366 140642927564608 util.py:257] E59 Training 16/2084, done at 2020-02-06 17:00:38, 0:02:31\n", "I0206 16:58:11.847714 140642927564608 util.py:257] E59 Training 64/2084, done at 2020-02-06 17:00:38, 0:02:31\n", "I0206 16:58:25.789596 140642927564608 util.py:257] E59 Training 256/2084, done at 2020-02-06 17:00:38, 0:02:31\n", "I0206 16:59:22.868868 140642927564608 util.py:257] E59 Training 1024/2084, done at 2020-02-06 17:00:41, 0:02:33\n", "W0206 17:00:39.843994 140642927564608 util.py:270] E59 Training ----/2084, done at 2020-02-06 17:00:39\n", "I0206 17:00:39.911031 140642927564608 training.py:395] E59 ClassificationTrainingApp\n", "I0206 17:00:39.918734 140642927564608 training.py:476] E59 trn 0.0350 loss, 98.7% correct, 0.9873 precision, 0.9873 recall, 0.9873 f1 score\n", "I0206 17:00:39.919381 140642927564608 training.py:489] E59 trn_neg 0.0432 loss, 98.7% correct (24682 of 25000)\n", "I0206 17:00:39.920049 140642927564608 training.py:501] E59 trn_pos 0.0268 loss, 98.7% correct (24682 of 25000)\n", "I0206 17:00:39.924902 140642927564608 training.py:249] Epoch 60 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:00:40.480063 140642927564608 util.py:233] E60 Training ----/2084, starting\n", "I0206 17:00:41.862060 140642927564608 util.py:257] E60 Training 16/2084, done at 2020-02-06 17:03:12, 0:02:31\n", "I0206 17:00:45.348422 140642927564608 util.py:257] E60 Training 64/2084, done at 2020-02-06 17:03:11, 0:02:31\n", "I0206 17:00:59.294464 140642927564608 util.py:257] E60 Training 256/2084, done at 2020-02-06 17:03:11, 0:02:31\n", "I0206 17:01:55.064342 140642927564608 util.py:257] E60 Training 1024/2084, done at 2020-02-06 17:03:11, 0:02:31\n", "W0206 17:03:12.033614 140642927564608 util.py:270] E60 Training ----/2084, done at 2020-02-06 17:03:12\n", "I0206 17:03:12.099320 140642927564608 training.py:395] E60 ClassificationTrainingApp\n", "I0206 17:03:12.108074 140642927564608 training.py:476] E60 trn 0.0383 loss, 98.6% correct, 0.9853 precision, 0.9872 recall, 0.9863 f1 score\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 17:03:12.108750 140642927564608 training.py:489] E60 trn_neg 0.0483 loss, 98.5% correct (24633 of 25000)\n", "I0206 17:03:12.109355 140642927564608 training.py:501] E60 trn_pos 0.0283 loss, 98.7% correct (24680 of 25000)\n", "W0206 17:03:12.116044 140642927564608 util.py:233] E60 Validation ----/2148, starting\n", "I0206 17:03:12.707041 140642927564608 util.py:257] E60 Validation 16/2148, done at 2020-02-06 17:04:05, 0:00:52\n", "I0206 17:03:13.735325 
140642927564608 util.py:257] E60 Validation 64/2148, done at 2020-02-06 17:03:59, 0:00:47\n", "I0206 17:03:17.891752 140642927564608 util.py:257] E60 Validation 256/2148, done at 2020-02-06 17:03:59, 0:00:46\n", "I0206 17:03:34.655935 140642927564608 util.py:257] E60 Validation 1024/2148, done at 2020-02-06 17:03:59, 0:00:46\n", "W0206 17:03:59.483727 140642927564608 util.py:270] E60 Validation ----/2148, done at 2020-02-06 17:03:59\n", "I0206 17:03:59.485464 140642927564608 training.py:395] E60 ClassificationTrainingApp\n", "I0206 17:03:59.487377 140642927564608 training.py:476] E60 val 0.0358 loss, 99.0% correct, 0.2227 precision, 0.9346 recall, 0.3597 f1 score\n", "I0206 17:03:59.487843 140642927564608 training.py:489] E60 val_neg 0.0352 loss, 99.0% correct (50883 of 51382)\n", "I0206 17:03:59.489617 140642927564608 training.py:501] E60 val_pos 0.2345 loss, 93.5% correct (143 of 153)\n", "I0206 17:03:59.498612 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.3000000.state\n", "I0206 17:03:59.500269 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n", "I0206 17:03:59.502307 140642927564608 training.py:617] SHA1: f396ff20769469b90ee3bd5f2f9b93a0dd32a6ff\n", "I0206 17:03:59.502822 140642927564608 training.py:249] Epoch 61 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:04:00.044205 140642927564608 util.py:233] E61 Training ----/2084, starting\n", "I0206 17:04:01.413112 140642927564608 util.py:257] E61 Training 16/2084, done at 2020-02-06 17:06:31, 0:02:31\n", "I0206 17:04:04.899992 140642927564608 util.py:257] E61 Training 64/2084, done at 2020-02-06 17:06:31, 0:02:31\n", "I0206 17:04:18.845306 140642927564608 util.py:257] E61 Training 256/2084, done at 2020-02-06 17:06:31, 0:02:31\n", "I0206 17:05:14.613901 140642927564608 util.py:257] E61 Training 1024/2084, done at 2020-02-06 17:06:31, 0:02:31\n", "W0206 17:06:31.586977 140642927564608 util.py:270] E61 Training ----/2084, done at 2020-02-06 17:06:31\n", "I0206 17:06:31.654204 140642927564608 training.py:395] E61 ClassificationTrainingApp\n", "I0206 17:06:31.662799 140642927564608 training.py:476] E61 trn 0.0376 loss, 98.7% correct, 0.9859 precision, 0.9874 recall, 0.9867 f1 score\n", "I0206 17:06:31.663476 140642927564608 training.py:489] E61 trn_neg 0.0476 loss, 98.6% correct (24647 of 25000)\n", "I0206 17:06:31.663932 140642927564608 training.py:501] E61 trn_pos 0.0276 loss, 98.7% correct (24686 of 25000)\n", "I0206 17:06:31.669260 140642927564608 training.py:249] Epoch 62 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:06:32.239861 140642927564608 util.py:233] E62 Training ----/2084, starting\n", "I0206 17:06:33.681343 140642927564608 util.py:257] E62 Training 16/2084, done at 2020-02-06 17:09:03, 0:02:31\n", "I0206 17:06:37.168828 140642927564608 util.py:257] E62 Training 64/2084, done at 2020-02-06 17:09:03, 0:02:31\n", "I0206 17:06:51.112718 140642927564608 util.py:257] E62 Training 256/2084, done at 2020-02-06 17:09:03, 0:02:31\n", "I0206 17:07:46.881075 140642927564608 util.py:257] E62 Training 1024/2084, done at 2020-02-06 17:09:03, 0:02:31\n", "W0206 17:09:03.838736 140642927564608 util.py:270] E62 Training ----/2084, done at 2020-02-06 17:09:03\n", "I0206 17:09:03.903843 140642927564608 training.py:395] E62 ClassificationTrainingApp\n", "I0206 17:09:03.911593 140642927564608 training.py:476] E62 trn 0.0356 loss, 98.7% correct, 
0.9864 precision, 0.9883 recall, 0.9874 f1 score\n", "I0206 17:09:03.912080 140642927564608 training.py:489] E62 trn_neg 0.0451 loss, 98.6% correct (24659 of 25000)\n", "I0206 17:09:03.912603 140642927564608 training.py:501] E62 trn_pos 0.0261 loss, 98.8% correct (24708 of 25000)\n", "I0206 17:09:03.918194 140642927564608 training.py:249] Epoch 63 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:09:04.479707 140642927564608 util.py:233] E63 Training ----/2084, starting\n", "I0206 17:09:05.873148 140642927564608 util.py:257] E63 Training 16/2084, done at 2020-02-06 17:11:35, 0:02:30\n", "I0206 17:09:09.358931 140642927564608 util.py:257] E63 Training 64/2084, done at 2020-02-06 17:11:35, 0:02:31\n", "I0206 17:09:23.297466 140642927564608 util.py:257] E63 Training 256/2084, done at 2020-02-06 17:11:35, 0:02:31\n", "I0206 17:10:19.057796 140642927564608 util.py:257] E63 Training 1024/2084, done at 2020-02-06 17:11:35, 0:02:31\n", "W0206 17:11:36.026950 140642927564608 util.py:270] E63 Training ----/2084, done at 2020-02-06 17:11:36\n", "I0206 17:11:36.097749 140642927564608 training.py:395] E63 ClassificationTrainingApp\n", "I0206 17:11:36.106268 140642927564608 training.py:476] E63 trn 0.0362 loss, 98.7% correct, 0.9870 precision, 0.9877 recall, 0.9874 f1 score\n", "I0206 17:11:36.106894 140642927564608 training.py:489] E63 trn_neg 0.0458 loss, 98.7% correct (24676 of 25000)\n", "I0206 17:11:36.107381 140642927564608 training.py:501] E63 trn_pos 0.0265 loss, 98.8% correct (24692 of 25000)\n", "I0206 17:11:36.112605 140642927564608 training.py:249] Epoch 64 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:11:36.670311 140642927564608 util.py:233] E64 Training ----/2084, starting\n", "I0206 17:11:38.045944 140642927564608 util.py:257] E64 Training 16/2084, done at 2020-02-06 17:14:08, 0:02:31\n", "I0206 17:11:41.530149 140642927564608 util.py:257] E64 Training 64/2084, done at 2020-02-06 17:14:08, 0:02:31\n", "I0206 17:11:55.475123 140642927564608 util.py:257] E64 Training 256/2084, done at 2020-02-06 17:14:08, 0:02:31\n", "I0206 17:12:51.247586 140642927564608 util.py:257] E64 Training 1024/2084, done at 2020-02-06 17:14:08, 0:02:31\n", "W0206 17:14:08.212733 140642927564608 util.py:270] E64 Training ----/2084, done at 2020-02-06 17:14:08\n", "I0206 17:14:08.280565 140642927564608 training.py:395] E64 ClassificationTrainingApp\n", "I0206 17:14:08.289222 140642927564608 training.py:476] E64 trn 0.0350 loss, 98.7% correct, 0.9868 precision, 0.9880 recall, 0.9874 f1 score\n", "I0206 17:14:08.289920 140642927564608 training.py:489] E64 trn_neg 0.0442 loss, 98.7% correct (24669 of 25000)\n", "I0206 17:14:08.290335 140642927564608 training.py:501] E64 trn_pos 0.0258 loss, 98.8% correct (24701 of 25000)\n", "I0206 17:14:08.295672 140642927564608 training.py:249] Epoch 65 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:14:08.854907 140642927564608 util.py:233] E65 Training ----/2084, starting\n", "I0206 17:14:10.286550 140642927564608 util.py:257] E65 Training 16/2084, done at 2020-02-06 17:16:40, 0:02:31\n", "I0206 17:14:13.772806 140642927564608 util.py:257] E65 Training 64/2084, done at 2020-02-06 17:16:40, 0:02:31\n", "I0206 17:14:27.713052 140642927564608 util.py:257] E65 Training 256/2084, done at 2020-02-06 17:16:40, 0:02:31\n", "I0206 17:15:23.481701 140642927564608 util.py:257] E65 Training 1024/2084, done at 2020-02-06 17:16:40, 0:02:31\n", "W0206 17:16:40.424092 140642927564608 util.py:270] E65 Training ----/2084, done at 2020-02-06 17:16:40\n", "I0206 17:16:40.497716 
140642927564608 training.py:395] E65 ClassificationTrainingApp\n", "I0206 17:16:40.505880 140642927564608 training.py:476] E65 trn 0.0338 loss, 98.8% correct, 0.9887 precision, 0.9878 recall, 0.9882 f1 score\n", "I0206 17:16:40.506544 140642927564608 training.py:489] E65 trn_neg 0.0415 loss, 98.9% correct (24717 of 25000)\n", "I0206 17:16:40.507003 140642927564608 training.py:501] E65 trn_pos 0.0260 loss, 98.8% correct (24694 of 25000)\n", "W0206 17:16:40.512161 140642927564608 util.py:233] E65 Validation ----/2148, starting\n", "I0206 17:16:41.080003 140642927564608 util.py:257] E65 Validation 16/2148, done at 2020-02-06 17:17:27, 0:00:47\n", "I0206 17:16:42.048626 140642927564608 util.py:257] E65 Validation 64/2148, done at 2020-02-06 17:17:24, 0:00:44\n", "I0206 17:16:45.916668 140642927564608 util.py:257] E65 Validation 256/2148, done at 2020-02-06 17:17:24, 0:00:43\n", "I0206 17:17:01.355215 140642927564608 util.py:257] E65 Validation 1024/2148, done at 2020-02-06 17:17:23, 0:00:43\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "W0206 17:17:24.314698 140642927564608 util.py:270] E65 Validation ----/2148, done at 2020-02-06 17:17:24\n", "I0206 17:17:24.316103 140642927564608 training.py:395] E65 ClassificationTrainingApp\n", "I0206 17:17:24.318617 140642927564608 training.py:476] E65 val 0.0433 loss, 98.9% correct, 0.2014 precision, 0.9542 recall, 0.3326 f1 score\n", "I0206 17:17:24.319190 140642927564608 training.py:489] E65 val_neg 0.0429 loss, 98.9% correct (50803 of 51382)\n", "I0206 17:17:24.319510 140642927564608 training.py:501] E65 val_pos 0.1847 loss, 95.4% correct (146 of 153)\n", "I0206 17:17:24.327301 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.3250000.state\n", "I0206 17:17:24.329004 140642927564608 training.py:617] SHA1: d1c660df275ee7671bdcf3db8fcdc77eaa9fff22\n", "I0206 17:17:24.329565 140642927564608 training.py:249] Epoch 66 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:17:24.880342 140642927564608 util.py:233] E66 Training ----/2084, starting\n", "I0206 17:17:26.250344 140642927564608 util.py:257] E66 Training 16/2084, done at 2020-02-06 17:19:56, 0:02:31\n", "I0206 17:17:29.736280 140642927564608 util.py:257] E66 Training 64/2084, done at 2020-02-06 17:19:56, 0:02:31\n", "I0206 17:17:43.682802 140642927564608 util.py:257] E66 Training 256/2084, done at 2020-02-06 17:19:56, 0:02:31\n", "I0206 17:18:39.465184 140642927564608 util.py:257] E66 Training 1024/2084, done at 2020-02-06 17:19:56, 0:02:31\n", "W0206 17:19:56.446858 140642927564608 util.py:270] E66 Training ----/2084, done at 2020-02-06 17:19:56\n", "I0206 17:19:56.513879 140642927564608 training.py:395] E66 ClassificationTrainingApp\n", "I0206 17:19:56.521991 140642927564608 training.py:476] E66 trn 0.0350 loss, 98.8% correct, 0.9874 precision, 0.9881 recall, 0.9877 f1 score\n", "I0206 17:19:56.522601 140642927564608 training.py:489] E66 trn_neg 0.0443 loss, 98.7% correct (24684 of 25000)\n", "I0206 17:19:56.523131 140642927564608 training.py:501] E66 trn_pos 0.0258 loss, 98.8% correct (24703 of 25000)\n", "I0206 17:19:56.527936 140642927564608 training.py:249] Epoch 67 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:19:57.073323 140642927564608 util.py:233] E67 Training ----/2084, starting\n", "I0206 17:19:58.433229 140642927564608 util.py:257] E67 Training 16/2084, done at 2020-02-06 17:22:28, 0:02:30\n", "I0206 17:20:01.920174 140642927564608 util.py:257] E67 Training 64/2084, done at 
2020-02-06 17:22:28, 0:02:31\n", "I0206 17:20:15.861522 140642927564608 util.py:257] E67 Training 256/2084, done at 2020-02-06 17:22:28, 0:02:31\n", "I0206 17:21:11.627637 140642927564608 util.py:257] E67 Training 1024/2084, done at 2020-02-06 17:22:28, 0:02:31\n", "W0206 17:22:28.603476 140642927564608 util.py:270] E67 Training ----/2084, done at 2020-02-06 17:22:28\n", "I0206 17:22:28.671540 140642927564608 training.py:395] E67 ClassificationTrainingApp\n", "I0206 17:22:28.679616 140642927564608 training.py:476] E67 trn 0.0353 loss, 98.8% correct, 0.9877 precision, 0.9874 recall, 0.9875 f1 score\n", "I0206 17:22:28.680219 140642927564608 training.py:489] E67 trn_neg 0.0439 loss, 98.8% correct (24692 of 25000)\n", "I0206 17:22:28.680847 140642927564608 training.py:501] E67 trn_pos 0.0266 loss, 98.7% correct (24684 of 25000)\n", "I0206 17:22:28.686053 140642927564608 training.py:249] Epoch 68 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:22:29.234840 140642927564608 util.py:233] E68 Training ----/2084, starting\n", "I0206 17:22:30.682783 140642927564608 util.py:257] E68 Training 16/2084, done at 2020-02-06 17:25:01, 0:02:31\n", "I0206 17:22:34.169135 140642927564608 util.py:257] E68 Training 64/2084, done at 2020-02-06 17:25:00, 0:02:31\n", "I0206 17:22:48.111884 140642927564608 util.py:257] E68 Training 256/2084, done at 2020-02-06 17:25:00, 0:02:31\n", "I0206 17:23:43.892599 140642927564608 util.py:257] E68 Training 1024/2084, done at 2020-02-06 17:25:00, 0:02:31\n", "W0206 17:25:00.869797 140642927564608 util.py:270] E68 Training ----/2084, done at 2020-02-06 17:25:00\n", "I0206 17:25:00.940529 140642927564608 training.py:395] E68 ClassificationTrainingApp\n", "I0206 17:25:00.948723 140642927564608 training.py:476] E68 trn 0.0346 loss, 98.8% correct, 0.9869 precision, 0.9882 recall, 0.9875 f1 score\n", "I0206 17:25:00.949202 140642927564608 training.py:489] E68 trn_neg 0.0436 loss, 98.7% correct (24671 of 25000)\n", "I0206 17:25:00.949652 140642927564608 training.py:501] E68 trn_pos 0.0257 loss, 98.8% correct (24706 of 25000)\n", "I0206 17:25:00.954640 140642927564608 training.py:249] Epoch 69 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:25:01.516795 140642927564608 util.py:233] E69 Training ----/2084, starting\n", "I0206 17:25:02.900220 140642927564608 util.py:257] E69 Training 16/2084, done at 2020-02-06 17:27:33, 0:02:31\n", "I0206 17:25:06.387271 140642927564608 util.py:257] E69 Training 64/2084, done at 2020-02-06 17:27:33, 0:02:31\n", "I0206 17:25:20.329697 140642927564608 util.py:257] E69 Training 256/2084, done at 2020-02-06 17:27:33, 0:02:31\n", "I0206 17:26:16.091498 140642927564608 util.py:257] E69 Training 1024/2084, done at 2020-02-06 17:27:32, 0:02:31\n", "W0206 17:27:33.058212 140642927564608 util.py:270] E69 Training ----/2084, done at 2020-02-06 17:27:33\n", "I0206 17:27:33.128519 140642927564608 training.py:395] E69 ClassificationTrainingApp\n", "I0206 17:27:33.136802 140642927564608 training.py:476] E69 trn 0.0350 loss, 98.8% correct, 0.9868 precision, 0.9886 recall, 0.9877 f1 score\n", "I0206 17:27:33.137482 140642927564608 training.py:489] E69 trn_neg 0.0451 loss, 98.7% correct (24669 of 25000)\n", "I0206 17:27:33.137872 140642927564608 training.py:501] E69 trn_pos 0.0249 loss, 98.9% correct (24716 of 25000)\n", "I0206 17:27:33.143012 140642927564608 training.py:249] Epoch 70 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:27:33.707870 140642927564608 util.py:233] E70 Training ----/2084, starting\n", "I0206 17:27:35.081329 140642927564608 
util.py:257] E70 Training 16/2084, done at 2020-02-06 17:30:05, 0:02:31\n", "I0206 17:27:38.567990 140642927564608 util.py:257] E70 Training 64/2084, done at 2020-02-06 17:30:05, 0:02:31\n", "I0206 17:27:52.511196 140642927564608 util.py:257] E70 Training 256/2084, done at 2020-02-06 17:30:05, 0:02:31\n", "I0206 17:28:48.280476 140642927564608 util.py:257] E70 Training 1024/2084, done at 2020-02-06 17:30:05, 0:02:31\n", "W0206 17:30:05.562639 140642927564608 util.py:270] E70 Training ----/2084, done at 2020-02-06 17:30:05\n", "I0206 17:30:05.630517 140642927564608 training.py:395] E70 ClassificationTrainingApp\n", "I0206 17:30:05.638585 140642927564608 training.py:476] E70 trn 0.0325 loss, 98.9% correct, 0.9884 precision, 0.9895 recall, 0.9889 f1 score\n", "I0206 17:30:05.639191 140642927564608 training.py:489] E70 trn_neg 0.0411 loss, 98.8% correct (24709 of 25000)\n", "I0206 17:30:05.639735 140642927564608 training.py:501] E70 trn_pos 0.0239 loss, 99.0% correct (24738 of 25000)\n", "W0206 17:30:05.645146 140642927564608 util.py:233] E70 Validation ----/2148, starting\n", "I0206 17:30:06.263046 140642927564608 util.py:257] E70 Validation 16/2148, done at 2020-02-06 17:30:57, 0:00:51\n", "I0206 17:30:07.303658 140642927564608 util.py:257] E70 Validation 64/2148, done at 2020-02-06 17:30:53, 0:00:47\n", "I0206 17:30:11.432864 140642927564608 util.py:257] E70 Validation 256/2148, done at 2020-02-06 17:30:52, 0:00:46\n", "I0206 17:30:28.088779 140642927564608 util.py:257] E70 Validation 1024/2148, done at 2020-02-06 17:30:52, 0:00:46\n", "W0206 17:30:53.057849 140642927564608 util.py:270] E70 Validation ----/2148, done at 2020-02-06 17:30:53\n", "I0206 17:30:53.059296 140642927564608 training.py:395] E70 ClassificationTrainingApp\n", "I0206 17:30:53.061108 140642927564608 training.py:476] E70 val 0.0295 loss, 99.2% correct, 0.2558 precision, 0.9346 recall, 0.4017 f1 score\n", "I0206 17:30:53.061517 140642927564608 training.py:489] E70 val_neg 0.0287 loss, 99.2% correct (50966 of 51382)\n", "I0206 17:30:53.061972 140642927564608 training.py:501] E70 val_pos 0.3043 loss, 93.5% correct (143 of 153)\n", "I0206 17:30:53.070618 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.3500000.state\n", "I0206 17:30:53.072670 140642927564608 training.py:614] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.best.state\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 17:30:53.074802 140642927564608 training.py:617] SHA1: 03a17539fb45b5e05141e21e1408a1d01a49e20f\n", "I0206 17:30:53.075356 140642927564608 training.py:249] Epoch 71 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:30:53.643440 140642927564608 util.py:233] E71 Training ----/2084, starting\n", "I0206 17:30:54.999347 140642927564608 util.py:257] E71 Training 16/2084, done at 2020-02-06 17:33:25, 0:02:31\n", "I0206 17:30:58.484849 140642927564608 util.py:257] E71 Training 64/2084, done at 2020-02-06 17:33:25, 0:02:31\n", "I0206 17:31:12.428197 140642927564608 util.py:257] E71 Training 256/2084, done at 2020-02-06 17:33:25, 0:02:31\n", "I0206 17:32:08.204355 140642927564608 util.py:257] E71 Training 1024/2084, done at 2020-02-06 17:33:25, 0:02:31\n", "W0206 17:33:25.170791 140642927564608 util.py:270] E71 Training ----/2084, done at 2020-02-06 17:33:25\n", "I0206 17:33:25.236906 140642927564608 training.py:395] E71 ClassificationTrainingApp\n", "I0206 17:33:25.245322 140642927564608 
training.py:476] E71 trn 0.0317 loss, 98.8% correct, 0.9870 precision, 0.9893 recall, 0.9882 f1 score\n", "I0206 17:33:25.245809 140642927564608 training.py:489] E71 trn_neg 0.0398 loss, 98.7% correct (24674 of 25000)\n", "I0206 17:33:25.246390 140642927564608 training.py:501] E71 trn_pos 0.0235 loss, 98.9% correct (24733 of 25000)\n", "I0206 17:33:25.251350 140642927564608 training.py:249] Epoch 72 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:33:25.784030 140642927564608 util.py:233] E72 Training ----/2084, starting\n", "I0206 17:33:27.143799 140642927564608 util.py:257] E72 Training 16/2084, done at 2020-02-06 17:35:57, 0:02:31\n", "I0206 17:33:30.627650 140642927564608 util.py:257] E72 Training 64/2084, done at 2020-02-06 17:35:57, 0:02:31\n", "I0206 17:33:44.565805 140642927564608 util.py:257] E72 Training 256/2084, done at 2020-02-06 17:35:57, 0:02:31\n", "I0206 17:34:40.317292 140642927564608 util.py:257] E72 Training 1024/2084, done at 2020-02-06 17:35:57, 0:02:30\n", "W0206 17:35:57.249374 140642927564608 util.py:270] E72 Training ----/2084, done at 2020-02-06 17:35:57\n", "I0206 17:35:57.315437 140642927564608 training.py:395] E72 ClassificationTrainingApp\n", "I0206 17:35:57.323734 140642927564608 training.py:476] E72 trn 0.0337 loss, 98.8% correct, 0.9875 precision, 0.9894 recall, 0.9885 f1 score\n", "I0206 17:35:57.324291 140642927564608 training.py:489] E72 trn_neg 0.0437 loss, 98.7% correct (24687 of 25000)\n", "I0206 17:35:57.324780 140642927564608 training.py:501] E72 trn_pos 0.0237 loss, 98.9% correct (24735 of 25000)\n", "I0206 17:35:57.330108 140642927564608 training.py:249] Epoch 73 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:35:57.863294 140642927564608 util.py:233] E73 Training ----/2084, starting\n", "I0206 17:35:59.297493 140642927564608 util.py:257] E73 Training 16/2084, done at 2020-02-06 17:38:29, 0:02:30\n", "I0206 17:36:02.782888 140642927564608 util.py:257] E73 Training 64/2084, done at 2020-02-06 17:38:29, 0:02:31\n", "I0206 17:36:16.719225 140642927564608 util.py:257] E73 Training 256/2084, done at 2020-02-06 17:38:29, 0:02:30\n", "I0206 17:37:12.470906 140642927564608 util.py:257] E73 Training 1024/2084, done at 2020-02-06 17:38:29, 0:02:30\n", "W0206 17:38:29.414797 140642927564608 util.py:270] E73 Training ----/2084, done at 2020-02-06 17:38:29\n", "I0206 17:38:29.480715 140642927564608 training.py:395] E73 ClassificationTrainingApp\n", "I0206 17:38:29.489035 140642927564608 training.py:476] E73 trn 0.0319 loss, 98.7% correct, 0.9869 precision, 0.9880 recall, 0.9875 f1 score\n", "I0206 17:38:29.489521 140642927564608 training.py:489] E73 trn_neg 0.0397 loss, 98.7% correct (24673 of 25000)\n", "I0206 17:38:29.490162 140642927564608 training.py:501] E73 trn_pos 0.0241 loss, 98.8% correct (24700 of 25000)\n", "I0206 17:38:29.494972 140642927564608 training.py:249] Epoch 74 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:38:30.029112 140642927564608 util.py:233] E74 Training ----/2084, starting\n", "I0206 17:38:31.391970 140642927564608 util.py:257] E74 Training 16/2084, done at 2020-02-06 17:41:01, 0:02:31\n", "I0206 17:38:34.877654 140642927564608 util.py:257] E74 Training 64/2084, done at 2020-02-06 17:41:01, 0:02:31\n", "I0206 17:38:48.814964 140642927564608 util.py:257] E74 Training 256/2084, done at 2020-02-06 17:41:01, 0:02:31\n", "I0206 17:39:44.562973 140642927564608 util.py:257] E74 Training 1024/2084, done at 2020-02-06 17:41:01, 0:02:30\n", "W0206 17:41:01.505350 140642927564608 util.py:270] E74 Training ----/2084, done at 
2020-02-06 17:41:01\n", "I0206 17:41:01.569506 140642927564608 training.py:395] E74 ClassificationTrainingApp\n", "I0206 17:41:01.577795 140642927564608 training.py:476] E74 trn 0.0330 loss, 98.8% correct, 0.9874 precision, 0.9884 recall, 0.9879 f1 score\n", "I0206 17:41:01.578315 140642927564608 training.py:489] E74 trn_neg 0.0419 loss, 98.7% correct (24685 of 25000)\n", "I0206 17:41:01.578814 140642927564608 training.py:501] E74 trn_pos 0.0242 loss, 98.8% correct (24710 of 25000)\n", "I0206 17:41:01.584218 140642927564608 training.py:249] Epoch 75 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:41:02.115249 140642927564608 util.py:233] E75 Training ----/2084, starting\n", "I0206 17:41:03.502988 140642927564608 util.py:257] E75 Training 16/2084, done at 2020-02-06 17:43:33, 0:02:31\n", "I0206 17:41:06.987936 140642927564608 util.py:257] E75 Training 64/2084, done at 2020-02-06 17:43:33, 0:02:31\n", "I0206 17:41:20.927115 140642927564608 util.py:257] E75 Training 256/2084, done at 2020-02-06 17:43:33, 0:02:31\n", "I0206 17:42:16.675398 140642927564608 util.py:257] E75 Training 1024/2084, done at 2020-02-06 17:43:33, 0:02:30\n", "W0206 17:43:33.605095 140642927564608 util.py:270] E75 Training ----/2084, done at 2020-02-06 17:43:33\n", "I0206 17:43:33.669129 140642927564608 training.py:395] E75 ClassificationTrainingApp\n", "I0206 17:43:33.677525 140642927564608 training.py:476] E75 trn 0.0316 loss, 98.8% correct, 0.9873 precision, 0.9894 recall, 0.9884 f1 score\n", "I0206 17:43:33.678128 140642927564608 training.py:489] E75 trn_neg 0.0402 loss, 98.7% correct (24683 of 25000)\n", "I0206 17:43:33.678608 140642927564608 training.py:501] E75 trn_pos 0.0230 loss, 98.9% correct (24734 of 25000)\n", "W0206 17:43:33.684032 140642927564608 util.py:233] E75 Validation ----/2148, starting\n", "I0206 17:43:34.271921 140642927564608 util.py:257] E75 Validation 16/2148, done at 2020-02-06 17:44:27, 0:00:53\n", "I0206 17:43:35.356495 140642927564608 util.py:257] E75 Validation 64/2148, done at 2020-02-06 17:44:23, 0:00:49\n", "I0206 17:43:39.441048 140642927564608 util.py:257] E75 Validation 256/2148, done at 2020-02-06 17:44:20, 0:00:46\n", "I0206 17:43:55.803945 140642927564608 util.py:257] E75 Validation 1024/2148, done at 2020-02-06 17:44:19, 0:00:45\n", "W0206 17:44:21.019209 140642927564608 util.py:270] E75 Validation ----/2148, done at 2020-02-06 17:44:21\n", "I0206 17:44:21.020605 140642927564608 training.py:395] E75 ClassificationTrainingApp\n", "I0206 17:44:21.022432 140642927564608 training.py:476] E75 val 0.0537 loss, 98.5% correct, 0.1625 precision, 0.9412 recall, 0.2772 f1 score\n", "I0206 17:44:21.022849 140642927564608 training.py:489] E75 val_neg 0.0533 loss, 98.6% correct (50640 of 51382)\n", "I0206 17:44:21.023235 140642927564608 training.py:501] E75 val_pos 0.2050 loss, 94.1% correct (144 of 153)\n", "I0206 17:44:21.031092 140642927564608 training.py:597] Saved model params to data-unversioned/part2/models/p2ch14/cls_2020-02-06_14.16.55_final-nodule-nonnodule.3750000.state\n", "I0206 17:44:21.032827 140642927564608 training.py:617] SHA1: b4d8a2d2dffa50f5a59a643ac87271d4ba86d1fa\n", "I0206 17:44:21.033276 140642927564608 training.py:249] Epoch 76 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:44:21.565176 140642927564608 util.py:233] E76 Training ----/2084, starting\n", "I0206 17:44:22.931928 140642927564608 util.py:257] E76 Training 16/2084, done at 2020-02-06 17:46:53, 0:02:31\n", "I0206 17:44:26.416841 140642927564608 util.py:257] E76 Training 64/2084, done at 2020-02-06 
17:46:53, 0:02:31\n", "I0206 17:44:40.356106 140642927564608 util.py:257] E76 Training 256/2084, done at 2020-02-06 17:46:53, 0:02:31\n", "I0206 17:45:36.107136 140642927564608 util.py:257] E76 Training 1024/2084, done at 2020-02-06 17:46:52, 0:02:30\n", "W0206 17:46:53.065853 140642927564608 util.py:270] E76 Training ----/2084, done at 2020-02-06 17:46:53\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "I0206 17:46:53.131109 140642927564608 training.py:395] E76 ClassificationTrainingApp\n", "I0206 17:46:53.139310 140642927564608 training.py:476] E76 trn 0.0311 loss, 98.9% correct, 0.9881 precision, 0.9896 recall, 0.9888 f1 score\n", "I0206 17:46:53.139803 140642927564608 training.py:489] E76 trn_neg 0.0392 loss, 98.8% correct (24701 of 25000)\n", "I0206 17:46:53.140289 140642927564608 training.py:501] E76 trn_pos 0.0230 loss, 99.0% correct (24741 of 25000)\n", "I0206 17:46:53.145021 140642927564608 training.py:249] Epoch 77 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:46:53.676151 140642927564608 util.py:233] E77 Training ----/2084, starting\n", "I0206 17:46:55.027523 140642927564608 util.py:257] E77 Training 16/2084, done at 2020-02-06 17:49:25, 0:02:31\n", "I0206 17:46:58.512448 140642927564608 util.py:257] E77 Training 64/2084, done at 2020-02-06 17:49:25, 0:02:31\n", "I0206 17:47:12.451045 140642927564608 util.py:257] E77 Training 256/2084, done at 2020-02-06 17:49:25, 0:02:31\n", "I0206 17:48:08.193310 140642927564608 util.py:257] E77 Training 1024/2084, done at 2020-02-06 17:49:25, 0:02:30\n", "W0206 17:49:25.124576 140642927564608 util.py:270] E77 Training ----/2084, done at 2020-02-06 17:49:25\n", "I0206 17:49:25.190138 140642927564608 training.py:395] E77 ClassificationTrainingApp\n", "I0206 17:49:25.198528 140642927564608 training.py:476] E77 trn 0.0328 loss, 98.9% correct, 0.9888 precision, 0.9892 recall, 0.9890 f1 score\n", "I0206 17:49:25.199007 140642927564608 training.py:489] E77 trn_neg 0.0419 loss, 98.9% correct (24721 of 25000)\n", "I0206 17:49:25.199494 140642927564608 training.py:501] E77 trn_pos 0.0237 loss, 98.9% correct (24731 of 25000)\n", "I0206 17:49:25.204474 140642927564608 training.py:249] Epoch 78 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:49:25.736556 140642927564608 util.py:233] E78 Training ----/2084, starting\n", "I0206 17:49:27.106358 140642927564608 util.py:257] E78 Training 16/2084, done at 2020-02-06 17:51:57, 0:02:31\n", "I0206 17:49:30.590118 140642927564608 util.py:257] E78 Training 64/2084, done at 2020-02-06 17:51:57, 0:02:31\n", "I0206 17:49:44.525344 140642927564608 util.py:257] E78 Training 256/2084, done at 2020-02-06 17:51:57, 0:02:30\n", "I0206 17:50:40.269158 140642927564608 util.py:257] E78 Training 1024/2084, done at 2020-02-06 17:51:57, 0:02:30\n", "W0206 17:51:57.201412 140642927564608 util.py:270] E78 Training ----/2084, done at 2020-02-06 17:51:57\n", "I0206 17:51:57.266761 140642927564608 training.py:395] E78 ClassificationTrainingApp\n", "I0206 17:51:57.275140 140642927564608 training.py:476] E78 trn 0.0314 loss, 98.9% correct, 0.9883 precision, 0.9904 recall, 0.9893 f1 score\n", "I0206 17:51:57.275619 140642927564608 training.py:489] E78 trn_neg 0.0402 loss, 98.8% correct (24706 of 25000)\n", "I0206 17:51:57.276233 140642927564608 training.py:501] E78 trn_pos 0.0226 loss, 99.0% correct (24759 of 25000)\n", "I0206 17:51:57.281072 140642927564608 training.py:249] Epoch 79 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:51:57.811746 140642927564608 util.py:233] E79 Training ----/2084, 
starting\n", "I0206 17:51:59.271262 140642927564608 util.py:257] E79 Training 16/2084, done at 2020-02-06 17:54:29, 0:02:30\n", "I0206 17:52:02.756605 140642927564608 util.py:257] E79 Training 64/2084, done at 2020-02-06 17:54:29, 0:02:31\n", "I0206 17:52:16.693745 140642927564608 util.py:257] E79 Training 256/2084, done at 2020-02-06 17:54:29, 0:02:30\n", "I0206 17:53:12.433187 140642927564608 util.py:257] E79 Training 1024/2084, done at 2020-02-06 17:54:29, 0:02:30\n", "W0206 17:54:29.365415 140642927564608 util.py:270] E79 Training ----/2084, done at 2020-02-06 17:54:29\n", "I0206 17:54:29.431446 140642927564608 training.py:395] E79 ClassificationTrainingApp\n", "I0206 17:54:29.440056 140642927564608 training.py:476] E79 trn 0.0314 loss, 98.9% correct, 0.9879 precision, 0.9893 recall, 0.9886 f1 score\n", "I0206 17:54:29.440573 140642927564608 training.py:489] E79 trn_neg 0.0401 loss, 98.8% correct (24696 of 25000)\n", "I0206 17:54:29.441135 140642927564608 training.py:501] E79 trn_pos 0.0227 loss, 98.9% correct (24732 of 25000)\n", "I0206 17:54:29.445934 140642927564608 training.py:249] Epoch 80 of 100, 2084/2148 batches of size 24*1\n", "W0206 17:54:29.978035 140642927564608 util.py:233] E80 Training ----/2084, starting\n", "I0206 17:54:31.359939 140642927564608 util.py:257] E80 Training 16/2084, done at 2020-02-06 17:57:01, 0:02:31\n", "I0206 17:54:34.844545 140642927564608 util.py:257] E80 Training 64/2084, done at 2020-02-06 17:57:01, 0:02:31\n", "I0206 17:54:48.779664 140642927564608 util.py:257] E80 Training 256/2084, done at 2020-02-06 17:57:01, 0:02:30\n", "I0206 17:55:44.518726 140642927564608 util.py:257] E80 Training 1024/2084, done at 2020-02-06 17:57:01, 0:02:30\n", "W0206 17:57:01.434102 140642927564608 util.py:270] E80 Training ----/2084, done at 2020-02-06 17:57:01\n", "I0206 17:57:01.498972 140642927564608 training.py:395] E80 ClassificationTrainingApp\n", "I0206 17:57:01.507477 140642927564608 training.py:476] E80 trn 0.0305 loss, 98.9% correct, 0.9889 precision, 0.9893 recall, 0.9891 f1 score\n", "I0206 17:57:01.507988 140642927564608 training.py:489] E80 trn_neg 0.0390 loss, 98.9% correct (24723 of 25000)\n", "I0206 17:57:01.508642 140642927564608 training.py:501] E80 trn_pos 0.0220 loss, 98.9% correct (24733 of 25000)\n", "W0206 17:57:01.514642 140642927564608 util.py:233] E80 Validation ----/2148, starting\n", "I0206 17:57:02.086089 140642927564608 util.py:257] E80 Validation 16/2148, done at 2020-02-06 17:57:51, 0:00:49\n", "I0206 17:57:03.112519 140642927564608 util.py:257] E80 Validation 64/2148, done at 2020-02-06 17:57:48, 0:00:46\n", "I0206 17:57:07.262106 140642927564608 util.py:257] E80 Validation 256/2148, done at 2020-02-06 17:57:48, 0:00:46\n" ] } ], "source": [ "run('p2ch14.training.ClassificationTrainingApp', f'--epochs=100', 'final-nodule-nonnodule')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.5" } }, "nbformat": 4, "nbformat_minor": 2 }