diff --git a/Makefile b/Makefile
index dae4368..f621c5a 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,7 @@ install:
 
 .PHONY: lint
 lint: install
-	uv run ruff check --output-format=github .
+	uv run ruff check --output-format=github scripts
 	uv run nbqa ruff notebooks
 
 .PHONY: format
@@ -27,7 +27,7 @@ format: install
 
 .PHONY: typecheck
typecheck: install
-	uv run mypy .jupytext
+	uv run mypy scripts
 	uv run nbqa mypy notebooks
 
 .PHONY: apply-formatter
diff --git a/notebooks/4-2_clip.ipynb b/notebooks/4-2_clip.ipynb
index 3ef1eb2..a67391c 100644
--- a/notebooks/4-2_clip.ipynb
+++ b/notebooks/4-2_clip.ipynb
@@ -502,7 +502,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "634cf091-dc89-47a4-a7dd-623059b7655f",
+   "id": "6cf5440f-a7ee-4b38-95eb-5654f5562338",
    "metadata": {},
    "source": [
     "### 画像とテキストのペアの構築"
@@ -519,7 +519,6 @@
    "\n",
    "import skimage\n",
    "from more_itertools import sort_together\n",
-   "from PIL import Image\n",
    "\n",
    "original_imgs, original_txts = [], []\n",
    "\n",
diff --git a/notebooks/5-4-1_lora.ipynb b/notebooks/5-4-1_lora.ipynb
index 224eb5c..082b5d4 100644
--- a/notebooks/5-4-1_lora.ipynb
+++ b/notebooks/5-4-1_lora.ipynb
@@ -164,7 +164,9 @@
    "cell_type": "code",
    "execution_count": 6,
    "id": "8d44b56e",
-   "metadata": {},
+   "metadata": {
+    "lines_to_next_cell": 2
+   },
    "outputs": [
     {
      "data": {
@@ -216,6 +218,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import random\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "\n",
     "def tokenize_captions(examples, is_train=True):\n",
     "    # Preprocessing the datasets.\n",
     "    # We need to tokenize input captions and transform the images.\n",
diff --git a/scripts/4-2_clip.py b/scripts/4-2_clip.py
index 9ad035b..60bd0cd 100644
--- a/scripts/4-2_clip.py
+++ b/scripts/4-2_clip.py
@@ -148,7 +148,6 @@
 
 import skimage
 from more_itertools import sort_together
-from PIL import Image
 
 original_imgs, original_txts = [], []
 
diff --git a/scripts/5-1-1_textual-inversion.py b/scripts/5-1-1_textual-inversion.py
index a05d13c..08afe8d 100644
--- a/scripts/5-1-1_textual-inversion.py
+++ b/scripts/5-1-1_textual-inversion.py
@@ -354,7 +354,7 @@ def training_function(
     )
     learning_rate = hparams.learning_rate
     max_train_steps = hparams.max_train_steps
-    output_dir_path = hparams.output_dir_path
+    # output_dir_path = hparams.output_dir_path
     gradient_checkpointing = hparams.gradient_checkpointing
 
     # 学習を効率化する Accelerator の設定
@@ -579,7 +579,7 @@ def training_function(
         pipeline.save_pretrained(hparams.output_dir_path)
         # 新たに追加した概念に対応するパラメータも保存
         save_path = os.path.join(
-            hparams.output_dir_path, f"learned_embeds.bin"
+            hparams.output_dir_path, "learned_embeds.bin"
         )
         save_progress(
             text_encoder,