|
128 | 128 | "execution_count": 5, |
129 | 129 | "id": "4122d05f-8153-4dde-9abf-362b8311217b", |
130 | 130 | "metadata": {}, |
131 | | - "outputs": [ |
132 | | - { |
133 | | - "name": "stderr", |
134 | | - "output_type": "stream", |
135 | | - "text": [ |
136 | | - "Skip loading parameter 'roi_heads.box_predictor.cls_score.weight' to the model due to incompatible shapes: (3, 1024) in the checkpoint but (2, 1024) in the model! You might want to double check if this is expected.\n", |
137 | | - "Skip loading parameter 'roi_heads.box_predictor.cls_score.bias' to the model due to incompatible shapes: (3,) in the checkpoint but (2,) in the model! You might want to double check if this is expected.\n", |
138 | | - "Skip loading parameter 'roi_heads.box_predictor.bbox_pred.weight' to the model due to incompatible shapes: (8, 1024) in the checkpoint but (4, 1024) in the model! You might want to double check if this is expected.\n", |
139 | | - "Skip loading parameter 'roi_heads.box_predictor.bbox_pred.bias' to the model due to incompatible shapes: (8,) in the checkpoint but (4,) in the model! You might want to double check if this is expected.\n", |
140 | | - "Some model parameters or buffers are not found in the checkpoint:\n", |
141 | | - "\u001b[34mroi_heads.box_predictor.bbox_pred.{bias, weight}\u001b[0m\n", |
142 | | - "\u001b[34mroi_heads.box_predictor.cls_score.{bias, weight}\u001b[0m\n" |
143 | | - ] |
144 | | - } |
145 | | - ], |
| 131 | + "outputs": [], |
146 | 132 | "source": [ |
147 | 133 | "model = Detectron2DetectionPredictor(\n", |
148 | 134 | " output_dir=output_path,\n", |
|
227 | 213 | }, |
228 | 214 | { |
229 | 215 | "cell_type": "code", |
230 | | - "execution_count": 8, |
| 216 | + "execution_count": 9, |
231 | 217 | "id": "c9f7e6e3-5bb5-4dd7-a9d4-7097700691de", |
232 | 218 | "metadata": { |
233 | 219 | "tags": [] |
|
272 | 258 | " ratio = spacing/spacing_min\n", |
273 | 259 | " with WholeSlideImage(image_path) as wsi:\n", |
274 | 260 | " spacing = wsi.get_real_spacing(spacing_min)\n", |
275 | | - " print(f\"Spacing: {spacing}\\nSpacing const: {SPACING_CONST}\")\n", |
| 261 | + " print(f\"Spacing: {spacing} - Spacing const: {SPACING_CONST} - ratio: {ratio}\")\n", |
276 | 262 | "\n", |
277 | 263 | "\n", |
278 | 264 | " for x_batch, y_batch, info in tqdm(iterator):\n", |
|
393 | 379 | }, |
394 | 380 | { |
395 | 381 | "cell_type": "code", |
396 | | - "execution_count": 9, |
| 382 | + "execution_count": 10, |
397 | 383 | "id": "7bf43390-c419-4b8a-9f5a-104cb8040592", |
398 | 384 | "metadata": {}, |
399 | 385 | "outputs": [ |
|
402 | 388 | "output_type": "stream", |
403 | 389 | "text": [ |
404 | 390 | "predicting...\n", |
405 | | - "Spacing: 0.24199951445730394\n", |
406 | | - "Spacing const: 0.24199951445730394\n" |
| 391 | + "Spacing: 0.24199951445730394 - Spacing const: 0.24199951445730394 - ratio: 2.0\n" |
407 | 392 | ] |
408 | 393 | }, |
409 | 394 | { |
|
421 | 406 | " max_size = (max_size + (stride - 1)) // stride * stride\n", |
422 | 407 | "/venv/lib/python3.8/site-packages/torch/functional.py:445: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2157.)\n", |
423 | 408 | " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n", |
424 | | - "100%|██████████| 379/379 [00:44<00:00, 8.43it/s]\n" |
| 409 | + "100%|██████████| 379/379 [00:42<00:00, 8.95it/s]\n" |
425 | 410 | ] |
426 | 411 | }, |
427 | 412 | { |
428 | 413 | "name": "stdout", |
429 | 414 | "output_type": "stream", |
430 | 415 | "text": [ |
431 | | - "Predicted 30007 points\n", |
| 416 | + "Predicted 14925 points\n", |
432 | 417 | "saving predictions...\n", |
433 | 418 | "finished!\n" |
434 | 419 | ] |
|