============================================================================ test session starts =============================================================================
platform win32 -- Python 3.7.10, pytest-6.2.4, py-1.10.0, pluggy-0.13.1
rootdir: C:\Users\bobturner\Documents\pykale
collected 1 item
tests\pipeline\test_deep_dti.py F [100%]
================================================================================== FAILURES ==================================================================================
_______________________________________________________________________________ test_deep_data _______________________________________________________________________________
download_path = 'tests\\test_data'
def test_deep_data(download_path):
test_dataset = BindingDBDataset(name=DATASET, split="test", path=download_path)
test_batch = next(iter(DataLoader(dataset=test_dataset, shuffle=True, batch_size=32)))
drug_encoder = CNNEncoder(num_embeddings=64, embedding_dim=128, sequence_length=85, num_kernels=32, kernel_length=8)
target_encoder = CNNEncoder(
num_embeddings=25, embedding_dim=128, sequence_length=1200, num_kernels=32, kernel_length=8
)
decoder = MLPDecoder(in_dim=192, hidden_dim=16, out_dim=16)
# test deep_dta trainer
save_parameters = {"seed": 2020, "batch_size": 256}
model = DeepDTATrainer(drug_encoder, target_encoder, decoder, lr=0.001, ci_metric=True, **save_parameters).eval()
assert isinstance(model.drug_encoder, CNNEncoder)
assert isinstance(model.target_encoder, CNNEncoder)
assert isinstance(model.decoder, MLPDecoder)
model.configure_optimizers()
> assert torch.is_tensor(model.validation_step(test_batch, 0))
tests\pipeline\test_deep_dti.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
kale\pipeline\deep_dti.py:115: in validation_step
self.log("val_ci", ci, on_epoch=True, on_step=False)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = DeepDTATrainer(
(drug_encoder): CNNEncoder(
(embedding): Embedding(65, 128)
(conv1): Conv1d(85, 32, kernel_s...s=True)
(fc4): Linear(in_features=16, out_features=1, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
)
)
name = 'val_ci', value = tensor([0.4316]), prog_bar = False, logger = True, on_step = False, on_epoch = True, reduce_fx = 'mean', tbptt_reduce_fx = None
tbptt_pad_token = None, enable_graph = False, sync_dist = False, sync_dist_op = None, sync_dist_group = None, add_dataloader_idx = True, batch_size = None
metric_attribute = None, rank_zero_only = None
def log(
self,
name: str,
value: _METRIC_COLLECTION,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Union[str, Callable] = "default", # TODO: change to 'mean' when `sync_dist_op` is removed in 1.6
tbptt_reduce_fx: Optional = None, # noqa: Remove in 1.6
tbptt_pad_token: Optional = None, # noqa: Remove in 1.6
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Optional = None, # noqa: Remove in 1.6
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
batch_size: Optional[int] = None,
metric_attribute: Optional[str] = None,
rank_zero_only: Optional[bool] = None,
) -> None:
"""
Log a key, value pair.
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows:
.. csv-table:: ``*`` also applies to the test loop
:header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key to log
value: value to log. Can be a ``float``, ``Tensor``, ``Metric``, or a dictionary of the former.
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. :meth:`torch.mean` by default.
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_group: the ddp group to sync across
add_dataloader_idx: if True, appends the index of the current dataloader to
the name (when using multiple). If False, user needs to give unique names for
each dataloader to not mix values
batch_size: Current batch_size. This will be directly inferred from the loaded batch,
but some data structures might need to explicitly provide it.
metric_attribute: To restore the metric state, Lightning requires the reference of the
:class:`torchmetrics.Metric` in your model. This is found automatically if it is a model attribute.
rank_zero_only: Whether the value will be logged only on rank 0. This will prevent synchronization which
would produce a deadlock as not all processes would perform this log call.
"""
if tbptt_reduce_fx is not None:
rank_zero_deprecation(
"`self.log(tbptt_reduce_fx=...)` is no longer supported. The flag will be removed in v1.6."
" Please, open a discussion explaining your use-case in"
" `https://github.com/PyTorchLightning/pytorch-lightning/discussions`"
)
if tbptt_pad_token is not None:
rank_zero_deprecation(
"`self.log(tbptt_pad_token=...)` is no longer supported. The flag will be removed in v1.6."
" Please, open a discussion explaining your use-case in"
" `https://github.com/PyTorchLightning/pytorch-lightning/discussions`"
)
if sync_dist_op is not None:
rank_zero_deprecation(
f"`self.log(sync_dist_op='{sync_dist_op}')` is deprecated and will be removed in v.1.6."
f" Use `self.log(reduce_fx={sync_dist_op})` instead."
)
if reduce_fx == "default":
reduce_fx = sync_dist_op
elif reduce_fx == "default":
reduce_fx = "mean"
# check for invalid values
apply_to_collection(value, dict, self.__check_not_nested, name)
apply_to_collection(
value, object, self.__check_allowed, name, value, wrong_dtype=(numbers.Number, Metric, Tensor, dict)
)
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
> results = self.trainer._results
E AttributeError: 'NoneType' object has no attribute '_results'
..\..\.conda\envs\bindingdb\lib\site-packages\pytorch_lightning\core\lightning.py:407: AttributeError
---------------------------------------------------------------------------- Captured stderr call ----------------------------------------------------------------------------
Found local copy...
Loading...
Done!
To log space...
========================================================================== short test summary info ===========================================================================
FAILED tests/pipeline/test_deep_dti.py::test_deep_data - AttributeError: 'NoneType' object has no attribute '_results'
============================================================================= 1 failed in 3.50s ==============================================================================
Expected behavior: the test passes.
PyTorch version: 1.9.0
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 10 Pro
GCC version: (tdm64-1) 9.2.0
Clang version: Could not collect
CMake version: Could not collect
Libc version: N/A
Python version: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 15:37:01) [MSC v.1916 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.19041-SP0
Is CUDA available: False
CUDA runtime version: 10.0.130
GPU models and configuration: GPU 0: GeForce GTX 1050 Ti with Max-Q Design
Nvidia driver version: 462.31
cuDNN version: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin\cudnn64_7.dll
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.21.1
[pip3] pytorch-lightning==1.4.0
[pip3] pytorch-memlab==0.2.3
[pip3] torch==1.9.0
[pip3] torch-cluster==1.5.9
[pip3] torch-geometric==1.7.2
[pip3] torch-scatter==2.0.8
[pip3] torch-sparse==0.6.10
[pip3] torch-spline-conv==1.2.1
[pip3] torchaudio==0.9.0
[pip3] torchmetrics==0.4.1
[pip3] torchsummary==1.5.1
[pip3] torchvision==0.10.0
[conda] blas 2.110 mkl conda-forge
[conda] blas-devel 3.9.0 10_mkl conda-forge
[conda] cpuonly 1.0 0 pytorch
[conda] libblas 3.9.0 10_mkl conda-forge
[conda] libcblas 3.9.0 10_mkl conda-forge
[conda] liblapack 3.9.0 10_mkl conda-forge
[conda] liblapacke 3.9.0 10_mkl conda-forge
[conda] mkl 2021.3.0 hb70f87d_564 conda-forge
[conda] mkl-devel 2021.3.0 h57928b3_565 conda-forge
[conda] mkl-include 2021.3.0 hb70f87d_564 conda-forge
[conda] numpy 1.21.1 py37hcbcd69c_0 conda-forge
[conda] pytorch 1.9.0 py3.7_cpu_0 [cpuonly] pytorch
[conda] pytorch-cluster 1.5.9 py37_torch_1.9.0_cpu rusty1s
[conda] pytorch-geometric 1.7.2 py37_torch_1.9.0_cpu rusty1s
[conda] pytorch-lightning 1.4.0 pypi_0 pypi
[conda] pytorch-memlab 0.2.3 pypi_0 pypi
[conda] pytorch-scatter 2.0.8 py37_torch_1.9.0_cpu rusty1s
[conda] pytorch-sparse 0.6.10 py37_torch_1.9.0_cpu rusty1s
[conda] pytorch-spline-conv 1.2.1 py37_torch_1.9.0_cpu rusty1s
[conda] torchaudio 0.9.0 py37 pytorch
[conda] torchmetrics 0.4.1 pypi_0 pypi
[conda] torchsummary 1.5.1 pypi_0 pypi
[conda] torchvision 0.10.0 py37_cpu [cpuonly] pytorch
It seems that there are also problems with CI runs on Ubuntu, where the environment is more tightly controlled.