It's me again :P Thank you for your patience and time.
The specs of the GPUs I'm using: 4x Nvidia GTX 1080 Ti (Pascal, 11 GB memory each), in a server with 24 cores / 48 threads / 256 GB memory.
# --- Run configuration ---

# Sample budget: keep the literal string "all" to use every sample,
# otherwise coerce the numeric string to an int.
num_samples = "all"  # "all" or number
num_samples = num_samples if num_samples == "all" else int(num_samples)

checkp = "mscoco"  # checkpoint to load: refcoco, mscoco, vqa, flickr30k
write_res = "yes"  # write results to disk: "yes" or "no"
task = "image_sentence_alignment"  # image_sentence_alignment, vqa, gqa

# Dataset names that are NOT part of the VALSE benchmark.
other_tasks_than_valse = ['mscoco', 'vqa', 'gqa', 'gqa_balanced', 'nlvr2']

use_cuda = True  # run the model on GPU when available

# Instrument name -> [image directory, annotation JSON path].
DATA = {
    "existence": [
        "/home/students/cheng/MM-SHAP/visual7w/images",
        '/home/students/cheng/MM-SHAP/data/existence.json',
    ],
}
However, these two issues don't sound like the case I have here.
Have you encountered a similar problem?
Argument interpolation should be of type InterpolationMode instead of int. Please, use InterpolationMode enum.
0%| | 0/534 [00:00<?, ?it/s]
0%| | 0/534 [00:00<?, ?it/s]
Traceback (most recent call last):
File "mm-shap_albef_dataset.py", line 306, in <module>
shap_values = explainer(X)
File "/home/students/cheng/MM-SHAP/shap/explainers/_permutation.py", line 62, in __call__
batch_size=batch_size, outputs=outputs, silent=silent
File "/home/students/cheng/MM-SHAP/shap/explainers/_permutation.py", line 76, in __call__
outputs=outputs, silent=silent
File "/home/students/cheng/MM-SHAP/shap/explainers/_explainer.py", line 260, in __call__
batch_size=batch_size, outputs=outputs, silent=silent, **kwargs
File "/home/students/cheng/MM-SHAP/shap/explainers/_permutation.py", line 134, in explain_row
outputs = fm(masks, zero_index=0, batch_size=batch_size)
File "/home/students/cheng/MM-SHAP/shap/utils/_masked_model.py", line 65, in __call__
return self._full_masking_call(full_masks, zero_index=zero_index, batch_size=batch_size)
File "/home/students/cheng/MM-SHAP/shap/utils/_masked_model.py", line 141, in _full_masking_call
outputs = self.model(*joined_masked_inputs)
File "/home/students/cheng/MM-SHAP/shap/models/_model.py", line 21, in __call__
return np.array(self.inner_model(*args))
File "mm-shap_albef_dataset.py", line 184, in get_model_prediction
masked_text_inputs.to("cuda"))
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "mm-shap_albef_dataset.py", line 92, in forward
return_dict=True,
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/students/cheng/MM-SHAP/ALBEF/models/xbert.py", line 1067, in forward
mode=mode,
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/students/cheng/MM-SHAP/ALBEF/models/xbert.py", line 601, in forward
output_attentions,
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/students/cheng/MM-SHAP/ALBEF/models/xbert.py", line 504, in forward
output_attentions=output_attentions,
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/students/cheng/MM-SHAP/ALBEF/models/xbert.py", line 407, in forward
output_attentions,
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/students/cheng/MM-SHAP/ALBEF/models/xbert.py", line 329, in forward
attention_probs.register_hook(self.save_attn_gradients)
File "/home/students/cheng/anaconda3/envs/shap/lib/python3.6/site-packages/torch/_tensor.py", line 289, in register_hook
raise RuntimeError("cannot register a hook on a tensor that "
RuntimeError: cannot register a hook on a tensor that doesn't require gradient
srun: error: gpu08: task 0: Exited with exit code 1