你好,我在测试时发现如下警告(WARNING),请帮我看一看,谢谢
WARNING [07/15 08:50:58 fvcore.common.checkpoint]: The checkpoint state_dict contains keys that are not used by the model:
clip_adapter.clip_model.visual.learnable_weight
clip_adapter.clip_model.visual.cxt_decoder.layers.0.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.self_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.multihead_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.linear1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.linear2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.norm1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.norm2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.0.norm3.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.self_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.multihead_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.linear1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.linear2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.norm1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.norm2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.1.norm3.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.self_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.multihead_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.linear1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.linear2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.norm1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.norm2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.2.norm3.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.self_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.multihead_attn.out_proj.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.linear1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.linear2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.norm1.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.norm2.{bias, weight}
clip_adapter.clip_model.visual.cxt_decoder.layers.3.norm3.{bias, weight}
clip_adapter.original_clip.learnable_weight
clip_adapter.original_clip.cxt_decoder.layers.0.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.0.self_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.0.multihead_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.linear1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.linear2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.norm1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.norm2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.0.norm3.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.1.self_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.1.multihead_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.linear1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.linear2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.norm1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.norm2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.1.norm3.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.2.self_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.2.multihead_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.linear1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.linear2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.norm1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.norm2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.2.norm3.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.self_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.3.self_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.multihead_attn.{in_proj_bias, in_proj_weight}
clip_adapter.original_clip.cxt_decoder.layers.3.multihead_attn.out_proj.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.linear1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.linear2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.norm1.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.norm2.{bias, weight}
clip_adapter.original_clip.cxt_decoder.layers.3.norm3.{bias, weight}
0%|          | 0/1 [00:00<?, ?it/s]
G:\program\SCAN-main\scan\frequency.py:42: UserWarning: Casting complex values to real discards the imaginary part (Triggered internally at ..\aten\src\ATen\native\Copy.cpp:244.)
  y = torch.fft.ifft2(y, s=(h, w)).float()
G:\program\SCAN-main\scan\modeling\transformer_decoder\position_encoding.py:41: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
G:\program\SCAN-main\scan\modeling\transformer_decoder\position_encoding.py:41: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
从日志看,这个警告是说 checkpoint 的 state_dict 中有一些键(clip_adapter 下的 cxt_decoder、learnable_weight 等)没有被当前模型使用——并不是形状不匹配(fvcore 如果遇到形状不匹配会单独以 "incorrect shapes" 报出)。请问:这是否意味着加载的权重与当前模型结构不一致?这些未被使用的参数警告该如何排查和解决呢?