import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def init_modules(modules):
    """Initialize 3D modules in place.

    Conv3d weights get Kaiming-normal init (fan_out, ReLU); BatchNorm3d
    weight/bias are set to 1/0; Linear biases are zeroed (Linear weights
    are intentionally left at their default initialization).
    """
    for m in modules:
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm3d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.constant_(m.bias, 0)


def init_modules_2d(modules):
    """2D counterpart of init_modules (Conv2d / BatchNorm2d / Linear)."""
    for m in modules:
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.constant_(m.bias, 0)


def extend_mask(mask, kernel_size=(1, 7, 7), padding=(0, 3, 3), min_value=0.01):
    """Dilate a 5D mask by average pooling and re-thresholding.

    After stride-1 average pooling, any position whose local average is
    >= min_value becomes True, otherwise False.  Returns a bool tensor
    of the same shape as the input (given matching kernel/padding).
    """
    mask = F.avg_pool3d(mask.float(), kernel_size=kernel_size,
                        padding=padding, stride=1)
    # Threshold the pooled average: >= min_value -> True, else False.
    mask = mask.ge(min_value)
    return mask


def std_mean(data, kernel_size=(9, 9)):
    """Per-position std and mean over a sliding spatial window.

    `data` is a 5D tensor; the last two dims are treated as H/W and are
    zero-padded so the outputs have the same spatial shape as `data`
    (assumes layout (N, C, D, H, W) -- TODO confirm against callers).

    Returns the (std, mean) pair produced by ``torch.std_mean``.
    """
    result = torch.empty((data.size(0), data.size(1), data.size(2),
                          data.size(3), data.size(4),
                          kernel_size[0] * kernel_size[1]),
                         device=data.device)
    # F.pad takes pads for the last dims first: (W_left, W_right, H_top,
    # H_bottom, D_front, D_back); D is not padded.
    pdata = F.pad(data,
                  [kernel_size[1] // 2, kernel_size[1] // 2,
                   kernel_size[0] // 2, kernel_size[0] // 2,
                   0, 0],
                  mode='constant', value=0)
    for w in range(kernel_size[0]):
        for h in range(kernel_size[1]):
            # BUGFIX: the row-major flat window index is
            # w * kernel_size[1] + h.  The original used
            # kernel_size[0] * w + h, which is identical only for square
            # kernels and indexes out of bounds for e.g. kernel_size=(3, 1).
            result[:, :, :, :, :, kernel_size[1] * w + h] = \
                pdata[:, :, :, w:w + data.size(3), h:h + data.size(4)]
    std_mean_result = torch.std_mean(result, dim=5)
    return std_mean_result


def get_data(x, mask=None):
    """Build a feature tensor from x plus local mean/std statistics.

    Concatenates along dim 1: x, a duplicate of x, and the windowed
    mean/std for 5x5, 7x7 and 9x9 neighbourhoods, then squeezes dim 0
    (the batch dim) when it has size 1.  `mask` is currently unused.
    """
    x = torch.tensor(x)  # NOTE(review): copies, and warns if x is already a tensor
    std_mean_result_k5 = std_mean(x, kernel_size=(5, 5))
    std_mean_result_k7 = std_mean(x, kernel_size=(7, 7))
    std_mean_result_k9 = std_mean(x, kernel_size=(9, 9))
    y = x
    data = torch.cat((x, y,
                      std_mean_result_k5[1], std_mean_result_k5[0],
                      std_mean_result_k7[1], std_mean_result_k7[0],
                      std_mean_result_k9[1], std_mean_result_k9[0]), 1)
    # Drop the leading batch dim if (and only if) it has size 1.
    data = torch.squeeze(data, dim=0)
    return data