This article works through a remote sensing classification task. Using the remote sensing image dataset with 45 land-use categories released by Northwestern Polytechnical University in 2016, it builds a custom RESISC45Dataset, constructs a ResNet50 model, trains and validates it to an accuracy of about 0.83, and finally runs prediction and visualizes the results.

①. About the task
Remote sensing classification refers to dividing remote sensing into different types according to different classification criteria and to the detection and application aspects being emphasized. Computer classification of remote sensing images is based on the similarity between image pixels, which is commonly measured with distances and correlation coefficients. Common classification approaches include supervised classification and unsupervised classification.
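As a quick, illustrative sketch (not from the original article) of those two similarity measures, the snippet below compares two hypothetical pixel spectral vectors with a Euclidean distance and a Pearson correlation coefficient:
import numpy as np

# Two hypothetical pixel spectral vectors (band values made up for illustration)
pixel_a = np.array([0.32, 0.41, 0.55], dtype=np.float32)
pixel_b = np.array([0.30, 0.44, 0.52], dtype=np.float32)

# Euclidean distance: smaller means the pixels are more similar
distance = np.linalg.norm(pixel_a - pixel_b)

# Pearson correlation coefficient: closer to 1 means the pixels are more similar
correlation = np.corrcoef(pixel_a, pixel_b)[0, 1]

print(distance, correlation)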
By platform, remote sensing can be divided into spaceborne, airborne, and ground-based (near-surface) remote sensing. By the electromagnetic band being sensed, it can be divided into visible-light, infrared, and microwave remote sensing, among others.
②. About the dataset
The dataset was released by Northwestern Polytechnical University in 2016 and contains remote sensing images of 45 land-use categories extracted from Google Earth.
It is organized into 45 class folders, each holding 700 remote sensing images, for a total of 31,500 images.
Each image is a three-channel, 256×256 JPG file.
# Unzip the dataset
!unzip -oq /home/aistudio/data/data131697/NWPU-RESISC45.zip
# Inspect the dataset directory structure
!tree NWPU-RESISC45 -L 1
# Import packages
import paddle
from PIL import Image
import os
import numpy as np
import random
# Print the Paddle version
print(paddle.__version__)
2.2.2
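The custom dataset defined next reads train_list.txt, val_list.txt and test_list.txt, where each line holds an image path (relative to NWPU-RESISC45/) and a class index separated by a space, plus a labels.txt file with the class names. The article does not show how these files are produced (the AI Studio copy of the dataset may already include them); the following is only a hedged sketch, assuming the standard one-folder-per-class layout and an arbitrary 6:2:2 split:
import os
import random

root = 'NWPU-RESISC45'
classes = sorted(d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d)))

# Class-name mapping (used later when visualizing predictions)
with open(os.path.join(root, 'labels.txt'), 'w') as f:
    f.write('\n'.join(classes) + '\n')

# Assumed 6:2:2 split per class; adjust as needed
splits = {'train_list.txt': [], 'val_list.txt': [], 'test_list.txt': []}
for label, cls in enumerate(classes):
    images = sorted(os.listdir(os.path.join(root, cls)))
    random.shuffle(images)
    n = len(images)
    groups = [images[:int(0.6 * n)], images[int(0.6 * n):int(0.8 * n)], images[int(0.8 * n):]]
    for name, group in zip(splits, groups):
        splits[name] += ['{} {}'.format(os.path.join(cls, img), label) for img in group]

# Write "relative/path.jpg class_index" lines for each split
for name, lines in splits.items():
    with open(os.path.join(root, name), 'w') as f:
        f.write('\n'.join(lines) + '\n')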
class RESISC45Dataset(paddle.io.Dataset):
    def __init__(self, mode='train', label_path='NWPU-RESISC45/train_list.txt'):
        """
        Initialization
        """
        assert mode in ['train', 'eval', 'test'], 'mode is one of train, eval, test.'
        self.mode = mode.lower()
        self.label_path = label_path
        self.data = []
        with open(label_path) as f:
            for line in f.readlines():
                info = line.strip().split(' ')
                if len(info) > 0:
                    image_root = label_path.split('/')[0]
                    info[0] = os.path.join(image_root, info[0])
                    self.data.append([info[0].strip(), info[1].strip()])

    def preprocess(self, image):
        """
        Data augmentation and preprocessing
        """
        # Augmentation in training mode
        if self.mode == 'train':
            # Resize
            image = image.resize((224, 224), Image.BICUBIC)
            # Random horizontal flip
            if random.randint(0, 1) == 1:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            # Random vertical flip
            if random.randint(0, 1) == 1:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            # Normalization
            image = np.asarray(image)
            image = image.astype('float32')
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
            max_value = [255, 255, 255]
            min_value = [0, 0, 0]
            mean = np.asarray(mean, dtype=np.float32)[np.newaxis, np.newaxis, :]
            std = np.asarray(std, dtype=np.float32)[np.newaxis, np.newaxis, :]
            range_value = np.asarray([1. / (max_value[i] - min_value[i]) for i in range(len(max_value))], dtype=np.float32)
            image = (image - np.asarray(min_value, dtype=np.float32)) * range_value
            image -= mean
            image /= std
            # Convert to a CHW tensor
            return paddle.to_tensor(image.transpose((2, 0, 1)))
        # Preprocessing for validation and test modes
        else:
            # Resize
            image = image.resize((224, 224), Image.BICUBIC)
            # Normalization
            image = np.asarray(image)
            image = image.astype('float32')
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
            max_value = [255, 255, 255]
            min_value = [0, 0, 0]
            mean = np.asarray(mean, dtype=np.float32)[np.newaxis, np.newaxis, :]
            std = np.asarray(std, dtype=np.float32)[np.newaxis, np.newaxis, :]
            range_value = np.asarray([1. / (max_value[i] - min_value[i]) for i in range(len(max_value))], dtype=np.float32)
            image = (image - np.asarray(min_value, dtype=np.float32)) * range_value
            image -= mean
            image /= std
            # Convert to a CHW tensor
            return paddle.to_tensor(image.transpose((2, 0, 1)))

    def __getitem__(self, index):
        """
        Get a single sample by index
        """
        image_file, label = self.data[index]
        image = Image.open(image_file)
        # Align image channels
        if image.mode != 'RGB':
            image = image.convert('RGB')
        # Apply preprocessing / augmentation
        image = self.preprocess(image)
        return image, np.array(label, dtype='int64')

    def __len__(self):
        """
        Get the total number of samples
        """
        return len(self.data)

train_dataset = RESISC45Dataset(mode='train', label_path='NWPU-RESISC45/train_list.txt')
val_dataset = RESISC45Dataset(mode='eval', label_path='NWPU-RESISC45/val_list.txt')
test_dataset = RESISC45Dataset(mode='test', label_path='NWPU-RESISC45/test_list.txt')
The model built here is ResNet50; the paper is Deep_Residual_Learning_for_Image_Recognition.
ResNet (Residual Neural Network) was proposed by Kaiming He and three colleagues at Microsoft Research. Using residual units, they successfully trained a 152-layer network that won ILSVRC 2015 with a top-5 error rate of 3.57%, while using fewer parameters than VGGNet. The residual structure greatly speeds up training and brings a clear gain in accuracy, and it generalizes well: residual connections can even be used directly in Inception-style networks.
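The core idea is the shortcut (residual) connection: each block learns a residual mapping F(x) and outputs F(x) + x, so gradients can flow through the identity path and very deep networks remain trainable. Below is a minimal, illustrative sketch of that idea (the class name SimpleResidualBlock is hypothetical; the actual bottleneck blocks used in this article are defined right after):
import paddle
import paddle.nn as nn

class SimpleResidualBlock(nn.Layer):  # hypothetical helper, for illustration only
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2D(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2D(channels, channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = self.relu(self.conv1(x))
        residual = self.conv2(residual)
        return self.relu(residual + x)  # identity shortcut: F(x) + x

# Shapes are preserved, so the shortcut can be a plain addition
out = SimpleResidualBlock(8)(paddle.randn([1, 8, 56, 56]))
print(out.shape)  # [1, 8, 56, 56]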
# Import packages
import paddle
import paddle.nn as nn
from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, Linear, ReLU, BatchNorm2D
import paddle.nn.functional as F

# Convolution + batch normalization block
class ConvBNLayer(paddle.nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 act=None):
        super(ConvBNLayer, self).__init__()
        # Convolution layer
        self._conv = Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            bias_attr=False)
        # BatchNorm layer
        self._batch_norm = BatchNorm2D(out_channels)
        # Activation
        self.act = act

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self.act == 'relu':
            y = F.relu(x=y)
        return y

# Bottleneck residual block
class Bottleneckblock(paddle.nn.Layer):
    def __init__(self, inplane, in_channel, out_channel, stride=1, start=False):
        super(Bottleneckblock, self).__init__()
        self.stride = stride
        self.start = start
        self.conv0 = ConvBNLayer(in_channel, inplane, 1, stride=stride, act='relu')
        self.conv1 = ConvBNLayer(inplane, inplane, 3, stride=1, act='relu')
        self.conv2 = ConvBNLayer(inplane, out_channel, 1, stride=1, act=None)
        self.conv3 = ConvBNLayer(in_channel, out_channel, 1, stride=stride, act=None)
        self.relu = nn.ReLU()

    def forward(self, inputs):
        y = inputs
        x = self.conv0(inputs)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.start:
            y = self.conv3(y)
        z = self.relu(x + y)
        return z

class Resnet50(paddle.nn.Layer):
    def __init__(self, num_classes=45):
        super().__init__()
        # Stem layers
        self.stem = nn.Sequential(
            nn.Conv2D(3, out_channels=64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2D(64),
            nn.ReLU(),
            nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
        # Residual stages
        self.layer1 = self.add_bottleneck_layer(3, 64, start=True)
        self.layer2 = self.add_bottleneck_layer(4, 128)
        self.layer3 = self.add_bottleneck_layer(6, 256)
        self.layer4 = self.add_bottleneck_layer(3, 512)
        # Head layers
        self.avgpool = nn.AdaptiveAvgPool2D(1)
        self.classifier = nn.Linear(2048, num_classes)

    def add_bottleneck_layer(self, num, inplane, start=False):
        layer = []
        if start:
            layer.append(Bottleneckblock(inplane, inplane, inplane * 4, start=True))
        else:
            layer.append(Bottleneckblock(inplane, inplane * 2, inplane * 4, stride=2, start=True))
        for i in range(num - 1):
            layer.append(Bottleneckblock(inplane, inplane * 4, inplane * 4))
        return nn.Sequential(*layer)

    def forward(self, inputs):
        x = self.stem(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.flatten(1)
        x = self.classifier(x)
        return x

resnet50 = Resnet50(num_classes=45)
W0310 10:31:31.892053 141 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.0, Runtime API Version: 10.1
W0310 10:31:31.896260 141 device_context.cc:465] device: 0, cuDNN Version: 7.6.
paddle.summary(resnet50, (1, 3, 224, 224))
-------------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===============================================================================
Conv2D-1 [[1, 3, 224, 224]] [1, 64, 112, 112] 9,472
BatchNorm2D-1 [[1, 64, 112, 112]] [1, 64, 112, 112] 256
ReLU-1 [[1, 64, 112, 112]] [1, 64, 112, 112] 0
MaxPool2D-1 [[1, 64, 112, 112]] [1, 64, 56, 56] 0
Conv2D-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 4,096
BatchNorm2D-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-1 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
Conv2D-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
BatchNorm2D-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
Conv2D-4 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
BatchNorm2D-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
ConvBNLayer-3 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
Conv2D-5 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
BatchNorm2D-5 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
ConvBNLayer-4 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
ReLU-2 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
Bottleneckblock-1 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
Conv2D-6 [[1, 256, 56, 56]] [1, 64, 56, 56] 16,384
BatchNorm2D-6 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-5 [[1, 256, 56, 56]] [1, 64, 56, 56] 0
Conv2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
BatchNorm2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-6 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
Conv2D-8 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
BatchNorm2D-8 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
ConvBNLayer-7 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
ReLU-3 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
Bottleneckblock-2 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
Conv2D-10 [[1, 256, 56, 56]] [1, 64, 56, 56] 16,384
BatchNorm2D-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-9 [[1, 256, 56, 56]] [1, 64, 56, 56] 0
Conv2D-11 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
BatchNorm2D-11 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
ConvBNLayer-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
Conv2D-12 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
BatchNorm2D-12 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
ConvBNLayer-11 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
ReLU-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
Bottleneckblock-3 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
Conv2D-14 [[1, 256, 56, 56]] [1, 128, 28, 28] 32,768
BatchNorm2D-14 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-13 [[1, 256, 56, 56]] [1, 128, 28, 28] 0
Conv2D-15 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
BatchNorm2D-15 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-14 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
Conv2D-16 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
BatchNorm2D-16 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
ConvBNLayer-15 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
Conv2D-17 [[1, 256, 56, 56]] [1, 512, 28, 28] 131,072
BatchNorm2D-17 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
ConvBNLayer-16 [[1, 256, 56, 56]] [1, 512, 28, 28] 0
ReLU-5 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Bottleneckblock-4 [[1, 256, 56, 56]] [1, 512, 28, 28] 0
Conv2D-18 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
BatchNorm2D-18 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-17 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
Conv2D-19 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
BatchNorm2D-19 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-18 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
Conv2D-20 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
BatchNorm2D-20 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
ConvBNLayer-19 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
ReLU-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Bottleneckblock-5 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Conv2D-22 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
BatchNorm2D-22 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-21 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
Conv2D-23 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
BatchNorm2D-23 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-22 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
Conv2D-24 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
BatchNorm2D-24 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
ConvBNLayer-23 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
ReLU-7 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Bottleneckblock-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Conv2D-26 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
BatchNorm2D-26 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-25 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
Conv2D-27 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
BatchNorm2D-27 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
ConvBNLayer-26 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
Conv2D-28 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
BatchNorm2D-28 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
ConvBNLayer-27 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
ReLU-8 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Bottleneckblock-7 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
Conv2D-30 [[1, 512, 28, 28]] [1, 256, 14, 14] 131,072
BatchNorm2D-30 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-29 [[1, 512, 28, 28]] [1, 256, 14, 14] 0
Conv2D-31 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-31 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-30 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-32 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-32 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-31 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-33 [[1, 512, 28, 28]] [1, 1024, 14, 14] 524,288
BatchNorm2D-33 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-32 [[1, 512, 28, 28]] [1, 1024, 14, 14] 0
ReLU-9 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-8 [[1, 512, 28, 28]] [1, 1024, 14, 14] 0
Conv2D-34 [[1, 1024, 14, 14]] [1, 256, 14, 14] 262,144
BatchNorm2D-34 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-33 [[1, 1024, 14, 14]] [1, 256, 14, 14] 0
Conv2D-35 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-35 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-34 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-36 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-36 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-35 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
ReLU-10 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-9 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-38 [[1, 1024, 14, 14]] [1, 256, 14, 14] 262,144
BatchNorm2D-38 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-37 [[1, 1024, 14, 14]] [1, 256, 14, 14] 0
Conv2D-39 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-39 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-38 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-40 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-40 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-39 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
ReLU-11 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-10 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-42 [[1, 1024, 14, 14]] [1, 256, 14, 14] 262,144
BatchNorm2D-42 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-41 [[1, 1024, 14, 14]] [1, 256, 14, 14] 0
Conv2D-43 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-43 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-42 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-44 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-44 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-43 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
ReLU-12 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-11 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-46 [[1, 1024, 14, 14]] [1, 256, 14, 14] 262,144
BatchNorm2D-46 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-45 [[1, 1024, 14, 14]] [1, 256, 14, 14] 0
Conv2D-47 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-47 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-46 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-48 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-48 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-47 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
ReLU-13 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-12 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-50 [[1, 1024, 14, 14]] [1, 256, 14, 14] 262,144
BatchNorm2D-50 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-49 [[1, 1024, 14, 14]] [1, 256, 14, 14] 0
Conv2D-51 [[1, 256, 14, 14]] [1, 256, 14, 14] 589,824
BatchNorm2D-51 [[1, 256, 14, 14]] [1, 256, 14, 14] 1,024
ConvBNLayer-50 [[1, 256, 14, 14]] [1, 256, 14, 14] 0
Conv2D-52 [[1, 256, 14, 14]] [1, 1024, 14, 14] 262,144
BatchNorm2D-52 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 4,096
ConvBNLayer-51 [[1, 256, 14, 14]] [1, 1024, 14, 14] 0
ReLU-14 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Bottleneckblock-13 [[1, 1024, 14, 14]] [1, 1024, 14, 14] 0
Conv2D-54 [[1, 1024, 14, 14]] [1, 512, 7, 7] 524,288
BatchNorm2D-54 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-53 [[1, 1024, 14, 14]] [1, 512, 7, 7] 0
Conv2D-55 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,296
BatchNorm2D-55 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-54 [[1, 512, 7, 7]] [1, 512, 7, 7] 0
Conv2D-56 [[1, 512, 7, 7]] [1, 2048, 7, 7] 1,048,576
BatchNorm2D-56 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 8,192
ConvBNLayer-55 [[1, 512, 7, 7]] [1, 2048, 7, 7] 0
Conv2D-57 [[1, 1024, 14, 14]] [1, 2048, 7, 7] 2,097,152
BatchNorm2D-57 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 8,192
ConvBNLayer-56 [[1, 1024, 14, 14]] [1, 2048, 7, 7] 0
ReLU-15 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 0
Bottleneckblock-14 [[1, 1024, 14, 14]] [1, 2048, 7, 7] 0
Conv2D-58 [[1, 2048, 7, 7]] [1, 512, 7, 7] 1,048,576
BatchNorm2D-58 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-57 [[1, 2048, 7, 7]] [1, 512, 7, 7] 0
Conv2D-59 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,296
BatchNorm2D-59 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-58 [[1, 512, 7, 7]] [1, 512, 7, 7] 0
Conv2D-60 [[1, 512, 7, 7]] [1, 2048, 7, 7] 1,048,576
BatchNorm2D-60 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 8,192
ConvBNLayer-59 [[1, 512, 7, 7]] [1, 2048, 7, 7] 0
ReLU-16 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 0
Bottleneckblock-15 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 0
Conv2D-62 [[1, 2048, 7, 7]] [1, 512, 7, 7] 1,048,576
BatchNorm2D-62 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-61 [[1, 2048, 7, 7]] [1, 512, 7, 7] 0
Conv2D-63 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,359,296
BatchNorm2D-63 [[1, 512, 7, 7]] [1, 512, 7, 7] 2,048
ConvBNLayer-62 [[1, 512, 7, 7]] [1, 512, 7, 7] 0
Conv2D-64 [[1, 512, 7, 7]] [1, 2048, 7, 7] 1,048,576
BatchNorm2D-64 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 8,192
ConvBNLayer-63 [[1, 512, 7, 7]] [1, 2048, 7, 7] 0
ReLU-17 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 0
Bottleneckblock-16 [[1, 2048, 7, 7]] [1, 2048, 7, 7] 0
AdaptiveAvgPool2D-1 [[1, 2048, 7, 7]] [1, 2048, 1, 1] 0
Linear-1 [[1, 2048]] [1, 45] 92,205
===============================================================================
Total params: 23,653,421
Trainable params: 23,547,181
Non-trainable params: 106,240
-------------------------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 328.09
Params size (MB): 90.23
Estimated Total Size (MB): 418.89
-------------------------------------------------------------------------------
{'total_params': 23653421, 'trainable_params': 23547181}
from paddle.optimizer import Momentum
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.regularizer import L2Decay
from paddle.nn import CrossEntropyLoss
from paddle.metric import Accuracy
import math

# Total number of training epochs
Epochs = 30
# Batch size for the data loaders
Batch_size = 64
# Training steps per epoch
Step_each_epoch = math.ceil(len(train_dataset.data) / Batch_size)
# Learning-rate schedule
Lr = CosineAnnealingDecay(learning_rate=0.06, T_max=Step_each_epoch * Epochs)
# Optimizer
Optimizer = Momentum(learning_rate=Lr,
                     momentum=0.9,
                     weight_decay=L2Decay(1e-4),
                     parameters=resnet50.parameters())
# Loss function
Loss_fn = CrossEntropyLoss()
# Data loaders
Train_loader = paddle.io.DataLoader(train_dataset, batch_size=Batch_size, shuffle=True)
Val_loader = paddle.io.DataLoader(val_dataset, batch_size=Batch_size)

def train(model, epochs, train_loader, val_loader, optimizer, loss_fn):
    '''
    Training function
    '''
    acc_history = [0]
    for epoch in range(epochs):
        model.train()  # training mode
        for batch_id, data in enumerate(train_loader()):  # iterate over batches
            x_data = data[0]  # training images
            y_data = data[1]  # training labels
            y_data = paddle.reshape(y_data, (-1, 1))
            predicts = model(x_data)  # forward pass
            loss = loss_fn(predicts, y_data)  # compute the loss
            loss.backward()  # backpropagation
            optimizer.step()  # update parameters
            optimizer.clear_grad()  # clear gradients
        print("[TRAIN] epoch: {}/{}, loss is: {}".format(epoch + 1, epochs, loss.numpy()))
        model.eval()  # evaluation mode
        loss_list = []
        acc_list = []
        for batch_id, data in enumerate(val_loader()):  # iterate over batches
            x_data = data[0]  # validation images
            y_data = data[1]  # validation labels
            y_data = paddle.reshape(y_data, (-1, 1))
            predicts = model(x_data)  # forward pass
            loss = loss_fn(predicts, y_data)  # compute the loss
            acc = paddle.metric.accuracy(predicts, y_data)  # compute the accuracy
            loss_list.append(np.mean(loss.numpy()))
            acc_list.append(np.mean(acc.numpy()))
        print("[EVAL] Finished, Epoch={}, loss={}, acc={}".format(epoch + 1, np.mean(loss_list), np.mean(acc_list)))
        # Save the weights whenever the validation accuracy improves
        if acc_history[-1] < np.mean(acc_list):
            paddle.save(resnet50.state_dict(), 'output/resnet50.pdparams')
        acc_history.append(np.mean(acc_list))

# Run training
train(resnet50, Epochs, Train_loader, Val_loader, Optimizer, Loss_fn)
The code below shows that the model reaches an accuracy of about 0.83.
def val(model, val_loader):
    '''
    Validation function
    '''
    model.eval()  # evaluation mode
    acc_list = []
    for batch_id, data in enumerate(val_loader()):
        x_data = data[0]  # validation images
        y_data = data[1]  # validation labels
        y_data = paddle.reshape(y_data, (-1, 1))
        predicts = model(x_data)  # forward pass
        acc = paddle.metric.accuracy(predicts, y_data)  # compute the accuracy
        acc_list.append(np.mean(acc.numpy()))
    print("Eval finished, acc={}".format(np.mean(acc_list)))

# Load the saved model weights
resnet50.set_state_dict(paddle.load('output/resnet50.pdparams'))
# Run validation
val(resnet50, Val_loader)
Eval finished, acc=0.8262536525726318
We store the labels predicted by the model in the list results.
def test(model, test_loader):
    model.eval()
    result_list = []
    for batch_id, data in enumerate(test_loader()):
        x_data = data[0]  # test images
        predicts = model(x_data)  # predictions
        result_list.append(np.argmax(predicts.numpy(), axis=1))  # collect predicted class indices
    print("predict finished")
    return result_list

# Build the test data loader
Test_loader = paddle.io.DataLoader(test_dataset, batch_size=64)
# Load the saved model weights
resnet50.set_state_dict(paddle.load('output/resnet50.pdparams'))
# Run prediction
results = test(resnet50, Test_loader)
predict finished
# Import packages
%matplotlib inline
import numpy as np
import cv2
import matplotlib.pyplot as plt

# Build the class-name mapping list from labels.txt
test_list = []
with open('NWPU-RESISC45/labels.txt', 'r') as labels:
    for line in labels:
        test_list.append(line.strip())

# Plot a few test images
fig, axs = plt.subplots(nrows=5, ncols=1, figsize=(20, 20))
for i in range(5):
    img = cv2.imread(test_dataset.data[i + 10][0], 1)  # read the image
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
    ax = axs[i]
    ax.get_yaxis().set_visible(False)
    ax.get_xaxis().set_visible(False)
    ax.imshow(img)  # show the image
    ax.set_title('Real: %s \n Predict: %s' % (test_list[i + 10], test_list[results[0][i + 10]]))  # show the original label and the prediction
<Figure size 1440x1440 with 5 Axes>