600字范文,内容丰富有趣,生活中的好帮手!
600字范文 > datawhalechina-GNN组队学习 作业:PyG不同模块在PyG数据集上的应用

datawhalechina-GNN组队学习 作业:PyG不同模块在PyG数据集上的应用

时间:2023-10-02 05:48:17

相关推荐

datawhalechina-GNN组队学习 作业:PyG不同模块在PyG数据集上的应用

MLP、GCN、GAT在数据集citeseer等上的节点分类任务

算是GNN的Hello World:下面直接给出完整代码,注释很详细。

# -*- coding: utf-8 -*-
"""Node classification on the CiteSeer citation graph.

Trains and evaluates three models on the Planetoid CiteSeer dataset --
an MLP baseline (graph structure ignored), a two-layer GCN, and a
two-layer GAT -- and visualizes the learned node embeddings with t-SNE.

Created on Fri Feb 18 19:10:05  @author: lz
"""
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sklearn.manifold import TSNE
from torch.nn import Linear, Module
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GATConv, GCNConv
from torch_geometric.transforms import NormalizeFeatures

# Load CiteSeer with row-normalized node features.
# `num_features` / `num_classes` are dataset-level attributes in PyG,
# so `dataset.num_features` (not `dataset[0].num_features`) is correct.
dataset = Planetoid(root='dataset', name='CiteSeer',
                    transform=NormalizeFeatures())
print()
print(f'Dataset:{dataset}')
print(f'Number of Graph:{len(dataset)}')
print(f'Number of features:{dataset.num_features}')
print(f'Number of classes:{dataset.num_classes}')

data = dataset[0]  # the single graph contained in this dataset
print()
print(data)
print()
print(f'Number of nodes:{data.num_nodes}')
print(f'Number of edges:{data.num_edges}')
print(f'Average node degree:{data.num_edges / data.num_nodes:.2f}')
print(f'Number of training nodes:{data.train_mask.sum()}')
print(f'Training node label rate:{data.train_mask.sum() / data.num_nodes:.2f}')
print(f'Contains isolated nodes:{data.has_isolated_nodes()}')
print(f'Contains self-loops:{data.has_self_loops()}')
print(f'Is undirected:{data.is_undirected()}')


def visualize(h, color):
    """Project node embeddings ``h`` to 2-D with t-SNE and scatter-plot them.

    BUG FIX: the original body read the *global* ``out`` instead of the
    ``h`` argument, which only worked by accident because callers happened
    to assign ``out`` immediately before calling.
    """
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()


class MLP(Module):
    """Two-layer perceptron baseline: classifies nodes from features alone."""

    def __init__(self, hidden_channels):
        super(MLP, self).__init__()
        torch.manual_seed(12345)  # reproducible weight initialization
        self.lin1 = Linear(dataset.num_features, hidden_channels)
        self.lin2 = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x):
        x = self.lin1(x)
        # F.relu instead of constructing a torch.nn.ReLU module per call.
        x = F.relu(x)
        x = F.dropout(x, p=0.5, training=self.training)
        return self.lin2(x)


class GCN(Module):
    """Two-layer graph convolutional network (GCNConv in place of Linear)."""

    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)  # reproducible weight initialization
        self.conv1 = GCNConv(dataset.num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=0.5, training=self.training)
        return self.conv2(x, edge_index)


class GAT(Module):
    """Two-layer graph attention network (GATConv in place of GCNConv)."""

    def __init__(self, hidden_channels):
        super(GAT, self).__init__()
        torch.manual_seed(12345)  # reproducible weight initialization
        self.conv1 = GATConv(dataset.num_features, hidden_channels)
        self.conv2 = GATConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=0.5, training=self.training)
        return self.conv2(x, edge_index)


criterion = torch.nn.CrossEntropyLoss()


def fit(model, optimizer, *inputs, epochs=200):
    """Train ``model`` on the train-mask nodes, printing the loss per epoch.

    ``inputs`` are the positional arguments forwarded to ``model``
    (``data.x`` for the MLP, ``data.x, data.edge_index`` for GCN/GAT).
    """
    model.train()
    for epoch in range(1, epochs + 1):
        optimizer.zero_grad()
        out = model(*inputs)  # one forward pass over all nodes
        # Loss is computed only on the labeled training nodes.
        loss = criterion(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()
        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')


def accuracy(model, *inputs):
    """Return classification accuracy of ``model`` on the test-mask nodes."""
    model.eval()
    with torch.no_grad():  # no gradients needed for evaluation
        pred = model(*inputs).argmax(dim=1)  # pick the highest-scoring class
    correct = pred[data.test_mask] == data.y[data.test_mask]
    return int(correct.sum()) / int(data.test_mask.sum())


# --------------------------- MLP baseline ---------------------------
model = MLP(hidden_channels=16)
print()
print('MLP神经网络的构造')
print(model)
print()
print('利用交叉熵损失和Adam优化器来训练这个简单的MLP神经网络')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
print('开始训练')
fit(model, optimizer, data.x)
print('看看测试集上的表现')
print(f'Test Accuracy:{accuracy(model, data.x):.4f}')

# ------------------------------ GCN ---------------------------------
print('将MLP中的torch.nn.Linear 替换为torch_geometric.nn.GCNConv,我们就可以得到一个GCN网络')
model = GCN(hidden_channels=16)
print(model)
print()
print('可视化未经训练的GCN生成的节点表征')
model.eval()
visualize(model(data.x, data.edge_index), color=data.y)
print()
print('训练GCN图神经网络')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
fit(model, optimizer, data.x, data.edge_index)
print('测试集上的准确性')
print(f'Test Accuracy: {accuracy(model, data.x, data.edge_index):.4f}')
print()
print('可视化训练后的GCN生成的节点表征')
model.eval()
visualize(model(data.x, data.edge_index), color=data.y)

# ------------------------------ GAT ---------------------------------
print()
# BUG FIX: the original re-printed the GCN heading here by copy-paste;
# the message now describes the actual GAT substitution.
print('将GCN中的torch_geometric.nn.GCNConv 替换为torch_geometric.nn.GATConv,我们就可以得到一个GAT网络')
model = GAT(hidden_channels=16)
print(model)
print()
print('训练GAT图神经网络')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
fit(model, optimizer, data.x, data.edge_index)
print('测试集上的准确性')
print(f'Test Accuracy: {accuracy(model, data.x, data.edge_index):.4f}')
print()
print('可视化训练后的GAT生成的节点表征')
model.eval()
visualize(model(data.x, data.edge_index), color=data.y)

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。