Source code for torch_geometric.datasets.dbp15k

from typing import Optional, Callable, List, Dict, Tuple

import os
import os.path as osp
import shutil

import torch
from torch import Tensor
from torch_geometric.data import Data, InMemoryDataset, extract_zip
from torch_geometric.io import read_txt_array
from torch_geometric.utils import sort_edge_index


class DBP15K(InMemoryDataset):
    r"""The DBP15K dataset from the `"Cross-lingual Entity Alignment via
    Joint Attribute-Preserving Embedding" <https://arxiv.org/abs/1708.05045>`_
    paper, where Chinese, Japanese and French versions of DBpedia were linked
    to its English version.
    Node features are given by pre-trained and aligned monolingual word
    embeddings from the `"Cross-lingual Knowledge Graph Alignment via Graph
    Matching Neural Network" <https://arxiv.org/abs/1905.11605>`_ paper.

    Args:
        root (string): Root directory where the dataset should be saved.
        pair (string): The pair of languages (:obj:`"en_zh"`, :obj:`"en_fr"`,
            :obj:`"en_ja"`, :obj:`"zh_en"`, :obj:`"fr_en"`, :obj:`"ja_en"`).
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
    """

    file_id = '1dYJtj1_J4nYJdrDY95ucGLCuZXDXI7PL'

    def __init__(self, root: str, pair: str,
                 transform: Optional[Callable] = None,
                 pre_transform: Optional[Callable] = None):
        assert pair in ['en_zh', 'en_fr', 'en_ja', 'zh_en', 'fr_en', 'ja_en']
        self.pair = pair
        super().__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self) -> List[str]:
        return ['en_zh', 'en_fr', 'en_ja', 'zh_en', 'fr_en', 'ja_en']

    @property
    def processed_file_names(self) -> str:
        return f'{self.pair}.pt'

    def download(self):
        from google_drive_downloader import GoogleDriveDownloader as gdd

        # Download the zipped archive from Google Drive and unpack it into
        # `self.raw_dir`.
        path = osp.join(self.root, 'raw.zip')
        gdd.download_file_from_google_drive(self.file_id, path)
        extract_zip(path, self.root)
        os.unlink(path)
        shutil.rmtree(self.raw_dir)
        os.rename(osp.join(self.root, 'DBP15K'), self.raw_dir)

    def process(self):
        # Read the pre-trained 300-dimensional word embeddings. A line with
        # more than 300 entries is "<word> <300 floats>"; a shorter line
        # holds the fallback embedding for unknown words ('**UNK**').
        embs = {}
        with open(osp.join(self.raw_dir, 'sub.glove.300d'), 'r') as f:
            for i, line in enumerate(f):
                info = line.strip().split(' ')
                if len(info) > 300:
                    embs[info[0]] = torch.tensor([float(x) for x in info[1:]])
                else:
                    embs['**UNK**'] = torch.tensor([float(x) for x in info])

        g1_path = osp.join(self.raw_dir, self.pair, 'triples_1')
        x1_path = osp.join(self.raw_dir, self.pair, 'id_features_1')
        g2_path = osp.join(self.raw_dir, self.pair, 'triples_2')
        x2_path = osp.join(self.raw_dir, self.pair, 'id_features_2')

        x1, edge_index1, rel1, assoc1 = self.process_graph(
            g1_path, x1_path, embs)
        x2, edge_index2, rel2, assoc2 = self.process_graph(
            g2_path, x2_path, embs)

        train_path = osp.join(self.raw_dir, self.pair, 'train.examples.20')
        train_y = self.process_y(train_path, assoc1, assoc2)
        test_path = osp.join(self.raw_dir, self.pair, 'test.examples.1000')
        test_y = self.process_y(test_path, assoc1, assoc2)

        data = Data(x1=x1, edge_index1=edge_index1, rel1=rel1, x2=x2,
                    edge_index2=edge_index2, rel2=rel2, train_y=train_y,
                    test_y=test_y)

        torch.save(self.collate([data]), self.processed_paths[0])
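    # Note: `process_graph` below relabels raw (possibly non-contiguous)
    # entity ids to consecutive node indices via the `assoc` tensor. A
    # minimal sketch of the idea, assuming raw ids {0, 3, 7}:
    #
    #     idx = torch.tensor([0, 3, 7])
    #     assoc = torch.full((8, ), -1, dtype=torch.long)
    #     assoc[idx] = torch.arange(3)   # -> [0, -1, -1, 1, -1, -1, -1, 2]
    #
    # `assoc[raw_id]` then yields the dense index used in `edge_index`, and
    # -1 marks raw ids that never occur in the feature file.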
    def process_graph(
        self,
        triple_path: str,
        feature_path: str,
        embeddings: Dict[str, Tensor],
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:

        # Read (subject, relation, object) triples.
        g1 = read_txt_array(triple_path, sep='\t', dtype=torch.long)
        subj, rel, obj = g1.t()

        # Look up a word embedding sequence for each entity description,
        # falling back to the '**UNK**' embedding for out-of-vocabulary words.
        x_dict = {}
        with open(feature_path, 'r') as f:
            for line in f:
                info = line.strip().split('\t')
                info = info if len(info) == 2 else info + ['**UNK**']
                seq = info[1].lower().split()
                hs = [embeddings.get(w, embeddings['**UNK**']) for w in seq]
                x_dict[int(info[0])] = torch.stack(hs, dim=0)

        # Map raw entity ids to consecutive node indices.
        idx = torch.tensor(list(x_dict.keys()))
        assoc = torch.full((idx.max().item() + 1, ), -1, dtype=torch.long)
        assoc[idx] = torch.arange(idx.size(0))

        subj, obj = assoc[subj], assoc[obj]
        edge_index = torch.stack([subj, obj], dim=0)
        edge_index, rel = sort_edge_index(edge_index, rel)

        # Pad the per-entity word embedding sequences to a common length.
        xs = [None for _ in range(idx.size(0))]
        for i in x_dict.keys():
            xs[assoc[i]] = x_dict[i]
        x = torch.nn.utils.rnn.pad_sequence(xs, batch_first=True)

        return x, edge_index, rel, assoc

    def process_y(self, path: str, assoc1: Tensor,
                  assoc2: Tensor) -> Tensor:
        # Keep only the alignment pairs whose mask bit is set, relabelled to
        # dense node indices on both sides.
        row, col, mask = read_txt_array(path, sep='\t', dtype=torch.long).t()
        mask = mask.to(torch.bool)
        return torch.stack([assoc1[row[mask]], assoc2[col[mask]]], dim=0)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.pair})'
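A minimal usage sketch (not part of the module above); the root path is a
placeholder, and the first instantiation triggers download() and process():

    from torch_geometric.datasets import DBP15K

    dataset = DBP15K(root='/tmp/DBP15K', pair='zh_en')  # hypothetical path
    data = dataset[0]

    # The single Data object holds both graphs plus the alignment links:
    # x1/x2 are padded word embedding sequences per entity,
    # edge_index1/edge_index2 and rel1/rel2 describe each knowledge graph,
    # and train_y/test_y are [2, num_pairs] tensors of aligned node indices.
    print(data.train_y.size())  # torch.Size([2, num_train_pairs])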