Source code for torch_geometric.datasets.zinc

import os
import os.path as osp
import shutil
import pickle

import torch
from tqdm import tqdm
from torch_geometric.data import (InMemoryDataset, Data, download_url,
                                  extract_zip)


class ZINC(InMemoryDataset):
    r"""The ZINC dataset from the `ZINC database
    <https://pubs.acs.org/doi/abs/10.1021/acs.jcim.5b00559>`_ and the
    `"Automatic Chemical Design Using a Data-Driven Continuous Representation
    of Molecules" <https://arxiv.org/abs/1610.02415>`_ paper, containing about
    250,000 molecular graphs with up to 38 heavy atoms.
    The task is to regress a synthetic computed property dubbed as the
    constrained solubility.

    Args:
        root (string): Root directory where the dataset should be saved.
        subset (boolean, optional): If set to :obj:`True`, will only load a
            subset of the dataset (12,000 molecular graphs), following the
            `"Benchmarking Graph Neural Networks"
            <https://arxiv.org/abs/2003.00982>`_ paper. (default: :obj:`False`)
        split (string, optional): If :obj:`"train"`, loads the training
            dataset.
            If :obj:`"val"`, loads the validation dataset.
            If :obj:`"test"`, loads the test dataset.
            (default: :obj:`"train"`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in
            the final dataset. (default: :obj:`None`)
    """

    url = 'https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1'
    split_url = ('https://raw.githubusercontent.com/graphdeeplearning/'
                 'benchmarking-gnns/master/data/molecules/{}.index')

    def __init__(self, root, subset=False, split='train', transform=None,
                 pre_transform=None, pre_filter=None):
        self.subset = subset
        assert split in ['train', 'val', 'test']
        super(ZINC, self).__init__(root, transform, pre_transform, pre_filter)
        path = osp.join(self.processed_dir, f'{split}.pt')
        self.data, self.slices = torch.load(path)

    @property
    def raw_file_names(self):
        return [
            'train.pickle', 'val.pickle', 'test.pickle', 'train.index',
            'val.index', 'test.index'
        ]

    @property
    def processed_dir(self):
        name = 'subset' if self.subset else 'full'
        return osp.join(self.root, name, 'processed')

    @property
    def processed_file_names(self):
        return ['train.pt', 'val.pt', 'test.pt']

    def download(self):
        shutil.rmtree(self.raw_dir)
        path = download_url(self.url, self.root)
        extract_zip(path, self.root)
        os.rename(osp.join(self.root, 'molecules'), self.raw_dir)
        os.unlink(path)

        for split in ['train', 'val', 'test']:
            download_url(self.split_url.format(split), self.raw_dir)

    def process(self):
        for split in ['train', 'val', 'test']:
            with open(osp.join(self.raw_dir, f'{split}.pickle'), 'rb') as f:
                mols = pickle.load(f)

            indices = range(len(mols))

            if self.subset:
                with open(osp.join(self.raw_dir, f'{split}.index'), 'r') as f:
                    indices = [int(x) for x in f.read()[:-1].split(',')]

            pbar = tqdm(total=len(indices))
            pbar.set_description(f'Processing {split} dataset')

            data_list = []
            for idx in indices:
                mol = mols[idx]

                x = mol['atom_type'].to(torch.long).view(-1, 1)
                y = mol['logP_SA_cycle_normalized'].to(torch.float)

                adj = mol['bond_type']
                edge_index = adj.nonzero(as_tuple=False).t().contiguous()
                edge_attr = adj[edge_index[0], edge_index[1]].to(torch.long)

                data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr,
                            y=y)

                if self.pre_filter is not None and not self.pre_filter(data):
                    continue

                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                data_list.append(data)
                pbar.update(1)

            pbar.close()

            torch.save(self.collate(data_list),
                       osp.join(self.processed_dir, f'{split}.pt'))
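
As a rough usage sketch of the class above: the snippet below loads the 12,000-graph benchmark subset and iterates over mini-batches. The root directory 'data/ZINC' and the batch size are illustrative choices, not part of the source; on first use the dataset downloads and processes its files into '<root>/subset/processed/' (or '<root>/full/processed/' when subset=False), as implemented in download() and process().

from torch_geometric.data import DataLoader
from torch_geometric.datasets import ZINC

# Hypothetical root path; any writable directory works.
train_dataset = ZINC('data/ZINC', subset=True, split='train')
val_dataset = ZINC('data/ZINC', subset=True, split='val')

loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
for batch in loader:
    # batch.x holds atom-type indices of shape [num_atoms, 1],
    # batch.edge_attr holds bond-type indices, and batch.y holds the
    # constrained-solubility regression targets.
    print(batch.x.shape, batch.edge_index.shape, batch.y.shape)
    break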