Shared parts of networks

This page documents the layers shared between different networks, along with the conventions for their data structures.

Convention

Sparse indices

[Figure: PiNet architecture]

Since atomistic data comes in irregular shapes, PiNN uses a sparse data structure for data batching and for the construction of pairwise and triplet-wise tensors. All these tensors are associated with indices named ind_*, with shapes ind_1: [n_atoms, 1], ind_2: [n_pairs, 2], etc. The meanings of the indices are:

  • ind_1: for each atom, the index of the structure it belongs to within the batch
  • ind_2: for each pair of atoms, the indices of the central atom i and its neighbor j

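For instance (a made-up batch for illustration), a batch containing a two-atom structure followed by a three-atom structure would carry:

import tensorflow as tf

# hypothetical batch: structure 0 has 2 atoms, structure 1 has 3 atoms
ind_1 = tf.constant([[0], [0], [1], [1], [1]])         # (n_atoms, 1): structure index of each atom
ind_2 = tf.constant([[0, 1], [1, 0], [3, 4], [4, 3]])  # (n_pairs, 2): (i, j) atom indices of each pair
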
Using sparse indices

To construct a pairwise variable from atom-centered ones, use the tf.gather_nd operator. For instance, the following operation broadcasts the atom-centered \(\mathbb{P}_{i\alpha}\) variable to each pair of atoms (\(\mathbb{I}_{ij\alpha}=\mathbf{1}_{ij}\mathbb{P}_{i\alpha}\)):

I = tf.gather_nd(P, ind_2[:, :1])

To accumulate pairwise predictions over neighbors (\(\mathbb{P}_{i\alpha} = \sum_{j} \mathbb{I}_{ij\alpha}\)), use the tf.scatter_nd operation:

P = tf.scatter_nd(ind_2[:, :1], I, shape=[n_atoms, n_alpha])

or tf.math.unsorted_segment_sum:

P = tf.math.unsorted_segment_sum(I, ind_2[:, 0], n_atoms)

Note that the number of atoms must be supplied, since it cannot be inferred from ind_2; it can, however, be inferred from a per-atom tensor, e.g.:

n_atoms = tf.shape(P)[0]

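Putting these together, here is a minimal runnable sketch (with made-up indices and features) that broadcasts an atom-centered tensor to pairs and accumulates it back:

import tensorflow as tf

ind_2 = tf.constant([[0, 1], [1, 0], [3, 4], [4, 3]])  # made-up pair indices
P = tf.random.normal([5, 4])                           # (n_atoms, n_alpha) atom-centered features

I = tf.gather_nd(P, ind_2[:, :1])                      # I_ij = P_i, shape (n_pairs, n_alpha)
n_atoms = tf.shape(P)[0]                               # inferred from the per-atom tensor
P_sum = tf.math.unsorted_segment_sum(I, ind_2[:, 0], n_atoms)  # sum over neighbors j
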
Neighbor list

CellListNL

Bases: Layer

Compute neighbour list with cell lists approach, see https://en.wikipedia.org/wiki/Cell_lists.

Source code in pinn/layers/nl.py
class CellListNL(tf.keras.layers.Layer):
    """Compute neighbour list with cell lists approach, see
    <https://en.wikipedia.org/wiki/Cell_lists>.

    """
    def __init__(self, rc=5.0):
        """
        Args:
            rc (float): cutoff radius
        """
        super(CellListNL, self).__init__()
        self.rc = rc

    def call(self, tensors):
        """
        The layer expects a dictionary of tensors from a sparse_batch
        with keys:

        - `ind_1`: [sparse indices](layers.md#sparse-indices) of atoms in batch, with shape `(n_atoms, 1)`
        - `coord`: atomic coordinates, with shape `(n_atoms, 3)`
        - `cell` (optional): cell vectors, with shape `(n_batch, 3, 3)`

        It outputs a dictionary with:

        - `ind_2`: [sparse indices](layers.md#sparse-indices) of the neighbor list, with shape `(n_pairs, 2)`
        - `diff`: displacement vectors, with shape `(n_pairs, 3)`
        - `dist`: pairwise distances, with shape `(n_pairs)`

        Args:
            tensors (dict of tensor): input tensors, with keys: `{"ind_1", "coord", "cell"}`

        Returns:
            output (dict of tensor): output tensors, with keys: `{"ind_2", "diff", "dist"}`
        """
        atom_sind = tensors['ind_1']
        atom_apos = tensors['coord']
        atom_gind = tf.cumsum(tf.ones_like(atom_sind), 0)
        atom_aind = atom_gind - 1
        to_collect = atom_aind
        if 'cell' in tensors:
            atom_apos = _wrap_coord(tensors)
            rep_apos, rep_sind, rep_aind = _pbc_repeat(
                atom_apos, tensors['cell'], tensors['ind_1'], self.rc)
            atom_sind = tf.concat([atom_sind, rep_sind], 0)
            atom_apos = tf.concat([atom_apos, rep_apos], 0)
            atom_aind = tf.concat([atom_aind, rep_aind], 0)
            atom_gind = tf.cumsum(tf.ones_like(atom_sind), 0)
        atom_apos = atom_apos - tf.reduce_min(atom_apos, axis=0)
        atom_cpos = tf.concat(
            [atom_sind, tf.cast(atom_apos//self.rc, tf.int32)], axis=1)
        cpos_shap = tf.concat([tf.reduce_max(atom_cpos, axis=0) + 1, [1]], axis=0)
        samp_ccnt = tf.squeeze(tf.scatter_nd(
            atom_cpos, tf.ones_like(atom_sind, tf.int32), cpos_shap), axis=-1)
        cell_cpos = tf.cast(tf.where(samp_ccnt), tf.int32)
        cell_cind = tf.cumsum(tf.ones(tf.shape(cell_cpos)[0], tf.int32))
        cell_cind = tf.expand_dims(cell_cind, 1)
        samp_cind = tf.squeeze(tf.scatter_nd(
            cell_cpos, cell_cind, cpos_shap), axis=-1)
        # Get the atom's relative index(rind) and position(rpos) in cell
        # And each cell's atom list (alst)
        atom_cind = tf.gather_nd(samp_cind, atom_cpos) - 1
        atom_cind_args = tf.argsort(atom_cind, axis=0)
        atom_cind_sort = tf.gather(atom_cind, atom_cind_args)

        atom_rind_sort = tf.cumsum(tf.ones_like(atom_cind, tf.int32))
        cell_rind_min = tf.math.segment_min(atom_rind_sort, atom_cind_sort)
        atom_rind_sort = atom_rind_sort - tf.gather(cell_rind_min, atom_cind_sort)
        atom_rpos_sort = tf.stack([atom_cind_sort, atom_rind_sort], axis=1)
        atom_rpos = tf.math.unsorted_segment_sum(atom_rpos_sort, atom_cind_args,
                                            tf.shape(atom_gind)[0])
        cell_alst_shap = [tf.shape(cell_cind)[0], tf.reduce_max(samp_ccnt), 1]
        cell_alst = tf.squeeze(tf.scatter_nd(
            atom_rpos, atom_gind, cell_alst_shap), axis=-1)
        # Get cell's linked cell list, for cells in to_collect only
        disp_mat = np.zeros([3, 3, 3, 4], np.int32)
        disp_mat[:, :, :, 1] = np.reshape([-1, 0, 1], (3, 1, 1))
        disp_mat[:, :, :, 2] = np.reshape([-1, 0, 1], (1, 3, 1))
        disp_mat[:, :, :, 3] = np.reshape([-1, 0, 1], (1, 1, 3))
        disp_mat = np.reshape(disp_mat, (1, 27, 4))
        cell_npos = tf.expand_dims(cell_cpos, 1) + disp_mat
        npos_mask = tf.reduce_all(
            (cell_npos >= 0) & (cell_npos < cpos_shap[:-1]), 2)
        cell_nind = tf.squeeze(tf.scatter_nd(
            tf.cast(tf.where(npos_mask), tf.int32),
            tf.expand_dims(tf.gather_nd(
                samp_cind, tf.boolean_mask(cell_npos, npos_mask)), 1),
            tf.concat([tf.shape(cell_npos)[:-1], [1]], 0)), -1)
        # Finally, a sparse list of atom pairs
        coll_nind = tf.gather(cell_nind, tf.gather_nd(atom_cind, to_collect))
        pair_ic = tf.cast(tf.where(coll_nind), tf.int32)
        pair_ic_i = pair_ic[:, 0]
        pair_ic_c = tf.gather_nd(coll_nind, pair_ic) - 1
        pair_ic_alst = tf.gather(cell_alst, pair_ic_c)

        pair_ij = tf.cast(tf.where(pair_ic_alst), tf.int32)
        pair_ij_i = tf.gather(pair_ic_i, pair_ij[:, 0])
        pair_ij_j = tf.gather_nd(pair_ic_alst, pair_ij) - 1

        diff = tf.gather(atom_apos, pair_ij_j) - tf.gather(atom_apos, pair_ij_i)
        dist = tf.norm(diff, axis=-1)
        ind_rc = tf.where((dist < self.rc) & (dist > 0))
        dist = tf.gather_nd(dist, ind_rc)
        diff = tf.gather_nd(diff, ind_rc)
        pair_i_aind = tf.gather_nd(tf.gather(atom_aind, pair_ij_i), ind_rc)
        pair_j_aind = tf.gather_nd(tf.gather(atom_aind, pair_ij_j), ind_rc)

        output = {
            'ind_2': tf.concat([pair_i_aind, pair_j_aind], 1),
            'dist': dist,
            'diff': diff
           }
        return output

__init__(rc=5.0)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| rc | float | cutoff radius | 5.0 |

call(tensors)

The layer expects a dictionary of tensors from a sparse_batch with keys:

  • ind_1: sparse indices of atoms in batch, with shape (n_atoms, 1)
  • coord: atomic coordinates, with shape (n_atoms, 3)
  • cell (optional): cell vectors, with shape (n_batch, 3, 3)

It outputs a dictionary with:

  • ind_2: sparse indices of the neighbor list, with shape (n_pairs, 2)
  • diff: displacement vectors, with shape (n_pairs, 3)
  • dist: pairwise distances, with shape (n_pairs)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| tensors | dict of tensor | input tensors, with keys: {"ind_1", "coord", "cell"} | required |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| output | dict of tensor | output tensors, with keys: {"ind_2", "diff", "dist"} |

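A minimal usage sketch (the import path is an assumption based on the source file pinn/layers/nl.py; the coordinates are made up):

import tensorflow as tf
from pinn.layers import CellListNL  # import path assumed

# one non-periodic structure with three atoms (no 'cell' key)
tensors = {
    'ind_1': tf.constant([[0], [0], [0]]),
    'coord': tf.constant([[0.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0],
                          [0.0, 6.0, 0.0]]),
}
out = CellListNL(rc=5.0)(tensors)
# out['ind_2'] contains both directed pairs between atoms 0 and 1;
# atom 2 lies beyond rc=5.0 from both and gets no neighbors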

Basis functions

CutoffFunc

Bases: Layer

Cutoff function layer

The following types of cutoff function are implemented (all functions are defined within \(r_{ij}<r_{c}\) and decay to zero at \(r_{c}\)):

  • \(f^1(r_{ij}) = 0.5 (\mathrm{cos}(\pi r_{ij}/r_{c})+1)\)
  • \(f^2(r_{ij}) = \mathrm{tanh}^3(1- r_{ij}/r_{c})/\mathrm{tanh}^3(1)\)
  • \(hip(r_{ij}) = \mathrm{cos}^2(\pi r_{ij}/2r_{c})\)
Source code in pinn/layers/basis.py
class CutoffFunc(tf.keras.layers.Layer):
    R"""Cutoff function layer

    The following types of cutoff function are implemented (all functions are
    defined within $r_{ij}<r_{c}$ and decay to zero at $r_{c}$):

    - $f^1(r_{ij}) = 0.5 (\mathrm{cos}(\pi r_{ij}/r_{c})+1)$
    - $f^2(r_{ij}) = \mathrm{tanh}^3(1- r_{ij}/r_{c})/\mathrm{tanh}^3(1)$
    - $hip(r_{ij}) = \mathrm{cos}^2(\pi r_{ij}/2r_{c})$

    """

    def __init__(self, rc=5.0, cutoff_type="f1"):
        """
        Args:
            rc (float): cutoff radius
            cutoff_type (string): name of the cutoff function
        """
        super(CutoffFunc, self).__init__()
        self.cutoff_type = cutoff_type
        self.rc = rc
        f1 = lambda x: 0.5 * (tf.cos(np.pi * x / rc) + 1)
        f2 = lambda x: (tf.tanh(1 - x / rc) / np.tanh(1)) ** 3
        hip = lambda x: tf.cos(np.pi * x / rc / 2) ** 2
        self.cutoff_fn = {"f1": f1, "f2": f2, "hip": hip}[cutoff_type]

    def call(self, dist):
        """
        Args:
            dist (tensor): distance tensor with arbitrary shape

        Returns:
            fc (tensor): cutoff function with the same shape as the input
        """
        return self.cutoff_fn(dist)

__init__(rc=5.0, cutoff_type='f1')

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| rc | float | cutoff radius | 5.0 |
| cutoff_type | string | name of the cutoff function | 'f1' |

call(dist)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| dist | tensor | distance tensor with arbitrary shape | required |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| fc | tensor | cutoff function with the same shape as the input |


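A quick sketch (assumed import path) comparing the three cutoff types on a few distances:

import tensorflow as tf
from pinn.layers import CutoffFunc  # import path assumed

dist = tf.constant([0.5, 2.5, 4.9])
for cutoff_type in ('f1', 'f2', 'hip'):
    fc = CutoffFunc(rc=5.0, cutoff_type=cutoff_type)(dist)
    print(cutoff_type, fc.numpy())  # each curve decays towards 0 as dist approaches rc
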
GaussianBasis

Bases: Layer

Gaussian Basis Layer

Builds the Gaussian basis function:

\[ e_{ijb} = e^{-\eta_b (r_{ij}-r_{b})^2} \]

Both the Gaussian centers \(r_{b}\) and widths \(\eta_{b}\) can be arrays that specify the parameters of each basis function. When \(\eta\) is given as a single float, the same value is assigned to every basis. When the centers are not given, n_basis and rc are used to generate a linearly spaced set of basis functions.

Source code in pinn/layers/basis.py
class GaussianBasis(tf.keras.layers.Layer):
    R"""Gaussian Basis Layer

    Builds the Gaussian basis function:

    $$
    e_{ijb} = e^{-\eta_b (r_{ij}-r_{b})^2}
    $$


    Both the Gaussian centers $r_{b}$ and widths $\eta_{b}$ can be arrays that
    specify the parameters of each basis function. When $\eta$ is given as a
    single float, the same value is assigned to every basis. When the centers
    are not given, `n_basis` and `rc` are used to generate a linearly spaced
    set of basis functions.

    """

    def __init__(self, center=None, gamma=None, rc=None, n_basis=None):
        """
        Args:
            center (float|array): Gaussian centers
            gamma (float|array): inverse Gaussian width
            rc (float): cutoff radius
            n_basis (int): number of basis function

        """
        super(GaussianBasis, self).__init__()
        if center is None:
            self.center = np.linspace(0, rc, n_basis)
        else:
            self.center = np.array(center)
        self.gamma = np.broadcast_to(gamma, self.center.shape)

    def call(self, dist, fc=None):
        """
        Args:
           dist (tensor): distance tensor with shape (n_pairs)
           fc (tensor, optional): when supplied, apply a cutoff function to the basis

        Returns:
            basis (tensor): basis functions with shape (n_pairs, n_basis)
        """
        basis = tf.stack(
            [
                tf.exp(-gamma * (dist - center) ** 2)
                for (center, gamma) in zip(self.center, self.gamma)
            ],
            axis=1,
        )
        if fc is not None:
            basis = tf.einsum("pb,p->pb", basis, fc)  # p-> pair; b-> basis
        return basis

__init__(center=None, gamma=None, rc=None, n_basis=None)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| center | float or array | Gaussian centers | None |
| gamma | float or array | inverse Gaussian width | None |
| rc | float | cutoff radius | None |
| n_basis | int | number of basis functions | None |

call(dist, fc=None)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| dist | tensor | distance tensor with shape (n_pairs) | required |
| fc | tensor, optional | when supplied, apply a cutoff function to the basis | None |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| basis | tensor | basis functions with shape (n_pairs, n_basis) |


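A short sketch (assumed import paths) combining the Gaussian basis with a cutoff function:

import tensorflow as tf
from pinn.layers import CutoffFunc, GaussianBasis  # import paths assumed

dist = tf.constant([1.0, 2.0, 3.0])                  # (n_pairs)
fc = CutoffFunc(rc=5.0, cutoff_type='f1')(dist)
basis = GaussianBasis(rc=5.0, n_basis=10, gamma=2.0)(dist, fc=fc)  # shape (3, 10)
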
PolynomialBasis

Bases: Layer

Polynomial Basis Layer

Builds the polynomial basis function:

\[ e_{ijb} = f_c(r_{ij})^{n_{b}} \]

where \(f_c\) is the supplied cutoff function and \(n_b\) is specified by n_basis. n_basis can be a list that explicitly specifies the polynomial orders, or an integer that specifies the orders as [1, 2, ..., n_basis].

Source code in pinn/layers/basis.py
class PolynomialBasis(tf.keras.layers.Layer):
    R"""Polynomial Basis Layer

    Builds the polynomial basis function:

    $$
    e_{ijb} = f_c(r_{ij})^{n_{b}}
    $$

    where $f_c$ is the supplied cutoff function and $n_b$ is specified by
    `n_basis`. `n_basis` can be a list that explicitly specifies the
    polynomial orders, or an integer that specifies the orders as
    `[1, 2, ..., n_basis]`.

    """

    def __init__(self, n_basis):
        """

        Args:
            n_basis (int|list): number of basis function
        """
        super(PolynomialBasis, self).__init__()
        if type(n_basis) != list:
            n_basis = [(i + 1) for i in range(n_basis)]
        self.n_basis = n_basis

    def call(self, dist, fc=None):
        """
        Args:
           dist (tensor): distance tensor with shape (n_pairs)
           fc (tensor): when supplied, apply a cutoff function to the basis

        Returns:
            basis (tensor): basis functions with shape (n_pairs, n_basis)
        """
        assert fc is not None, "Polynomial basis requires a cutoff function."
        basis = tf.stack([fc**i for i in self.n_basis], axis=1)
        return basis

__init__(n_basis)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| n_basis | int or list | number of basis functions, or an explicit list of polynomial orders | required |

call(dist, fc=None)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| dist | tensor | distance tensor with shape (n_pairs) | required |
| fc | tensor | when supplied, apply a cutoff function to the basis | None |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| basis | tensor | basis functions with shape (n_pairs, n_basis) |


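A short sketch (assumed import paths); note that, as the source above shows, the powers are taken of the cutoff function values, so fc is required:

import tensorflow as tf
from pinn.layers import CutoffFunc, PolynomialBasis  # import paths assumed

dist = tf.constant([1.0, 2.0, 3.0])
fc = CutoffFunc(rc=5.0, cutoff_type='f1')(dist)
basis = PolynomialBasis(n_basis=3)(dist, fc=fc)  # fc**1, fc**2, fc**3; shape (3, 3)
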
Misc

AtomicOnehot

Bases: Layer

One-hot embedding layer

Given the atomic number of each atom (\(Z_{i}\)) and a list of specified element types (\(Z^{\mathrm{0}}_{\alpha}\)), returns:

\[\mathbb{P}_{i\alpha} = \delta_{Z_{i},Z^{\mathrm{0}}_{\alpha}}\]
Source code in pinn/layers/misc.py
class AtomicOnehot(tf.keras.layers.Layer):
    R"""One-hot embedding layer

    Given the atomic number of each atom ($Z_{i}$) and a list of specified
    element types ($Z^{\mathrm{0}}_{\alpha}$), returns:

    $$\mathbb{P}_{i\alpha} = \delta_{Z_{i},Z^{\mathrm{0}}_{\alpha}}$$
    """
    def __init__(self, atom_types=[1, 6, 7, 8, 9]):
        """
        Args:
            atom_types (list of int): list of elements ($Z^{0}$)
        """
        super(AtomicOnehot, self).__init__()
        self.atom_types = atom_types

    def call(self, elems):
        """
        Args:
           elems (tensor): atomic numbers of atoms, with shape `(n_atoms)`

        Returns:
           prop (tensor): atomic property tensor, with shape `(n_atoms, n_elems)`
        """
        prop = tf.equal(tf.expand_dims(elems, 1),
                          tf.expand_dims(self.atom_types, 0))
        return prop

__init__(atom_types=[1, 6, 7, 8, 9])

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| atom_types | list of int | list of elements (\(Z^{0}\)) | [1, 6, 7, 8, 9] |

call(elems)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| elems | tensor | atomic numbers of atoms, with shape (n_atoms) | required |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| prop | tensor | atomic property tensor, with shape (n_atoms, n_elems) |


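A quick sketch (assumed import path) embedding a water molecule:

import tensorflow as tf
from pinn.layers import AtomicOnehot  # import path assumed

elems = tf.constant([8, 1, 1])  # O, H, H
prop = AtomicOnehot(atom_types=[1, 6, 7, 8, 9])(elems)  # boolean tensor, shape (3, 5)
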
ANNOutput

Bases: Layer

ANN Output layer

Output atomic or molecular (system) properties depending on out_pool

\[ \begin{cases} \mathbb{P}^{\mathrm{out}}_i &, \textrm{if out_pool is False}\\ \mathrm{pool}_i(\mathbb{P}^{\mathrm{out}}_i) &, \textrm{if out_pool} \end{cases} \]

where \(\mathrm{pool}\) is a reducing operation specified by out_pool; it can be one of 'sum', 'max', 'min', or 'avg'.

Source code in pinn/layers/misc.py
class ANNOutput(tf.keras.layers.Layer):
    R"""ANN Ouput layer

    Output atomic or molecular (system) properties depending on `out_pool`

    $$
    \begin{cases}
     \mathbb{P}^{\mathrm{out}}_i  &, \textrm{if out_pool is False}\\
     \mathrm{pool}_i(\mathbb{P}^{\mathrm{out}}_i)  &, \textrm{if out_pool}
    \end{cases}
    $$

    where $\mathrm{pool}$ is a reducing operation specified by `out_pool`;
    it can be one of 'sum', 'max', 'min', or 'avg'.

    """
    def __init__(self, out_pool):
        super(ANNOutput, self).__init__()
        self.out_pool = out_pool

    def call(self, tensors):
        """
        Args:
            tensors (list of tensor): ind_1 and output tensors

        Returns:
            output (tensor): atomic or per-structure predictions
        """
        ind_1, output = tensors

        if self.out_pool:
            out_pool = {'sum': tf.math.unsorted_segment_sum,
                        'max': tf.math.unsorted_segment_max,
                        'min': tf.math.unsorted_segment_min,
                        'avg': tf.math.unsorted_segment_mean,
            }[self.out_pool]
            output =  out_pool(output, ind_1[:,0],
                               tf.reduce_max(ind_1)+1)
        output = tf.squeeze(output, axis=1)

        return output

call(tensors)

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| tensors | list of tensor | ind_1 and output tensors | required |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| output | tensor | atomic or per-structure predictions |

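A minimal sketch (assumed import path) pooling per-atom predictions into per-structure ones:

import tensorflow as tf
from pinn.layers import ANNOutput  # import path assumed

ind_1 = tf.constant([[0], [0], [1]])             # two structures in the batch
atomic_out = tf.constant([[1.0], [2.0], [3.0]])  # per-atom predictions, shape (n_atoms, 1)
energy = ANNOutput('sum')([ind_1, atomic_out])   # per-structure sums: [3.0, 3.0]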