Commit d7bc324c by zlj

incremental mtgnn

parent abb7e9e8
......@@ -353,7 +353,7 @@ def main():
print('dim_node {} dim_edge {}\n'.format(gnn_dim_node,gnn_dim_edge))
avg_time = 0
if use_cuda:
model = GeneralModel(gnn_dim_node, gnn_dim_edge, sample_param, memory_param, gnn_param, train_param,graph.ids.shape[0],mailbox).cuda()
model = GeneralModel(gnn_dim_node, gnn_dim_edge, sample_param, memory_param, gnn_param, train_param,graph.ids.shape[0],mailbox,num_node=graph.num_nodes, num_edge=graph.num_edges).cuda()
device = torch.device('cuda')
else:
model = GeneralModel(gnn_dim_node, gnn_dim_edge, sample_param, memory_param, gnn_param, train_param,graph.ids.shape[0],mailbox)
......
$h_u(t) = \sum_{(v,\tau)\in N_u(t)} \phi(q_u)^T \phi(k_v)\, x_v$
In particular,
$h_u(t) = \sum_{(v,\tau)\in N_u(t)} \text{softmax}(q_u)^T\, \text{edgesoftmax}(k_v)\, x_v$
$w_u(t^-) = \sum_{(v,\tau)\in N_u(t^-)} \phi(k_v,t^-)\, x_v$
$\phi(k_v,t^-) = e^{k_v-\text{edgemax}(k_v)} \,/\, \text{edgesum}\!\left(e^{k_v-\text{edgemax}(k_v)}\right)$
$w_u(t) = \sum_{(v,\tau)\in N_u(t)} \phi(k_v,t)\, x_v$
When new neighbors arrive, the cached running max and sum are rescaled (online softmax) instead of being recomputed over all of $N_u(t^-)$:
$\text{newmax} = \max\!\left(\text{lastmax}(k_v),\ \max_{(v,\tau)\in N_u(t)\setminus N_u(t^-)} k_v\right)$
$\text{newsum} = \text{lastsum}\!\left(e^{k_v-\text{lastmax}(k_v)}\right)\cdot e^{\text{lastmax}(k_v)-\text{newmax}} + \sum_{(v,\tau)\in N_u(t)\setminus N_u(t^-)} e^{k_v-\text{newmax}}$
$h_u(t^-) = \sum_{(v,\tau)\in N_u(t^-)} \phi(q_u)^T \phi(k_v)\, x_v$
$h_u(t) = \phi(q_u)^T \left\{ \sum_{(v,\tau)\in N_u(t^-)} \phi(k_v,t^-)\cdot \text{lastsum}\!\left(e^{k_v-\text{lastmax}(k_v)}\right)\cdot \frac{e^{\text{lastmax}(k_v)-\text{newmax}}}{\text{newsum}}\, x_v \;+\; \sum_{(v,\tau)\in N_u(t)\setminus N_u(t^-)} \frac{e^{k_v-\text{newmax}}}{\text{newsum}}\, x_v \right\}$
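A minimal NumPy sketch of this rescaling, assuming per-destination caches `last_max`, `last_sum`, and an unnormalized aggregate `last_agg` are kept for each node $u$ (all names here are illustrative, not the identifiers used in this commit):

```python
import numpy as np

def incremental_edge_softmax(last_max, last_sum, last_agg, new_k, new_x):
    """Online-softmax update of an attention aggregate when new neighbors arrive.

    last_max : running max of the old keys k_v over N_u(t^-)
    last_sum : running sum of exp(k_v - last_max) over N_u(t^-)
    last_agg : running sum of exp(k_v - last_max) * x_v over N_u(t^-), shape (d,)
    new_k    : keys of the newly arrived neighbors, shape (m,)
    new_x    : features of the newly arrived neighbors, shape (m, d)
    """
    new_max = max(last_max, new_k.max())                  # newmax
    rescale = np.exp(last_max - new_max)                  # shift old statistics to the new max
    new_exp = np.exp(new_k - new_max)                     # contributions of the new neighbors
    new_sum = last_sum * rescale + new_exp.sum()          # newsum
    new_agg = last_agg * rescale + (new_exp[:, None] * new_x).sum(axis=0)
    w_u = new_agg / new_sum                               # w_u(t): softmax-normalized aggregate
    return new_max, new_sum, new_agg, w_u
```

Dividing the rescaled aggregate by `new_sum` reproduces exactly the softmax over all of $N_u(t)$, which is what the $h_u(t)$ formula above exploits.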
$x_v = s_v + \phi(wt+b)$
$[\cos(w(t+\Delta t)+b),\ \sin(w(t+\Delta t)+b)] = [\cos(wt+b)\cos(w\Delta t) - \sin(wt+b)\sin(w\Delta t),\ \sin(wt+b)\cos(w\Delta t) + \cos(wt+b)\sin(w\Delta t)]$
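This identity says the $[\cos,\sin]$ time encoding at $t+\Delta t$ is the encoding at $t$ rotated by the angle $w\Delta t$, so a cached encoding can be updated without re-encoding from scratch. A quick NumPy check (illustrative sketch, not code from this commit):

```python
import numpy as np

def time_enc(w, b, t):
    """2-D time encoding [cos(wt + b), sin(wt + b)]."""
    return np.array([np.cos(w * t + b), np.sin(w * t + b)])

def rotate(enc, theta):
    """Rotate a [cos, sin] pair by angle theta (the angle-addition identity)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s],
                     [s,  c]]) @ enc

w, b, t, dt = 0.7, 0.3, 5.0, 1.2
direct = time_enc(w, b, t + dt)                    # encode at t + Δt from scratch
incremental = rotate(time_enc(w, b, t), w * dt)    # update the cached encoding at t
assert np.allclose(direct, incremental)
```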
......@@ -69,7 +69,7 @@ class NegFixLayer(torch.autograd.Function):
class GeneralModel(torch.nn.Module):
def __init__(self, dim_node, dim_edge, sample_param, memory_param, gnn_param, train_param, num_nodes = None,mailbox = None,combined=False,train_ratio = None):
def __init__(self, dim_node, dim_edge, sample_param, memory_param, gnn_param, train_param, num_nodes = None,mailbox = None,combined=False,train_ratio = None,num_node = 0, num_edge = 0):
super(GeneralModel, self).__init__()
self.dim_node = dim_node
self.dim_node_input = dim_node
......@@ -110,10 +110,10 @@ class GeneralModel(torch.nn.Module):
self.layers = torch.nn.ModuleDict()
if gnn_param['arch'] == 'transformer_attention':
for h in range(sample_param['history']):
self.layers['l0h' + str(h)] = TransfomerAttentionLayer(self.dim_node_input, dim_edge, gnn_param['dim_time'], gnn_param['att_head'], train_param['dropout'], train_param['att_dropout'], gnn_param['dim_out'], combined=combined)
self.layers['l0h' + str(h)] = TransfomerAttentionLayer(self.dim_node_input, dim_edge, gnn_param['dim_time'], gnn_param['att_head'], train_param['dropout'], train_param['att_dropout'], gnn_param['dim_out'], combined=combined, num_node=num_node, num_edge=num_edge)
for l in range(1, gnn_param['layer']):
for h in range(sample_param['history']):
self.layers['l' + str(l) + 'h' + str(h)] = TransfomerAttentionLayer(gnn_param['dim_out'], dim_edge, gnn_param['dim_time'], gnn_param['att_head'], train_param['dropout'], train_param['att_dropout'], gnn_param['dim_out'], combined=False)
self.layers['l' + str(l) + 'h' + str(h)] = TransfomerAttentionLayer(gnn_param['dim_out'], dim_edge, gnn_param['dim_time'], gnn_param['att_head'], train_param['dropout'], train_param['att_dropout'], gnn_param['dim_out'], combined=False, num_node=num_node, num_edge=num_edge)
elif gnn_param['arch'] == 'identity':
self.gnn_param['layer'] = 1
for h in range(sample_param['history']):
......