1 change: 1 addition & 0 deletions Forward_Pass/.gitignore
@@ -0,0 +1 @@
/build/
89 changes: 89 additions & 0 deletions Forward_Pass/Aggregator.h
@@ -0,0 +1,89 @@
// Aggregator.h
#pragma once

#include "Graph.h"
#include <vector>
#include <functional>
#include <numeric>
#include <algorithm>
#include <cmath>

namespace OutputConverter {

//──────────────────────────────────────────────────────────────────────────
// Type aliases matching OutputConverter signatures
//──────────────────────────────────────────────────────────────────────────

// A vector of per‐node scalar scores
using NodeScores = std::vector<float>;

// A vector of per‐edge scalar scores
using EdgeScores = std::vector<float>;

// A vector of boolean flags
using BinaryVector = std::vector<bool>;

// Combines two node‐scores into one edge‐score
using EdgeCombiner = std::function<float(float, float)>;

// Aggregates all node‐scores into one graph‐score
using GraphAggregator = std::function<float(const NodeScores&)>;


//──────────────────────────────────────────────────────────────────────────
// Default implementations (used when the user omits their own)
//──────────────────────────────────────────────────────────────────────────
namespace DefaultAgg {

// sum of endpoint scores
inline float sumCombiner(float a, float b) {
return a + b;
}

// product of endpoint scores
inline float prodCombiner(float a, float b) {
return a * b;
}

// maximum of endpoint scores
inline float maxCombiner(float a, float b) {
return std::max(a, b);
}

// minimum of endpoint scores
inline float minCombiner(float a, float b) {
return std::min(a, b);
}

// absolute difference of endpoint scores
inline float absDiffCombiner(float a, float b) {
return std::fabs(a - b);
}

// sum of all node scores
inline float sumGraph(const NodeScores& scores) {
return std::accumulate(scores.begin(), scores.end(), 0.0f);
}

// mean of all node scores
inline float meanGraph(const NodeScores& scores) {
if (scores.empty()) return 0.0f;
return std::accumulate(scores.begin(), scores.end(), 0.0f)
/ static_cast<float>(scores.size());
}

// maximum of all node scores
inline float maxGraph(const NodeScores& scores) {
if (scores.empty()) return 0.0f;
return *std::max_element(scores.begin(), scores.end());
}

// minimum of all node scores
inline float minGraph(const NodeScores& scores) {
if (scores.empty()) return 0.0f;
return *std::min_element(scores.begin(), scores.end());
}

} // namespace DefaultAgg

} // namespace OutputConverter
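A minimal usage sketch of the aliases and defaults in Aggregator.h, with a hypothetical per-node score vector (the values and the edge (0, 1) are purely illustrative):

// Sketch: applying the default combiners and aggregators to hypothetical scores.
#include "Aggregator.h"
#include <iostream>

int main() {
    using namespace OutputConverter;

    NodeScores scores = {0.2f, 0.9f, 0.5f};             // hypothetical per-node scores

    // Score the edge (0, 1) with two of the default combiners
    EdgeCombiner sum  = DefaultAgg::sumCombiner;
    EdgeCombiner diff = DefaultAgg::absDiffCombiner;
    std::cout << sum(scores[0], scores[1]) << "\n";     // 1.1
    std::cout << diff(scores[0], scores[1]) << "\n";    // 0.7

    // Whole-graph readouts
    GraphAggregator mean = DefaultAgg::meanGraph;
    std::cout << mean(scores) << "\n";                  // ~0.533
    std::cout << DefaultAgg::maxGraph(scores) << "\n";  // 0.9
    return 0;
}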
16 changes: 16 additions & 0 deletions Forward_Pass/BaseLayer.h
@@ -0,0 +1,16 @@
// BaseLayer.h
#pragma once
#include <vector>
using namespace std;

// BaseLayer provides a standard interface for all GNN layers (GAT, GCN, GraphSAGE, etc.)
class BaseLayer {
public:
virtual ~BaseLayer() {}

// Forward pass interface to be overridden by all derived GNN layers
virtual vector<vector<float>> forward(
const vector<vector<float>>& node_features,
const vector<vector<int>>& adjacency_list
) = 0;
};
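Because every layer exposes the same forward interface, a model can hold heterogeneous layers and run them polymorphically. A minimal sketch, assuming GCNLayer (declared in GCNL.h, not shown here) derives from BaseLayer like GATLayer does, and with made-up layer sizes:

// Sketch: stacking layers behind the BaseLayer interface (hypothetical sizes).
#include "BaseLayer.h"
#include "GCNL.h"
#include "GATL.h"
#include <memory>

std::vector<std::vector<float>> run_stack(
    const std::vector<std::vector<float>>& features,
    const std::vector<std::vector<int>>& adjacency
) {
    std::vector<std::unique_ptr<BaseLayer>> layers;
    layers.emplace_back(std::make_unique<GCNLayer>(4, 8));  // 4 -> 8 features per node
    layers.emplace_back(std::make_unique<GATLayer>(8, 2));  // 8 -> 2 features per node

    auto h = features;
    for (auto& layer : layers)
        h = layer->forward(h, adjacency);  // each layer consumes the previous output
    return h;
}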
34 changes: 34 additions & 0 deletions Forward_Pass/CMakeLists.txt
@@ -0,0 +1,34 @@
cmake_minimum_required(VERSION 3.10)
project(GraphGNN LANGUAGES CXX)

# Choose C++ standard
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

# All source files
set(SOURCE_FILES
GATL.cpp
GCNL.cpp
GCNTest.cpp
Graph.cpp
GraphReader.cpp
GraphSage.cpp
output.cpp
output_main.cpp
)

# Build the executable
add_executable(graph_app ${SOURCE_FILES})

# Make headers in this folder visible
target_include_directories(graph_app PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})

# After building graph_app, copy graph_data.txt into the build folder
add_custom_command(TARGET graph_app
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different
"${CMAKE_SOURCE_DIR}/graph_data.txt"
"${CMAKE_BINARY_DIR}/graph_data.txt"
COMMENT "Copying graph_data.txt to build directory"
)
109 changes: 109 additions & 0 deletions Forward_Pass/GATL.cpp
@@ -0,0 +1,109 @@
// GATL.cpp

#include "GATL.h"
#include <random>
#include <cmath>
#include <algorithm>

// Constructor with Xavier initialization
GATLayer::GATLayer(int input_dim, int output_dim) : input_dim(input_dim), output_dim(output_dim) {
W.resize(input_dim, vector<float>(output_dim));
a.resize(2 * output_dim);

float limit = sqrt(6.0f / (input_dim + output_dim));
random_device rd;
mt19937 gen(rd());
uniform_real_distribution<float> dis(-limit, limit); // Xavier uniform: U(-limit, limit)

for (int i = 0; i < input_dim; i++)
for (int j = 0; j < output_dim; j++)
W[i][j] = dis(gen);

for (int i = 0; i < 2 * output_dim; i++)
a[i] = dis(gen);
}

// ReLU activation
float GATLayer::relu(float x) {
return max(0.0f, x);
}

// Leaky ReLU activation
float GATLayer::leaky_relu(float x, float alpha) {
return (x > 0) ? x : alpha * x;
}

// Linear transformation for a single node
vector<float> GATLayer::linear_transform(const vector<float>& features) {
vector<float> z(output_dim, 0.0f);
for (int o = 0; o < output_dim; o++)
for (int d = 0; d < input_dim; d++)
z[o] += features[d] * W[d][o];
return z;
}

// Compute attention score e_ij using attention vector 'a'
float GATLayer::compute_attention_score(const vector<float>& z_i, const vector<float>& z_j) {
float score = 0.0f;
for (int o = 0; o < output_dim; o++) {
score += a[o] * z_i[o] + a[o + output_dim] * z_j[o];
}
return leaky_relu(score);
}

// Stable softmax computation
vector<float> GATLayer::softmax(const vector<float>& scores) {
float max_val = *max_element(scores.begin(), scores.end());
float sum_exp = 0.0f;
vector<float> exp_scores(scores.size());
for (size_t i = 0; i < scores.size(); i++) {
exp_scores[i] = exp(scores[i] - max_val);
sum_exp += exp_scores[i];
}
if (sum_exp != 0.0f) {
for (float &val : exp_scores) {
val /= sum_exp;
}
}
return exp_scores;
}

// Forward pass
vector<vector<float>> GATLayer::forward(
const vector<vector<float>>& node_features,
const vector<vector<int>>& adjacency_list
) {
int n_nodes = node_features.size();
vector<vector<float>> updated_features(n_nodes, vector<float>(output_dim, 0.0f));
vector<vector<float>> z(n_nodes);

// Step 1: Linear transform each node's features
for (int i = 0; i < n_nodes; i++) {
z[i] = linear_transform(node_features[i]);
}

// Step 2: Compute attention and aggregate
for (int i = 0; i < n_nodes; i++) {
vector<int> neighbors = adjacency_list[i];
neighbors.push_back(i); // self-loop

vector<float> e_ij(neighbors.size());
for (size_t idx = 0; idx < neighbors.size(); idx++) {
int j = neighbors[idx];
e_ij[idx] = compute_attention_score(z[i], z[j]);
}

vector<float> alpha_ij = softmax(e_ij);

for (int o = 0; o < output_dim; o++) {
float agg = 0.0f;
for (size_t idx = 0; idx < neighbors.size(); idx++) {
int j = neighbors[idx];
agg += alpha_ij[idx] * z[j][o];
}
updated_features[i][o] = relu(agg); // ReLU activation
}
}

return updated_features;
}
52 changes: 52 additions & 0 deletions Forward_Pass/GATL.h
@@ -0,0 +1,52 @@
// GATL.h
#pragma once
#include "BaseLayer.h"
#include <vector>
using namespace std;

// Implements a single Graph Attention Network (GAT) layer.
// During aggregation it weights each neighbour by a learned importance,
// computed with a self-attention mechanism over the graph.
class GATLayer : public BaseLayer {
public:
int input_dim, output_dim; // Input and output dimension
vector<vector<float>> W; // Weight matrix for linear transformation
vector<float> a; // Attention vector used for computing attention coefficients

// Constructor initializes the GAT layer with input and output dimensions
// and performs Xavier initialization for weights and attention parameters.
GATLayer(int input_dim, int output_dim);

// Forward pass computes the updated node features based on attention mechanism.
// It projects input features, computes attention scores with neighbours, applies softmax,
// aggregates neighbour features weighted by attention, and applies ReLU.
vector<vector<float>> forward(
const vector<vector<float>>& node_features, // node-feature matrix: [number of nodes][input_dim]
const vector<vector<int>>& adjacency_list // represents the graph
) override;

private:
// Applies ReLU activation to a single float value
float relu(float x);

// Applies LeakyReLU activation with a configurable alpha slope for negative inputs.
float leaky_relu(float x, float alpha = 0.2f);

// Applies the weight matrix to a single node's feature vector, producing a transformed vector of size output_dim.
vector<float> linear_transform(
const vector<float>& features // Input feature vector of a node
);

// Computes the unnormalised attention score e_ij for nodes i and j
// by applying the attention vector to the concatenation of their projected features.
float compute_attention_score(
const vector<float>& z_i, // feature vector of node i
const vector<float>& z_j // feature vector of node j (neighbour)
);

// Applies the softmax function to a vector of unnormalised attention scores
// and returns a vector of normalised attention coefficients.
vector<float> softmax(
const vector<float>& scores // Unnormalised attention scores for a node and its neighbours
);
};
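To make the forward pass concrete, here is a small sketch that runs one GAT layer over a three-node path graph 0-1-2 (the feature values are arbitrary, and the outputs vary between runs because of the random Xavier initialization):

// Sketch: one GAT forward pass on a 3-node path graph (arbitrary features).
#include "GATL.h"
#include <iostream>

int main() {
    std::vector<std::vector<float>> features = {
        {1.0f, 0.0f},   // node 0
        {0.5f, 0.5f},   // node 1
        {0.0f, 1.0f}    // node 2
    };
    std::vector<std::vector<int>> adjacency = {
        {1},            // node 0 is connected to node 1
        {0, 2},         // node 1 is connected to nodes 0 and 2
        {1}             // node 2 is connected to node 1
    };

    GATLayer gat(2, 4);                          // project 2 input features to 4 outputs
    auto h = gat.forward(features, adjacency);   // attention-weighted aggregation + ReLU

    for (const auto& row : h) {
        for (float v : row) std::cout << v << ' ';
        std::cout << '\n';
    }
    return 0;
}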
79 changes: 79 additions & 0 deletions Forward_Pass/GCNL.cpp
@@ -0,0 +1,79 @@
// GCNL.cpp

#include "GCNL.h"
#include <random>
#include <algorithm>
#include <cmath>

// Xavier Initialization
GCNLayer::GCNLayer(int input_dim, int output_dim) : input_dim(input_dim), output_dim(output_dim) {
weight_matrix.resize(input_dim, vector<float>(output_dim));
float limit = sqrt(6.0f / (input_dim + output_dim));
random_device rd;
mt19937 gen(rd());
uniform_real_distribution<float> dis(-limit, limit); // Xavier uniform: U(-limit, limit)
for (int i = 0; i < input_dim; i++)
for (int j = 0; j < output_dim; j++)
weight_matrix[i][j] = dis(gen);
}

// ReLU activation
float GCNLayer::relu(float x) {
return max(0.0f, x);
}

// Aggregates normalized neighbor features for a node
vector<float> GCNLayer::aggregate_neighbors(
int node,
const vector<vector<float>>& node_features,
const vector<vector<int>>& adjacency_list,
const vector<int>& degrees
) {
vector<float> aggregated(input_dim, 0.0f);
for (int neighbor : adjacency_list[node]) {
float normalization = sqrt(degrees[node] * degrees[neighbor]);
if (normalization != 0.0f) {
for (int d = 0; d < input_dim; d++) {
aggregated[d] += node_features[neighbor][d] / normalization;
}
}
}
return aggregated;
}

// Applies weight matrix for a given output dimension
float GCNLayer::linear_transform(
const vector<float>& aggregated_features,
int output_index
) {
float val = 0.0f;
for (int d = 0; d < input_dim; d++) {
val += aggregated_features[d] * weight_matrix[d][output_index];
}
return val;
}

// Forward pass for GCN Layer
vector<vector<float>> GCNLayer::forward(
const vector<vector<float>>& node_features,
const vector<vector<int>>& adjacency_list
) {
int n_nodes = node_features.size();
vector<vector<float>> updated_features(n_nodes, vector<float>(output_dim, 0.0f));

// Precompute degrees
vector<int> degrees(n_nodes);
for (int i = 0; i < n_nodes; i++) {
degrees[i] = adjacency_list[i].size();
}

for (int i = 0; i < n_nodes; i++) {
vector<float> aggregated = aggregate_neighbors(i, node_features, adjacency_list, degrees);
for (int o = 0; o < output_dim; o++) {
float val = linear_transform(aggregated, o);
updated_features[i][o] = relu(val);
}
}

return updated_features;
}
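The aggregation step above implements the symmetric GCN normalization: each neighbour's features are divided by sqrt(deg(i) * deg(j)) before summing. A minimal sketch on a three-node star graph, assuming GCNL.h declares GCNLayer as defined above (feature values are arbitrary; outputs depend on the random initialization):

// Sketch: GCN forward on a 3-node star graph, node 0 at the centre (arbitrary features).
#include "GCNL.h"
#include <iostream>

int main() {
    std::vector<std::vector<float>> features = {
        {1.0f, 2.0f},   // node 0 (centre, degree 2)
        {3.0f, 0.0f},   // node 1 (leaf, degree 1)
        {0.0f, 4.0f}    // node 2 (leaf, degree 1)
    };
    std::vector<std::vector<int>> adjacency = { {1, 2}, {0}, {0} };

    // For node 0, aggregate_neighbors computes
    //   h_1 / sqrt(2*1) + h_2 / sqrt(2*1) = {3/sqrt(2), 4/sqrt(2)}
    // before the linear transform and ReLU are applied.
    GCNLayer gcn(2, 3);                          // 2 input features -> 3 output features
    auto h = gcn.forward(features, adjacency);

    for (const auto& row : h) {
        for (float v : row) std::cout << v << ' ';
        std::cout << '\n';
    }
    return 0;
}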