# These are the standard imports for CS 111.
# This list may change as the quarter goes on.
import sys
sys.path.append("/Users/youngmin/Documents/CS/Introduction to Computational Science - F21/CS111-2021-fall/Python")
import os
import time
import math
import numpy as np
import numpy.linalg as npla
import scipy
from scipy import linalg as spla
import scipy.sparse
import scipy.sparse.linalg
from scipy import integrate
import networkx as nx
import cs111
# If this import for matplotlib doesn't work, try saying
# conda install -c conda-forge ipympl
# at a shell prompt on your computer
import matplotlib
#%matplotlib ipympl
%matplotlib inline
import matplotlib.pyplot as plt
np.set_printoptions(precision = 4)
**Acknowledgement**: Constrained Extrema of Quadratic Forms. (2020, November 17). Trinity University. https://math.libretexts.org/@go/page/17318.
Suppose $A$ is an $m$-by-$m$ *real symmetric matrix*. For any vector $\mathbf{x} \in \mathbb{R}^m$, we can define the quadratic form by
$$ Q(\mathbf{x}) = \mathbf{x}^T A\mathbf{x} = \sum_{i=0}^{m-1} \sum_{j=0}^{m-1} a_{ij}x_i x_j. $$To find $Q$'s extrema (maximum and minimum) subject to the restriction $g(\mathbf{x}) = ||\mathbf{x}||_2^2 = \sum_{i=0}^{m-1}x_i^2 = 1$, we construct the Lagrangian form
$$ L(\mathbf{x}) = Q(\mathbf{x}) - \lambda g(\mathbf{x}) = Q(\mathbf{x}) - \lambda\sum_{i=0}^{m-1}x_i^2 $$for some scalar multiplier $\lambda$. Then, taking the gradient of $L(\mathbf{x})$, we obtain a system of $m$ linear equations
$$ L_{x_i} = 2\left(\sum_{j=0}^{m-1}a_{ij}x_j\right) - 2\lambda x_i = 0, \quad \textrm{for }i = 0, 1, \dots, (m-1), $$where the factor of $2$ in front of the sum comes from the fact that $a_{ij}x_i x_j$ appears twice due to the symmetry in $A$. Simplifying the previous system of equations, we get
$$ \sum_{j=0}^{m-1}a_{ij}w_j = \lambda w_i, \quad \textrm{for }i = 0, 1, \dots, (m-1), $$where $\mathbf{w} = [w_0, w_1, \dots, w_{m-1}]^T$ denotes a solution vector of this system. Therefore, $\mathbf{w}$ is a constrained **critical point** of $Q$ subject to $||\mathbf{w}||_2^2 = 1$ if and only if
$$ A\mathbf{w} = \lambda\mathbf{w} $$for some $\lambda$; that is, if and only if $\lambda$ is an eigenvalue and $\mathbf{w}$ is an associated unit eigenvector of $A$.
If $A\mathbf{w} = \lambda\mathbf{w}$ and $\sum_{i=0}^{m-1}w_i^2 = 1$, then
$$ \begin{split} Q(\mathbf{w}) &= \sum_{i=0}^{m-1}\left(\sum_{j=0}^{m-1}a_{ij}w_j\right) w_i \\ &= \sum_{i=0}^{m-1}(\lambda w_i)w_i \\ &= \lambda \sum_{i=0}^{m-1} w_i^2 \\ &= \lambda. \end{split} $$Therefore, the largest and smallest eigenvalues of $A$ are the **_maximum_ and _minimum_ values** of $Q$ attained at their respective eigenvectors subject to $||\mathbf{w}||_2 = 1$.
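As a quick numerical sanity check (my addition; the random symmetric matrix `A` and the variable names here are made up for illustration), we can confirm that $Q$ on the unit sphere is bounded by the extreme eigenvalues, with equality at the corresponding unit eigenvectors:
# Sanity check: on the unit sphere, Q(x) = x.T @ A @ x lies between
# the smallest and largest eigenvalues of the symmetric matrix A.
rng = np.random.default_rng( 0 )
B = rng.standard_normal( (6, 6) )
A = (B + B.T) / 2                        # a random symmetric matrix
lamA, WA = npla.eigh( A )                # eigenvalues in ascending order
for _ in range( 1000 ):
    x = rng.standard_normal( 6 )
    x /= npla.norm( x )                  # restrict to ||x||_2 = 1
    assert lamA[0] - 1e-12 <= x @ A @ x <= lamA[-1] + 1e-12
# Q attains the extremes at the corresponding unit eigenvectors.
print( "min eigenvalue:", lamA[0],  "  Q at its eigenvector:", WA[:,0] @ A @ WA[:,0] )
print( "max eigenvalue:", lamA[-1], "  Q at its eigenvector:", WA[:,-1] @ A @ WA[:,-1] )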
When we work with symmetric matrices that are positive (semi)definite (SPSD), the eigendecomposition and the SVD coincide, up to ordering: `eigh` returns eigenvalues in ascending order, while the SVD returns singular values in descending order (and each singular vector may differ from the corresponding eigenvector by a sign).
For example, the graph Laplacian matrices that we need for computing Laplacian quadratic forms are SPSD, and it doesn't matter whether we compute their SVD or eigenpairs.
# The Laplacian matrix of a path.
L = cs111.path( 5 )
print( L )
[[ 1. -1.  0.  0.  0.]
 [-1.  2. -1.  0.  0.]
 [ 0. -1.  2. -1.  0.]
 [ 0.  0. -1.  2. -1.]
 [ 0.  0.  0. -1.  1.]]
# Here's the eigendecomposition.
lam, W = npla.eigh( L )
print( "Eigenvalues:\n", lam, sep="" )
print( "\nEigenvectors:\n", W, sep="" )
Eigenvalues:
[-4.5874e-17  3.8197e-01  1.3820e+00  2.6180e+00  3.6180e+00]

Eigenvectors:
[[ 4.4721e-01 -6.0150e-01  5.1167e-01 -3.7175e-01  1.9544e-01]
 [ 4.4721e-01 -3.7175e-01 -1.9544e-01  6.0150e-01 -5.1167e-01]
 [ 4.4721e-01  3.5744e-16 -6.3246e-01 -1.2819e-16  6.3246e-01]
 [ 4.4721e-01  3.7175e-01 -1.9544e-01 -6.0150e-01 -5.1167e-01]
 [ 4.4721e-01  6.0150e-01  5.1167e-01  3.7175e-01  1.9544e-01]]
# And, here's the SVD.
U, sigma, Vt = npla.svd( L )
print( "Singular values:\n", sigma, sep="" )
print( "\nSingular vectors:\n", U, sep="" )
Singular values:
[3.6180e+00 2.6180e+00 1.3820e+00 3.8197e-01 5.1904e-17]

Singular vectors:
[[-1.9544e-01 -3.7175e-01 -5.1167e-01 -6.0150e-01  4.4721e-01]
 [ 5.1167e-01  6.0150e-01  1.9544e-01 -3.7175e-01  4.4721e-01]
 [-6.3246e-01  1.2068e-16  6.3246e-01 -2.4659e-16  4.4721e-01]
 [ 5.1167e-01 -6.0150e-01  1.9544e-01  3.7175e-01  4.4721e-01]
 [-1.9544e-01  3.7175e-01 -5.1167e-01  6.0150e-01  4.4721e-01]]
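We can check programmatically that the two factorizations agree, up to the reversed ordering and possible sign flips in the vectors (this small check is my addition):
# The singular values are the eigenvalues in reverse (descending) order.
print( "Spectra match:", np.allclose( lam[::-1], sigma ) )
# Each singular vector matches the corresponding eigenvector up to sign.
for k in range( 5 ):
    w, u = W[:, 4-k], U[:, k]
    print( "Pair", k, "matches up to sign:", np.allclose( w, u ) or np.allclose( w, -u ) )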
# Reading in the graph.
G, xycoords = cs111.read_mesh( 'mesh1e1' )
# Number of nodes.
print( "G has", G.number_of_nodes(), "nodes" )
# Number of edges.
print( "\nG has", G.number_of_edges(), "edges" )
G has 48 nodes

G has 129 edges
# Plotting the graph by using the xycoords list of tuples.
plt.figure( dpi=150 )
plt.axis( 'equal' )
plt.title( "Mesh graph")
nx.draw( G, pos=xycoords, node_size=10, node_shape='o', width=0.25 )
**Acknowledgement**: The following information is a summary of the notes at https://www.cis.upenn.edu/~cis515/cis515-15-graph-drawing.pdf
If $G = (V,E)$ is our undirected graph, the idea of drawing $G$ is to assign a coordinate (say, in two dimensions) to every node $v_i \in V$ and to draw a line segment between $v_i$ and $v_j$ whenever the edge $e_{ij} = (i,j)$ exists in $E$.
Spectral drawing originates from the goal of finding an $m$-by-$n$ drawing matrix $X$, where $m = |V|$, $n \ll m$, and the $i$th row vector contains the coordinates for $v_i$. To find a reasonable $X$, we can imagine building a physical model of $G$ by replacing the edges in $E$ with identical springs. A good drawing of $G$ is then one in which these springs are stretched as little as possible.
Formally, the energy of a drawing $X$ is
$$ \mathcal{E}(X) = \sum_{(i,j)\in E}||\mathbf{x}_i - \mathbf{x}_j||_2^2, $$where $\mathbf{x}_i$ is the $i$th row of $X$ corresponding to node $v_i$. A good drawing seeks to minimize $\mathcal{E}(X)$.
As seen in lecture this week, it turns out we can model $\mathcal{E}(X)$ by using the Laplacian matrix of $G$. That is, if $\mathbf{e}_{ij} = [0, \cdots, 0, 1, 0, \cdots, 0, -1, 0, \cdots, 0]^T$ is a vector with zeros except for a $1$ at the $i$th location and a $-1$ at the $j$th location (representing the edge $e_{ij}$), then
$$ X^T L X = X^T \left(\sum_{(i,j)\in E}\mathbf{e}_{ij}\mathbf{e}_{ij}^T\right) X = \sum_{(i,j)\in E}(X^T\mathbf{e}_{ij})(\mathbf{e}_{ij}^T X) = \sum_{(i,j)\in E}\begin{pmatrix} (x_i - x_j)^2 & (x_i - x_j)(y_i - y_j) \\ (x_i - x_j)(y_i - y_j) & (y_i - y_j)^2 \end{pmatrix}, $$where $L$ is the graph Laplacian and $x_i$ and $y_i$ are the entries (e.g., two-dimensional coordinates) for the $i$th node in $X$. From this Laplacian quadratic form, we can see that the energy function is equivalent to
$$ \mathcal{E}(X) = \textrm{trace}(X^T L X). $$Furthermore, $X^T L X$ is symmetric positive semidefinite, and its trace equals the sum of its (nonnegative) eigenvalues.
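Before we use this result for drawing, here is a small sketch (my addition, reusing the mesh graph `G` loaded above; the names `Lsum`, `Lg`, and `Xr` are mine) that checks both identities numerically: the Laplacian as a sum of rank-one edge terms, and the energy as a trace.
# Check that L = sum of e_ij e_ij^T over the edges of G, and that
# the energy of a drawing equals trace(X.T @ L @ X).
m = G.number_of_nodes()
Lsum = np.zeros( (m, m) )
for (i, j) in G.edges():
    e = np.zeros( m )
    e[i], e[j] = 1, -1                   # the edge vector e_ij
    Lsum += np.outer( e, e )
Lg = nx.laplacian_matrix( G ).toarray()
print( "L equals the rank-one sum:", np.allclose( Lsum, Lg ) )

# Energy of a random 2-D drawing, computed two ways.
rng = np.random.default_rng( 0 )
Xr = rng.standard_normal( (m, 2) )
energy_edges = sum( npla.norm( Xr[i] - Xr[j] )**2 for (i, j) in G.edges() )
energy_trace = np.trace( Xr.T @ Lg @ Xr )
print( "Edge sum:", energy_edges, "  Trace:", energy_trace )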
Now, if we choose $X$ to be the second and third columns from $W$ (i.e., $\mathbf{w}_1$ and $\mathbf{w}_2$, where $L = W S W^T$ is the eigendecomposition of $L$), our drawing satisfies the equation $X^T X = I$, and we rule out the trivial minimizer of $\mathcal{E}(X)$ (a drawing that collapses every node onto a single point, which corresponds to the constant eigenvector $\mathbf{w}_0$). In this case, the corresponding drawing given by $X$ is called an orthogonal drawing.
The following result tells us how to find minimum energy drawings, assuming that $G$ is connected.
Theorem. Let $G = (V, E)$ be an undirected graph with $|V| = m$. If $L$ is the Laplacian of $G$, and if the eigenvalues of $L$ are $0 = \lambda_0 \leqslant \lambda_1 \leqslant \cdots \leqslant \lambda_{m-1}$, then the minimal energy of any balanced orthogonal graph drawing of $G$ in $\mathbb{R}^n$ is equal to $\lambda_1 + \cdots + \lambda_{n}$ (of course, implying that $n \ll m$). The $m$-by-$n$ matrix $X$ whose columns are any unit eigenvectors $\mathbf{w}_1, \cdots, \mathbf{w}_n$ associated with $\lambda_1 \leqslant \cdots \leqslant \lambda_n$ yields a balanced orthogonal graph drawing of minimal energy and satisfies the condition $X^T X = I$.
Note: A representation is balanced if and only if the sum of the entries of every column is zero: $\mathbf{1}^T X = \mathbf{0}$.
# Building the Laplacian matrix.
L = nx.linalg.laplacian_matrix( G ).toarray()
L
array([[ 4, -1,  0, ...,  0,  0, -1],
       [-1,  6, -1, ..., -1,  0, -1],
       [ 0, -1,  3, ..., -1,  0,  0],
       ...,
       [ 0, -1, -1, ...,  6,  0,  0],
       [ 0,  0,  0, ...,  0,  6,  0],
       [-1, -1,  0, ...,  0,  0,  4]])
# Let's compute the eigendecomposition of the Laplacian matrix.
lam, W = npla.eigh( L )
print( "First five eigenvalues:\n", lam[:5], sep="" )
print( "\nFirst five eigenvectors:\n", W[:,:5], sep="" )
First five eigenvalues:
[-4.3066e-15  6.1356e-01  7.2802e-01  8.0652e-01  1.3861e+00]

First five eigenvectors:
[[ 1.4434e-01 -3.2799e-02  2.9154e-02 -2.8439e-01  1.4437e-01]
 [ 1.4434e-01 -6.7875e-02 -9.6091e-02 -2.1813e-01  2.4900e-01]
 [ 1.4434e-01  8.9738e-03 -2.2404e-01 -2.0333e-01  3.2379e-01]
 [ 1.4434e-01  1.0498e-01 -2.1861e-01 -1.1597e-01  7.7661e-02]
 [ 1.4434e-01  2.1286e-01 -1.8646e-01 -1.4056e-01 -1.9732e-01]
 [ 1.4434e-01  2.2058e-01 -5.9474e-02 -1.3534e-01 -2.9472e-01]
 [ 1.4434e-01  1.9902e-01  3.8231e-02 -2.2400e-01 -4.1069e-01]
 [ 1.4434e-01  8.2667e-02  8.6795e-02 -2.2533e-01 -1.3938e-01]
 [ 1.4434e-01 -1.6404e-01  1.6373e-01  4.7879e-02 -6.8083e-02]
 [ 1.4434e-01 -1.7408e-02  2.4592e-01  1.2421e-01  7.9842e-02]
 [ 1.4434e-01  1.0477e-01  1.4989e-01  1.8580e-01  1.3658e-01]
 [ 1.4434e-01  8.5924e-02 -6.9328e-02  2.1994e-01  9.3656e-02]
 [ 1.4434e-01 -7.3881e-02 -1.7996e-01  2.2550e-01 -5.5534e-02]
 [ 1.4434e-01 -2.0279e-01 -1.4490e-01  1.6938e-01 -1.8532e-01]
 [ 1.4434e-01 -2.4868e-01 -7.0699e-03  1.0775e-01 -2.1820e-01]
 [ 1.4434e-01 -2.4333e-01 -7.9905e-02  9.7516e-02 -1.8915e-01]
 [ 1.4434e-01 -1.9471e-01 -1.5633e-01  1.1100e-01 -1.2411e-01]
 [ 1.4434e-01 -2.3197e-01  3.7949e-02  2.9332e-02 -1.2779e-01]
 [ 1.4434e-01 -1.2894e-01 -1.9576e-01  1.6853e-01 -8.2715e-02]
 [ 1.4434e-01 -1.8919e-01 -7.0996e-02 -8.5359e-03 -2.8163e-02]
 [ 1.4434e-01  4.8717e-02  2.4821e-01  1.2038e-01  1.0760e-01]
 [ 1.4434e-01 -8.9828e-02 -1.7628e-01  3.8593e-02  3.6823e-02]
 [ 1.4434e-01 -4.8397e-02  2.4282e-01  4.2617e-02  3.2598e-02]
 [ 1.4434e-01  1.1992e-01  2.0190e-01  1.2545e-01  1.0257e-01]
 [ 1.4434e-01  1.6567e-01  1.2439e-01  1.6273e-01  1.1071e-01]
 [ 1.4434e-01  1.5204e-01  1.2021e-01  3.0418e-02 -2.1474e-02]
 [ 1.4434e-01  1.6150e-01  3.9155e-02  2.1221e-01  1.3579e-01]
 [ 1.4434e-01  5.4814e-02  2.1981e-01  2.6747e-02  3.7265e-02]
 [ 1.4434e-01  1.8845e-01  2.0211e-02  1.2853e-01  4.6620e-02]
 [ 1.4434e-01 -1.3685e-01  2.0470e-01 -2.7008e-02 -1.8900e-02]
 [ 1.4434e-01 -2.0028e-01  1.3885e-01 -2.8248e-02 -6.1682e-02]
 [ 1.4434e-01 -1.4164e-01  1.3134e-01 -1.0463e-01  1.6187e-02]
 [ 1.4434e-01 -4.5924e-02  1.9775e-01 -7.0881e-02  1.2679e-02]
 [ 1.4434e-01 -4.3828e-03 -1.7882e-01  1.6226e-01  2.9215e-02]
 [ 1.4434e-01  6.8896e-02  1.4795e-01 -1.0908e-01 -6.2038e-02]
 [ 1.4434e-01  1.1119e-01 -1.3788e-01  1.6363e-01  8.2897e-02]
 [ 1.4434e-01  7.1925e-02 -1.8350e-01  2.6507e-02  5.9670e-02]
 [ 1.4434e-01  1.6362e-01 -5.7894e-02  1.9289e-01  1.0317e-01]
 [ 1.4434e-01  1.7065e-01 -9.9479e-02  8.4573e-02  1.3871e-02]
 [ 1.4434e-01 -2.0399e-01  5.5455e-02 -6.4032e-02 -2.4327e-02]
 [ 1.4434e-01  2.0159e-01 -1.9830e-02 -1.5310e-02 -1.2697e-01]
 [ 1.4434e-01 -1.3173e-01 -2.4346e-04 -1.5644e-01  1.1353e-01]
 [ 1.4434e-01 -1.1269e-01 -1.2592e-01 -1.0884e-01  1.5692e-01]
 [ 1.4434e-01  1.8242e-01 -1.4555e-01 -5.7015e-02 -1.0139e-01]
 [ 1.4434e-01 -4.4188e-02  9.5907e-02 -1.9677e-01  5.3385e-02]
 [ 1.4434e-01 -1.5690e-02 -1.9432e-01 -1.1190e-01  1.9590e-01]
 [ 1.4434e-01  1.7169e-01  5.9539e-02 -1.3067e-01 -2.2871e-01]
 [ 1.4434e-01 -8.1675e-02  8.7794e-03 -2.6796e-01  2.1435e-01]]
# We'll use the first two non-trivial eigenvectors for the spectral drawing.
X = W[:,1:3]
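Before comparing eigenvalues below, we can verify that this $X$ is orthogonal and balanced in the sense of the theorem (a quick check, my addition):
# X should satisfy X.T @ X = I (orthogonal) and 1.T @ X = 0 (balanced).
print( "X.T @ X:\n", X.T @ X, sep="" )
print( "\nColumn sums of X (should be ~0):\n", np.ones( G.number_of_nodes() ) @ X, sep="" )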
# Comparing eigenvalues: The evals of R are the same as the first 2 nonzero evals of L.
R = X.T @ L @ X
lamR, _ = npla.eigh( R )
print( "Eigenvalues of R:\n", lamR, sep="" )
print( "Eigenvalues from L:\n", lam[1:3], sep="" )
Eigenvalues of R:
[0.6136 0.728 ]
Eigenvalues from L:
[0.6136 0.728 ]
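According to the theorem, the minimal drawing energy in $\mathbb{R}^2$ should be $\lambda_1 + \lambda_2$; the trace formula lets us confirm this directly (my addition):
# The energy of the drawing, trace(X.T @ L @ X), should equal lambda_1 + lambda_2.
print( "Energy of the drawing:", np.trace( R ) )
print( "lambda_1 + lambda_2:  ", lam[1] + lam[2] )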
plt.figure( dpi=150 )
plt.axis( "equal" )
plt.title( "Spectral drawing of mesh graph" )
nx.draw( G, pos=X, node_size=10, node_shape="o", width=0.25, with_labels=False )
# Let's get a coordinate-cut plot of the graph.
x = np.zeros( G.number_of_nodes() )
for v in G.nodes():
    x[v] = xycoords[v][0]
mean = np.mean( x )                      # Get the mean value along the x-direction.
colors = []                              # Deciding on the colors.
x = np.zeros( G.number_of_nodes() )      # Cut vector.
for v in G.nodes():
    if xycoords[v][0] < mean:            # Based on whether x-coord is above or below the mean.
        colors.append('r')
        x[v] = -1
    else:
        colors.append('b')
        x[v] = +1
# Plotting graph. For more options, visit:
# https://networkx.org/documentation/stable/reference/generated/networkx.drawing.nx_pylab.draw_networkx.html
plt.figure( dpi=150 )
plt.axis( 'equal' )
plt.title( "Mean x-cut of mesh graph")
plt.xlabel( "x" )
plt.ylabel( "y" )
nx.draw( G, pos=xycoords, node_size=10, node_shape='o', node_color=colors, width=0.25, with_labels=False )
# How many edges does the x vector cut?
count = 0
for e in G.edges:
    if x[e[0]] * x[e[1]] < 0:    # A cut edge has endpoints with distinct labels!
        count += 1
print( "Edges cut:", count )
# You'll work on a formula to get this count by using the LQF: x.T @ L @ x!
# Also, you can use the LQF on x = w1 (the Fiedler vector) to get a spectral cut!
Edges cut: 18
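As a follow-up to the comments above, here is a sketch of the spectral cut they mention: label each node by the sign of the Fiedler vector $\mathbf{w}_1$ and count the cut edges the same way as before. (The LQF formula for the count is left for the exercise; the names `w1` and `x_spec` are mine.)
# A sketch of the spectral cut: label nodes by the sign of the
# Fiedler vector w1, the eigenvector of the smallest nonzero eigenvalue.
w1 = W[:, 1]
x_spec = np.where( w1 < 0, -1, +1 )      # +/-1 labels from the sign of w1
colors = [ 'r' if x_spec[v] < 0 else 'b' for v in G.nodes() ]
count = 0
for e in G.edges:
    if x_spec[e[0]] * x_spec[e[1]] < 0:  # endpoints on opposite sides
        count += 1
print( "Edges cut by the spectral cut:", count )

plt.figure( dpi=150 )
plt.axis( 'equal' )
plt.title( "Spectral (Fiedler) cut of mesh graph" )
nx.draw( G, pos=xycoords, node_size=10, node_shape='o', node_color=colors, width=0.25, with_labels=False )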