% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass[11pt]{article}
\usepackage[T1]{fontenc}
% Nicer default font (+ math font) than Computer Modern for most use cases
\usepackage{mathpazo}
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts ![](path) syntax from Markdown).
\usepackage{graphicx}
% We will generate all images so they have a width \maxwidth. This means
% that they will get their normal width if they fit onto the page, but
% are scaled down if they would overflow the margins.
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
\else\Gin@nat@width\fi}
\makeatother
\let\Oldincludegraphics\includegraphics
% Set max figure width to be 80% of text width, for now hardcoded.
\renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionLabelFormat{nolabel}{}
\captionsetup{labelformat=nolabel}
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% MathJax compatibility definitions
\def\gt{>}
\def\lt{<}
% Document parameters
\title{380CT - Theoretical Aspects of Computer Science Coursework}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\begin{itemize}
\tightlist
\item
\textbf{Name:} Shahzeb Dawood
\item
\textbf{Student ID:} 7015511
\item
\textbf{Github Repository Link:}
https://github.coventry.ac.uk/380CT-1718JANMAY/Coursework2-7015511
\end{itemize}
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{Notation}\label{notation}
Let the given graph be \emph{G}, the number of nodes be \emph{N}, and the
probability of edge creation be \emph{e}.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{Definition of the problem}\label{definition-of-the-problem}
\subsection{\texorpdfstring{Directed Hamiltonian Cycle Problem
}{Directed Hamiltonian Cycle Problem }}\label{directed-hamiltonian-cycle-problem}
Given a directed graph G = (N, e), a Hamiltonian cycle in G is a path in
the graph, starting and ending at the same node, such that every node in
N is traversed exactly once.
Hence, the Directed Hamiltonian Cycle Problem (DHCP) is the problem of
determining whether a given graph contains a Hamiltonian cycle. It is
also regarded as a special case of the "Traveling Salesman Problem"
(Plesník 1979).
\subsubsection{\texorpdfstring{The Traveling Salesman Problem:
}{The Traveling Salesman Problem: }}\label{the-traveling-salesman-problem}
The Traveling Salesman Problem is a well-known NP-complete permutation
problem whose aim is to find the path of the shortest length (or minimum
cost) on a graph. The traveling salesman starts at one node, visits all
other nodes successively exactly once each, and finally returns to the
starting node (RAO et al. 2014). This mirrors our DHCP: the relationship
between the two is that solving a Traveling Salesman instance determines
a minimum-weight Hamiltonian cycle.
\subsection{Complexity Class
Membership}\label{complexity-class-membership}
\textbf{NP-complete}
In the work of Garey and Johnson: \textbf{The Hamiltonian Circuit
belongs to NP}
And to quote Garey and Johnson "The Hamiltonian circuit belongs to NP,
because a nondeterministic algorithm need only guess an ordering of the
vertices and check in polynomial time that all required edges belong to
the edge set of the given graph" (Garey and Johnson 1982).
And since a \emph{Hamiltonian circuit} is the same as a \emph{Hamiltonian cycle},
it follows that: \textbf{the Hamiltonian Cycle Problem belongs to NP}
Additionally, \emph{"The ordered list of vertices in a directed
Hamiltonian path can serve as a certificate of its place in NP"} (Arora
and Barak 2016).
\subsection{\texorpdfstring{Decision }{Decision }}\label{decision}
For a given directed graph, decide whether it contains a path that
visits every vertex exactly once and terminates at the origin vertex.
Randomly generated graphs are traversed using different algorithms to
decide whether a given graph is a Hamiltonian cycle graph or not.
\textbf{NP-complete}, because DHCP belongs to NP.
DHCP belongs to NP: once a candidate ordering of the nodes is given, we
can quickly "verify" whether it forms a Hamiltonian cycle, as sketched
below.
\subsection{Computation - Search/Traverse
Graph}\label{computation---searchtraverse-graph}
Given that the problem is decidable, find a cycle in the graph that
meets the criteria of a Hamiltonian cycle.
\textbf{NP-Hard.} Using the search algorithms we can determine whether a
solution exists: if a solution is found return TRUE, else return FALSE.
\subsection{Optimisation}\label{optimisation}
Find a graph that contains a Hamiltonian cycle while minimising
non-Hamiltonian-cycle determinations, or alternatively decreasing the
runtime of the determining algorithm.
\textbf{NP-Hard}
Optimization versions of NP-complete problems are automatically NP-Hard.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{Testing Methodology}\label{testing-methodology}
All three algorithms are compared on the basis of their Big O notation
and a graph of their Big O curves, with suggestions on the kinds of
situation each is best suited to.
Since the actual code was not completed, owing to a shortage of time,
the Big O notation of the pseudocode is compared instead of the actual
accuracy and runtime performance of the algorithms for an increasing
number of graph nodes and probability of connection.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{\texorpdfstring{Random Instance Generation
}{Random Instance Generation }}\label{random-instance-generation}
The random graph is generated using a built-in function from the Python
library 'NetworkX'. The function takes parameters for the number of
nodes 'n', the probability of connection 'e' and the nature of the
graph edges (directed/undirected).
The probability of edge creation 'e' works like a coin toss: the
user-defined probability sets the odds of each pair of nodes having a
connection between them. The higher the probability of edge creation,
the more connected the graph tends to be.
For testing, it must be ensured that every graph given to the algorithms
is a Hamiltonian cycle graph. This is achieved by the following:
Once a random graph is generated, the following steps are taken to check
whether the graph generated is a Hamiltonian cycle graph:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Determine and store all cycles in the graph
\item
Find the longest cycle in graph
\item
Check that said cycle has traversed through all nodes just once
\item
Check that the cycle terminates at the start node
\end{enumerate}
Given that the mentioned conditions are fulfilled, the
``Hamiltonian\_Generator'' function returns the graph with confidence
that it is a Hamiltonian cycle graph.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{Code}\label{code}
\subsection{Relevant Libraries:}\label{relevant-libraries}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}1}]:} \PY{k+kn}{import} \PY{n+nn}{networkx} \PY{k}{as} \PY{n+nn}{nx}
\end{Verbatim}
\subsection{Random Graph Generation:}\label{random-graph-generation}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}17}]:} \PY{c+c1}{\PYZsh{}Function for random generation of a directed graph with specified number of nodes}
\PY{c+c1}{\PYZsh{}and probability of connection between nodes}
\PY{k}{def} \PY{n+nf}{RandomGraphGenerator}\PY{p}{(}\PY{n}{Nodes}\PY{p}{,} \PY{n}{ProbabilityCnct}\PY{p}{)}\PY{p}{:}
\PY{n}{G} \PY{o}{=} \PY{n}{nx}\PY{o}{.}\PY{n}{erdos\PYZus{}renyi\PYZus{}graph}\PY{p}{(}\PY{n}{Nodes}\PY{p}{,} \PY{n}{ProbabilityCnct}\PY{p}{,} \PY{n}{directed}\PY{o}{=}\PY{k+kc}{True}\PY{p}{)}
\PY{k}{return}\PY{p}{(}\PY{n}{G}\PY{p}{)}
\PY{c+c1}{\PYZsh{}Plotting the randomly generated Graph}
\PY{k}{if} \PY{n+nv+vm}{\PYZus{}\PYZus{}name\PYZus{}\PYZus{}} \PY{o}{==} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZus{}\PYZus{}main\PYZus{}\PYZus{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:}
\PY{n}{Random\PYZus{}Graph} \PY{o}{=} \PY{n}{RandomGraphGenerator}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,} \PY{l+m+mf}{0.5}\PY{p}{)}
\PY{n}{nx}\PY{o}{.}\PY{n}{draw\PYZus{}networkx}\PY{p}{(}\PY{n}{Random\PYZus{}Graph}\PY{p}{,} \PY{n}{node\PYZus{}color}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{yellow}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{edge\PYZus{}color}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{k}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_14_0.png}
\end{center}
{ \hspace*{\fill} \\}
The above graph shows a randomly generated directed graph with 10 nodes
and an edge connection probability of 50\%.
\subsection{Definite Hamiltonian Graph
Generation:}\label{definite-hamiltonian-graph-generation}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}7}]:} \PY{c+c1}{\PYZsh{}Function for definite Hamiltonian cycle graph generation with }
\PY{c+c1}{\PYZsh{}specified number of nodes and probability of connection between nodes}
\PY{k}{def} \PY{n+nf}{Hamiltonian\PYZus{}Generator}\PY{p}{(}\PY{n}{Nodes}\PY{p}{,} \PY{n}{ProbabilityCnct}\PY{p}{)}\PY{p}{:}
\PY{n}{Hamiltonian} \PY{o}{=} \PY{k+kc}{False}
\PY{c+c1}{\PYZsh{}Runs until a graph containing a Hamiltonian cycle is found}
\PY{k}{while} \PY{n}{Hamiltonian} \PY{o}{!=} \PY{k+kc}{True}\PY{p}{:}
\PY{c+c1}{\PYZsh{}Uses same method of random graph generation}
\PY{n}{G} \PY{o}{=} \PY{n}{nx}\PY{o}{.}\PY{n}{erdos\PYZus{}renyi\PYZus{}graph}\PY{p}{(}\PY{n}{Nodes}\PY{p}{,} \PY{n}{ProbabilityCnct}\PY{p}{,} \PY{n}{directed}\PY{o}{=}\PY{k+kc}{True}\PY{p}{)}
\PY{c+c1}{\PYZsh{}Variable declarations}
\PY{n}{count} \PY{o}{=} \PY{l+m+mi}{0}
\PY{n}{Length} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{n}{Cycle\PYZus{}Size} \PY{o}{=} \PY{l+m+mi}{0}
\PY{n}{All\PYZus{}Cycles} \PY{o}{=} \PY{p}{(}\PY{n+nb}{list}\PY{p}{(}\PY{n}{nx}\PY{o}{.}\PY{n}{simple\PYZus{}cycles}\PY{p}{(}\PY{n}{G}\PY{p}{)}\PY{p}{)}\PY{p}{)}
\PY{c+c1}{\PYZsh{} finds longest cycle in the graph}
\PY{k}{while} \PY{n}{count} \PY{o}{\PYZlt{}} \PY{n+nb}{len}\PY{p}{(}\PY{n}{All\PYZus{}Cycles}\PY{p}{)}\PY{p}{:}
\PY{k}{if} \PY{n+nb}{len}\PY{p}{(}\PY{n}{All\PYZus{}Cycles}\PY{p}{[}\PY{n}{count}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZgt{}} \PY{n}{Cycle\PYZus{}Size}\PY{p}{:}
\PY{n}{Length} \PY{o}{=} \PY{n}{All\PYZus{}Cycles}\PY{p}{[}\PY{n}{count}\PY{p}{]}
\PY{n}{Cycle\PYZus{}Size} \PY{o}{=} \PY{n+nb}{len}\PY{p}{(}\PY{n}{Length}\PY{p}{)}
\PY{n}{count} \PY{o}{=} \PY{n}{count} \PY{o}{+} \PY{l+m+mi}{1}
\PY{c+c1}{\PYZsh{} checks found cycle contains all nodes once}
\PY{k}{if} \PY{n+nb}{sorted}\PY{p}{(}\PY{n}{G}\PY{o}{.}\PY{n}{nodes}\PY{p}{(}\PY{p}{)}\PY{p}{)} \PY{o}{==} \PY{n+nb}{sorted}\PY{p}{(}\PY{n}{Length}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} checks last edge in cycle connects to first}
\PY{k}{if} \PY{n}{G}\PY{o}{.}\PY{n}{has\PYZus{}edge}\PY{p}{(}\PY{n}{Length}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{Length}\PY{p}{)}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,}\PY{n}{Length}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{:}
\PY{n}{Hamiltonian} \PY{o}{=} \PY{k+kc}{True}
\PY{k}{return}\PY{p}{(}\PY{n}{G}\PY{p}{)}
\PY{c+c1}{\PYZsh{}Plotting the Hamiltonian Graph}
\PY{k}{if} \PY{n+nv+vm}{\PYZus{}\PYZus{}name\PYZus{}\PYZus{}} \PY{o}{==} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZus{}\PYZus{}main\PYZus{}\PYZus{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:}
\PY{n}{Hamiltonian\PYZus{}Graph} \PY{o}{=} \PY{n}{Hamiltonian\PYZus{}Generator}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,} \PY{l+m+mf}{0.5}\PY{p}{)}
\PY{n}{nx}\PY{o}{.}\PY{n}{draw\PYZus{}networkx}\PY{p}{(}\PY{n}{Hamiltonian\PYZus{}Graph}\PY{p}{,} \PY{n}{node\PYZus{}color}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{orange}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{edge\PYZus{}color}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{k}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_17_0.png}
\end{center}
{ \hspace*{\fill} \\}
The above code was implemented to ensure that the generated graph is
Hamiltonian. Initially the intention was to write code for the
algorithms themselves, and in the testing methodology every graph given
to the algorithms, although random, had to be Hamiltonian so that their
ability and performance could be tested and compared. \pagebreak
\section{Solution Methods}\label{solution-methods}
\subsection{Exhaustive Search (Exact
Method)}\label{exhaustive-search-exact-method}
The exhaustive search is a definitive, exact method for deciding the
DHCP. It is an algorithm that, in collaboration with a Depth First
Search (DFS), traverses a graph's nodes and their outgoing edges to
check that the criteria for a directed Hamiltonian cycle are met. It
returns TRUE if the conditions are met, else it returns FALSE.
\subsubsection{Pseudocode:}\label{pseudocode}
\paragraph{Exhaustive Pseudocode
Explanation:}\label{exhaustive-pseudocode-explanation}
The proposed exhaustive search pseudocode can be broken into three
components (a Python sketch of the procedure is given after the numbered
steps below):
\begin{itemize}
\tightlist
\item
Recursive DFS function to traverse through every node
\item
A loop that applies the DFS to every node and appends the result to a
two-dimensional list
\item
A verifier function that checks the collected traversals against the
Hamiltonian cycle criteria
\end{itemize}
\paragraph{Exhaustive Pseudocode
Implementation:}\label{exhaustive-pseudocode-implementation}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
2DimensionalList ={[}{[}{]}{]}
\item
\textbf{FUNCTION} RecursiveDFS (EDGE)
\item
CurrentNode = Traverse EDGE
\item
EdgesOut = CurrentNode.edges()
\item
\textbf{FOR} every edge in EdgesOut
\item
\(\quad\)\textbf{IF} edge \textbf{NOT} Visited
\item
\(\qquad\)RecursiveDFS(edge)
\item
\textbf{FOR} Nodes \textbf{IN} Graph
\item
\(\quad\) 2DimensionalList.Append(RecursiveDFS(Nodes))
\item
\textbf{FUNCTION} Verifier()
\item
\textbf{FOR} EVERY Edge in 2DimensionalList
\item
\(\quad\)TraverseEdge
\item
\(\qquad\)IF Node NOT IN EdgesOut
\item
\(\qquad\)Break
\item
\(\quad\)\textbf{RETURN} TRUE
\end{enumerate}
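The pseudocode above was not turned into working code; the following minimal Python sketch shows one possible reading of it (an assumption on my part rather than the definitive implementation), using a backtracking depth-first search over a NetworkX directed graph such as the ones generated in the Code section:
\begin{Verbatim}
import networkx as nx

def exhaustive_dhcp(G):
    # Exhaustive (backtracking DFS) check for a directed Hamiltonian cycle.
    nodes = list(G.nodes())
    if not nodes:
        return False
    start = nodes[0]

    def dfs(current, visited):
        # Every node visited: the cycle must close back to the start node.
        if len(visited) == len(nodes):
            return G.has_edge(current, start)
        # Otherwise try every outgoing edge to a not-yet-visited node.
        for successor in G.successors(current):
            if successor not in visited:
                if dfs(successor, visited | {successor}):
                    return True
        return False

    return dfs(start, {start})
\end{Verbatim}
For example, \texttt{exhaustive\_dhcp(Hamiltonian\_Generator(10, 0.5))} would be expected to return \texttt{True} for the graphs produced by the generator above.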
\subsubsection{Big O Notation:}\label{big-o-notation}
\textbf{O(n\^{}2)}
The exhaustive search makes use of a depth-first search, which has a Big
O notation of \textbf{O(\textbar{}N\textbar{} + \textbar{}E\textbar{})}
(i.e.\ O(number of nodes + number of edges), so ultimately linear).
However, since there is a loop around the recursive algorithm, and the
most complex term dictates the algorithm's Big O notation, the whole
algorithm takes the complexity \textbf{O(n\^{}2)}.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsection{Greedy Heuristic}\label{greedy-heuristic}
Unlike the definitive, exact "Exhaustive Search" method, the greedy
method and most heuristics are created to add randomness to the problem
solution. In the real world, a traveling salesman is not going to
exhaustively enumerate every possible route.
Given that our random graph generation itself already amounts to solving
the DHCP, the generation of guaranteed Hamiltonian graphs takes a fairly
long time as it is.
\subsubsection{Random Graph Pseudocode
Explanation:}\label{random-graph-pseudocode-explanation}
Here I present the greedy heuristic that I would have implemented.
The random graph preparation for it would follow these steps:
\begin{itemize}
\tightlist
\item
Random Graph Generation
\item
Addition of non-existent edges (with expensive weights (costs)) between
nodes.
\end{itemize}
So, given a randomly generated graph, we make sure that every node is
connected to every other node of the graph: pairs of nodes that are not
already connected are joined by placing an expensive edge between them.
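A minimal Python sketch of this preparation step follows; the weight of 9999 for the added edges and the default weight of 1 for existing edges are illustrative assumptions rather than values prescribed by the design above:
\begin{Verbatim}
import networkx as nx

EXPENSIVE = 9999  # illustrative placeholder cost for added edges

def complete_with_expensive_edges(G, cheap_cost=1):
    # Return a copy of G in which every missing directed edge has been
    # added with a prohibitively large weight, so that every node is
    # connected to every other node.
    H = G.copy()
    for u, v in H.edges():
        H[u][v].setdefault("weight", cheap_cost)
    for u in list(H.nodes()):
        for v in list(H.nodes()):
            if u != v and not H.has_edge(u, v):
                H.add_edge(u, v, weight=EXPENSIVE)
    return H
\end{Verbatim}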
\subsubsection{\texorpdfstring{Pseudocode:}{Pseudocode: }}\label{pseudocode-2}
\subsubsection{Greedy Pseudocode
Explanation:}\label{greedy-pseudocode-explanation}
\begin{itemize}
\tightlist
\item
Pick a random node and assign it as start node
\item
Make start node the current node
\item
Determine all the edges leaving the current node and their weights
(do this step every time a node is traversed to)
\item
Traverse to node with cheapest path
\item
If the only paths available are expensive, then there is no choice:
traverse an expensive path or the only possible edge
\item
If two paths are equally weighted, then give preference to the node
that hasn't been visited before
\item
Repeat until NextNode == StartNode
\item
Otherwise, return that the graph is not Hamiltonian
\end{itemize}
\paragraph{\texorpdfstring{Greedy Heuristic Implementation:
}{Greedy Heuristic Implementation: }}\label{greedy-heuristic-implementation}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
StartNode = Random(G.Nodes)
\item
NextNode = StartNode
\item
Cycle = {[}{]}
\item
Visited = {[}{]}
\item
\textbf{FUNCTION} GreedyTraversal(G)
\item
\textbf{FOR} i \textless{} LENGTH(G.nodes)
\item
Sorted = EMPTY
\item
CurrentNode = NextNode
\item
NodeEdges = CurrentNode{[}Node:Edges{]}
\item
Sorted = NodeEdges.sort(ascending)
\item
NextNode = Sorted{[}0{]}{[}Node{]}
\item
\(\quad\) \textbf{IF} NextNode == StartNode
\item
\(\qquad\) \textbf{RETURN} Cycle , TRUE
\item
\(\quad\)\textbf{FOR} i in RANGE (LENGTH(Sorted))
\item
\(\qquad\) \textbf{IF} NextNode \textbf{IN} Visited
\item
\(\qquad\) NextNode = Sorted{[}i{]}{[}Node{]}
\item
\(\qquad\) \textbf{ELSE}
\item
\(\qquad\) Hamiltonian = \textbf{FALSE}
\item
\(\quad\)\textbf{RETURN} Cycle , \textbf{FALSE}
\end{enumerate}
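Since the greedy method was only specified as pseudocode, the following is a minimal Python sketch of one possible reading of the steps above (again an assumption on my part). It expects a weighted NetworkX directed graph, for example the output of the \texttt{complete\_with\_expensive\_edges} sketch given earlier:
\begin{Verbatim}
import random

def greedy_traversal(G):
    # Greedy walk: from each node follow the cheapest outgoing edge that
    # leads to a not-yet-visited node.
    start = random.choice(list(G.nodes()))
    current = start
    cycle = [start]
    visited = {start}
    for _ in range(G.number_of_nodes() - 1):
        # Outgoing edges of the current node, cheapest first.
        candidates = sorted(G.out_edges(current, data="weight"),
                            key=lambda edge: edge[2])
        # Take the cheapest edge leading to an unvisited node, if any.
        nxt = next((v for _, v, _ in candidates if v not in visited), None)
        if nxt is None:
            return cycle, False  # walk is stuck, no Hamiltonian cycle found
        cycle.append(nxt)
        visited.add(nxt)
        current = nxt
    # All nodes visited: succeed only if the walk closes back to the start.
    return cycle, G.has_edge(current, start)
\end{Verbatim}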
\paragraph{\texorpdfstring{Big O Notation:
}{Big O Notation: }}\label{big-o-notation-2}
\textbf{O(n\^{}2)} Due to the presence of a nested FOR loop in the
otherwise linear greedy algorithm, the worst term dictates the
algorithm's complexity of O(n\^{}2).
This means that as the number of nodes in the graph increases, the
running time grows in direct proportion to the square of the number of
nodes (Abu Naser, 1999).
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsection{Meta Heuristic}\label{meta-heuristic}
\subsubsection{\texorpdfstring{Genetic Algorithm Explanation:
}{Genetic Algorithm Explanation: }}\label{genetic-algorithm-explanation}
The genetic algorithm (GA) (Genetic algorithms in search, optimization,
and machine learning, 1989) is a search technique that takes inspiration
from natural selection and genetics. It repeatedly updates a population
of solutions in order to approach a global optimum. The new solutions
generated by the GA, also referred to as 'offspring', are produced from
the parental solutions by applying the crossover and mutation
operations. Said operations should be applied in such a way that the
offspring inherit important factors of the parents (Iima and Yakawa,
n.d.).
\subsubsection{Pseudocode:}\label{pseudocode-3}
\paragraph{\texorpdfstring{Genetic Algorithm Pseudocode Explanation:
}{Genetic Algorithm Pseudocode Explanation: }}\label{genetic-algorithm-pseudocode-explanation}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Create the first population, and set g←1. (g = generation).
\item
Choose two solutions, say S1 and S2, randomly from the population
created in step 1 as 'parents'.
\item
Create two further solutions S3 and S4 using an external algorithm
(perhaps the greedy method implemented above, or a crossover
operation), for every generation.
\item
Derive a solution S5 from S1 using another external operation
(mutation, to match the 'genetic' nature). Do the same to generate
solution S6 from S2, for every generation.
\item
From the set of generated solutions \{S1, S2,⋯,S6\} select the two best
solutions. Remove S1 and S2 (the old parents) from the population, and
use the two best solutions as parents for the next generation.
\item
TERMINATION: if g = G, end the algorithm (where G is the final
generation, whose value is defined at the start).
\item
Given that g ≠ G, increment g (g←g+1) and continue from step 2.
\end{enumerate}
\subparagraph{Mutation operation and Crossover
Operation:}\label{mutation-operation-and-crossover-operation}
Taken as operations external to the genetic algorithm, these two
functions are responsible for delivering solutions based on natural
mechanisms. As in nature, the mutation operation mutates the genes of a
solution, while the crossover operation makes sure that the best factors
of the parents are inherited by the resulting solution (Joel Zwickl,
2006).
\paragraph{\texorpdfstring{Genetic Algorithm Implementation:
}{Genetic Algorithm Implementation: }}\label{genetic-algorithm-implementation}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
BestSolution = {[}{]}
\item
Population =\{\}
\item
Parent1 = RandomSolution(population)
\item
Parent2 = RandomSolution(population)
\item
\textbf{WHILE} Termination = \textbf{FALSE DO}
\item
\(\quad\) S3 , S4 = CrossoverOp()
\item
\(\quad\) S5 = MutationOperation(Parent1)
\item
\(\quad\) S6 = MutationOperation(Parent2)
\item
\(\quad\) population.ADD(Parent1,Parent2,S3,S4,S5,S6)
\item
\(\quad\) Parent1 = population.BestSolution
\item
\(\quad\) Parent2 = population.SecondBestSolution
\item
\textbf{END WHILE}
\item
\textbf{RETURN} BestSolution
\end{enumerate}
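A minimal Python skeleton of the loop described above is given below. The \texttt{fitness}, \texttt{crossover} and \texttt{mutate} functions and the initial population are assumed placeholders for illustration; they are not defined in this coursework:
\begin{Verbatim}
import random

def genetic_algorithm(population, fitness, crossover, mutate, generations):
    # Generational loop: breed offspring by crossover and mutation, then
    # keep the two fittest solutions as the next generation's parents.
    parent1, parent2 = random.sample(population, 2)
    for _ in range(generations):
        s3, s4 = crossover(parent1, parent2)        # offspring via crossover
        s5, s6 = mutate(parent1), mutate(parent2)   # offspring via mutation
        candidates = [parent1, parent2, s3, s4, s5, s6]
        parent1, parent2 = sorted(candidates, key=fitness, reverse=True)[:2]
    return max([parent1, parent2], key=fitness)
\end{Verbatim}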
\paragraph{\texorpdfstring{Big O Notation:
}{Big O Notation: }}\label{big-o-notation-3}
The complexity is \textbf{O(n)}, as the algorithm itself is linear apart
from the external functions it incorporates, and as the population
increases the time taken increases only insignificantly. Having said
that, the external functions do add to the computation of the overall
algorithm.
\subsubsection{\texorpdfstring{Grasp Pseudocode Explanation:
}{Grasp Pseudocode Explanation: }}\label{grasp-pseudocode-explanation}
Defined as a multi-start algorithm, this heuristic combines two
different computations to encourage randomness (Glover and Kochenberger,
2003) and to introduce an element of surprise and unexpectedness into
the computation. Meta-heuristic techniques take inspiration from things
such as nature and everyday life; Ant Colony Optimization (ACO) and the
Genetic Algorithm (GA) are two powerful meta-heuristic examples.
\begin{itemize}
\tightlist
\item
The termination condition is user-specified to carry out a reasonable
number of iterations (e.g. 50-100)
\item
The GRASP algorithm below incorporates the greedy algorithm implemented
above, in collaboration with a local search
\item
Much like gradient descent and loss functions in neural networks, the
local search aims to produce the best possible solution by changing the
edges of the graph to see if the overall performance improves.
\end{itemize}
\subsubsection{Pseudocode:}\label{pseudocode-1}
\paragraph{\texorpdfstring{GRASP Implementation:
}{GRASP Implementation: }}\label{grasp-implementation}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
BestSolution = EMPTY
\item
\textbf{WHILE} Termination = \textbf{FALSE DO}
\item
\(\quad\) GreedySolution = GreedyTraversal(G)
\item
\(\quad\) GraspSolution = LocalSearch(GreedySolution)
\item
\(\quad\) \textbf{IF} GraspSolution IS BETTER THAN BestSolution
\textbf{THEN}
\item
\(\qquad\) BestSolution = GraspSolution
\item
\(\quad\) \textbf{ENDIF}
\item
\textbf{END WHILE}
\item
\textbf{RETURN} BestSolution
\end{enumerate}
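A minimal Python sketch of this loop follows, reusing the \texttt{greedy\_traversal} sketch from earlier; the \texttt{local\_search} and \texttt{cost} functions are assumed placeholders, since neither is specified in this coursework:
\begin{Verbatim}
def grasp(G, local_search, cost, iterations=50):
    # Multi-start loop: build a greedy solution, improve it with local
    # search, and keep the best (lowest-cost) solution found so far.
    best = None
    for _ in range(iterations):
        greedy_solution, ok = greedy_traversal(G)    # construction phase
        if not ok:
            continue
        improved = local_search(G, greedy_solution)  # improvement phase
        if best is None or cost(G, improved) < cost(G, best):
            best = improved
    return best
\end{Verbatim}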
\paragraph{\texorpdfstring{Big O Notation:
}{Big O Notation: }}\label{big-o-notation-1}
From steps 1-2 the complexity is \textbf{O(n)}: the visible algorithm
remains linear, but it incorporates two foreign functions (greedy
traversal and local search) whose cost does affect the overall (parent)
algorithm.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\section{Special Cases}\label{special-cases}
\begin{quote}
Let G be a connected graph with n \textless{} 2 vertices, and E edges.
\end{quote}
\subsection{Singleton Graph}\label{singleton-graph}
Description: A graph containing just one node and no edges.
\emph{"By convention, the singleton graph is Hamiltonian"} (Weisstein,
1995).
\begin{itemize}
\tightlist
\item
Meets all the criteria of a Directed Hamiltonian Cycle.
\item
Although there is significant debate about whether a single-node graph
can in fact be considered a directed graph
\item
Again, can contribute to the optimisation of determination algorithms
\end{itemize}
\subsection{Empty Graph}\label{empty-graph}
Description: a graph containing nodes but no edges.
Given the personal communication of B. McKay, \emph{"By convention, the
singleton graph is considered to be Hamiltonian (B. McKay, pers. comm.,
Mar. 22, 2007)"} (Weisstein, 1995), it can be said that an empty graph
is a special case of the singleton graph in which every node of the
graph acts as a singleton graph itself, as re-quoted from the singleton
graph special case.
\begin{quote}
Let G be a connected graph with n = E = 2.
\end{quote}
\subsection{2 nodes 2 edges}\label{nodes-2-edges}
Description: for a given graph with just 2 nodes and 2 edges connecting
the two nodes, there is logically a Hamiltonian cycle. This is easily
computable in polynomial time, as all we need to do to verify it is to
check that: \textbf{Number of nodes = number of edges = 2}
\begin{quote}
Let G be a graph where the number of edges 'E' \textless{} the number of
nodes 'n'.
\end{quote}
\subsection{Number of edges \textless{} number of
nodes}\label{number-of-edges-number-of-nodes}
Description: the number of edges in the graph is less than the number of
nodes. This means that at least one node in the graph cannot be part of
a cycle.
It is mathematically impossible for a graph to have a Hamiltonian cycle
if one or more nodes cannot participate in a cycle: not all nodes can be
traversed, which breaks the criteria for a given graph to be a
Hamiltonian cycle graph.
\begin{quote}
Let G be a graph where the number of edges E = n(n-1).
\end{quote}
\subsection{Complete Graph (100\% connected
graph)}\label{complete-graph-100-connected-graph}
Description: Every node connected to every other node in the graph.
\begin{itemize}
\tightlist
\item
Given a random graph, with specified number of nodes and edge
connection probability of 1 (100\%) the resulting graph will always be
a Hamiltonian Cycle graph.
\item
This can be checked mathematically: the number of edges must equal
n(n-1), where n = number of nodes.
\item
This proves that a graph is 100\% connected, and a linear, one-line
piece of code can check whether a given graph is complete (see the
sketch after this list).
\item
This can also help optimise code: if a DHCP-determining algorithm is
given a complete graph, rather than traversing every node it can simply
check that the graph is completely connected and skip the traversal.
\end{itemize}
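As mentioned in the list above, the check can be written in essentially one line; a small Python sketch, assuming a NetworkX directed graph with no self-loops:
\begin{Verbatim}
def is_complete_digraph(G):
    # A directed graph with no self-loops is complete exactly when it
    # has n(n-1) edges, where n is the number of nodes.
    n = G.number_of_nodes()
    return G.number_of_edges() == n * (n - 1)
\end{Verbatim}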
\subsection{Traveling Salesman
Problem}\label{traveling-salesman-problem}
Since DHCP and TSP are rather similar, it was considered that the
special cases of TSP might translate into special cases of DHCP as well,
but the special cases of TSP were much too customised (Tsitsiklis,
1992).
However, I later felt that TSP itself is an application of DHCP, so
although it isn't a special case, I wanted it to be included here as a
special mention.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\pagebreak
\section{Conclusion}\label{conclusion}
\subsection{Restating principal
findings:}\label{restating-principle-findings}
The resulting Big O Notations for the written pseudocode: Exhaustive,
Greedy and Meta Heuristic are as follows: O(n\^{}2), O(n\^{}2) and O(n)
respectively.
From the Big O notations of the algorithms it is evident that, on this
measure of complexity, the greedy algorithm performed just as well as
the exhaustive algorithm.
With an increasing amount of data, the time taken grows proportionally
to the square of the input size for both the greedy and the exhaustive
algorithm, whereas the meta-heuristic will take almost the same time
because of its linear complexity.
\subsection{Visual Aid to better understand the resultant Big
O}\label{visual-aid-to-better-understand-the-resultant-big-o}
\subsubsection{Code to mimic the nature of O(n) and
O(n\^{}2)}\label{code-to-mimic-the-nature-of-on-and-on2}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}5}]:} \PY{k+kn}{import} \PY{n+nn}{matplotlib}\PY{n+nn}{.}\PY{n+nn}{pyplot} \PY{k}{as} \PY{n+nn}{plt}
\PY{c+c1}{\PYZsh{}Initialise lists}
\PY{n}{X\PYZus{}Coordinates} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{n}{Exh\PYZus{}Grdy} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{n}{Grsp} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{c+c1}{\PYZsh{}Generate X coordinates from 0 to 40 with increments of 2}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n+nb}{range} \PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,}\PY{l+m+mi}{40}\PY{p}{,}\PY{l+m+mi}{2}\PY{p}{)}\PY{p}{:}
\PY{n}{X\PYZus{}Coordinates}\PY{o}{.}\PY{n}{append}\PY{p}{(}\PY{n}{i}\PY{p}{)}
\PY{c+c1}{\PYZsh{}Generate square of X\PYZus{}coordinates for O(n\PYZca{}2) and a slight increase for O(n)}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n}{X\PYZus{}Coordinates}\PY{p}{:}
\PY{n}{squared} \PY{o}{=} \PY{n}{i}\PY{o}{*}\PY{n}{i}
\PY{n}{Exh\PYZus{}Grdy}\PY{o}{.}\PY{n}{append}\PY{p}{(}\PY{n}{squared}\PY{p}{)}
\PY{n}{Grsp}\PY{o}{.}\PY{n}{append}\PY{p}{(}\PY{n}{i}\PY{o}{*}\PY{l+m+mf}{1.0002}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{X\PYZus{}Coordinates}\PY{p}{,}\PY{n}{Exh\PYZus{}Grdy}\PY{p}{,} \PY{n}{color}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{orange}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{linewidth}\PY{o}{=}\PY{l+m+mi}{3}\PY{p}{,}
\PY{n}{label} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{O(N\PYZca{}2) \PYZhy{} Exhaustive and Greedy}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{X\PYZus{}Coordinates}\PY{p}{,}\PY{n}{Grsp}\PY{p}{,} \PY{n}{color}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{green}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{linewidth}\PY{o}{=}\PY{l+m+mi}{3}\PY{p}{,}
\PY{n}{label} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{O(N) \PYZhy{} GA \PYZam{} GRASP \PYZhy{} (Meta)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{legend}\PY{p}{(}\PY{n}{loc}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{best}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}5}]:} <matplotlib.legend.Legend at 0x22787bb5160>
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_30_1.png}
\end{center}
{ \hspace*{\fill} \\}
As seen in the graph above, since O(n) \textless{} O(n\^{}2), the
meta-heuristic Genetic Algorithm (GA) and GRASP methods win.
The aim of having additional methodologies, as opposed to an exact
method alone, is to introduce randomness into the computation (Avigad,
2013). In real-world situations, having to go through a node twice might
actually be less expensive than the restriction of not passing through a
given node twice.
The exhaustive search is an exact method; the rest are approximations
and heuristics with their own computational abilities, and both kinds
are crucial to the field of mathematics.
Although the three algorithms differ in nature and aren't all exact
methods (only the exhaustive search is), they can't be fairly compared
on their determination abilities, as they don't work in the same way or
environment. But in terms of their individual computation:
\begin{itemize}
\tightlist
\item
The time taken increases in direct proportion to the square of the
number of nodes and the edge creation probability.
\item
This complexity was observed for both the exhaustive and the greedy
algorithm.
\item
The DHCP is best solved for small values of n by both the exhaustive and
the greedy algorithm (given their worse Big O values).
\item
The Big O of the GRASP method is, surprisingly, O(n).
\end{itemize}
\pagebreak
\section{Reflection}\label{reflection}
Despite the belief that an early start and an organised approach get the
job done, in my case, having started early on considering the problem
and researching it, I began to relax on seeing that my course mates
hadn't even started yet. That was an extremely wrong thing to do.
Instead of using the head start to my advantage, I let comparison and my
focus on the dissertation consume me until it was very late. However,
given my passion for mathematics and Computer Science, I took up the
challenge of doing all that I could in a short space of time.
I planned that if I couldn't get it all done, I could still get
something done, which was better than nothing. I assigned tasks and
intended to do them all before we broke off for Easter. That was nearly
impossible, as the relevant lecture slides for the assignment were
scheduled in the last few weeks before the spring break. Once we stepped
into Easter, dissertation presentations and write-ups took over, and
from time to time we would meet to get code done, mostly for the
meta-heuristics and the greedy method.
Although my management in terms of assigning tasks was initially good, I
fell short when it came to time management and organisation. I kept up
my demeanour and, instead of panicking, tried to deliver as much as I
could.
\paragraph{\texorpdfstring{Problems faced:
}{Problems faced: }}\label{problems-faced}
\begin{itemize}
\tightlist
\item
Understanding the problem (DHCP)
\item
Time management
\item
Coding and understanding
\end{itemize}
\paragraph{\texorpdfstring{Skills and lessons Learnt
}{Skills and lessons Learnt }}\label{skills-and-lessons-learnt}
\begin{itemize}
\tightlist
\item
Transferable skills that are second to none in terms of learning curve.
As a third-year student I hadn't, up to this stage, been introduced to
Jupyter Notebook.
\item
Jupyter Notebook. It is a very effective tool that combines code with
writing, and my introduction to it helped me incorporate it into my
dissertation.
\item
Problem solving: mathematical language and literature inculcate a deep
mental understanding and breakdown of a given problem.
\item
Time management, facing problems and the ability to function under pressure
\item
Significance of organisation and discipline
\item
Starting early
\end{itemize}
\paragraph{\texorpdfstring{What would I do differently?
}{What would I do differently? }}\label{what-would-i-do-differently}
\begin{itemize}
\tightlist
\item
Start early
\item
Plan
\item
Communicate with lecturer
\item
Make a start
\end{itemize}
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\pagebreak
\section{References}\label{refrences}
\begin{itemize}
\item
Abu Naser, S. (1999). Big O Notation for Measuring Expert Systems
complexity. {[}online{]} Available at:
https://philpapers.org/rec/NASBON {[}Accessed 22 Apr. 2018{]}.
\item
Arora, S. and Barak, B. (2016) Computational Complexity. New York
(NY): Cambridge University Press
\item
Avigad, J. (2013). Uniform distribution and algorithmic randomness.
The Journal of Symbolic Logic, 78(01), pp.334-344.
\item
Garey, M. and Johnson, D. (1982) Vychislitel'nye Mashiny I
Trudnoreshaemye Zadachi. Moskva: Izd-vo "Mir"
\item
Garey, M. and Johnson, D. (1979) Computers and Intractability: A Guide
to the Theory of NP-Completeness. Freeman.
\item
Genetic algorithms in search, optimization, and machine learning.
(1989). Choice Reviews Online, 27(02), 27-0936.
\item
Glover, F. and Kochenberger, G. (2003). Handbook of metaheuristics.
Boston: Kluwer, pp.219-222.
\item
Hoos, H. and Stützle, T. (2005) Stochastic Local Search: Foundations
and Applications. Morgan Kaufmann.
\item
Iima, H. and Yakawa, T. (n.d.). A new design of genetic algorithm for
bin packing. The 2003 Congress on Evolutionary Computation, 2003. CEC
'03..
\item
Joel Zwickl, D. (2006). Genetic algorithm approaches for the
phylogenetic analysis of large biological sequence datasets. Ph. D.
The University of Texas at Austin.
\item
Koutsoupias, E., \& Papadimitriou, C. H. (1992). On the greedy
algorithm for satisfiability. Information Processing Letters, 43(1),
53-55.
\item
Plesník, J. (1979) "The NP-Completeness Of The Hamiltonian Cycle
Problem In Planar Digraphs With Degree Bound Two". Information
Processing Letters 8 (4), 199-201
\item
RAO, W., JIN, C. and LU, L. (2014) "An Improved Greedy Algorithm With
Information Of Edges' Location For Solving The Euclidean Traveling
Salesman Problem". Chinese Journal Of Computers 36 (4), 836-850
\item
Thompson, G. and Singhal, S. (1985) "A Successful Algorithm For The
Undirected Hamiltonian Path Problem". Discrete Applied Mathematics 10
(2), 179-195
\item
Tsitsiklis, J. (1992). Special Cases of Traveling Salesman and
Repairman Problems. Post Graduate. Massachusetts Institute of
Technology Cambridge.
\item
Weisstein, Eric W. (1995) "Hamiltonian Cycle." From MathWorld-\/-A
Wolfram Web Resource:
http://mathworld.wolfram.com/HamiltonianCycle.html
\item
Rudoy, M. (2017). Hamiltonian Cycle and Related Problems:
Vertex-Breaking, Grid Graphs, and Rubik's Cubes. Post Graduate.
MASSACHUSETTS INSTITUTE OF TECHNOLOGY.
\end{itemize}
% Add a bibliography block to the postdoc
\end{document}