diff --git a/.eslintrc.js b/.eslintrc.js index 9b62987d..0c7f7af4 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -12,7 +12,7 @@ module.exports = { // https://eslint.org/docs/rules/no-plusplus // allows unary operators ++ and -- in the afterthought (final expression) of a for loop. - "allowForLoopAfterthoughts": true, + "no-plusplus": [2, { "allowForLoopAfterthoughts": true }], // Allow for..of "no-restricted-syntax": [0, "ForOfStatement"], diff --git a/.gitignore b/.gitignore index 63ac9327..67c6e1b2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ node_modules -.idea/ -coverage/ \ No newline at end of file +.idea +coverage +dist +_book +.DS_Store diff --git a/.node-version b/.node-version index f599e28b..c4d592e1 100644 --- a/.node-version +++ b/.node-version @@ -1 +1 @@ -10 +10.12.0 diff --git a/README.md b/README.md index 1514a8a9..fcaf0454 100644 --- a/README.md +++ b/README.md @@ -27,50 +27,3 @@ We are covering the following data structures. 1. **Hash Maps**: implements map using a hash function. [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/hashmap.js) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#HashMaps) 2. **Tree Maps**: implement map using a self-balanced BST. WIP 3. **Graphs**: data *nodes* that can have a connection or *edge* to zero or more adjacent nodes. Unlike trees, nodes can have multiple parents, loops. [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/graphs/graph.js) | [Details](https://adrianmejia.com/blog/2018/05/14/data-structures-for-beginners-graphs-time-complexity-tutorial/) - -## Algorithms -1. Searching algorithms (WIP) -2. 
Sorting algorithms (WIP) - -# Notes -Some notes while working on this project - -## Tests -Running one test without changing file -```sh -jest -t '#findNodeAndParent' -``` - -Running one test changing code -```js -it.only('should return with an element and its parent', () => { -// ... -}); -``` - -## English Words - -Getting some (200k+) English words are useful for testing and benchmarking. - -```sh -cat /usr/share/dict/words > benchmarks/dict.txt -``` - -## ESLint - - Disabling ESLints -```js -somthing(t) => 1 // eslint-disable-line no-unused-vars -// eslint-disable-next-line no-use-before-define -const thing = new Thing(); - -/*eslint-disable */ -//suppress all warnings between comments -alert('foo'); -/*eslint-enable */ - -/* eslint-disable no-alert, no-console */ -alert('foo'); -console.log('bar'); -/* eslint-enable no-alert */ -``` diff --git a/benchmarks/hashmap.perf.js b/benchmarks/hashmap.perf.js index 242d6ebb..c3277f65 100644 --- a/benchmarks/hashmap.perf.js +++ b/benchmarks/hashmap.perf.js @@ -113,7 +113,7 @@ function useBenchmark() { const HashMapSmallBucket = require('../src/data-structures/hash-maps/hash-map-2'); const HashMap3 = require('../src/data-structures/hash-maps/hash-map-3'); const HashMap4 = require('../src/data-structures/hash-maps/hash-map-4'); - const HashMap = require('../src/data-structures/hash-maps/hashmap'); + const HashMap = require('../src/data-structures/hash-maps/hash-map'); // // Map (built-in) x 2,257 ops/sec ±2.42% (75 runs sampled) // suite.add('Map (built-in)', function() { diff --git a/book/Gemfile b/book/Gemfile new file mode 100644 index 00000000..2426e13a --- /dev/null +++ b/book/Gemfile @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +source "https://rubygems.org" + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# https://github.com/akosma/eBook-Template +# brew install plantuml glib gdk-pixbuf cairo pango cmake libxml2 pkg-config +gem 'asciidoctor' + +gem 'pygments.rb' +gem 'kindlegen' 
+gem 'asciimath' +gem 'asciidoctor' +gem 'asciidoctor-diagram' +gem 'epubcheck' + +# brew link gettext --force +# brew install pkg-config +# https://github.com/gjtorikian/mathematical - run script/bootstrap; MATHEMATICAL_USE_SYSTEM_LASEM=1 gem install mathematical +gem 'asciidoctor-mathematical' + +# --pre +gem 'asciidoctor-pdf' +gem 'asciidoctor-epub3' +gem 'asciidoctor-html5s' # gem install --pre asciidoctor-html5s + +# cd ~/Library/Fonts; \ +# curl -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/cmex10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/cmmi10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/cmr10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/cmsy10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/esint10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/eufm10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/msam10.ttf \ +# -LO http://mirrors.ctan.org/fonts/cm/ps-type1/bakoma/ttf/msbm10.ttf + +gem "prawn-gmagick", "~> 0.0.8" # brew install GraphicsMagick diff --git a/book/Gemfile.lock b/book/Gemfile.lock new file mode 100644 index 00000000..9e154203 --- /dev/null +++ b/book/Gemfile.lock @@ -0,0 +1,106 @@ +GEM + remote: https://rubygems.org/ + specs: + Ascii85 (1.0.3) + addressable (2.5.2) + public_suffix (>= 2.0.2, < 4.0) + afm (0.2.2) + asciidoctor (1.5.7.1) + asciidoctor-diagram (1.5.10) + asciidoctor (~> 1.5.0) + asciidoctor-epub3 (1.5.0.alpha.8) + asciidoctor (~> 1.5.0) + gepub (~> 0.6.9.2) + thread_safe (~> 0.3.6) + asciidoctor-html5s (0.1.0.beta.9) + asciidoctor (~> 1.5.5) + thread_safe (~> 0.3.4) + asciidoctor-mathematical (0.2.2) + asciidoctor (~> 1.5, >= 1.5.0) + mathematical (~> 1.5, >= 1.5.8) + ruby-enum (~> 0.4) + asciidoctor-pdf (1.5.0.alpha.16) + asciidoctor (>= 1.5.0) + prawn (>= 1.3.0, < 2.3.0) + prawn-icon (= 1.3.0) + prawn-svg (>= 0.21.0, < 0.28.0) + prawn-table (= 0.2.2) + prawn-templates (>= 0.0.3, <= 0.1.1) + 
safe_yaml (~> 1.0.4) + thread_safe (~> 0.3.6) + treetop (= 1.5.3) + asciimath (1.0.6) + concurrent-ruby (1.0.5) + css_parser (1.6.0) + addressable + epubcheck (3.0.1) + gepub (0.6.9.2) + nokogiri (~> 1.6.1) + rubyzip (>= 1.1.1) + hashery (2.1.2) + i18n (1.1.0) + concurrent-ruby (~> 1.0) + kindlegen (3.0.3) + rake + rubyzip + mathematical (1.6.12) + ruby-enum (~> 0.4) + mini_portile2 (2.1.0) + multi_json (1.13.1) + nokogiri (1.6.8.1) + mini_portile2 (~> 2.1.0) + pdf-core (0.7.0) + pdf-reader (2.1.0) + Ascii85 (~> 1.0.0) + afm (~> 0.2.1) + hashery (~> 2.0) + ruby-rc4 + ttfunk + polyglot (0.3.5) + prawn (2.2.2) + pdf-core (~> 0.7.0) + ttfunk (~> 1.5) + prawn-gmagick (0.0.8) + prawn (>= 0.15, < 3.0) + prawn-icon (1.3.0) + prawn (>= 1.1.0, < 3.0.0) + prawn-svg (0.27.1) + css_parser (~> 1.3) + prawn (>= 0.11.1, < 3) + prawn-table (0.2.2) + prawn (>= 1.3.0, < 3.0.0) + prawn-templates (0.1.1) + pdf-reader (~> 2.0) + prawn (~> 2.2) + public_suffix (3.0.3) + pygments.rb (1.2.1) + multi_json (>= 1.0.0) + rake (12.3.0) + ruby-enum (0.7.2) + i18n + ruby-rc4 (0.1.5) + rubyzip (1.2.2) + safe_yaml (1.0.4) + thread_safe (0.3.6) + treetop (1.5.3) + polyglot (~> 0.3) + ttfunk (1.5.1) + +PLATFORMS + ruby + +DEPENDENCIES + asciidoctor + asciidoctor-diagram + asciidoctor-epub3 + asciidoctor-html5s + asciidoctor-mathematical + asciidoctor-pdf + asciimath + epubcheck + kindlegen + prawn-gmagick (~> 0.0.8) + pygments.rb + +BUNDLED WITH + 1.16.6 diff --git a/book/Makefile b/book/Makefile new file mode 100644 index 00000000..d59c3f74 --- /dev/null +++ b/book/Makefile @@ -0,0 +1,73 @@ +DIR = dist + +# INPUT = sample +# OUTPUT = sample + +INPUT = book +OUTPUT = book +# OUTPUT = book-$(date +%Y-%m-%d) + +DIAGRAM = --require=asciidoctor-diagram +MATH = --require=asciidoctor-mathematical +# HTML5S = --require=asciidoctor-html5s --backend=html5 +REQUIRES = ${DIAGRAM} ${MATH} +OUTPUT_FOLDER = --destination-dir=${DIR} +MANPAGE = --backend=manpage +HTML = --backend=html5 -a max-width=75em +RAW_HTML = 
--backend=html5 -a stylesheet! -a source-highlighter! +PDF = --backend=pdf --require=asciidoctor-pdf -a pdf-fontsdir=fonts +EPUB = --backend=epub3 --require=asciidoctor-epub3 +KINDLE = ${EPUB} -a ebook-format=kf8 + +all: html +# all: html raw_html pdf +# all: clean manpage html raw_html pdf compressed_pdf epub kindle + +manpage: + asciidoctor ${MANPAGE} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.1 ${INPUT}.adoc; \ + +html: + asciidoctor ${HTML} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.html ${INPUT}.adoc; \ + +raw_html: + asciidoctor ${RAW_HTML} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=raw_${OUTPUT}.html ${INPUT}.adoc; \ + +# https://github.com/jirutka/asciidoctor-html5s +# html5s: +# asciidoctor ${HTML5S} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.html ${INPUT}.adoc; \ + +# https://github.com/asciidoctor/asciidoctor-pdf/blob/master/docs/theming-guide.adoc +# check _resources/pdfstyles +pdf: + asciidoctor ${PDF} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.pdf ${INPUT}.adoc; \ + +sample-pdf: + asciidoctor ${PDF} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.pdf sample.adoc; \ + +# Courtesy of +# http://www.smartjava.org/content/compress-pdf-mac-using-command-line-free +# Requires `brew install ghostscript` +compressed_pdf: pdf + gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -dNOPAUSE -dQUIET -dBATCH -sOutputFile=${DIR}/compressed_book.pdf ${DIR}/book.pdf; \ + +# check _resources/epubstyles +epub: + asciidoctor ${EPUB} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.epub ${INPUT}.adoc; \ + +kindle: + asciidoctor ${KINDLE} ${REQUIRES} ${OUTPUT_FOLDER} --out-file=${OUTPUT}.mobi ${INPUT}.adoc; \ + if [ -e "${DIR}/${OUTPUT}-kf8.epub" ]; \ + then rm ${DIR}/${OUTPUT}-kf8.epub; \ + fi; \ + +stats: + wc -w chapters/*.adoc | sort -n \ + +clean: + if [ -d ".asciidoctor" ]; \ + then rm -r .asciidoctor; \ + fi; \ + if [ -d "${DIR}" ]; \ + then rm -r ${DIR}; \ + fi; \ + diff --git a/book/_conf/umlconfig.txt b/book/_conf/umlconfig.txt new file mode 
100644 index 00000000..76f6343c --- /dev/null +++ b/book/_conf/umlconfig.txt @@ -0,0 +1,29 @@ +hide empty members +skinparam defaultFontName Helvetica +skinparam backgroundColor transparent +skinparam monochrome true +skinparam class { + BackgroundColor White + ArrowColor Black + BorderColor Black +} +skinparam legend { + BackgroundColor LightGray + ArrowColor Black + BorderColor Black +} +skinparam note { + BackgroundColor LightGray + ArrowColor Black + BorderColor Black +} +skinparam stereotype { + CBackgroundColor LightGray +} +skinparam state { + BackgroundColor White + ArrowColor Black + BorderColor Black +} +skinparam shadowing false + diff --git a/book/_conf/variables.adoc b/book/_conf/variables.adoc new file mode 100644 index 00000000..25533321 --- /dev/null +++ b/book/_conf/variables.adoc @@ -0,0 +1,71 @@ +:author: Adrian Mejia +:email: me@adrianmejia.com + +:revdate: {docdate} +:revnumber: 1.0 +:revremark: First Edition + +:doctitle: Data Structures & Algorithms: Illustrated Guide with JavaScript Examples +:description: A guide data structures and algorithms to survive in this programming world +:keywords: algorithms, data-structures, coding-interviews, javascript, computer science + +// captions +:figure-caption: +//:important-caption: pass:[&#f0a2;] // din't work https://github.com/asciidoctor/asciidoctor/issues/2419 + +:copyright: CC-BY-SA 3.0 +:doctype: book +:producer: {author} +:creator: {author} +:front-cover-image: images/cover.png +:title-logo-image: image:logo.png[Logo,100,100] +:lang: en +:toc: left +:toclevels: 3 +:sectnumlevels: 3 +:numbered: +:icons: font +:icon-set: fi +:imagesdir: {docdir}/images +:source-language: javascript + +// The valid options are coderay, highlightjs, prettify, and pygments. 
+:source-highlighter: pygments +:pygments-style: xcode + +:codedir: ../../src +:datadir: {docdir}/data +:experimental: +:stem: +:hide-uri-scheme: +:chapter-label: Chapter +:appendix-caption: Appendix +:plantuml-config: {docdir}/_conf/umlconfig.txt + +ifdef::backend-html5[] +:data-uri: +:mathematical-format: svg +:mathematical-ppi: 300 +endif::[] + +ifdef::backend-pdf[] +:media: prepress +:pdf-stylesdir: _resources/pdfstyles +:pdf-style: default +:mathematical-format: png +endif::[] + +ifdef::backend-epub3[] +:imagesdir: images +:epub3-stylesdir: _resources/epubstyles +:ebook-validate: +:mathematical-format: svg +:mathematical-ppi: 300 +endif::[] + +//// +Do not use ":pygments-css: class" in the block above, as this blocks +the generation and/or display of highlighted code in EPUB output. +Also, the ":pygments-style: xcode" is required, since by default the +EPUB generation uses the "bw" style (i.e., black and white.) +//// diff --git a/book/_resources/epubstyles/epub3-css3-only.css b/book/_resources/epubstyles/epub3-css3-only.css new file mode 100644 index 00000000..90178c2d --- /dev/null +++ b/book/_resources/epubstyles/epub3-css3-only.css @@ -0,0 +1,175 @@ +/* Gitden & Namo default to 16px font-size; bump it to 20px (125%) */ +body.gitden-reader, +body.namo-epub-library { + font-size: 125%; +} + +/* Gitden doesn't give us much margin, so let's match Kindle */ +body.gitden-reader { + margin: 0 25pt; +} + +/* Namo has the same margin problem, except setting side margins doesn't work */ +/*body.namo-epub-library > section.chapter { + margin: 0 25pt; +}*/ + +/* Use tighter margins and smaller font (18px) on phones (Nexus 4 and smaller) */ +@media only screen and (max-device-width: 768px) and (max-device-height: 1280px), +only screen and (max-device-width: 1280px) and (max-device-height: 768px) { + body.gitden-reader, + body.namo-epub-library { + font-size: 112.5%; + } + + body.gitden-reader { + margin: 0 5pt; + } + + /*body.namo-epub-library > section.chapter { + 
margin: 0 5pt; + }*/ +} + +body h1, body h2, body h3:not(.list-heading), body h4, body h5, body h6, +h1 :not(code), h2 :not(code), h3:not(.list-heading) :not(code), h4 :not(code), h5 :not(code), h6 :not(code) { + /* !important required to override custom font setting in Kindle / Gitden / Namo */ + /* Gitden requires the extra weight of a parent selector; it also makes headings bold when custom font is specified */ + /* Kindle and Gitden require the override on heading child elements */ + font-family: "M+ 1p", sans-serif !important; +} + +/* QUESTION what about nested elements inside code? */ +body code, body kbd, body pre, pre :not(code) { + /* !important required to override custom font setting in Kindle / Gitden / Namo */ + /* Gitden requires the extra weight of a parent selector */ + /* Kindle and Gitden require the override on pre child elements */ + font-family: "M+ 1mn", monospace !important; +} + +@media amzn-kf8 { + /* Kindle does its own margin management, so don't use an explicit margin */ + /*body { + margin: 0 !important; + }*/ + + /* text-rendering is the only way to enable kerning in Kindle (and Calibre, though it seems to kern automatically) */ + /* personally, I think Kindle overdoes kerning, but we're running with it for now */ + /* text-rendering: optimizeLegibility kills certain Kindle eInk devices */ + /*h1, h2, h3, h4, h5, h6, + body p, li, dd, blockquote > footer, + th, td, figcaption, caption { + text-rendering: optimizeLegibility; + }*/ + + /* hack line height of subtitle using floats on Kindle */ + h1.chapter-title .subtitle { + margin-top: -0.2em; + margin-bottom: 0.3em; /* compensate for reduced line height */ + } + + /* NOTE using b instead of span since Firefox ePubReader applies immutable styles to span */ + h1.chapter-title .subtitle > b { + float: left; + display: inline-block; + margin-bottom: -0.3em; /* reduce the line height */ + padding-right: 0.2em; /* spacing between words */ + } + + h1.chapter-title .subtitle > b:last-child { 
+ padding-right: 0; + } + + h1.chapter-title .subtitle::after { + display: table; + content: ' '; + clear: both; + } +} + +.chapter-header p.byline { + height: auto; /* Aldiko requires this value to be 0; reset it for all others */ +} + +/* Font-based icons */ +.icon { + display: inline-block; + /* !important required to override custom font setting in Kindle (since .icon can appear inside a span) */ + font-family: "FontAwesome" !important; + font-style: normal !important; + font-weight: normal !important; + line-height: 1; +} + +.icon-1_5x { + padding: 0 0.25em; + -webkit-transform: scale(1.5, 1.5); + transform: scale(1.5, 1.5); +} + +.icon-2x { + padding: 0 0.5em; + -webkit-transform: scale(2, 2); + transform: scale(2, 2); +} + +.icon-small { + font-size: 0.85em; + vertical-align: 0.075em; +} + +.icon-1_5em { + font-size: 1.5em; +} + +.icon-2em { + font-size: 2em; +} + +.icon-3em { + font-size: 3em; +} + +.icon-4em { + font-size: 4em; +} + +.icon-rotate-90 { + -webkit-transform: rotate(90deg); + transform: rotate(90deg); +} + +.icon-rotate-90i { + -webkit-transform: scale(-1, 1) rotate(90deg); + transform: scale(-1, 1) rotate(90deg); +} + +.icon-rotate-180 { + -webkit-transform: rotate(180deg); + transform: rotate(180deg); +} + +.icon-rotate-180i { + -webkit-transform: scale(-1, 1) rotate(180deg); + transform: scale(-1, 1) rotate(180deg); +} + +.icon-rotate-270 { + -webkit-transform: rotate(270deg); + transform: rotate(270deg); +} + +.icon-rotate-270i { + -webkit-transform: scale(-1, 1) rotate(270deg); + transform: scale(-1, 1) rotate(270deg); +} + +.icon-flip-h { + -webkit-transform: scale(-1, 1); + transform: scale(-1, 1); +} + +.icon-flip-v { + -webkit-transform: scale(1, -1); + transform: scale(1, -1); +} diff --git a/book/_resources/epubstyles/epub3.css b/book/_resources/epubstyles/epub3.css new file mode 100644 index 00000000..193512d3 --- /dev/null +++ b/book/_resources/epubstyles/epub3.css @@ -0,0 +1,1286 @@ +@import url("epub3-fonts.css"); + +*, 
*:before, *:after { + box-sizing: border-box; +} + +/* educate older readers about tags introduced in HTML5 */ +article, aside, details, figcaption, figure, +footer, header, nav, section, summary { + display: block; +} + +/* html and body declarations must be separate entries for some readers */ +html { + margin: 0 !important; + padding: 0 !important; + /* set the em base (and relative em anchor) by setting the font-size on html */ + /* TODO set font-size > 100% except for Kindle */ + font-size: 100%; + -webkit-text-size-adjust: 100%; +} + +/* don't set margin on body as that's how many readers frame reading area */ +/* can't set the font-family on body in Kindle */ +body { + padding: 0 !important; + /* add margin to ~ match Kindle's narrow setting */ + /* don't use !important on margin as it breaks calibre */ + margin: 0; + font-size: 100%; + /* NOTE putting optimizeLegibility on the body slows down rendering considerably */ + text-rendering: optimizeSpeed; + /* -webkit-font-smoothing has no noticable effect and is controversial, so leaving it off */ +} + +/* disables night mode in Aldiko, hoo-ha! 
*/ +html body { + background-color: #FFFFFF; +} + +/* sets minimum margin permitted */ +/* @page not supported by Kindle or GitDen */ +@page { + /* push the top & bottom margins down in Aldiko to emulate Kindle (Kindle uses ~ 10% of screen by default )*/ + margin: 1cm; +} + +div, p, blockquote, pre, figure, figcaption, +h1, h2, h3, h4, h5, h6, +dl, dt, dd, ol, ul, li, +table, caption, thead, tfoot, tbody, tr, th, td { + margin: 0; + padding: 0; + font-size: 100%; + vertical-align: baseline; +} + +a, abbr, address, cite, code, em, kbd, span, strong { + font-size: 100%; +} + +a { + background: transparent; +} + +a:active, a:hover { + outline: 0; +} + +abbr[title] { + border-bottom: 1px dotted; +} + +address { + white-space: pre-line; +} + +b, strong { + font-weight: bold; +} + +b.button { + font-weight: normal; + text-shadow: 1px 0 0 #B3B3B1; + color: #191918; + white-space: nowrap; +} + +b.button .label { + padding: 0 0.25em; +} + +kbd { + display: inline-block; + font-size: 0.8em; + line-height: 1; + background-color: #F7F7F7; /* #FAFAFA */ + border: 1px solid #BEBEBC; + -webkit-border-radius: 3px; + border-radius: 3px; + -webkit-box-shadow: 1px 1px 0 rgba(102, 102, 101, 0.25), 0 0 0 1px white inset; + box-shadow: 1px 1px 0 rgba(102, 102, 101, 0.25), 0 0 0 1px white inset; + margin: 0 0.15em; + padding: 0.25em 0.4em 0.2em 0.4em; + vertical-align: 0.15em; +} + +.keyseq { + white-space: nowrap; +} + +.menuseq .caret { + /* + font-family: "FontAwesome"; + font-size: 0.7em; + line-height: 1; + font-weight: bold; + vertical-align: 0.08rem; + */ + + font-weight: bold; +} + +.menuseq span[class~="caret"] { + visibility: hidden; +} + +.menuseq .caret::before { + font-family: "FontAwesome"; + content: "\f054"; + font-size: 0.6em; + vertical-align: 0.15em; + visibility: visible; + display: inline-block; + width: 0; + padding-right: 0.15em; +} + +img { + border: 0; +} + +mark { + background-color: #FFC14F; + color: #191918; +} + +small { + font-size: 80%; +} + +sub, sup { + 
font-size: 0.75em; + line-height: 1; +} + +sup { + /* position: relative not permitted on Kindle */ + /* + position: relative; + top: -0.5em; + */ + /* alternate approach #1 */ + /* + display: inline-block; + vertical-align: text-top; + padding-top: .25em; + */ + /* alternate approach #2 */ + line-height: 1; + vertical-align: text-top; +} + +sub { + /* position: relative not permitted on Kindle */ + /* + position: relative; + bottom: -0.25em; + */ + /* alternate approach #1 */ + /* + display: inline-block; + vertical-align: text-bottom; + padding-bottom: .5em; + */ + /* alternate approach #2 */ + line-height: 1; + vertical-align: text-bottom; +} + +table { + border-collapse: collapse; + border-spacing: 0; +} + +td, th { + padding: 0; +} + +body a:link { + color: #333332; + /* hack for font color in iBooks and Gitden (though Gitden would accept color !important too) */ + -webkit-text-fill-color: #333332; + /* Kindle requires the !important on text-decoration */ + /* In night mode, the only indicator of a link is the underline, so we need it or a background image */ + text-decoration: none !important; + border-bottom: 1px dashed #666665; + /* allow URLs to break anywhere if they don't fit on a line; but how do we know it's a URL? 
*/ + /* + word-break: break-all; + */ +} + +body:first-of-type a:link { + border-bottom: none; + background-repeat: no-repeat; + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 0%, #666665 5%, #666665 95%, rgba(255,255,255,0) 100%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 0%, #666665 5%, #666665 95%, rgba(255,255,255,0) 100%); + background-size: 100% 1px; + background-position: 0 1.2em; +} + +body a:visited { + color: #666665; + /* hack for font color in iBooks */ + -webkit-text-fill-color: #666665; +} + +code.literal { + /* don't let it affect line spacing */ + /* disable since M+ 1mn won't interrupt line height */ + /*line-height: 1;*/ + /* + white-space: nowrap; + */ + word-wrap: break-word; +} + +h1, h2, h3, h4, h5, h6 { + font-family: "M+ 1p", sans-serif; + font-weight: 400; + letter-spacing: -0.01em; + /* NOTE Kindle doesn't allow the line-height to be less than the font size (refer to heading font sizes) */ + line-height: 1.4; /* or 1.2125 */ + text-align: left; + + -webkit-hyphens: none; /* disable hyphenation where supported (e.g., iBooks) */ + word-wrap: break-word; /* break in middle of long word if no other break opportunities are available */ + + /* avoiding page breaks does not seem to work in Kindle */ + -webkit-column-break-inside: avoid; + page-break-inside: avoid; + -webkit-column-break-after: avoid; + page-break-after: avoid; +} + +/* Aldiko requires a higher precedence rule to set margin and text-indent, hence the body prefix */ +/* We'll just use the stronger rule for all paragraph-related stuff to be sure */ +body p { + margin: 1em 0 0 0; + text-align: justify; + text-indent: 0; + + widows: 2; + orphans: 2; +} + +body p, +ul, ol, li, dl, dt, dd, footer, +div.verse .attribution, table.table th, table.table td, +figcaption, caption { + color: #333332; + /* NOTE iBooks will forcefully override font-family of text inside div, p and span elements when font other than Original is selected */ + /* NOTE 
iBooks honors Original font for prose text if declared in display-options.xml */ + font-family: "Noto Serif", serif; +} + +body p, li, dt, dd, footer { + line-height: 1.6; +} + +code, kbd, pre { + color: #191918; + font-family: "M+ 1mn", monospace; + -webkit-hyphens: none; /* disable hyphenation where supported (e.g., iBooks) */ +} + +/* QUESTION should we kern preformatted text blocks? */ +h1, h2, h3, h4, h5, h6, +body p, li, dd, blockquote > footer, +th, td, figcaption, caption { + /* forward-compatible CSS to enable kerning (if we want ligatures, add "liga" and "dlig") */ + /* WebKits that don't recognize these properties don't kern well, hence why we don't simply enable kerning via text-rendering */ + -webkit-font-feature-settings: "kern"; + font-feature-settings: "kern"; + font-kerning: normal; + /* NOTE see Kindle hack in epub3-css3-only.css for additional kerning settings (disabled) */ +} + +p.last::after { + color: #57AD68; + display: inline-block; + font-family: "FontAwesome"; + font-size: 1em; + content: "\f121"; /* i.e., */ + margin-left: 0.25em; +} + +ul li, ol li { + /* minimum margin in case there is no paragraph content */ + margin-top: 0.4em; +} + +/* use paragraph-size gaps between list items */ +.complex > ul > li, +.complex > ol > li { + margin-top: 1em; +} + +/* squeeze content in complex lists */ +/* +li > figure, +li > p { + margin-top: 0.4em; +} +*/ + +dl { + margin-top: 0; + margin-bottom: 0; +} + +dt { + -webkit-column-break-inside: avoid; + page-break-inside: avoid; + -webkit-column-break-after: avoid; + page-break-after: avoid; +} + +dt > span.term { + font-style: italic; +} + +/* +dt > span.term > code.literal { + font-style: normal; +} +*/ + +dt { + margin-top: 0.75em; /* balances 0.25em to term */ +} + +dl dd { + /* minimum margin in case there is no paragraph content */ + margin-top: 0.25em; +} + +div.callout-list { + margin-top: 0.5em; +} + +div.callout-list ol { + font-size: 80%; + margin-left: 1.5em !important; + list-style-type: 
none; +} + +div.callout-list ol li { + text-align: left; +} + +i.conum { + color: #468C54; + font-family: "M+ 1mn", monospace; + font-style: normal; +} + +/* don't let conum affect line spacing; REVIEW may not need this! */ +/*pre i.conum { + line-height: 1; +}*/ + +div.callout-list li > i.conum { + float: left; + margin-left: -1.25em; + display: block; + width: 1.25em; +} + +div.itemized-list, div.ordered-list, div.description-list { + margin-top: 1em; + padding-bottom: 0.25em; /* REVIEW maybe, maybe not */ +} + +/* QUESTION should we add the class "list" so we can style these generically? */ +div.itemized-list div.itemized-list, +div.itemized-list div.ordered-list, +div.itemized-list div.description-list, +div.ordered-list div.itemized-list, +div.ordered-list div.ordered-list, +div.ordered-list div.description-list { + margin-top: 0; +} + +/*div.description-list div.itemized-list, +div.description-list div.ordered-list, +div.description-list div.description-list { +}*/ + +h3.list-heading { + font-size: 1em; + font-family: "Noto Serif", serif; + font-weight: bold; + line-height: 1.6; + margin-top: 1em; + margin-bottom: -0.25em; + letter-spacing: 0; +} + +div.stack li strong.subject, +div.stack-subject li strong.subject { + display: block; +} + +ul { + /* QUESTION do we need important here? 
*/ + margin-left: 1em !important; + list-style-type: square; +} + +ul ul { + list-style-type: circle; +} + +ul ul ul { + list-style-type: disc; +} + +/* disable list style type for CSS3-enabled clients */ +body:first-of-type ul, +body:first-of-type ul ul, +body:first-of-type ul ul ul { + list-style-type: none; +} + +ul > li::before { + float: left; + margin-left: -1em; + margin-top: -0.05em; + padding-left: 0.25em; + /* guarantee it's out of the flow */ + width: 0; + display: block; +} + +ul > li::before { + content: "\25AA"; /* small black square */ + color: #666665; +} + +ul ul > li::before { + content: "\25E6"; /* small white circle */ + color: #57AD68; +} + +ul ul ul > li::before { + content: "\2022"; /* small black circle */ + color: #666665; +} + +ul ul ul ul > li::before { + content: "\25AB"; /* small white square */ + color: #57AD68; +} + +ol { + margin-left: 1.75em !important; +} + +ol { + list-style-type: decimal; +} + +ol ol { + list-style-type: lower-alpha; +} + +ol ol ol { + list-style-type: lower-roman; +} + +/* REVIEW */ +dd { + margin-left: 1.5rem !important; +} + +/* Kindle does not justify list-item element, must wrap in nested block element */ +li > span.principal, dd > span.principal { + display: block; + text-align: justify; +} + +ol.brief > li > span.principal, +ul.brief > li > span.principal { + text-align: left; +} + +/* REVIEW still considering keeping this one */ +/* disable justify within a link */ +/* +li strong.subject a:link { + white-space: pre-wrap; + word-spacing: 0.1em; +}*/ + +/* +.bibliography ul li, +.references ul li { + text-align: left; +} +*/ + +ul.bibliography > li > span.principal, +ul.references > li > span.principal { + text-align: left; +} + +/* sized based on the major third modular scale (4:5, 16px, 24px) */ +h1, h2 { + color: #333332; + font-size: 1.5em; + word-spacing: -0.075em; + margin-top: 1em; /* 1.5rem */ + margin-bottom: -0.3333em; /* -0.5rem, 0.5rem to content */ +} + +h3 { + color: #333332; + font-size: 
1.25em; + margin-top: 0.84em; /* 1.05rem */ + margin-bottom: -0.5em; /* -0.625rem, 0.375rem to content */ +} + +h4 { + color: #4F4F4C; + font-weight: 200; + + font-size: 1.1em; + margin-top: 1em; /* 1.1rem */ + margin-bottom: -0.818em; /* -0.9rem, 0.1rem to content */ + + font-size: 1.2em; + margin-top: .917em; /* 1.1rem */ + margin-top: 0.875em; /* 1.05rem */ + /*margin-bottom: -0.75em;*/ /* -0.9rem, 0.1rem to content */ + margin-bottom: -0.625em; /* -0.75rem, 0.25rem to content */ +} + +h5 { + color: #666665; + /* + font-size: 1em; + text-transform: uppercase; + margin-top: 1em; + margin-bottom: -1em; + */ + + font-size: 0.9em; + font-weight: 700; + text-transform: uppercase; + margin-top: 1.11em; /* 1rem */ + margin-bottom: -0.972em; /* -0.875rem */ +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + color: inherit; +} + +h5 code { + text-transform: none; +} + +/* Kindle strips (or unwraps)
tags, so we use an inner div to style */ +.chapter-header { + background-color: #333332; + /* NOTE div must have at least 1px top padding for background to fill */ + padding: 0.75em 1.5em 0.25em 1.5em; /* would like to use 1.5vh 1.5em */ + margin-bottom: 2.5em; + /* TODO maybe what we need to get articles to start in left column + -webkit-column-break-before: left; + page-break-before: left; + */ +} + +h1.chapter-title { + font-weight: 200; + font-size: 1.2em; + margin-top: 3.5em; /* 4.2rem - would like to use 9vh */ + margin-bottom: 0; + padding-bottom: 0.8333em; /* 1.2rem */ + color: #B3B3B1; + text-transform: uppercase; + word-spacing: -0.075em; + letter-spacing: -0.01em; + border-bottom: 1px solid #DCDCDE; +} + +h1.chapter-title .subtitle { + font-weight: 400; + color: #FFFFFF; + display: block; + font-size: 1.5em; + margin: 0 0 0 0.75em; /* would like to use 2vw */ + line-height: 1.2; /* line-height will remain 1.4 on Kindle, see hack in media query */ +} + +h1.chapter-title em { + color: #57AD68; + font-style: normal; +} + +h1.chapter-title b { + font-weight: inherit; +} + +.chapter-header p.byline { + color: #DCDCDE; + /* float left and height 0 takes this line out of the flow */ + float: left; + height: 0; + width: 100%; + text-align: right; + margin-top: 0; + line-height: 2; +} + +.chapter-header p.byline b { + font-weight: normal; + padding-left: 0.2em; /* 0.25rem */ + font-size: 0.8em; + line-height: 2.5; /* 2rem */ +} + +.chapter-header p.byline img { + -webkit-border-radius: 0.5em; + border-radius: 0.5em; + vertical-align: middle; + /* some readers like to resize images; we don't want the author images resized */ + height: 2em !important; + width: 2em !important; +} + +/* HACK: Solves a problem in the current implementation of asciidoctor-epub3 */ +p.byline { + display: none; +} +/* HACK: Solves a problem in the current implementation of asciidoctor-epub3 */ + +div.abstract { + margin: 5% 1.5em 2.5em 1.5em; +} + +div.abstract > p { + color: #666665; + 
font-size: 1.05em; /* or 1.1em? */ + line-height: 1.75; +} + +div.abstract > p a:link { + color: #666665; + /* hack for font color in iBooks */ + -webkit-text-fill-color: #666665; +} + +div.abstract > p:first-child::first-line { + font-weight: bold; + -webkit-font-feature-settings: "kern" off; + font-feature-settings: "kern" off; + font-kerning: none; + /* and for Kindle... */ + text-rendering: optimizeSpeed; +} + +div.abstract p strong { + font-weight: inherit; + font-style: italic; +} + +p.lead { + font-size: 1.05em; + line-height: 1.75; +} + +hr.thematicbreak { + display: none; +} + +hr.thematicbreak + p { + margin-top: 1.5em; +} + +/* TODO finish layout of first-letter */ +hr.thematicbreak + p::first-letter { + font-size: 200%; +} + +p.stack > strong.head, +p.stack-head > strong.head { + display: block; +} + +p.signature { + font-size: 0.9em; +} + +figure, +aside.sidebar { + margin-top: 1em; +} + +/* +aside.sidebar { + -webkit-column-break-inside: avoid; + page-break-inside: avoid; + float: left; + margin-bottom: 1em; +} +*/ + +figure.image { + -webkit-column-break-inside: avoid; + page-break-inside: avoid; +} + +figure.image img { + max-width: 100%; +} + +figure.coalesce { + -webkit-column-break-inside: avoid; + page-break-inside: avoid; +} + +figcaption, +caption { + font-size: 0.9em; + font-style: italic; + color: #666665; + letter-spacing: -0.01em; + line-height: 1.4; + text-align: left; + padding-left: 0.1em; + page-break-inside: avoid; + -webkit-column-break-after: avoid; + page-break-after: avoid; +} + +figure.image figcaption { + padding-left: 0; + margin-top: 0.2em; + -webkit-column-break-after: auto; + page-break-after: auto; +} + +p + figure.listing, +span.principal + figure.listing { + margin-top: 0.75em; /* 0.75rem */ +} + +figure.listing > pre { + margin-top: 0; +} + +/* REVIEW TODO put margin bottom on the figcaption instead */ +figure.listing > figcaption + pre { + margin-top: 0.294em; /* 0.25rem */ +} + +aside.sidebar { + border: 1px solid 
#B3B3B1; + padding: 0 1.5em; + font-size: 0.9em; + background-color: #F2F2F2; + text-align: right; /* aligns heading to right */ + /* + -webkit-box-shadow: 0px 1px 1px rgba(102, 102, 101, 0.15); + box-shadow: 0px 1px 1px rgba(102, 102, 101, 0.15); + */ +} + +body:first-of-type aside.sidebar { + background-color: rgba(0, 0, 0, 0.05); /* using transparency is night-mode friendly */ + /*background-color: rgba(51, 51, 50, 0.06);*/ /* using transparency is night-mode friendly */ +} + +/* a bit of a cheat; could use aside.sidebar[title] instead, but not on Aldiko */ +aside.sidebar.titled { + margin-top: 2em; +} + +aside.sidebar > h2 { + /*text-transform: uppercase;*/ /* uppercase done manually to support Aldiko */ + font-size: 1em; + /* + font-weight: 700; + */ + font-weight: 400; + letter-spacing: 0; + display: inline-block; + white-space: nowrap; /* for some reason it's wrapping prematurely */ + border: 1px solid #B3B3B1; + padding: 1.5em .75em .5em .75em; + margin: -1em 0.5em -0.25em 0.5em; + background-color: #FFFFFF; + /* + -webkit-box-shadow: 0px 1px 1px rgba(102, 102, 101, 0.1); + box-shadow: 0px 1px 1px rgba(102, 102, 101, 0.1); + */ +} + +/* doesn't work +body:first-of-type aside.sidebar > h2 { + background-color: rgba(255, 255, 255, 1); +} +*/ + +aside.sidebar > div.content { + margin-bottom: 1em; + text-align: justify; /* restore text alignment in content */ +} + +/* QUESTION same for ordered-list? 
*/ +aside.sidebar > div.content > div.itemized-list > ul { + margin-left: 0.5em !important; +} + +div.blockquote { + padding: 0 1em; + margin: 1.25em auto; +} + +/* display: table causes quotes to be repeated in Aldiko, so we hide this part */ +div[class~="blockquote"] { + display: table; +} + +blockquote > p { + color: #191918; + font-style: italic; + + /* + font-size: 1.2em; + word-spacing: 0.1em; + */ + + font-size: 1.15em; + word-spacing: 0.1em; + + margin-top: 0; + line-height: 1.75; +} + +/* hide explicit open quote for CSS3-enabled clients */ +blockquote span.open-quote:not(:empty) { + display: none; +} + +/* NOTE if we mapped the font icon to "\201c", we could just style the .open-quote */ +blockquote > p:first-of-type::before { + display: inline-block; + color: #666665; + text-shadow: 0 1px 2px rgba(102, 102, 101, 0.3); + + /* using serif quote from entypo */ + font-family: "FontIcons"; + + /*content: "\f10e";*/ /* quote-right from Entypo */ + /* + -webkit-transform: rotate(180deg); + transform: rotate(180deg); + padding-left: .3em; + padding-right: .2em; + */ + + content: "\f10d"; /* quote-left, a flipped version of the quote-right from Entypo */ + padding-right: .5em; + font-size: 1.5em; + line-height: 1.3; + margin-top: -0.5em; + vertical-align: text-bottom; +} + +blockquote footer { + font-size: 0.9em; + font-style: italic; + + margin-top: 0.5rem; + text-align: right; +} + +blockquote footer .context { + font-size: 0.9em; + letter-spacing: -0.1em; + color: #666665; +} + +/* Kindle requires text-align: center on surrounding div to align image to center */ +figure.image div.content { + text-align: center; +} + +/* in the event the viewer adds display: block to the image */ +figure.image img { + /* max-width not supported in Kindle, need to use a media query to add */ + /*max-width: 95%;*/ + margin: 0 auto; +} + +pre { + text-align: left; /* fix for Namo */ + margin-top: 1em; /* 0.85rem */ + /*margin-top: 1.176em;*/ /* 1rem */ + white-space: pre-wrap; + 
/*word-break: break-all;*/ /* break at the end of the line, no matter what */ + word-wrap: break-word; /* break in middle of long word if no other break opportunities are available */ + font-size: 0.85em; + line-height: 1.4; /* matches what Kindle uses and can't go less */ + background-color: #F2F2F2; + padding: 0.5rem 0.75rem; + /* + border-top: 3px solid #DCDCDE; + */ + /* QUESTION #B3B3B1? */ + border-top: 1px solid #DCDCDE; + border-right: 1px solid #DCDCDE; +} + +body:first-of-type pre { + background-color: rgba(0, 0, 0, 0.05); /* using transparency is night-mode friendly */ + /*background-color: rgba(51, 51, 50, 0.06);*/ /* using transparency is night-mode friendly */ +} + +/* TODO what we really want is for pre w/o caption to be unbreakable */ +pre.screen { + /* + -webkit-column-break-inside: avoid; + page-break-inside: avoid; + */ + orphans: 3; + widows: 3; /* widows doesn't seem to work here */ +} + +pre.source { + orphans: 3; + widows: 3; /* widows doesn't seem to work here */ +} + +div.verse { + -webkit-column-break-inside: avoid; + page-break-inside: avoid; +} + +/* TODO we may want to reenable hyphens here, but not for kf8 */ +div.verse > pre { + background-color: transparent; + border: none; + font-size: 1.2em; + text-align: center; +} + +div.verse .attribution { + display: block; + margin-top: 1.4em; +} + +aside.admonition { + margin-top: 1em; + padding: 1em; + border-left: 0.5em solid transparent; + -webkit-column-break-inside: avoid; + page-break-inside: avoid; +} + +/* overrides for CSS3-enabled clients */ +aside[class~="admonition"] { + margin: 1.5em 2em; /* even if admonition is at bottom of block, we want that extra space below */ + padding: 0; + border-width: 0; + background: none !important; +} + +aside.note { + border-left-color: #B3B3B1; + background-color: #E1E1E1; /* 25% opacity of border */ +} + +aside.tip { + border-left-color: #57AD68; + background-color: #D4EAD9; /* 25% opacity of border */ +} + +aside.caution { + border-left-color: 
#666665; + background-color: #D8D8D8; /* 25% opacity of border */ +} + +aside.warning { + border-left-color: #C83737; + background-color: #F1CCCC; /* 25% opacity of border */ +} + +aside.important { + border-left-color: #FFC14F; + background-color: #FFEFD2; /* 25% opacity of border */ +} + +aside.admonition::before { + display: block; + font-family: "FontAwesome"; + font-size: 2em; + line-height: 1; + width: 1em; + text-align: center; + margin-bottom: -0.25em; + margin-left: -0.5em; + text-shadow: 0px 1px 2px rgba(102, 102, 101, 0.3); +} + +aside.admonition > div.content { + font-size: 90%; + margin-top: -1em; /* prevent at top of content when using block form of admonition */ +} + +aside[class~="admonition"] > div[class~="content"] { + margin-top: 0; + padding-bottom: 1em; + background-size: 100% 1px; + background-repeat: no-repeat; + background-position: 0 bottom; + /* template + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, 45%, 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 42.5%, 45%, 55%, rgba(255,255,255,0) 57.5%); + */ +} + +aside.note::before { + /*content: "\f0f4";*/ /* fa-coffee */ + content: "\f040"; /* fa-pencil */ + color: #B3B3B1; /* 179,179,177 */ +} + +aside[class~="note"] > div[class~="content"] { + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, #B3B3B1 45%, #B3B3B1 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 42.5%, #B3B3B1 45%, #B3B3B1 55%, rgba(255,255,255,0) 57.5%); +} + +aside.tip::before { + /*content: "\f069";*/ /* fa-asterisk */ + /*content: "\f0d6";*/ /* fa-money */ + content: "\f15a"; /* fa-bitcoin */ + color: #57AD68; /* 87,173,104 */ +} + +aside[class~="tip"] > div[class~="content"] { + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, #57AD68 45%, #57AD68 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, 
rgba(255,255,255,0) 42.5%, #57AD68 45%, #57AD68 55%, rgba(255,255,255,0) 57.5%); +} + +aside.caution::before { + content: "\f0c2"; /* fa-cloud */ + color: #666665; /* 102,102,101 */ +} + +aside[class~="caution"] > div[class~="content"] { + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, #666665 45%, #666665 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 42.5%, #666665 45%, #666665 55%, rgba(255,255,255,0) 57.5%); +} + +aside.warning::before { + content: "\f0e7"; /* fa-bolt */ + color: #C83737; /* 200,55,55 */ +} + +aside[class~="warning"] > div[class~="content"] { + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, #C83737 45%, #C83737 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 42.5%, #C83737 45%, #C83737 55%, rgba(255,255,255,0) 57.5%); +} + +aside.important::before { + content: "\f12a"; /* fa-exclamation */ + color: #FFC14F; /* 255,193,79 */ +} + +aside[class~="important"] > div[class~="content"] { + background-image: -webkit-linear-gradient(left, rgba(255,255,255,0) 42.5%, #FFC14F 45%, #FFC14F 55%, rgba(255,255,255,0) 57.5%); + background-image: linear-gradient(to right, rgba(255,255,255,0) 42.5%, #FFC14F 45%, #FFC14F 55%, rgba(255,255,255,0) 57.5%); +} + +aside.admonition > h2 { + margin-top: 0; + margin-bottom: 1.5em; + font-size: 1em; + text-align: center; +} + +aside[class~="admonition"] > h2 { + float: left; + width: 100%; + margin-top: -1.25em; + margin-bottom: 0; +} + +div.table { + margin-top: 1em; +} + +table.table thead, +table.table tbody, +table.table tfoot { + font-size: 0.8em; +} + +table.table > caption { + padding-bottom: 0.1em; +} + +table.table th, +table.table td { + line-height: 1.4; + padding: 0.5em 0.5em 1em 0.1em; + vertical-align: top; + text-align: left; + -webkit-column-break-inside: avoid; + page-break-inside: avoid; +} + +table.table th { + font-weight: bold; +} + +table.table 
thead th { + border-bottom: 1px solid #80807F; +} + +table.table td > p { + margin-top: 0; + text-align: left; +} + +/* REVIEW */ +table.table td > p + p { + margin-top: 1em; +} + +table.table-framed { + border-width: 1px; + border-style: solid; + border-color: #80807F; +} + +table.table-framed-topbot { + border-width: 1px 0; + border-style: solid; + border-color: #80807F; +} + +table.table-framed-sides { + border-width: 0 1px; + border-style: solid; + border-color: #80807F; +} + +table.table-grid th, +table.table-grid td { + border-width: 0 1px 1px 0; + border-style: solid; + border-color: #80807F; +} + +table.table-grid thead tr > *:last-child { + border-right-width: 0; +} + +table.table-grid tbody tr:last-child > th, +table.table-grid tbody tr:last-child > td { + border-bottom-width: 0; +} + +table.table-grid-rows tbody th, +table.table-grid-rows tbody td { + border-width: 1px 0 0 0; + border-style: solid; + border-color: #80807F; +} + +table.table-grid-cols th, +table.table-grid-cols td { + border-width: 0 1px 0 0; + border-style: solid; + border-color: #80807F; +} + +table.table-grid-cols thead th:last-child { + border-right-width: 0; +} + +table.table-grid-cols tbody tr > td:last-child { + border-right-width: 0; +} + +hr.pagebreak { + -webkit-column-break-after: always; + page-break-after: always; + border: none; + margin: 0; +} + +/* REVIEW */ +hr.pagebreak + * { + margin-top: 0 !important; +} + +#_about_the_author { + -webkit-column-break-before: always; + page-break-before: always; + border-bottom: 1px solid #B3B3B3; +} + +img.headshot { + float: left; + border: 1px solid #80807F; + padding: 1px; + margin: 0.35em 1em 0.15em 0; + height: 5em !important; + width: 5em !important; +} + +/* Kindle refuses to style footer (perhaps stripped), so we use an explicit class */ +.chapter-footer { + -webkit-column-break-before: always; + page-break-before: always; +} + +div.footnotes { + margin-top: 1em; +} + +div.footnotes p { + font-size: 0.8rem; + margin-top: 
0.4rem; +} + +div.footnotes sup.noteref { + font-weight: bold; + font-size: 0.9em; +} + +/*div.footnotes sup.noteref a {*/ +sup.noteref a { + /* Kindle wants to underline these links */ + text-decoration: none !important; + background-image: none; +} + +nav#toc ol { + list-style-type: none; +} + +.icon { + display: none; +} + +@media amzn-mobi { + /* NOTE mobi7 doesn't support custom fonts, so revert to generic ones */ + body p, ul, ol, li, dl, dt, dd, figcaption, caption, footer, + table.table th, table.table td, div.verse .attribution { + font-family: serif; + } + h1, h2, h3, h4, h5, h6 { + font-family: sans-serif; + } + code, kbd, pre, i.conum { + font-family: monospace; + } +} diff --git a/book/_resources/pdfstyles/default-theme.yml b/book/_resources/pdfstyles/default-theme.yml new file mode 100644 index 00000000..bd6ad502 --- /dev/null +++ b/book/_resources/pdfstyles/default-theme.yml @@ -0,0 +1,319 @@ +# Bookerly, Palatino, Garamond, Georgia +font: + catalog: + # Noto Serif supports Latin, Latin-1 Supplement, Latin Extended-A, Greek, Cyrillic, Vietnamese & an assortment of symbols + Noto Serif: + normal: notoserif-regular-subset.ttf + bold: notoserif-bold-subset.ttf + italic: notoserif-italic-subset.ttf + bold_italic: notoserif-bold_italic-subset.ttf + Literata: + normal: Literata-regular.ttf + bold: Literata-bold.ttf + italic: Literata-italic.ttf + bold_italic: Literata-bold-italic.ttf + Fira Code: + normal: FiraCode-Regular.ttf + bold: FiraCode-Bold.ttf + italic: FiraCode-Italic.ttf + bold_italic: FiraCode-Bold-italic.ttf + # M+ 1mn supports ASCII and the circled numbers used for conums + M+ 1mn: + normal: mplus1mn-regular-ascii-conums.ttf + bold: mplus1mn-bold-ascii.ttf + italic: mplus1mn-italic-ascii.ttf + bold_italic: mplus1mn-bold_italic-ascii.ttf + # M+ 1p supports Latin, Latin-1 Supplement, Latin Extended, Greek, Cyrillic, Vietnamese, Japanese & an assortment of symbols + # It also provides arrows for ->, <-, => and <= replacements in case these 
glyphs are missing from font + M+ 1p Fallback: + normal: mplus1p-regular-fallback.ttf + bold: mplus1p-regular-fallback.ttf + italic: mplus1p-regular-fallback.ttf + bold_italic: mplus1p-regular-fallback.ttf + # https://github.com/asciidoctor/asciidoctor-pdf/issues/323#issuecomment-143966293 + OpenSansEmoji: + normal: OpenSansEmoji.ttf + bold: OpenSansEmoji.ttf + italic: OpenSansEmoji.ttf + bold_italic: OpenSansEmoji.ttf + Symbola: + normal: Symbola.ttf + bold: Symbola.ttf + italic: Symbola.ttf + bold_italic: Symbola.ttf + # https://github.com/emojione/emojione/tree/master/extras/fonts + Emojione: + normal: emojione-android.ttf + bold: emojione-android.ttf + italic: emojione-android.ttf + bold_italic: emojione-android.ttf + # https://www.google.com/get/noto/#emoji-zsye-color + NotoColorEmoji: + normal: NotoColorEmoji.ttf + bold: NotoColorEmoji.ttf + italic: NotoColorEmoji.ttf + bold_italic: NotoColorEmoji.ttf + # https://graphicdesign.stackexchange.com/a/64918 + NotoEmoji: + normal: NotoEmoji-Regular.ttf + bold: NotoEmoji-Regular.ttf + italic: NotoEmoji-Regular.ttf + bold_italic: NotoEmoji-Regular.ttf + fallbacks: + - M+ 1p Fallback + # - NotoEmoji # works (black and white) + - NotoColorEmoji # Didn't work + # - Symbola + # - Emojione # Didn't work +page: + background_color: ffffff + layout: portrait + margin: [0.5in, 0.67in, 0.67in, 0.67in] + # margin_inner and margin_outer keys are used for recto/verso print margins when media=prepress + margin_inner: 0.75in + margin_outer: 0.59in + size: A4 +base: + align: justify + # color as hex string (leading # is optional) + font_color: 333333 + # color as RGB array + #font_color: [51, 51, 51] + # color as CMYK array (approximated) + #font_color: [0, 0, 0, 0.92] + #font_color: [0, 0, 0, 92%] + font_family: Literata + # choose one of these font_size/line_height_length combinations + #font_size: 14 + #line_height_length: 20 + #font_size: 11.25 + #line_height_length: 18 + #font_size: 11.2 + #line_height_length: 16 + font_size: 
10.5 + #line_height_length: 15 + # correct line height for Noto Serif metrics + line_height_length: 12 + #font_size: 11.25 + #line_height_length: 18 + line_height: $base_line_height_length / $base_font_size + font_size_large: round($base_font_size * 1.25) + font_size_small: round($base_font_size * 0.85) + font_size_min: $base_font_size * 0.75 + font_style: normal + border_color: eeeeee + border_radius: 4 + border_width: 0.5 +# FIXME vertical_rhythm is weird; we should think in terms of ems +#vertical_rhythm: $base_line_height_length * 2 / 3 +# correct line height for Noto Serif metrics (comes with built-in line height) +vertical_rhythm: $base_line_height_length +horizontal_rhythm: $base_line_height_length +# QUESTION should vertical_spacing be block_spacing instead? +vertical_spacing: $vertical_rhythm +link: + font_color: 428bca +# literal is currently used for inline monospaced in prose and table cells +literal: + font_color: b12146 + font_family: Fira Code +menu_caret_content: " \u203a " + +# https://github.com/asciidoctor/asciidoctor-pdf/blob/master/docs/theming-guide.adoc#keys-heading +heading: + align: left + #font_color: 181818 + font_color: $base_font_color + font_family: $base_font_family + font_style: bold + # h1 is used for part titles (book doctype) or the doctitle (article doctype) + h1_font_size: floor($base_font_size * 2.6) + # h2 is used for chapter titles (book doctype only) + h2_font_size: floor($base_font_size * 2.15) + h3_font_size: round($base_font_size * 1.7) + h4_font_size: $base_font_size_large + h5_font_size: $base_font_size + h6_font_size: $base_font_size_small + #line_height: 1.4 + # correct line height for Noto Serif metrics (comes with built-in line height) + line_height: 1 + margin_top: $vertical_rhythm * 0.4 + margin_bottom: $vertical_rhythm * 0.9 +title_page: + align: right + logo: + top: 10% + title: + top: 55% + font_size: $heading_h1_font_size + font_color: 999999 + line_height: 0.9 + subtitle: + font_size: $heading_h3_font_size + 
font_style: bold_italic + line_height: 1 + authors: + margin_top: $base_font_size * 1.25 + font_size: $base_font_size_large + font_color: 181818 + revision: + margin_top: $base_font_size * 1.25 +block: + margin_top: 0 + margin_bottom: $vertical_rhythm +caption: + align: left + font_size: $base_font_size * 0.95 + font_style: italic + # FIXME perhaps set line_height instead of / in addition to margins? + margin_inside: $vertical_rhythm / 3 + #margin_inside: $vertical_rhythm / 4 + margin_outside: 0 +lead: + font_size: $base_font_size_large + line_height: 1.4 +abstract: + font_color: 5c6266 + font_size: $lead_font_size + line_height: $lead_line_height + font_style: italic + first_line_font_style: bold + title: + align: center + font_color: $heading_font_color + font_family: $heading_font_family + font_size: $heading_h4_font_size + font_style: $heading_font_style +admonition: + column_rule_color: $base_border_color + column_rule_width: $base_border_width + padding: [0, $horizontal_rhythm, 0, $horizontal_rhythm] + #icon: + # tip: + # name: fa-lightbulb-o + # stroke_color: 111111 + # size: 24 + label: + text_transform: uppercase + font_style: bold +blockquote: + font_color: $base_font_color + font_size: $base_font_size_large + border_color: $base_border_color + border_width: 5 + # FIXME disable negative padding bottom once margin collapsing is implemented + padding: [0, $horizontal_rhythm, $block_margin_bottom * -0.75, $horizontal_rhythm + $blockquote_border_width / 2] + cite_font_size: $base_font_size_small + cite_font_color: 999999 +# code is used for source blocks (perhaps change to source or listing?) 
+code: + font_color: $base_font_color + font_family: $literal_font_family + font_size: ceil($base_font_size) + padding: $code_font_size + line_height: 1.25 + # line_gap is an experimental property to control how a background color is applied to an inline block element + line_gap: 3.8 + background_color: f5f5f5 + border_color: cccccc + border_radius: $base_border_radius + border_width: 0.75 +conum: + font_family: M+ 1mn + # font_family: $literal_font_family + font_color: $literal_font_color + font_size: $base_font_size + line_height: 4 / 3 +example: + border_color: $base_border_color + border_radius: $base_border_radius + border_width: 0.75 + background_color: ffffff + # FIXME reenable padding bottom once margin collapsing is implemented + padding: [$vertical_rhythm, $horizontal_rhythm, 0, $horizontal_rhythm] +image: + align: left +prose: + margin_top: $block_margin_top + margin_bottom: $block_margin_bottom +sidebar: + background_color: eeeeee + border_color: e1e1e1 + border_radius: $base_border_radius + border_width: $base_border_width + # FIXME reenable padding bottom once margin collapsing is implemented + padding: [$vertical_rhythm, $vertical_rhythm * 1.25, 0, $vertical_rhythm * 1.25] + title: + align: center + font_color: $heading_font_color + font_family: $heading_font_family + font_size: $heading_h4_font_size + font_style: $heading_font_style +thematic_break: + border_color: $base_border_color + border_style: solid + border_width: $base_border_width + margin_top: $vertical_rhythm * 0.5 + margin_bottom: $vertical_rhythm * 1.5 +description_list: + term_font_style: bold + term_spacing: $vertical_rhythm / 4 + description_indent: $horizontal_rhythm * 1.25 +outline_list: + indent: $horizontal_rhythm * 1.5 + #marker_font_color: 404040 + # NOTE outline_list_item_spacing applies to list items that do not have complex content + item_spacing: $vertical_rhythm / 2 +table: + background_color: $page_background_color + #head_background_color: + #head_font_color: 
$base_font_color + head_font_style: bold + #body_background_color: + body_stripe_background_color: f9f9f9 + foot_background_color: f0f0f0 + border_color: dddddd + border_width: $base_border_width + cell_padding: 3 +toc: + indent: $horizontal_rhythm + line_height: 1.4 + dot_leader: + #content: ". " + font_color: a9a9a9 + #levels: 2 3 +# NOTE in addition to footer, header is also supported +footer: + font_size: $base_font_size_small + # NOTE if background_color is set, background and border will span width of page + border_color: dddddd + border_width: 0.25 + height: $base_line_height_length * 2.5 + line_height: 1 + padding: [$base_line_height_length / 2, 1, 0, 1] + vertical_align: top + #image_vertical_align: or + # additional attributes for content: + # * {page-count} + # * {page-number} + # * {document-title} + # * {document-subtitle} + # * {chapter-title} + # * {section-title} + # * {section-or-chapter-title} + recto: + #columns: "<50% =0% >50%" + right: + # content: '{page-number}' + content: '{section-or-chapter-title} | {page-number}' + # content: '{section-title} | {page-number}' + # content: '{document-title} | {page-number}' + #center: + # content: '{page-number}' + verso: + #columns: $footer_recto_columns + left: + content: $footer_recto_right_content + #content: '{page-number} | {chapter-title}' + #center: + # content: '{page-number}' diff --git a/book/book-all.adoc b/book/book-all.adoc new file mode 100644 index 00000000..5c872247 --- /dev/null +++ b/book/book-all.adoc @@ -0,0 +1,155 @@ +include::_conf/variables.adoc[] + += {doctitle} + +// remove numbering from titles, and sub-titles e.g. 
1.1 +:sectnums!: + +// Copyright © 2018 Adrian Mejia +include::chapters/colophon.adoc[] + +// Abstract and Dedication MUST have a level-0 heading in EPUB and Kindle +// but level-1 in PDF and HTML +ifndef::backend-epub3[:leveloffset: +1] +include::chapters/dedication.adoc[] +ifndef::backend-epub3[:leveloffset: -1] + +// TODO: pending +include::chapters/preface.adoc[] + +include::chapters/cheatsheet.adoc[] + +// add sections to chapters +:sectnums: + +// +// chapters +// + += Algorithms Analysis + +// TODO: pending +include::chapters/algorithms-analysis-intro.adoc[] + +:leveloffset: +1 + +include::chapters/algorithms-analysis.adoc[] + +include::chapters/big-o-examples.adoc[] + +:leveloffset: -1 + += Linear Data Structures + +include::chapters/linear-data-structures-intro.adoc[] + +:leveloffset: +1 + +include::chapters/array.adoc[] + +include::chapters/linked-list.adoc[] + +include::chapters/stack.adoc[] + +include::chapters/queue.adoc[] + +:leveloffset: -1 + += Non-Linear Data Structures + +include::chapters/non-linear-data-structures-intro.adoc[] + +:leveloffset: +1 + +include::chapters/tree.adoc[] + +include::chapters/binary-search-tree.adoc[] + +include::chapters/map.adoc[] + +include::chapters/set.adoc[] + +include::chapters/graph.adoc[] + + +:leveloffset: -1 + += Advanced Non-Linear Data Structures + +// TODO: pending +include::chapters/non-linear-data-structures-intro-advanced.adoc[] + +:leveloffset: +1 + +// TODO: pending +include::chapters/avl-tree.adoc[] + +// TODO: pending (optional) +// include::chapters/red-black-tree.adoc[] + +// TODO: pending +include::chapters/heap.adoc[] + +// TODO: (optional) pending +// include::chapters/trie.adoc[] + + +:leveloffset: -1 + += Algorithms + +// TODO: pending +include::chapters/algorithms-intro.adoc[] + +:leveloffset: +1 + +// TODO: pending +include::chapters/sorting-intro.adoc[] + +// +// Slow Sorting +// + +include::chapters/insertion-sort.adoc[] + +include::chapters/selection-sort.adoc[] + 
+include::chapters/bubble-sort.adoc[] + +// +// Fast Sorting +// + +include::chapters/merge-sort.adoc[] + +include::chapters/quick-sort.adoc[] + +// TODO: (optional) pending +// include::chapters/heap-sort.adoc[] + +// TODO: (optional) pending +// include::chapters/tim-sort.adoc[] + +// +// Searching +// + +// TODO: pending +include::chapters/graph-search.adoc[] + +:leveloffset: -1 + +// +// end chapters +// + +include::chapters/epigraph.adoc[] + +// TODO: (optional) pending +// include::chapters/appendix.adoc[] + +// TODO: (optional) pending +ifdef::backend-pdf[] +include::chapters/index.adoc[] +endif::[] + diff --git a/book/book.adoc b/book/book.adoc new file mode 100644 index 00000000..d48ca52e --- /dev/null +++ b/book/book.adoc @@ -0,0 +1,194 @@ +include::_conf/variables.adoc[] + += {doctitle} + +// remove numbering from titles, and sub-titles e.g. 1.1 +:sectnums!: + +// Copyright © 2018 Adrian Mejia (g) +include::chapters/colophon.adoc[] + +// Abstract and Dedication MUST have a level-0 heading in EPUB and Kindle +// but level-1 in PDF and HTML +ifndef::backend-epub3[:leveloffset: +1] +include::chapters/dedication.adoc[] +ifndef::backend-epub3[:leveloffset: -1] + +// (g) +include::chapters/preface.adoc[] + +// add sections to chapters +:sectnums: + + +//----------------------------------- +// TODO: commment out sample on final +//----------------------------------- + +include::chapters/sample.adoc[] + +//----------------------------------- +// TODO: end remove ------ +//----------------------------------- + +// +// chapters +// + += Algorithms Analysis + +include::chapters/algorithms-analysis-intro.adoc[] + +:leveloffset: +1 + +// (g) +include::chapters/algorithms-analysis.adoc[] + +// (g) +include::chapters/big-o-examples.adoc[] + +:leveloffset: -1 + += Linear Data Structures + +// (g) +include::chapters/linear-data-structures-intro.adoc[] + +:leveloffset: +1 + +// (g) +include::chapters/array.adoc[] + +// (g) +include::chapters/linked-list.adoc[] + +// (g) 
+include::chapters/stack.adoc[] + +// (g) +include::chapters/queue.adoc[] + +// (g) +include::chapters/linear-data-structures-outro.adoc[] + +:leveloffset: -1 + + += Non-Linear Data Structures + +// (g) +include::chapters/non-linear-data-structures-intro.adoc[] + +:leveloffset: +1 + +// (g) +include::chapters/tree.adoc[] + + +// (g) +include::chapters/tree--binary-search-tree.adoc[] + +include::chapters/tree--search.adoc[] + +include::chapters/tree--self-balancing-rotations.adoc[] + +:leveloffset: +1 + +include::chapters/tree--avl.adoc[] + +:leveloffset: -1 + +// (g) +// include::chapters/map.adoc[] +include::chapters/map-intro.adoc[] + +:leveloffset: +1 + +// (g) +include::chapters/map-hashmap.adoc[] + +// (g) +include::chapters/map-treemap.adoc[] + +// (g) +include::chapters/map-hashmap-vs-treemap.adoc[] + +:leveloffset: -1 + +// (g) +include::chapters/set.adoc[] + +// (g) +include::chapters/graph.adoc[] + +// TODO: pending +include::chapters/graph-search.adoc[] + +:leveloffset: -1 + += Algorithms + +// TODO: pending +include::chapters/algorithms-intro.adoc[] + +:leveloffset: +1 + +// +// Sorting algorithms +// += Sorting Algorithms + +:leveloffset: +1 + +// TODO: pending +include::chapters/sorting-intro.adoc[] + +// Slow Sorting + +include::chapters/insertion-sort.adoc[] + +include::chapters/selection-sort.adoc[] + +include::chapters/bubble-sort.adoc[] + +// Fast Sorting + +include::chapters/merge-sort.adoc[] + +include::chapters/quick-sort.adoc[] + +:leveloffset: -1 + + +// +// Algorithms Techniques +// + +include::chapters/divide-and-conquer.adoc[] + +include::chapters/dynamic-programming.adoc[] + +include::chapters/greedy-algorithms.adoc[] + +include::chapters/backtracking.adoc[] + +// --- end algorithms --- + +:leveloffset: -1 + +:sectnums!: + += Appendix + +:leveloffset: +1 + +// TODO: review and complete when the rest is completed +include::chapters/cheatsheet.adoc[] + +:leveloffset: -1 + +// +// end chapters +// + +include::chapters/epigraph.adoc[] diff 
--git a/book/chapters/algorithms-analysis-intro.adoc b/book/chapters/algorithms-analysis-intro.adoc new file mode 100644 index 00000000..65d3d4e1 --- /dev/null +++ b/book/chapters/algorithms-analysis-intro.adoc @@ -0,0 +1,4 @@ +[partintro] +-- +In this section we are going to cover the basics about algorithms analysis. We are also going to discuss eight of the most common runtimes of algorithms. +-- diff --git a/book/chapters/algorithms-analysis.adoc b/book/chapters/algorithms-analysis.adoc new file mode 100644 index 00000000..5431fc10 --- /dev/null +++ b/book/chapters/algorithms-analysis.adoc @@ -0,0 +1,167 @@ += Fundamentals of Algorithms Analysis + +Chances are you are reading this book because you want to write better and faster code. +How can you do that? Can you time how long it takes to run a program? Of course, you can! +[big]#⏱# +However, if you run the same program on a smartwatch, cellphone or desktop computer, it will give you very different times. + +image:image3.png[image,width=528,height=137] + +Wouldn't it be great if we could compare algorithms regardless of the hardware where we run them? +That's what *time complexity* is for! +But why stop with the running time? +We could also compare the memory "used" by different algorithms, and we call that *space complexity*. + +.In this chapter you will learn: +- What’s the best way to measure your code performance. +- Learn how to use Big O notation to compare algorithms. +- How to use algorithms analysis to improve your program's speed. + +Before going deeper into space and time complexity, let's cover the basics real quick. + +== What are Algorithms? + +Algorithms (as you might know) are steps of how to do some task. When you cook, you follow a recipe (or an algorithm) to prepare a dish. Let's say you want to make a pizza. 
+ +.Example of an algorithm +// [source, js] // undefined functions +---- +function bakePizza(dough, toppings = ['cheese']) { + const heatedOven = heatOvenTo(550); + punchDown(dough); + rollOut(dough); + applyToppings(dough, toppings); + const pizza = heatedOven.bakePizza(dough); + return pizza; +} +---- + +If you play a game, you are devising strategies (or algorithms) to help you win. Likewise, algorithms in computers are a set of instructions used to solve a problem. + +TIP: Algorithms are instructions on how to perform a task. + +== Comparing Algorithms + +Not all algorithms are created equal. There are “good” and “bad” algorithms. The good ones are fast; the bad ones are slow. Slow algorithms cost more money to run. Inefficient algorithms could make some calculations impossible in our lifespan! + +Most algorithms are affected by the size of the input. Let's say you need to arrange numbers in ascending order. Sorting ten digits will naturally take much less time than sorting 2 million of them. + +To give you a clearer picture of how different algorithms perform as the input size grows, take a look at the following table. + +.Relationship between algorithm input size and time taken to complete +[cols=",,,,,",options="header",] +|============================================================================================= +|Input size -> |10 |100 |10k |100k |1M +|Finding if a number is odd |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. +|Sorting elements in array with merge sort |< 1 sec. |< 1 sec. |< 1 sec. |few sec. |20 sec. +|Sorting elements in array with Bubble Sort |< 1 sec. |< 1 sec. |2 minutes |3 hours |12 days +|Finding all subsets of a given set |< 1 sec. |40,170 trillion years |> centillion years |∞ |∞ +|Find all permutations of a string |4 sec. 
|> vigintillion years |> centillion years |∞ |∞ +|============================================================================================= + +However, if you keep the input size constant, you can notice the difference between an efficient algorithm and a slow one. An excellent sorting algorithm is `mergesort` for instance, and an inefficient algorithm for large inputs is `bubble sort`. +Organizing 1 million elements with merge sort takes 20 seconds while bubble sort takes 12 days, ouch! +The amazing thing is that both programs are measured on the same hardware with the same data! + +After completing this book, you are going to *think differently*. +You will be able to scale your programs while you are designing them. +Find bottlenecks of existing software and have an "algorithmic toolbox" to switch algorithms and make them faster without having to upgrade hardware. [big]#💸# + +== Increasing your code performance + +The first step to improve your code performance is to measure it. As somebody said: + +[quote, H. J. Harrington] +Measurement is the first step that leads to control and eventually to improvement. If you can’t measure something, you can’t understand it. If you can’t understand it, you can’t control it. If you can’t manage it, you can’t improve it. + +In this section, we are going to learn the basics of measuring our current code performance and compare it with other algorithms. + +=== Calculating Time Complexity + +Time complexity, in computer science, is a function that describes the number of operations a program will execute given the size of the input `n`. + +How do we get a function that gives us the number of operations that will be executed? Well, we count line by line and mind code inside loops. Let's do an example to explain this point. For instance, we have a function to find the minimum value on an array called `getMin`. 
+ +.Translating lines of code to an approximate number of operations +image:image4.png[Operations per line] + +Assuming that each line of code is an operation, we get the following: + +_3n + 3_ + +`n` = input size. + +That means that if we give an array of 3 elements e.g. `getMin([3, 2, 9])`, then it will execute around _3(3)+3 = 12_ operations. Of course, this is not for every case. For instance, Line 12 is only executed if the condition on line 11 is met. As you might learn in the next section, we want to get the big picture and get rid of smaller terms to make comparing algorithms easier. + +== Space Complexity + +Space complexity is similar to time complexity. However, instead of the count of operations executed, it will account for the amount of memory used additionally to the input. + +For calculating the *space complexity* we keep track of the “variables” and memory used. In the `getMin` example, we just create a single variable called `min`. So, the space complexity is 1. In other algorithms, if we have to use an auxiliary array, then the space complexity would be `n`. + +=== Simplifying Complexity with Asymptotic Analysis + +When we are comparing algorithms, we don't want to have complex expressions. What would you prefer: comparing two algorithms like "3n^2^ + 7n" vs. "1000 n + 2000", or comparing them as "n^2^" vs. "n"? Well, that's when asymptotic analysis comes to the rescue. + +Asymptotic analysis is the analysis of the behavior of functions as their inputs approach infinity. + +In the previous example, we analyzed `getMin` with an array of size 3, but what happens if the size is 10 or 10k or a million? 
+ +.Operations performed by an algorithm with a time complexity of 3n+3 +[cols=",,",options="header",] +|=========================== +|n (size) |Operations |total +|10 |3(10) + 3 |33 +|10k |3(10k)+3 |30,003 +|1M |3(1M)+3 |3,000,003 +|=========================== + +As the input size `n` grows bigger and bigger then the expression _3n + 3_ could be represented as _3n_ without losing too much or even _n_. Dropping terms might look like a stretch at first, but you will see that what matters the most is the higher order terms of the function rather than lesser terms and constants. There’s a notation called *Big O*, where O refers to the *order of the function*. + +If you have a program whose run time is + +_7n^3^ + 3n^2^ + 5_ + +You can safely say that its run time is _n^3^_. The other terms will become less and less significant as the input grows bigger. + +=== What is Big O Notation anyways? + +Big O notation only cares about the “biggest” terms in the time/space complexity. So, it combines what we learn about time and space complexity, asymptotic analysis and adds a worst-case scenario. + +.All algorithms have three scenarios: +* Best-case scenario: the most favorable input arrangement where the program will take the least amount of operations to complete. E.g., array already sorted is beneficial for some sorting algorithms. +* Average-case scenario: this is the most common case. E.g., array items in random order for a sorting algorithm. +* Worst-case scenario: the inputs are arranged in such a way that causes the program to take the longest to complete. E.g., array items in reversed order for some sorting algorithm will take the longest to run. + +To sum up: + +TIP: Big O only cares about the highest order of the run time function and the worst-case scenario. + +WARNING: Don't drop terms that are multiplying other terms. _O(n log n)_ is not equivalent to _O(n)_. However, _O(n + log n)_ is. 
+ +There are many common notations like polynomial, _O(n^2^)_ like we saw in the `getMin` example; constant O(1) and many more that we are going to explore in the next chapter. + +Again, time complexity is not a direct measure of how long a program takes to execute but rather how many operations it performs in given the input size. Nevertheless, there’s a relationship between time complexity and clock time as we can see in the following table. + +.How long an algorithm takes to run based on their time complexity and input size +[cols=",,,,,,",options="header",] +|=============================================================== +|Input Size |O(1) |O(n) |O(n log n) |O(n^2^) |O(2^n^) |O(n!) +|1 |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. +|10 |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |4 seconds +|10k |< 1 sec. |< 1 sec. |< 1 sec. |2 minutes |∞ |∞ +|100k |< 1 sec. |< 1 sec. |1 second |3 hours |∞ |∞ +|1M |< 1 sec. |1 second |20 seconds |12 days |∞ |∞ +|=============================================================== + +This just an illustration since in different hardware the times will be slightly different. + +NOTE: These times are under the assumption of running on 1 GHz CPU and that it can execute on average one instruction in 1 nanosecond (usually takes more time). Also, bear in mind that each line might be translated into dozens of CPU instructions depending on the programming language. Regardless, bad algorithms would perform poorly even on a supercomputer. + +== Summary + +In this chapter, we learned how you could measure your algorithm performance using time complexity. Rather than timing how long your program take to run you can approximate the number of operations it will perform based on the input size. + +We learned about time and space complexity and how they can be translated to Big O notation. Big O refers to the *order* of the function. 
+ +In the next section, we are going to provide examples of each of the most common time complexities! diff --git a/book/chapters/algorithms-intro.adoc b/book/chapters/algorithms-intro.adoc new file mode 100644 index 00000000..ef6c8e9a --- /dev/null +++ b/book/chapters/algorithms-intro.adoc @@ -0,0 +1,11 @@ +In this 2nd part of the book we are going to cover algorithms in more detail. +We are going to start with sorting and searching algorithms, then you are going to learn some techniques for solving problems. + +IMPORTANT: There's not a single approach to solve all problems but knowing well-known techniques can help you build your own faster. + +.We are going to discuss the following approaches for solving algorithms problems: +- <>: makes greedy choices using heuristics to find the best solution without looking back. +- <>: technique for solving problems with *overlapping subproblems*. It uses *memoization* to avoid duplicated work. +- <>: *divide* problems into smaller pieces, *conquer* each subproblem and then *join* the results. +- <>: search *all (or some)* possible paths. However, it stops and *goes back* as soon as some constraint is broken. +- *Brute Force*: generate all possible solutions. (Use it as the last resort or as starting point to optimize it with other techniques). diff --git a/book/chapters/appendix.adoc b/book/chapters/appendix.adoc new file mode 100644 index 00000000..ae11903e --- /dev/null +++ b/book/chapters/appendix.adoc @@ -0,0 +1,5 @@ +[appendix] += Appendix + +Some additional topics + diff --git a/book/chapters/array.adoc b/book/chapters/array.adoc new file mode 100644 index 00000000..e1c6a6e3 --- /dev/null +++ b/book/chapters/array.adoc @@ -0,0 +1,186 @@ += Array + +Arrays are one of the most used data structures. You probably have used them a lot but are you aware of the runtimes of `splice`, `shift` and other operations? In this chapter, we are going deeper into the most common operations and their runtimes. 
+ +== Array Basics + +An array is a collection of things (strings, characters, numbers, objects, etc.). They can be many or zero. Strings are a collection of Unicode characters and most of the array concepts apply to them. + +.Fixed vs. Dynamic Size Arrays +**** +Some programming languages have fixed size arrays like Java and C++. Fixed size arrays might be a hassle when your collection gets full, and you have to create a new one with a bigger size. For that, those programming languages also have built-in dynamic arrays: we have `vector` in C++ and `ArrayList` in Java. Dynamic programming languages like JavaScript, Ruby, Python use dynamic arrays by default. +**** + +Arrays look like this: + +.Array representation: each value is accessed through an index. +image:image16.png[image,width=388,height=110] + +Arrays are a sequential collection of elements that can be accessed randomly using an index. Let’s take a look into the different operations that we can do with arrays. + +== Insertion + +Arrays are built-in into most languages. Inserting element in an array is you can either: + +.Inserting elements into an array +[source, javascript] +---- +// (1) Add elements at the creation time: +const array = [2, 5, 1, 9, 6, 7]; + +// (2) initialize the array (empty) and values later +const array0 = []; +array0[2] = 1; +---- + +Using the index, you can replace whatever value you want. + +=== Inserting at the beginning of the array + +What if you want to insert a new element at the beginning of the array? You would have to push every item to the right. + +.Insert to head +[source, javascript] +---- +array.unshift(0); //=> [0, 2, 5, 1, 9, 6, 7] +---- + +As you can see, `2` was the index 0, now was pushed to index 1, and everything else was moved one place. `unshift` takes *O(n)* since it affects all the elements in the array. 
+ +.JavaScript built-in `array.unshift` +**** +The `unshift()` method adds one or more elements to the beginning of an array and returns the new length of the array. Runtime: O(n). +**** + +=== Inserting at the middle of the array + +Inserting a new element in the middle involves moving part of the array but not all of the items. + +.Inserting element in the middle +[source, javascript] +---- +array.splice(1, 0, 111); // <1> +---- +<1> at the position 1, delete 0 elements and insert 111. The array would be `[2, 111, 5, 1, 9, 6, 7]` + +The Big O for this operation would be *O(n)* since in worst case it would move most of the elements to the right. + +.JavaScript built-in `array.splice` +**** +The `splice()` method changes the contents of an array by removing existing elements and/or adding new elements. Runtime: O(n). +**** + +=== Inserting at the end of the array + +We can push new values to the end of the array like this: + +.Insert to tail +[source, javascript] +---- +const array = [2, 5, 1, 9, 6, 7]; +array.push(4); // <1> +---- +<1> The `4` element would be pushed to the end `[2, 5, 1, 9, 6, 7, 4]`. + +Adding to the tail of the array doesn’t change other indexes. E.g., element 2 is still at index 0. So, this is a constant time operation *O(1)*. + +.JavaScript built-in `array.push` +**** +The `push()` method adds one or more elements to the end of an array and returns the new length of the array. Runtime: O(1). +**** + +== Searching by value and index + +Searching by index is very easy using the `[]` operator: + +.Search by index +[source, javascript] +---- +const array = [2, 5, 1, 9, 6, 7]; +array[4]; //↪️ 6 +---- + +Searching by index takes a constant time, *O(1)*, to retrieve values out of the array. 
If we want to get fancier we can create a function: + +// image:image17.png[image,width=528,height=293] + +.Search by index +[source, javascript] +---- +include::{codedir}/data-structures/arrays/array.js[tag=searchByIndex] +---- + +Finding out if an element is in the array or not is a different story. + +// image:image18.png[image,width=528,height=338] + +.Search by value +[source, javascript] +---- +include::{codedir}/data-structures/arrays/array.js[tag=searchByValue] +---- + +We would have to loop through the whole array (worst case) or until we find it: *O(n)*. + +== Deletion + +Deleting (similar to insertion) there are three possible scenarios, removing at the beginning, middle or end. + +=== Deleting element from the beginning + +Deleting from the beginning can be done using the `splice` function and also the `shift`. Let’s use the `shift` since it’s simpler. + +.Deleting from the beginning of the array. +[source, javascript] +---- +array.shift(); //=> [5, 1, 9, 6, 7] +---- + +As expected, this will make every index to change, so this takes *O(n)*. + +.JavaScript built-in array.shift +**** +The `shift()` method removes the first element from an array and returns that removed element. This method changes the length of the array. Runtime: O(n). +**** + + +=== Deleting element from the middle + +We can use the splice operator for this. + +.Deleting from the middle +[source, javascript] +---- +array.splice(2, 1); // delete 1 element at position 2 +// => array: [2, 5, 9, 6, 7] +---- + +Deleting from the middle might cause most the elements of the array to move back one position to fill in for the eliminated item. Thus, runtime: O(n). + +=== Deleting element from the end + +Removing the last element is very straightforward: + +.Deleting last element from the array +[source, javascript] +---- +array.pop(); // => array: [2, 5, 1, 9, 6] +---- + +No element other element has been shifted, so it’s an _O(1)_ runtime. 
+ +.JavaScript built-in `array.pop` +**** +The `pop()` method removes the last element from an array and returns that element. This method changes the length of the array. Runtime: O(1). +**** + +== Array Complexity + +To sum up, the time complexity on an array is: + +.Time complexity for the array operations +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| Array ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) +|=== diff --git a/book/chapters/avl-tree.adoc b/book/chapters/avl-tree.adoc new file mode 100644 index 00000000..569c30df --- /dev/null +++ b/book/chapters/avl-tree.adoc @@ -0,0 +1,5 @@ += AVL Tree + +The AVL tree builds on top of a <> and it keeps it balanced on insertions. It prevents a BST worst case scenario when the tree is totally unbalanced to one side (similar to linked list), then it takes O(n) to find an element instead of O(log n). + + diff --git a/book/chapters/backtracking.adoc b/book/chapters/backtracking.adoc new file mode 100644 index 00000000..63b52156 --- /dev/null +++ b/book/chapters/backtracking.adoc @@ -0,0 +1,143 @@ += Backtracking + +Backtracking algorithms are used to find *all (or some)* solutions that satisfy a contraint. + +Bracktracking builds a solution step by step using recursion. +If during the process it realizes a given path is not going to lead to a solution, +it stops and step back (backtracking) to try a different alternative. + +Some examples that use backtracking is a solving Sudoku/crosswords puzzle, and graph operations. + +ifndef::backend-pdf[] +image:Sudoku_solved_by_bactracking.gif[] +endif::backend-pdf[] + +Listing all possible solutions might sound like a brute force. +However, is not the same. +Backtracking algorithms are faster than brute force one. 
+ +.Brute Force vs Backtracking Algorithms +**** +*Brute force* evaluates every possibility. +*Backtracking* is an optimized brute force. +It stops evaluating a path as soon as some of the conditions are broken and moves on to the next. +However, it can only be applied if a quick test can be run to tell if a candidate will contribute to a valid solution. +**** + +== How to develop backtracking algorithms? + +Backtracking algorithms can be tricky to get right or reason about but we are going to follow this recipe to make it easier. + +.Steps to create backtracking algorithms +. Iterate through all the elements in the input +. Make a change +. Recursive function moving to the next element +. Test if the current change is a solution +. Revert back the change (backtracking) + +Let's do an exercise to explain better how backtracking works. + +// https://leetcode.com/problems/combination-sum/description/ + +== Permutations + +> Return all the permutations (without repetitions) of a given word. + +For instance, if you are given the word `art` these are the possible permutations: + +---- +[ [ 'art' ], + [ 'atr' ], + [ 'rat' ], + [ 'rta' ], + [ 'tra' ], + [ 'tar' ] ] +---- + +Now, let's implement the program to generate all permutations of a word. + + +NOTE: We already solved this problem using an <>, now let's do it using backtracking. + +.Word permutations using backtracking +[source, javascript] +---- +include::{codedir}/algorithms/permutations-backtracking.js[tag=snippet,indent=0] +---- +<1> Iterate through all the elements in the input +<2> Make a change: swap letters +<3> Recursive function moving to the next element +<4> Test if the current change is a solution: reached the end of the string. +<5> Revert back the change (backtracking): Undo swap from step 2 + +As you can see, we iterate through each letter and swap with the following letters until we reach the end of the string. Then, we roll back the change and try another path. 
+ +In the following tree, you can visualize how the backtracking algorithm is swaping the letters. + +[graphviz, Recursive Fibonacci call tree with dp, svg] +.... +digraph g { + node [shape = record,height=.1]; + + art[label = " A*| R| T"]; + art1[label = " A| R*| T"]; + art2[label = " A| R| T", color="red"]; + atr[label = " A| T| R", color="red"]; + rat[label = " R| A*| T"]; + rat1[label = " R| A| T", color="red"]; + rta[label = " R| T| A", color="red"]; + tra[label = " T| R*| A"]; + tra1[label = " T| R| A", color="red"]; + tar[label = " T| A| R", color="red"]; + + art:f0 -> art1:f0 [ label = "1. swap A/A"]; + art1:f0 -> art2:f0 [ label = "2. swap R/R"]; + art2:f2 -> art1:f1 [ label = "3.", color="grey", fontcolor="grey"]; + art1:f2 -> atr:f0 [ label = "4. swap R/T"]; + atr:f2 -> art1:f2 [ label = "5.", color="grey", fontcolor="grey"]; + art1:f1 -> art:f0 [ label = "6.", color="grey", fontcolor="grey"]; + + art:f1 -> rat:f0 [ label = "7. swap A/R"]; + rat:f0 -> rat1:f0 [ label = "8. swap A/A"]; + rat1:f2 -> rat:f1 [ label = "9.", color="grey", fontcolor="grey"]; + rat:f2 -> rta:f0 [ label = "10. swap A/T"]; + rta:f2 -> rat:f2 [ label = "11.", color="grey", fontcolor="grey"]; + rat:f2 -> art:f2 [ label = "12.", color="grey", fontcolor="grey"]; + + art:f2 -> tra:f0 [ label = "13. swap A/T"]; + tra:f0 -> tra1:f0 [ label = "14. swap R/R"]; + tra1:f2 -> tra:f2 [ label = "15.", color="grey", fontcolor="grey"]; + tra:f2 -> tar:f0 [ label = "16. swap R/A"]; + tar:f2 -> tra:f2 [ label = "17.", color="grey", fontcolor="grey"]; + tra:f2 -> art:f2 [ label = "18.", color="grey", fontcolor="grey"]; +} +.... + +.Legend: +- The [red]#red# words are the iterations added to the solution array. +- *Black* arrows indicate the `swap` operations. +- *Grey* arrows indicate the _backtracking_ operation (undo swap). +- The asterisk (`*`) indicates `start` index. + +Most of backtracking algorihtms do something similar. 
What changes is the test function to determine if a current iteration is a solution or not. + + + +// == Finding largest sum + +// [graphviz, Find largest sum, svg] +// .... +// graph G { +// 5 -- 3 [color="#B8E986", penwidth=2] +// 5 -- 7 [color="#FF5252", penwidth=2] +// 3 -- 87 [color="#B8E986", penwidth=2] +// 3 -- 1 +// 7 -- 2 +// 7 -- 4 [color="#FF5252", penwidth=2] + +// label="Optimal vs. Greedy path" +// } +// .... + +// https://medium.com/leetcode-patterns/leetcode-pattern-3-backtracking-5d9e5a03dc26 +// https://leetcode.com/problems/subsets/ diff --git a/book/chapters/big-o-examples.adoc b/book/chapters/big-o-examples.adoc new file mode 100644 index 00000000..891de7b1 --- /dev/null +++ b/book/chapters/big-o-examples.adoc @@ -0,0 +1,275 @@ += Eight Running Times You Should Know + +There are many kinds of algorithms. Most of them fall into one of the eight time complexities that we are going to explore in this chapter. + +.Most common time complexities +- Constant time: _O(1)_ +- Logarithmic time: _O(log n)_ +- Linear time: _O(n)_ +- Linearithmic time: _O(n log n)_ +- Quadratic time: _O(n^2^)_ +- Cubic time: _O(n^3^)_ +- Exponential time: _O(2^n^)_ +- Factorial time: _O(n!)_ + +We are going to provide examples for each one of them. + +Before we dive in, here’s a plot with all of them. + +.CPU operations vs. Algorithm runtime as the input size grows +image:image5.png[CPU time needed vs. Algorithm runtime as the input size increases] + +The above chart shows how the running time of an algorithm is related to the amount of work the CPU has to perform. As you can see O(1) and O(log n) are very scalable. However, O(n^2^) and worse can make your computer run for years [big]#😵# on large datasets. We are going to give some examples so you can identify each one. + +== Constant + +Represented as *O(1)*, it means that regardless of the input size the number of operations executed is always the same. Let’s see an example. 
+ +[#constant-example] +=== Finding if an array is empty + +Let's implement a function that finds out if an array is empty or not. + +//.is-empty.js +//image:image6.png[image,width=528,height=401] + +[source, javascript] +---- +include::{codedir}/runtimes/01-is-empty.js[tag=isEmpty] +---- + +Another more real life example is adding an element to the begining of a <>. You can check out the implementation <>. + +As you can see, in both examples (array and linked list) if the input is a collection of 10 elements or 10M it would take the same amount of time to execute. You can't get any more performance than this! + +== Logarithmic + +Represented in Big O notation as *O(log n)*, when an algorithm has this running time it means that as the size of the input grows the number of operations grows very slowly. Logarithmic algorithms are very scalable. One example is the *binary search*. + +[#logarithmic-example] +=== Searching on a sorted array + +The binary search only works for sorted lists. It starts searching for an element on the middle of the array and then it moves to the right or left depending if the value you are looking for is bigger or smaller. + +// image:image7.png[image,width=528,height=437] + +[source, javascript] +---- +include::{codedir}/runtimes/02-binary-search.js[tag=binarySearchRecursive] +---- + +This binary search implementation is a recursive algorithm, which means that the function `binarySearch` calls itself multiple times until the solution is found. The binary search split the array in half every time. + +Finding the runtime of recursive algorithms is not very obvious sometimes. It requires some tools like recursion trees or the https://adrianmejia.com/blog/2018/04/24/analysis-of-recursive-algorithms/[Master Theorem]. The `binarySearch` divides the input in half each time. As a rule of thumb, when you have an algorithm that divides the data in half on each call you are most likely in front of a logarithmic runtime: _O(log n)_. 
+ +== Linear + +Linear algorithms are one of the most common runtimes. It’s represented as *O(n)*. Usually, an algorithm has a linear running time when it iterates over all the elements in the input. + +[#linear-example] +=== Finding duplicates in an array using a map + +Let’s say that we want to find duplicate elements in an array. What’s the first implementation that comes to mind? Check out this implementation: + +// image:image8.png[image,width=528,height=383] + +[source, javascript] +---- +include::{codedir}/runtimes/03-has-duplicates.js[tag=hasDuplicates] +---- + +.`hasDuplicates` has multiple scenarios: +* *Best-case scenario*: first two elements are duplicates. It only has to visit two elements. +* *Worst-case scenario*: no duplicated or duplicated are the last two. In either case, it has to visit every item on the array. +* *Average-case scenario*: duplicates are somewhere in the middle of the collection. Only, half of the array will be visited. + +As we learned before, the big O cares about the worst-case scenario, where we would have to visit every element on the array. So, we have an *O(n)* runtime. + +Space complexity is also *O(n)* since we are using an auxiliary data structure. We have a map that in the worst case (no duplicates) it will hold every word. + +== Linearithmic + +An algorithm with a linearithmic runtime is represented as _O(n log n)_. This one is important because it is the best runtime for sorting! Let’s see the merge-sort. + +[#linearithmic-example] +=== Sorting elements in an array + +The merge sort, like its name indicates, has two functions merge and sort. 
Let’s start with the sort function: + +// image:image9.png[image,width=528,height=383] + +.Sort part of the mergeSort +[source, javascript] +---- +include::{codedir}/runtimes/04-merge-sort.js[tag=sort] +---- + +Starting with the sort part, we divide the array into two halves and then merge them (line 16) recursively with the following function: + +// image:image10.png[image,width=528,height=380] + +.Merge part of the mergeSort +[source, javascript] +---- +include::{codedir}/runtimes/04-merge-sort.js[tag=merge] +---- + +The merge function combines two sorted arrays in ascending order. Let’s say that we want to sort the array `[9, 2, 5, 1, 7, 6]`. In the following illustration, you can see what each function does. + +.Mergesort visualization. Shows the split, sort and merge steps +image:image11.png[Mergesort visualization,width=500,height=600] + +How do we obtain the running time of the merge sort algorithm? The mergesort divides the array in half each time in the split phase, _log n_, and the merge function joins each split, _n_. In total, we have *O(n log n)*. There are more formal ways to reach this runtime like using the https://adrianmejia.com/blog/2018/04/24/analysis-of-recursive-algorithms/[Master Method] and https://www.cs.cornell.edu/courses/cs3110/2012sp/lectures/lec20-master/lec20.html[recursion trees]. + +== Quadratic + +Running times that are quadratic, O(n^2^), are the ones to watch out for. They usually don’t scale well when they have a large amount of data to process. + +Usually, they have double-nested loops where each one visits all or most elements in the input. One example of this is a naïve implementation to find duplicate words on an array. + +[#quadratic-example] +=== Finding duplicates in an array (naïve approach) + +If you remember we have solved this problem more efficiently on the <> section. 
We solved this problem before using an _O(n)_, let’s solve it this time with an _O(n^2^)_: + +// image:image12.png[image,width=527,height=389] + +.Naïve implementation of has duplicates function +[source, javascript] +---- +include::{codedir}/runtimes/05-has-duplicates-naive.js[tag=hasDuplicates] +---- + +As you can see, we have two nested loops causing the running time to be quadratic. How much different is a linear vs. quadratic algorithm? + +Let’s say you want to find a duplicated middle name in a phone directory book of a city of ~1 million people. If you use this quadratic solution you would have to wait for ~12 days to get an answer [big]#🐢#; while if you use the <> you will get the answer in seconds! [big]#🚀# + +== Cubic + +Cubic *O(n^3^)* and higher polynomial functions usually involve many nested loops. As an example of a cubic algorithm is a multi-variable equation solver (using brute force): + +[#cubic-example] +=== Solving a multi-variable equation + +Let’s say we want to find the solution for this multi-variable equation: + +_3x + 9y + 8z = 79_ + +A naïve approach to solve this will be the following program: + +//image:image13.png[image,width=528,height=448] + +.Naïve implementation of multi-variable equation solver +[source, javascript] +---- +include::{codedir}/runtimes/06-multi-variable-equation-solver.js[tag=findXYZ] +---- + +WARNING: This just an example, there are better ways to solve multi-variable equations. + +As you can see three nested loops usually translates to O(n^3^). If you have a four variable equation and four nested loops it would be O(n^4^) and so on when we have a runtime in the form of _O(n^c^)_, where _c > 1_, we can refer as a *polynomial runtime*. + +== Exponential + +Exponential runtimes, O(2^n^), means that every time the input grows by one the number of operations doubles. Exponential programs are only usable for a tiny number of elements (<100) otherwise it might not finish on your lifetime. [big]#💀# + +Let’s do an example. 
+ +[#exponential-example] +=== Finding subsets of a set + +Finding all distinct subsets of a given set can be implemented as follows: + +// image:image14.png[image,width=528,height=401] + +.Subsets in a Set +[source, javascript] +---- +include::{codedir}/runtimes/07-sub-sets.js[tag=snippet] +---- +<1> Base case is empty element. +<2> For each element from the input append it to the results array. +<3> The new results array will be what it was before + the duplicated with the appended element. + +//.The way this algorithm generates all subsets is: +//1. The base case is an empty element (line 13). E.g. [''] +//2. For each element from the input append it to the results array (line 16) +//3. The new results array will be what it was before + the duplicated with the appended element (line 17) + +Every time the input grows by one the resulting array doubles. That’s why it has an *O(2^n^)*. + +== Factorial + +Factorial runtime, O(n!), is not scalable at all. Even with input sizes of ~10 elements, it will take a couple of seconds to compute. It’s that slow! [big]*🍯🐝* + +.Factorial +**** +A factorial is the multiplication of all the numbers less than itself down to 1. + +.For instance: +- 3! = 3 x 2 x 1 = 6 +- 5! = 5 x 4 x 3 x 2 x 1 = 120 +- 10! = 3,628,800 +- 11! = 39,916,800 +**** + +[#factorial-example] +=== Getting all permutations of a word + +One classic example of an _O(n!)_ algorithm is finding all the different words that can be formed with a given set of letters. + +.Word's permutations +// image:image15.png[image,width=528,height=377] +[source, javascript] +---- +include::{codedir}/runtimes/08-permutations.js[tag=snippet] +---- + +As you can see in the `getPermutations` function, the resulting array is the factorial of the word length. + +Factorial start very slow and then it quickly becomes uncontrollable. A word size of just 11 characters would take a couple of hours in most computers! 
+ +[big]*🤯* + +== Summary + +We went through 8 of the most common time complexities and provided examples for each of them. Hopefully, this will give you a toolbox to analyze algorithms. + +.Most common algorithmic running times and their examples +[cols="2,2,5",options="header"] +|=== +|Big O Notation +|Name +|Example(s) + +|O(1) +|<> +|<> + +|O(log n) +|<> +|<> + +|O(n) +|<> +|<> + +|O(n log n) +|<> +|<> + +|O(n^2^) +|<> +|<> + +|O(n^3^) +|<> +|<> + +|O(2^n^) +|<> +|<> + +|O(n!) +|<> +|<> +|=== diff --git a/book/chapters/bubble-sort.adoc b/book/chapters/bubble-sort.adoc new file mode 100644 index 00000000..cf89aff0 --- /dev/null +++ b/book/chapters/bubble-sort.adoc @@ -0,0 +1,39 @@ += Bubble Sort + +Bubble sort is a simple sorting algorithm that "bubbles up" the biggest values to the right side of the array. + +== Bubble Sort Implementation + +Bubble sort can be implemented in any programming language. These are the general steps that this sorting algorithm follows: + +.Bubble Sort Algorithm +. It moves one element at a time (from left to right). Everything on the left of the current element is already sorted, while everything to the right is not. +. Start with the first element and make it the current element. +. Compare elements to the right of the current element. +. Bubble up big values to the right of the array. +.. Swap elements if the previous element is bigger than the next one. +. Move the current pointer to the next element and repeat for the rest of the array + + +Let's convert these words into code! + +.Bubble Sort implementation in JavaScript +[source, javascript] +---- +include::{codedir}/algorithms/sorting/bubble-sort.js[tag=sort, indent=0] +---- +<1> Convert any kind of iterable (array, sets, etc.) into an array +<2> Start on the first element (position 0) +<3> Start scanning elements that are to the right of the current element. +<4> If the previous element is bigger than the next one, then swap them. 
This is called bubbling up bigger values to the right. + +Bubble sort has a <> running time, as you might infer from the nested for-loop. + +== Bubble Sort Properties + +- <>: [big]#✅# Yes +- <>: [big]#✅# Yes +- <>: [big]#✅# Yes +- <>: [big]#✅# Yes +- Time Complexity: [big]#⛔️# <> _O(n^2^)_ +- Space Complexity: [big]#✅# <> _O(1)_ diff --git a/book/chapters/cheatsheet.adoc b/book/chapters/cheatsheet.adoc new file mode 100644 index 00000000..47102a8b --- /dev/null +++ b/book/chapters/cheatsheet.adoc @@ -0,0 +1,123 @@ += Cheatsheet + +This section summerize what we are going to cover in the rest of this book. + +== Runtimes + +.Most common algorithmic running times and their examples +[cols="2,2,5",options="header"] +|=== +|Big O Notation +|Name +|Example(s) + +|O(1) +|<> +|#<>, #<> + +|O(log n) +|<> +|<> + +|O(n) +|<> +|<> + +|O(n log n) +|<> +|<> + +|O(n^2^) +|<> +|<> + +|O(n^3^) +|<> +|<> + +|O(2^n^) +|<> +|<> + +|O(n!) +|<> +|<> +|=== + +.How long an algorithm takes to run based on their time complexity and input size +[cols=",,,,,,",options="header",] +|=============================================================== +|Input Size |O(1) |O(n) |O(n log n) |O(n^2^) |O(2^n^) |O(n!) +|1 |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. +|10 |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |4 seconds +|10k |< 1 sec. |< 1 sec. |< 1 sec. |2 minutes |∞ |∞ +|100k |< 1 sec. |< 1 sec. |1 second |3 hours |∞ |∞ +|1M |< 1 sec. 
|1 second |20 seconds |12 days |∞ |∞ +|=============================================================== + +== Linear Data Structures + +.Time and Space Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) +| <> ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n) +| <> (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +|=== + +== Trees and Maps Data Structures + +This section covers Binary Search Tree (BST) time complexity (Big O). + +.Time and Space Complexity for Non-Linear Data Structures +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| BST (**un**balanced) ^|- ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| BST (balanced) ^|- ^|O(log n) ^|O(log n) ^|O(log n) ^|O(n) +| Hash Map (naïve) ^|O(n) ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| Hash Map (optimized) ^|O(1)* ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +| Tree Map (Red-Black Tree) ^|O(log n) ^|O(n) ^|O(log n) ^|O(log n) ^|O(log n) +| HashSet ^|- ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +| TreeSet ^|- ^|O(n) ^|O(log n) ^|O(log n) ^|O(log n) +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. + + +.Time complexity for a Graph data structure +|=== +.2+.^s| Data Structure 2+^s| Vertices 2+^s| Edges .2+^.^s| Space Complexity +^|_Add_ ^|_Remove_ ^|_Add_ ^|_Remove_ +| Graph (adj. matrix) ^| O(\|V\|^2^) ^| O(\|V\|^2^) ^|O(1) ^|O(1) ^|O(\|V\|^2^) +| Graph (adj. 
list w/array) ^| O(1) ^| O(\|V\| + \|E\|)) ^|O(1) ^|O(\|V\| + \|E\|) ^|O(\|V\| + \|E\|) +| Graph (adj. list w/HashSet) ^| O(1) ^| O(\|V\|)) ^|O(1) ^|O(\|V\|) ^|O(\|V\| + \|E\|) +|=== + +== Sorting Algorithms + +.Sorting algorithms comparison +|=== +| Algorithms | Runtime | Space | Stable | In-place | Online | Adaptive | Comments +| Insertion sort | O(n^2^) | O(1) | Yes | Yes | Yes | Yes | +| Selection sort | O(n^2^) | O(1) | Yes | Yes | Yes | Yes | +| Bubble sort | O(n^2^) | O(1) | Yes | Yes | Yes | Yes | +| Merge sort | O(n log n) | O(n) | Yes | No | No | No | +| Quicksort | O(n log n) | O(log n) | Yes | No | No | No | +// | Tim sort | O(n log n) | O(log n) | Yes | No | No | Yes | Hybrid of merge and insertion sort +|=== + +// https://algs4.cs.princeton.edu/cheatsheet/ +// http://bigocheatsheet.com/ + +// https://en.wikipedia.org/wiki/Timsort (Tim Peters) +// https://bugs.python.org/file4451/timsort.txt +// https://www.youtube.com/watch?v=emeME__917E&list=PLMCXHnjXnTntLcLmA5SqhMspm7burHi3m + +// https://en.wikipedia.org/wiki/Sorting_algorithm +// http://sorting.at/ +// https://www.toptal.com/developers/sorting-algorithms +// https://www.infopulse.com/blog/timsort-sorting-algorithm/ diff --git a/book/chapters/colophon.adoc b/book/chapters/colophon.adoc new file mode 100644 index 00000000..bd159043 --- /dev/null +++ b/book/chapters/colophon.adoc @@ -0,0 +1,16 @@ +[colophon] += Colophon + +{doctitle} + +Copyright © {docyear} Adrian Mejia + +All rights reserved. + +For online information and ordering this and other books, please visit https://adrianmejia.com. The publisher offers discounts on this book when ordered in quantity for more information contact sales@adrianmejia.com. + +No part of this publication may be produced, the store in the retrieval system, or transmitted, in any form or by means electronic, mechanical, photocopying, or otherwise, without the prior written permission of the publisher. 
+
+While every precaution has been taken in the preparation of this book, the publisher and author assume no responsibility for errors or omissions, or damages resulting from the use of the information contained herein.
+
+{revremark}, {revdate}.
diff --git a/book/chapters/dedication.adoc b/book/chapters/dedication.adoc
new file mode 100644
index 00000000..e9c9590b
--- /dev/null
+++ b/book/chapters/dedication.adoc
@@ -0,0 +1,5 @@
+[dedication]
+= Dedication
+
+To my wife Nathalie, who supported me in my long hours of writing, and to my baby girl Abigail.
+
diff --git a/book/chapters/divide-and-conquer--fibonacci.adoc b/book/chapters/divide-and-conquer--fibonacci.adoc
new file mode 100644
index 00000000..db7e837a
--- /dev/null
+++ b/book/chapters/divide-and-conquer--fibonacci.adoc
@@ -0,0 +1,65 @@
+= Recursive Fibonacci Numbers
+
+To illustrate how we can solve a problem using divide and conquer, let's write a program to find the n-th Fibonacci number.
+
+.Fibonacci Numbers
+****
+The Fibonacci sequence is a series of numbers that starts with `0, 1`; the next values are calculated as the sum of the previous two. So, we have:
+
+`0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...`
+****
+
+We can get the n-th Fibonacci number with the following recursive program:
+
+.Recursive Fibonacci implementation
+[source, javascript]
+----
+include::{codedir}/algorithms/fibonacci-recursive.js[tag=snippet,indent=0]
+----
+
+.Let's see how this fits divide and conquer:
+- *Divide*: `n` is divided into two subproblems `f(n-1)` and `f(n-2)`.
+- *Conquer*: solve each subproblem independently.
+- *Combine*: sum the subproblem results to get the final solution.
+
+The implementation above does the job, but what's the runtime?
+
+For that, let's take a look at the work performed when calculating the `fib(5)` number. Since `fib(5) = fib(4) + fib(3)`, we need to find the answer for `fib(4)` and `fib(3)`. We do that recursively until we reach the base cases of `fib(1)` and `fib(0)`.
If we represent the calls in a tree, we would have the following:
+
+// http://bit.ly/2UmwzZV
+[graphviz, Recursive fibonacci call tree, png]
+....
+graph G {
+  "fib(5)" -- { "fib(4)", "fib(3)" }
+  "fib(4)" -- { "fib(3)*", "fib(2)" }
+  "fib(3)" -- { "fib(2)*", "fib(1)" }
+  "fib(2)" -- { "fib(1)*", "fib(0)" }
+  "fib(2)*" -- { "fib(1)***", "fib(0)*" }
+  "fib(3)*" -- { "fib(2)**", "fib(1)**" }
+  "fib(2)**" -- { "fib(1)****", "fib(0)**" }
+
+  // red colors
+  "fib(0)*" [color="#FF5252"];
+  "fib(0)**" [color="#FF5252"];
+  "fib(1)*" [color="#FF5252"];
+  "fib(1)**" [color="#FF5252"];
+  "fib(1)***" [color="#FF5252"];
+  "fib(1)****" [color="#FF5252"];
+  "fib(2)*" [color="#FF5252"];
+  "fib(2)**" [color="#FF5252"];
+  "fib(3)*" [color="#FF5252"];
+}
+....
+
+In the diagram, we see the two recursive calls needed to compute each number. So if we follow the _O(branches^depth^)_ we get O(2^n^). [big]#🐢#
+
+NOTE: Fibonacci is not a perfect binary tree since some nodes only have one child instead of two. The exact runtime for recursive Fibonacci is _O(1.6^n^)_ (still exponential time complexity).
+
+An exponential time complexity is pretty bad. Can we do better?
+
+In the call tree, you can notice that every element in red and with asterisks `*` is called more than once. We are repeating calculations too many times!
+
+[quote, Dynamic Programming]
+Those who cannot remember the past are condemned to repeat it.
+
+For these cases, when subproblems repeat themselves, we can optimize them using dynamic programming.
diff --git a/book/chapters/divide-and-conquer--intro.adoc b/book/chapters/divide-and-conquer--intro.adoc
new file mode 100644
index 00000000..98bcb0d3
--- /dev/null
+++ b/book/chapters/divide-and-conquer--intro.adoc
@@ -0,0 +1,17 @@
+Divide and conquer is a strategy for solving algorithmic problems.
+It splits the input into manageable parts recursively and finally joins the solved pieces to form the end result.
+
+We have already done some divide and conquer algorithms.
This list will refresh your memory.
+
+.Examples of divide and conquer algorithms:
+- <>: *divides* the input into pairs, sorts them, and then *joins* all the pieces in ascending order.
+- <>: *splits* the data by a random number called the "pivot", then moves everything smaller than the pivot to the left and anything bigger to the right. Repeat the process on the left and right side. Note: since this works in place, it doesn't need a join part.
+- <>: finds a value in a sorted collection by *splitting* the data in half until it finds the value.
+- <>: *takes out* the first element from the input and solves permutations for the remainder of the data recursively, then *joins* the results and appends the elements that were taken out.
+
+We can solve algorithmic problems with D&C using the following steps.
+
+.Divide and conquer algorithms steps:
+1. *Divide* data into subproblems.
+2. *Conquer* each subproblem.
+3. *Combine* solutions.
diff --git a/book/chapters/divide-and-conquer.adoc b/book/chapters/divide-and-conquer.adoc
new file mode 100644
index 00000000..5ee393f9
--- /dev/null
+++ b/book/chapters/divide-and-conquer.adoc
@@ -0,0 +1,9 @@
+= Divide and Conquer
+
+include::divide-and-conquer--intro.adoc[]
+
+:leveloffset: +1
+
+include::divide-and-conquer--fibonacci.adoc[]
+
+:leveloffset: -1
diff --git a/book/chapters/dynamic-programming--fibonacci.adoc b/book/chapters/dynamic-programming--fibonacci.adoc
new file mode 100644
index 00000000..dfe2dd26
--- /dev/null
+++ b/book/chapters/dynamic-programming--fibonacci.adoc
@@ -0,0 +1,27 @@
+= Fibonacci Sequence with Dynamic Programming
+
+Let's solve the same Fibonacci problem, but this time with dynamic programming.
+
+When we have a recursive function doing duplicated work, it is the perfect place for a dynamic programming optimization. We can save (or cache) the results of previous operations and speed up future computations.
+
+.Recursive Fibonacci Implementation using Dynamic Programming
+[source, javascript]
+----
+include::{codedir}/algorithms/fibanacci-dynamic-programming.js[tag=snippet,indent=0]
+----
+
+This implementation checks if we already calculated the value in the past; if not, it will save it for later use.
+
+[graphviz, Recursive Fibonacci call tree with dp, svg]
+....
+graph G {
+  "fib(5)" -- { "fib(4)" }
+  "fib(4)" -- { "fib(3)" }
+  "fib(3)" -- { "fib(2)" }
+  "fib(2)" -- { "fib(1)", "fib(0)" }
+}
+....
+
+This looks pretty linear now. Its runtime is _O(n)_!
+
+TIP: Saving previous results for later is a technique called "memoization" and is very common for optimizing recursive algorithms with exponential time complexity.
diff --git a/book/chapters/dynamic-programming--intro.adoc b/book/chapters/dynamic-programming--intro.adoc
new file mode 100644
index 00000000..115cb03c
--- /dev/null
+++ b/book/chapters/dynamic-programming--intro.adoc
@@ -0,0 +1,23 @@
+Dynamic programming (dp) is a way to solve algorithmic problems with *overlapping subproblems*. Algorithms using dp find the base case and build a solution from the ground up. They also _keep track_ of previous answers to avoid re-computing the same operations.
+
+// https://twitter.com/amejiarosario/status/1103050924933726208
+// https://www.quora.com/How-should-I-explain-dynamic-programming-to-a-4-year-old/answer/Jonathan-Paulson
+// https://medium.com/@codingfreak/top-50-dynamic-programming-practice-problems-4208fed71aa3
+// https://www.slideshare.net/balamoorthy39/greedy-algorithm-knapsack-problem
+
+.How to explain dynamic programming to kids? 👶
+****
+
+$$*$$*_Write down 1+1+1+1+1+1+1+1+1+1_*$$*$$
+
+--{sp} What's that equal to?
+
+--{sp} $$*$$*_Kid counting one by one_*$$*$$ Ten!
+
+--{sp} Add another "+1". What's the total now?
+
+--{sp} $$*$$*_Quickly_*$$*$$ Eleven!
+
+--{sp} Why did you get the result so quickly? Ah, you got it faster by adding one to the memorized previous result.
So Dynamic Programming is a fancy way of saying: "remembering past results to save time later"
+****
+
diff --git a/book/chapters/dynamic-programming--knapsack-problem.adoc b/book/chapters/dynamic-programming--knapsack-problem.adoc
new file mode 100644
index 00000000..daf2eaa5
--- /dev/null
+++ b/book/chapters/dynamic-programming--knapsack-problem.adoc
@@ -0,0 +1,47 @@
+= Knapsack Problem
+
+The knapsack (backpack [big]#🎒#) problem is the following:
+
+> A thief breaks into a museum with a backpack that can carry a certain weight.
+What items should he pick to maximize his loot?
+
+Take a look at the following example to understand the problem better.
+
+.Knapsack Problem Examples
+[source, javascript]
+----
+
+// Input:
+const museumGoods = [
+  { value: 1, weight: 1},
+  { value: 4, weight: 3 },
+  { value: 5, weight: 4 },
+  { value: 7, weight: 5 },
+]
+
+const maxBackpackWeight = 7;
+
+// Solution:
+const backpack = solveKnapsackProblem(museumGoods, maxBackpackWeight);
+
+// Output:
+expect(backpack.items).to.equal([
+  { value: 4, weight: 3 },
+  { value: 5, weight: 4 }
+])
+
+expect(backpack.weight).toBeLessThanOrEqual(7);
+expect(backpack.value).toBe(9);
+----
+
+How can we solve this problem? You cannot take them all, since the total weight is 13 and we can only carry 7. You should not take only one either, since that would not be the maximum loot.
+
+One idea would be to sort the items by weight and take the items as long as they do not exceed the max weight.
+In that case, the result would be:
+
+----
+  { value: 7, weight: 5 },
+  { value: 1, weight: 1},
+----
+
+As you can see, this solution is not optimal. The total value is `8` and the weight just `6`.
diff --git a/book/chapters/dynamic-programming.adoc b/book/chapters/dynamic-programming.adoc new file mode 100644 index 00000000..56780119 --- /dev/null +++ b/book/chapters/dynamic-programming.adoc @@ -0,0 +1,11 @@ += Dynamic Programming + +include::dynamic-programming--intro.adoc[] + +:leveloffset: +1 + +include::dynamic-programming--fibonacci.adoc[] + +// include::chapters/dynamic-programming--knapsack-problem.adoc[] + +:leveloffset: -1 diff --git a/book/chapters/epigraph.adoc b/book/chapters/epigraph.adoc new file mode 100644 index 00000000..fb9fb6c2 --- /dev/null +++ b/book/chapters/epigraph.adoc @@ -0,0 +1,4 @@ +[epigraph] += Epigraph + +Thanks for reading! diff --git a/book/chapters/graph-search.adoc b/book/chapters/graph-search.adoc new file mode 100644 index 00000000..ce126fc8 --- /dev/null +++ b/book/chapters/graph-search.adoc @@ -0,0 +1,66 @@ += Graph Search + +Graph search allows you to visit all the elements connected given a starting node. There are two ways to navigate the graph, one is using Depth-First Search (DFS) and the other one is Breadth-First Search (BFS). Let's see the difference. + +== Depth-First Search for Graphs + +With Depth-First Search (DFS) we go deep before going wide. + +// TODO: add arrows to show DFS and create another one for BFS + +[graphviz, dfs-graph, png] +.... +digraph G { + + node [fillcolor="#F8E71C" style=filled shape=circle] 0; + node [fillcolor="#F5A623"] 1; + node [fillcolor="#B8E986"] 2; + node [fillcolor="#BD10E0"] 3; + node [fillcolor="#50E3C2"] 4; + node [fillcolor="#4A90E2"] 5; + // node [fillcolor="#FF5252"] 6; + + 0 -> 5 + 0 -> 4 + 0 -> 1 + 1 -> 4 + 1 -> 3 + 2 -> 1 + 3 -> 4 + 3 -> 2 + + label="DFS" + + { rank=same; 3, 1 } + { rank=same; 0, 4 } + +} +.... + +== Breadth-First Search for Graphs + +With Breadth-First Search (BFS) we go wide before going deep. 
+
+// TODO: BFS traversal
+
+== Depth-First Search vs Breadth-First Search in a Graph
+
+DFS and BFS implementations can be almost identical; the difference is the underlying data structure. In our implementation, we have a generic `graphSearch` where we pass the node to start the search from and the data structure that we want to use:
+
+.DFS and BFS implementation
+[source, javascript]
+----
+include::{codedir}/data-structures/graphs/graph.js[tag=graphSearch,indent=0]
+----
+
+Using an <> (LIFO) for DFS will make us keep visiting the last node's children, while using a <> (FIFO) will allow us to visit adjacent nodes first and "queue" their children for later visiting.
+
+TIP: you can also implement the DFS as a recursive function, similar to what we did in the <>.
+
+You might wonder: what's the difference between search algorithms in a tree and a graph? Check out the next section.
+
+== DFS/BFS on Tree vs Graph
+
+The difference between searching a tree and a graph is that a tree always has a starting point (the root node). However, in a graph you can start searching anywhere. There's no root in a graph.
+
+NOTE: Every tree is a graph, but not every graph is a tree.
diff --git a/book/chapters/graph.adoc b/book/chapters/graph.adoc
new file mode 100644
index 00000000..928e39ce
--- /dev/null
+++ b/book/chapters/graph.adoc
@@ -0,0 +1,297 @@
+= Graph
+
+Graphs are one of my favorite data structures.
+They have a lot of cool applications like optimizing routes and social network analysis, to name a few. You are probably using apps that use graphs every day.
+First, let’s start with the basics.
+
+TIP: A graph is a non-linear data structure where a node can have zero or more connected nodes.
+
+You can think of a graph as an extension of a Linked List. Instead of having a `next` or `previous` reference, you can have as many as you want. You can implement a graph node as an array of associated nodes.
+ +.Node's constructor +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=constructor] +---- + +As you can see, it’s pretty similar to the Linked List node. +The only difference is that it uses an *array* of adjacent nodes instead of just one or two. + +Other difference between a linked list and graph is that a linked list always has a root node, while the graph doesn’t. +You can start traversing a graph from anywhere. Let’s examine these graph properties! + +== Graph Properties + +The connection between two nodes is called *edge*. +Also, nodes might be called *vertex*. + +.Graph is composed of vertices/nodes and edges +image:image42.png[image,width=305,height=233] + +=== Directed Graph vs Undirected + +A graph can be either *directed* or *undirected*. + +.Graph: directed vs undirected +image:image43.jpg[image,width=469,height=192] + + +An *undirected graph* has edges that are *two-way street*. E.g., On the undirected example, you can traverse from the green node to the orange and vice versa. + +A *directed graph (digraph)* has edges that are *one-way street*. E.g., On the directed example, you can only go from green node to orange and not the other way around. When one node has an edge to itself is called a *self-loop*. + +=== Graph Cycles + +A graph can have *cycles* or not. + +.Cyclic vs Acyclic Graphs. +image:image44.jpg[image,width=444,height=194] + + +A *cyclic graph* is the one that you can pass through a node more than once. +E.g., On the cyclic illustration, if you start in the green node, then go the orange and purple, finally, you could come back to green again. +Thus, it has a *cycle*. + +An acyclic graph is the one that you can’t pass through a node more than once. E.g., in the acyclic illustration, can you to find a path where you can pass through the same vertex more than one? + +The *Directed Acyclic Graph (DAG)* is unique. It has many applications like scheduling tasks, spreadsheets change propagation, and so forth. 
DAG is also called *Tree* data structure only when each node has only *one parent*. + +=== Connected vs Disconnected vs Complete Graphs + +.Different kinds of graphs: disconnected, connected, and complete. +image:image45.png[image,width=1528,height=300] + +A *disconnected graph* is one that has one or more subgraph. In other words, a graph is *disconnected* if two nodes don’t have a path between them. + +A *connected graph* is the opposite to disconnected, there’s a path between every node. No one is left behind. + +A *complete graph* is where every node is adjacent to all the other nodes in the graph. E.g., If there are seven nodes, every node has six edges. + +=== Weighted Graphs + +Weighted graphs have labels in the edges (a.k.a *weight* or *cost*). The link weight can represent many things like distance, travel time, or anything else. + +.Weighted Graph representing USA airports distance in miles. +image:image46.png[image,width=528,height=337] + +For instance, a weighted graph can have a distance between nodes. So, algorithms can use the weight and optimize the path between them. + +== Exciting Graph applications in real-world + +Now that we know what graphs are and some of their properties. Let’s discuss some real-life usages of graphs. + +Graphs become a metaphor where nodes and edges model something from our physical world. To name a few: + +* Optimizing Plane traveling +** Nodes = Airport +** Edges = Direct flights between two airports +** Weight = miles between airports | cost | time + +* GPS Navigation System +** Node = road intersection +** Edge = road +** Weight = time between intersections + +* Network routing +** Node = server +** Edge = data link +** Weight = connection speed + +There are endless applications for graphs in electronics, social networks, recommendation systems and many more. That’s cool and all, but how do we represent graphs in code? Let’s see that in the next section. 
+ +== Representing Graphs + +There are two main ways to graphs one is: + +* Adjacency Matrix +* Adjacency List + +=== Adjacency Matrix + +Representing graphs as adjacency matrix is done using a two-dimensional array. For instance, let’s say we have the following graph: + +.Graph and its adjacency matrix. +image:image47.png[image,width=438,height=253] + +The number of vertices |V| define the size of the matrix. In the example, we have five vertices, so we have a 5x5 matrix. + +We fill up the matrix row by row. Mark with 1 (or any other weight) when you find an edge. E.g. + +* *Row 0:* It has a self-loop, so it has a `1` in the coordinate 0,0. Node 0 also has an edge to 1 and 4, so we mark it. +* *Row 1:* The node 1 has one edge to 3, so we check it. +* *Row 2:* Node 2 goes to Node 4, so we note the insertion with 1. +* etc. + +The example graph above is a directed graph (digraph). In the case of an undirected graph, the matrix would be symmetrical by the diagonal. + +If we represent the example graph in code, it would be something like this: + +[source, javascript] +---- +const digraph = [ + [1, 1, 0, 0, 1], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 1], + [0, 0, 1, 0, 0], + [0, 1, 0, 0, 0], +]; +---- + +It would be very easy to tell if two nodes are connected. Let’s query if node 2 is connected to 3: + +[source, javascript] +---- +digraph[2][3]; //=> 0 +digraph[3][2]; //=> 1 +---- + +As you can see, we don’t have a link from node 2 to 3, but we do in the opposite direction. Querying arrays is constant time *O(1)*, so no bad at all. + +The issue with the adjacency matrix is the space it takes. Let’s say you want to represent the entire Facebook network on a digraph. You would have a massive matrix of 1.2 billion x 1.2 billion. The worst part is that most of it would be empty (zeros) since people are friends to at most few thousands. + +TIP: When the graph has few connections compared to the number of nodes we say that we have a *sparse graph*. 
On the opposite, if we have almost complete graphs, we say we have a *dense graph*. + +The space complexity of the adjacency matrix is *O(|V|^2^)*, where |V| is the number of vertices/nodes. + +=== Adjacency List + +Another way to represent a graph is by using an adjacency list. This time instead of using an array (matrix) we use a list. + +.Graph represented as an Adjacency List. +image:image48.png[image,width=528,height=237] + +If we want to add a new node to the list, we can do it by adding one element to the end of the array of nodes *O(1)*. In the next section, we are going to explore the running times of all operations in an adjacency list. + +== Implementing a Graph data structure + +Since adjacency lists are more efficient (than adjacency matrix), we are going to use to implement a graph data structure. + +Let's start by creating the constructor of the Graph class. + +.Graph's constructor +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=constructor] +---- + +Notice that the constructor takes a parameter. The `edgeDirection` allow us to use one class for both undirected and directed graphs. + +== Adding a vertex + +For adding a vertex, we first need to check if the node already exists. If so, we return the node. + +.Graphs's `addVertex` method +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=addVertex, indent=0] +---- +<1> Check if value is already on the graph. If it is, then return it. +<2> Create new `Node` with the given value. +<3> Set `hashMap` with value and node pair. + +If the node doesn't exist, then we create the new node and add it to a `HashMap`. + +TIP: <> stores key/pair value very efficiently. Lookup is `O(1)`. + +The `key` is the node's value, while the `value` is the newly created node. 
+ +The `Node` class is constructed as follows: + +.Node's class (for Graph data structure) +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=constructor, indent=0] +---- + + +== Deleting a vertex + +.Graphs's `removeVertex` method +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=removeVertex, indent=0] +---- +<1> Try to find if node exists. +<2> Remove related edges. See `removeAdjacent` below. +<3> Remove node with the given value. + +Notice on the callout 2, that we visit every edge on the graph and remove the ones that contain the node to remove. + +For removing adjacent nodes, we use Node's method called `removeAdjacent` that can be implemented as follows: + +.Node's `removeAdjacent` +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=removeAdjacent, indent=0] +---- + +All adjacencies are stored as a HashSet to provide constant time deletion. + +== Adding an edge + +An edge is a connection between two nodes (vertices). If the graph is undirected means that every link is a two-way street. When we create the edge from node 1 to node 2, we also need to establish a connection between node 2 and 1 for undirected graphs. + +If we are dealing with a digraph (directed graph), then we create one edge. + +.Graphs's `addEdge` method +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=addEdge, indent=0] +---- +<1> Find or create nodes if they don't exists yet. +<2> Create edge from source to destination. +<3> If us a undirected graph, create the edge on the other direction. + +We can add adjacencies using the `addAdjacent` method from the Node class. 
+ +.Node's `addAdjacent` +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=addAdjacent, indent=0] +---- + +== Querying Adjacency + +.Graphs's `areAdjacents` method +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=areAdjacents, indent=0] +---- + +.Node's `isAdjacent` +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=isAdjacent, indent=0] +---- + + +== Deleting an edge + +.Graphs's `removeEdge` method +[source, javascript] +---- +include::{codedir}/data-structures/graphs/graph.js[tag=removeEdge, indent=0] +---- + +.Node's `removeAdjacent` +[source, javascript] +---- +include::{codedir}/data-structures/graphs/node.js[tag=removeAdjacent, indent=0] +---- + +== Graph Complexity + +.Time complexity for a Graph data structure +|=== +.2+.^s| Data Structure 2+^s| Vertices 2+^s| Edges .2+^.^s| Space Complexity +^|_Add_ ^|_Remove_ ^|_Add_ ^|_Remove_ +| Graph (adj. matrix) ^| O(\|V\|^2^) ^| O(\|V\|^2^) ^|O(1) ^|O(1) ^|O(\|V\|^2^) +| Graph (adj. list w/array) ^| O(1) ^| O(\|V\| + \|E\|)) ^|O(1) ^|O(\|V\| + \|E\|) ^|O(\|V\| + \|E\|) +| Graph (adj. list w/HashSet) ^| O(1) ^| O(\|V\|)) ^|O(1) ^|O(\|V\|) ^|O(\|V\| + \|E\|) +|=== + +As you can see using a `HashSet` on for the adjacency list make a performance improvement. + += Summary + +In this section, we learned about Graphs applications, properties and how we can implement them. We mention that you can represent a graph as a matrix or as a list of adjacencies. We went for implementing the later since it's more space efficient. We cover the basic graph operations like adding and removing nodes and edges. In the algorithms section, we are going to cover searching values in the graph. 
diff --git a/book/chapters/greedy-algorithms--intro.adoc b/book/chapters/greedy-algorithms--intro.adoc
new file mode 100644
index 00000000..70b8f387
--- /dev/null
+++ b/book/chapters/greedy-algorithms--intro.adoc
@@ -0,0 +1,42 @@
+Greedy algorithms are designed to find a solution by going one step at a time and using heuristics to determine the best choice.
+They are quick, but they don't always lead to the most optimal result, since they might not take into consideration all the options before giving a solution.
+
+A good example of a greedy algorithm that doesn't work well is finding the largest sum on a tree.
+
+[graphviz, Find largest sum, svg]
+....
+graph G {
+  5 -- 3 [color="#B8E986", penwidth=2]
+  5 -- 7 [color="#FF5252", penwidth=2]
+  3 -- 87 [color="#B8E986", penwidth=2]
+  3 -- 1
+  7 -- 2
+  7 -- 4 [color="#FF5252", penwidth=2]
+
+  label="Optimal vs. Greedy path"
+}
+....
+
+The greedy algorithm will start at the root and ask "Which number is bigger, 3 or 7?" Then go with 7 and later 4. As you can see in the diagram, the largest sum would be the path `5 - 3 - 87`. A greedy algorithm never goes back on its choices. This makes it different from dynamic programming, which is exhaustive and guaranteed to find the best option.
+
+Greedy algorithms are well suited when a local optimal solution is also a global optimal solution.
+
+[TIP]
+====
+Greedy algorithms make the choice that looks best in the moment based on a heuristic such as smallest, largest, best ratio, and so on.
+This kind of algorithm only gets one shot at finding the solution and never goes back to consider other options.
+====
+
+Don't get the wrong idea — some greedy algorithms work very well if they are designed correctly.
+
+.Some examples of greedy algorithms that work well:
+- <>: we select the best (minimum value), remove it from the input, and then select the next minimum until everything is processed.
+- <>: the "merge" uses a greedy algorithm, where it combine two sorted arrays by looking at their current values and choosing the best (minimum) at every time. + + +.In general, we can follow these steps to design Greedy Algorithms: +1. Take a sample from the input data (usually in a data structure like array/list, tree, graph). +2. Greedy choice: use a heuristic function that will choose the best candidate. E.g., Largest/smallest number, best ratio, etc. +3. Reduce the processed input and pepeat step #1 and #2 until all data is gone. +4. Return solution. +5. Check correctness with different examples and edge cases. diff --git a/book/chapters/greedy-algorithms--knapsack-problem.adoc b/book/chapters/greedy-algorithms--knapsack-problem.adoc new file mode 100644 index 00000000..51e16cef --- /dev/null +++ b/book/chapters/greedy-algorithms--knapsack-problem.adoc @@ -0,0 +1,54 @@ += Fractional Knapsack Problem + +We are going to use the "Fractional Knapsack Problem" to learn how to design greedy algorithms. The problem is the following: + +> You are going to steal legumes (rice, beans, chickpeas, lentils) and you only brought a knapsack. What proportion of items can you choose to to get the highest loot without exceeding the maximum weight of the bag? + +Let's say we have the following items available. + +.Knpasack Input +[source, javascript] +---- +const items = [ + { value: 1, weight: 1}, + { value: 4, weight: 3 }, + { value: 5, weight: 4 }, + { value: 7, weight: 5 }, +]; + +const maxWeight = 7; +---- + +So, we have 4 items that we can take. We can't take them all because the total weight is `13` and the maximum we can carry is `7`. We can't just take the first one because with value `1` because obviosly is not the best profit. + +How would you solve this problem? + + +First, we have to define what parameters are we going to use to make our *greedy choice*. This some ideas: + +- We can take items with the *largest* value in hopes to get maximize profit. 
Based on that, we can take the last item and the first one, having a total weight of 7 and a total value of 8.
Exercitation cupidatat exercitation excepteur ex pariatur qui qui sint amet consectetur laborum ex mollit dolore. + +Et do sunt do labore culpa est eu ut fugiat eiusmod ea excepteur. Irure commodo adipisicing in aute aliquip laborum laboris reprehenderit incididunt in sunt. Cupidatat veniam est culpa ex eu aute voluptate tempor aliqua ullamco sunt et consectetur. Eu laboris mollit culpa consequat. Sunt mollit quis dolor nostrud. In duis mollit do adipisicing veniam do deserunt exercitation Lorem deserunt aliquip. Ea esse reprehenderit incididunt eu deserunt sit nulla sint non eiusmod nisi eu et irure. + +Ad commodo anim nulla occaecat non. Aute fugiat laborum ut mollit exercitation aute proident reprehenderit culpa consectetur. Cillum officia laborum proident labore sunt est eiusmod proident. Lorem nostrud ea qui tempor culpa ullamco ipsum. Dolore nulla minim qui incididunt qui sint consectetur quis tempor esse minim. Do id consequat commodo sit officia aliqua officia reprehenderit eiusmod elit do amet. diff --git a/book/chapters/heap.adoc b/book/chapters/heap.adoc new file mode 100644 index 00000000..104af153 --- /dev/null +++ b/book/chapters/heap.adoc @@ -0,0 +1,3 @@ += Heap + +Sit nostrud Lorem nulla ipsum occaecat enim eiusmod adipisicing velit et cupidatat laboris incididunt. Sunt ex eiusmod amet nulla quis. Officia elit non sunt esse sint. Non enim do laborum adipisicing officia et aliquip cillum ut nisi ipsum. Minim duis minim velit amet laborum aliquip pariatur irure deserunt ex. diff --git a/book/chapters/index.adoc b/book/chapters/index.adoc new file mode 100644 index 00000000..85e81b85 --- /dev/null +++ b/book/chapters/index.adoc @@ -0,0 +1,2 @@ +[index] += Index diff --git a/book/chapters/insertion-sort.adoc b/book/chapters/insertion-sort.adoc new file mode 100644 index 00000000..7cfc2ace --- /dev/null +++ b/book/chapters/insertion-sort.adoc @@ -0,0 +1,35 @@ += Insertion Sort + +Insertion sort is a simple sorting algorithm. 
It is one of the most natural ways of sorting.
+ +.In this part we are going to learn about the following linear data structures: +- Array +- Linked List +- Stack +- Queue + +Later, in the 2nd part we are going to explore non-linear data structures like Graphs and Trees. +ifdef::backend-html5[] +If you want to have a general overview of each one, take a look at the following interactive diagram: ++++ + ++++ +endif::[] + +-- diff --git a/book/chapters/linear-data-structures-outro.adoc b/book/chapters/linear-data-structures-outro.adoc new file mode 100644 index 00000000..55de385b --- /dev/null +++ b/book/chapters/linear-data-structures-outro.adoc @@ -0,0 +1,35 @@ + += Array vs. Linked List & Queue vs. Stack + +In this chapter, we explored the most used linear data structures such as Arrays, Linked Lists, Stacks and Queues. We implemented them and discussed the runtime of their operations. + +To sum up, + +.Use Arrays when… +* You need to access data in random order fast (using an index). +* Your data is multi-dimensional (e.g., matrix, tensor). + +.Use Linked Lists when: +* You will access your data sequentially. +* You want to save memory and only allocate memory as you need it. +* You want constant time to remove/add from extremes of the list. + +.Use a Queue when: +* You need to access your data in a first-come, first served basis (FIFO). +* You need to implement a <> + +.Use a Stack when: +* You need to access your data as last-in, first-out (LIFO). 
+* You need to implement a <> + +.Time Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) +| <> ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n) +| <> (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +|=== diff --git a/book/chapters/linked-list.adoc b/book/chapters/linked-list.adoc new file mode 100644 index 00000000..ee38e39b --- /dev/null +++ b/book/chapters/linked-list.adoc @@ -0,0 +1,268 @@ += Linked List + +A list (or Linked List) is a linear data structure where each node is linked to another one. + +Linked Lists can be: +- Singly: every item has a pointer to the next node +- Doubly: every node has a reference to the next and previous object +- Circular: the last element points to the first one. +We are going to explore the first two in the next sections. + +== Singly Linked List + +Each element or node is *linked* to the next one by a reference. When a node only has the reference to the next element, it's called *singly linked list*: + +.Singly Linked List Representation: each node has a reference (blue arrow) to the next one. +image:image19.png[image,width=498,height=97] + + +Usually, a Linked List is referenced by the first element in called *head* (or *root* node). For instance, if you want to get the `cat` element from the example above, then the only way to get there is using the next field on the head node. You would get `art` first, then use the next field recursively until you eventually get the `cat` element. 
+ +== Doubly Linked List + +When each node has a reference to the next item and also the previous one, then we have a *doubly linked list*. + +.Doubly Linked List: each node has a reference to the next and previous element. +image:image20.png[image,width=528,height=74] + +If we implement the code for the `Node` elements, it would be something like this: + +// image:image21.png[image,width=528,height=285] + +.Linked List Node +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/node.js[tag=snippet] +---- + +== Linked List vs. Array + +Arrays allow you to access data anywhere in the collection using an index. However, Linked List visits nodes in sequential order. In the worst case scenario, it takes _O(n)_ to get an element from a Linked List. You might be wondering: Isn’t always an array more efficient with O(1) access time? It depends. + +We also have to understand the space complexity to see the trade-offs between arrays and linked lists. An array pre-allocates contiguous blocks of memory. When the array is getting full, it has to copy all the elements over to a new space usually 2x bigger. It takes _O(n)_ to copy all the items over. On the other hand, LinkedList’s nodes only reserve precisely the amount of memory it needs. They don’t have to be next to each other, nor large chunks of memory have to be booked beforehand like arrays. Linked List is more on a "grow as you go" basis. + +Another difference is that adding/deleting at the beginning on an array takes O(n), however, in the linked list is a constant operation O(1) as we will implement later. + +A drawback of a linked list is that if you want to insert/delete an element at the end of the list, you would have to navigate the whole collection to find the last one O(n). However, this can be solved by keeping track of the last element in the list. We are going to implement that! + +== Implementing a Linked List + +We are going to implement a doubly linked list. 
First, let's start with the constructor. + +// image:image22.png[image,width=528,height=251] + +.Linked List's constructor +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=constructor] + + // ... methods go here ... +} +---- + +In our constructor, we keep a reference of the first (and last node for performance reasons). + +== Searching by value + +Finding an element by value there’s no other way than iterating through the whole list. + +.Linked List's searching by values +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=searchByValue, indent=0] +---- + +If we find the element, we will return the index otherwise `undefined`. The runtime for locating an item by value is _O(n)_. + +For finding elements by value or position we are using the following helper function: +.Find elements using a callback +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=find, indent=0] +---- +<1> We initialize two variables `current` to the first node and `position` to keep track of the index. +<2> While `current` node is not null we keep going. +<3> On each loop we move to the next node and increment the index. +<4> We invoke the callback passing the current position and node. If the callback returns something, then we stop and return that value. +<5> Return whatever result we got from the callback. E.g., we can return the index or the node itself or any other calculation. + +We are going to use this `find` method again to implement searching by index. + +== Searching by index + +Searching by index is very similar, we iterate through the list until we find the element that matches the position. + +.Linked List's searching by index (position) +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=searchByIndex, indent=0] +---- + +If there’s no match, we return `undefined` then. The runtime is _O(n)_. 
As you might notice, the methods for searching by value and by index look pretty similar.
We got a constant time for inserting at the beginning and the end of the list: *O(1)*. + + +=== Inserting element at the middle of the list + +For inserting an element at the middle of the list, you would need to specify the position (index) in the collection. Then, you create the new node and update the references to it. + +Let’s do an example, with a doubly linked list. We want to insert the `new` node in the 2^nd^ position. + +.Inserting node in the middle of a doubly linked list. +image:image25.png[image,width=528,height=358] + +Let’s work in the code to do this: + +.Linked List's add to the middle of the list +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=addMiddle, indent=0] +---- +<1> If the new item goes to position 0, then we reuse the `addFirst` method, and we are done! +<2> However, If we are adding to the last position, then we reuse the `addLast` method, and done! +<3> Adding `newNode` to the middle: First, create the `new` node only if the position exists. Take a look at <> to see `get` implementation. +<4> Set newNode `previous` reference. +<5> Set newNode `next` link. +<6> No other node in the list is pointing to `newNode`, so we have to make the prior element point to `newNode`. +<7> Make the next element point to `newNode`. + + +Take notice that we reused, `addFirst` and `addLast` methods. For all the other cases the insertion is in the middle. We use `current.previous.next` and `current.next.previous` to update the surrounding elements and make them point to the new node. Inserting on the middle takes *O(n)* because we have to iterate through the list using the `get` method. + +== Deletion + +Deleting is an interesting one. We don’t delete an element; we remove all references to that node. Let’s go case by case to explore what happens. + +=== Deleting element from the head + +Deleting the first element (or head) is a matter of removing all references to it. 
+ +.Deleting an element from the head of the list +image:image26.png[image,width=528,height=74] + +For instance, to remove the head (“art”) node, we change the variable `first` to point to the second node “dog”. We also remove the variable `previous` from the "dog" node, so it doesn't point to the “art” node. The garbage collector will get rid of the “art” node when it seems nothing is using it anymore. + +In code, it looks like this: + +.Linked List's remove from the beginning of the list +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=removeFirst, indent=0] +---- + +As you can see, when we want to remove the first node we make the 2nd element the first one. + +=== Deleting element from the tail + +Removing the last element from the list would require to iterate from the head until we find the last one, that’s O(n). But, If we have a reference to the last element, which we do, We can do it in _O(1)_ instead! + +.Removing last element from the list using the last reference. +image:image27.png[image,width=528,height=221] + + +For instance, if we want to remove the last node “cat”. We use the last pointer to avoid iterating through the whole list. We check `last.previous` to get the “dog” node and make it the new `last` and remove its next reference to “cat”. Since nothing is pointing to “cat” then is out of the list and eventually is deleted from memory by the garbage collector. + +Let’s code this up like this: + +.Linked List's remove from the end of the list +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=removeLast, indent=0] +---- + + +The code is very similar to `removeFirst`, but instead of first we update `last` reference, and instead of nullifying `previous` we nullify its `next` reference. + +=== Deleting element from the middle + +To remove a node from the middle, we make the surrounding nodes to bypass the one we want to delete. 
+ +.Remove the middle node +image:image28.png[image,width=528,height=259] + + +In the illustration, we are removing the middle node “dog” by making art’s `next` variable to point to cat and cat’s `previous` to be “art” totally bypassing “dog”. + +Let’s implement it: + +.Linked List's remove from the middle of the list +[source, javascript] +---- +include::{codedir}/data-structures/linked-lists/linked-list.js[tag=removeByPosition, indent=0] +---- + +Notice that we are using the `get` method to get the node at the current position. That method loops through the list until it found the node at the specified location. This iteration has a runtime of _O(n)_. + +== Linked List Complexity vs. Array Complexity + +So far, we have seen two liner data structures with different use cases. Here’s a summary: + + +.Big O cheat sheet for Linked List and Array +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| Array ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) +| Linked List (singly) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) +| Linked List (doubly) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) +|=== + +If you compare the singly linked list vs. doubly linked list, you will notice that the main difference is deleting elements from the end. For a singly list is *O(n)*, while for a doubly list is *O(1)*. + +Comparing an array with a doubly linked list, both have different use cases: + +Use arrays when: + +* You want to access *random* elements by numeric key or index in constant time O(1). +* You need two-dimensional and multi-dimensional arrays. + +Use a doubly linked list when: + +* You want to access elements in a *sequential* manner only like <> or <>. + +* You want to insert elements at the start and end of the list. The linked list has O(1) while array has O(n). 
+* You want to save some memory when dealing with possibly large data sets. Arrays pre-allocate a large chunk of contiguous memory on initialization. Lists are more “grow as you go”. + +For the next two linear data structures <> and <>, we are going to use a doubly linked list to implement them. We could use an array as well, but since inserting/deleting from the start perform better on linked-list, we are going use that. diff --git a/book/chapters/map-hashmap-vs-treemap.adoc b/book/chapters/map-hashmap-vs-treemap.adoc new file mode 100644 index 00000000..3015eaf1 --- /dev/null +++ b/book/chapters/map-hashmap-vs-treemap.adoc @@ -0,0 +1,27 @@ += HashMap vs TreeMap + +.A map can be implemented using hash functions or binary search tree: +- *HashMap*: it’s a map implementation using an *array* and *hash function*. The job of the hash function is to convert the key into an index that contains the matching data. Optimized HashMap can have an average runtime of *O(1)*. +- *TreeMap*: it’s a map implementation that uses a self-balanced Binary Search Tree (red-black tree). The BST nodes store the key, and the value and nodes are sorted by key guaranteeing an *O(log n)* look up. + + +.When to use a TreeMap vs. HashMap? +* `HashMap` is more time-efficient. A `TreeMap` is more space-efficient. +* `TreeMap` search complexity is *O(log n)*, while an optimized `HashMap` is *O(1)* on average.  +* `HashMap`’s keys are in insertion order (or random in some implementations). `TreeMap`’s keys are always sorted. +* `TreeMap` offers some statistical data for free such as: get minimum, get maximum, median, find ranges of keys. `HashMap` doesn’t. +* `TreeMap` has a guarantee always an *O(log n)*, while `HashMap`s has an amortized time of *O(1)* but in the rare case of a rehash, it would take an *O(n)*. 
+ +== TreeMap Time complexity vs HashMap + +As we discussed so far, there are trade-off between the implementations + +.Time complexity for different Maps implementations +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| Hash Map (naïve) ^|O(n) ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| Hash Map (optimized) ^|O(1)* ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +| Tree Map (Red-Black Tree) ^|O(log n) ^|O(n) ^|O(log n) ^|O(log n) ^|O(log n) +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. diff --git a/book/chapters/map-hashmap.adoc b/book/chapters/map-hashmap.adoc new file mode 100644 index 00000000..ccf4b44c --- /dev/null +++ b/book/chapters/map-hashmap.adoc @@ -0,0 +1,282 @@ += HashMap + +A HashMap is a Map implementation. HashMaps are composed of two things: +1) a hash function and +2) a bucket array to store values. + +Before going into the implementation details let’s give an overview of how it works. Let’s say we want to keep a tally of things: + +.HashMap example +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=snippet, indent=0] +---- + +How are the keys mapped to their values? +Using a hash function. Here’s an illustration: + +.HashMap representation. Keys are mapped to values using a hash function. +image:image41.png[image,width=528,height=299] + + +.This is the main idea: +1. We use a *hash function* to transform the keys (e.g., dog, cat, rat, …) into an array index. This array is called *bucket*. +2. The bucket holds the values (linked list in case of collisions). + +In the illustration, we have a bucket size of 10. In bucket 0, we have a collision. Both `cat` and `art` keys map to the same bucket even thought their hash codes are different. + +In a HashMap, a *collision* is when different keys lead to the same index. They are nasty for performance since it can reduce the search time from *O(1)* to *O(n)*. 
+ +Having a big bucket size can avoid a collision but also can waste too much memory. We are going to build an _optimized_ HashMap that re-sizes itself when it is getting full. This avoids collisions and doesn’t spend too much memory upfront. Let’s start with the hash function. + +== Designing an optimized hash function + +To minimize collisions, we need to create an excellent hash function. + +IMPORTANT: A *perfect* hash function is one that assigns a unique array index for every different key. + +It’s no practical and memory-wise wasteful to have a perfect hash function, so we are going to shoot for a cost-effective hash function instead. + +.To recap: +- A hash function converts keys into array indices. +- A hash function is composed of two parts: +1. *Hash Code*: maps any key into an integer (unbonded) +2. *Compression function*: maps an arbitrary integer to integer in the range of [0… BUCKET_SIZE -1]. + +Before doing a great hash function, let's see what a lousy hash function looks like. 😉 + +=== Analysing collisions on bad hash code functions + +The goal of a hash code function is to convert any value given into a positive integer — a common way to accomplish with summing each string’s Unicode value. + +.Naïve hashing function implementation +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=naiveHashCode, indent=0] +---- + + +This function uses `codePointAt` to get the Unicode value of a character. E.g., `a` has a value of 97, `A` is 65, even https://en.wikipedia.org/wiki/Emoji#Unicode_blocks[emojis have codes]; “[big]#😁#” is `128513`. + +.JavaScript built-in `string.charCodeAt` vs. `string.codePointAt` +**** +The `charCodeAt()` method returns an integer between `0` and `65535` representing the UTF-16 code unit at the given index. However, it doesn’t play nice with Unicode, so it’s better to use `codePointAt` instead. + +The `codePointAt()` method returns a non-negative integer that is the Unicode code point value. 
With this function, we can convert some keys to numbers as follows:
+[source, javascript] +---- +10 % 4 //↪️ 2 +20 % 4 //↪️ 0 +30 % 4 //↪️ 2 +40 % 4 //↪️ 0 +50 % 4 //↪️ 2 +---- + +We get many collisions. [big]#😱# + +Based on statistical data, using a prime number as the modulus produce fewer collisions. + +.Let’s see what happens if the bucket size is a prime number: +[source, javascript] +---- +10 % 7 //↪️ 3 +20 % 7 //↪️ 6 +30 % 7 //↪️ 2 +40 % 7 //↪️ 4 +50 % 7 //↪️ 1 +---- + +Now it’s more evenly distributed!! [big]#😎👍# + +.So, to sum up: +* Bucket size should always be a *prime number*, so data is distributed more evenly and minimized collisions. +* Hash code doesn’t have to be too big. At the end what matters is the few last digits. + +Let’s design a better HashMap with what we learned. + +=== Implementing an optimized hash function + +Take a look at the following function: + +.Optimal Hash function +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=hashFunction, indent=0] +---- + +Is somewhat similar to what we did before, in the sense that we use each letter’s Unicode is used to compute the hash. The difference is: + +1. We are using the XOR bitwise operation (^) to produce an *avalanche effect*, where a small change in two strings produces completely different hash codes. E.g. + +.Hash Code example using FVN1a +[source, javascript] +---- +hashCode('cat') //↪️ 4201630708 +hashCode('cats') //↪️ 3304940933 +---- + +.Fowler/Noll/Vo (FNV) Hash +**** +It is a non-cryptographic hash function designed to be fast while maintaining a low collision rate. The high dispersion of the FNV hashes makes them well suited for hashing nearly identical strings such as URLs, keys, IP addresses, zip codes, and others. +**** + +We are using the FVN-1a prime number (16777619) and offset (2166136261) to reduce collisions even further. If you are curious where these numbers come from check out this https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function[link]. 
The FNV-1a hash function is a good trade-off between speed and collision prevention.
We are going to implement this function in a bit. + +=== Getting values out of a HashMap + +For getting values out of the Map, we do something similar to inserting. We convert the key into an index using the hash function. + +.HashMap's getEntry method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=getEntry, indent=0] +---- +<1> Convert key to an array index. +<2> If the bucket is empty create a new linked list +<3> Use Linked list's <> method to find value on the bucket. +<4> Return bucket and entry if found. + +With the help of the `getEntry` method, we can do the `HashMap.get` and `HashMap.has` methods: + +.HashMap's get method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=get, indent=0] +---- + +and also, + +.HashMap's has method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=has, indent=0] +---- + +For `HashMap.has` we only care if the value exists or not, while that for `HashMap.get` we want to return the value or `undefined` if it doesn’t exist. + +=== Deleting from a HashMap + +Removing items from a HashMap is not too different from what we did before: + +.HashMap's delete method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=delete, indent=0] +---- + +If the bucket doesn’t exist or is empty, we don't have to do anything else. If the value exists, we use the +https://github.com/amejiarosario/dsa.js/blob/master/src/data-structures/linked-lists/linked-list.js[`LinkedList.remove` ] +method. + +== Rehashing the HashMap + +Rehashing is a technique to minimize collisions when a hash map is getting full. It doubles the size of the map and recomputes all the hash codes and insert data in the new bucket. + +When we increase the map size, we try to find the next prime. We explained that keeping the bucket size a prime number is beneficial for minimizing collisions. 
+ +.HashMap's rehash method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=rehash, indent=0] +---- + +In the +https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/primes.js[prime.js] file you can find the implementation for finding the next prime. Also, you can see the full HashMap implementation on this file: https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/hashmap.js[hashmap.js] + +== HashMap time complexity + +Hash Map it’s very optimal for searching values by key in constant time *O(1)*. However, searching by value is not any better than an array since we have to visit every value *O(n)*. + +.Time complexity for a Hash Map +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| Hash Map (naïve) ^|O(n) ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| Hash Map (optimized) ^|O(1)* ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time. + +As you can notice we have amortized times since, in the unfortunate case of a rehash, it will take O(n) while it resizes. After that, it will be on average *O(1)*. diff --git a/book/chapters/map-intro.adoc b/book/chapters/map-intro.adoc new file mode 100644 index 00000000..ceeec195 --- /dev/null +++ b/book/chapters/map-intro.adoc @@ -0,0 +1,18 @@ += Map + +A map is a data structure to store pairs of data: *key* and *value*. In an array, you can only store values. The array’s key is always the position index. However, in a *Map* the key can be whatever you want. + +IMPORTANT: Map is a data structure that _maps_ *keys* to *values*. + +Many languages have maps already built-in. 
JavaScript/Node has `Map`: + +.JavaScript Built-in Map Usage +[source, javascript] +---- +include::{codedir}/data-structures/maps/map.js[tag=snippet, indent=0] +---- + +The attractive part of Maps is that they are very performant usually *O(1)* or *O(log n)* depending on the implementation. We can implement the maps using two different techniques: + +* *HashMap*: it’s a map implementation using an *array* and *hash function*. The job of the hash function is to convert the key into an index that contains the matching data. Optimized HashMap can have an average runtime of *O(1)*. +* *TreeMap*: it’s a map implementation that uses a self-balanced Binary Search Tree (red-black tree). The BST nodes store the key, and the value and nodes are sorted by key guaranteeing an *O(log n)* look up. diff --git a/book/chapters/map-treemap.adoc b/book/chapters/map-treemap.adoc new file mode 100644 index 00000000..2487d73b --- /dev/null +++ b/book/chapters/map-treemap.adoc @@ -0,0 +1,88 @@ += TreeMap + +A TreeMap is a Map implementation using Binary Search Trees. + +Implementing a Map with a tree, TreeMap, has a couple of advantages over a HashMap: + +* Keys are always sorted. +* Statistical data can be easily obtained like the median, highest, lowest key. +* Collisions are not a concern so in the worst case is still *O(log n)*. +* Trees are more space efficient and don’t need to allocate memory beforehand (e.g. `HashMap`’s initial capacity) nor you have to rehash when is getting full. + +Ok, now that you know the advantages, let’s implement it! +For a full comparison read the <> section. + +Let’s get started with the essential functions. They have the same interface as the `HashMap` (but the implementation is different). 
+
+.TreeMap class overview
+[source, javascript]
+----
+class TreeMap {
+  constructor(){}
+  set(key, value) {}
+  get(key) {}
+  has(key) {}
+  delete(key) {}
+}
+----
+
+== Inserting values into a TreeMap
+
+For inserting a value into a TreeMap, we first need to initialize the tree:
+
+.TreeMap constructor
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=constructor, indent=0]
+----
+
+The tree can be an instance of any Binary Search Tree that we implemented so far. However, for better performance, it should be a self-balanced tree like a https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/trees/red-black-tree.js[Red-Black Tree] or https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/trees/avl-tree.js[AVL Tree].
+
+
+Let's implement the method to add values to the tree.
+
+.TreeMap `add` method and `size` attribute
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=set, indent=0]
+----
+
+Adding values is very easy (once we have the underlying tree implementation).
+
+== Getting values out of a TreeMap
+
+When we search by key in a tree map, it takes *O(log n)*. This is the implementation:
+
+.TreeMap `get` and `has` method
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=get, indent=0]
+----
+
+One side effect of storing keys in a tree is that they don't come up in insertion order. Instead, they are ordered by key.
+
+.TreeMap iterators
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=iterators, indent=0]
+----
+<1> We implemented the default iterator using the in-order traversal. That's useful for getting the keys sorted.
+
+.JavaScript Iterators and Generators
+****
+Generators are useful for producing values that you can iterate in a `for...of` loop. Generators use the `function*` syntax which expects to have a `yield` with a value.
+**** + +== Deleting values from a TreeMap + +Removing elements from TreeMap is simple. + +.TreeMap `delete` method +[source, javascript] +---- +include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=delete, indent=0] +---- + +The BST implementation does all the heavy lifting. + +That’s it! To see the full file in context, click here: https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/maps/tree-maps/tree-map.js[here] diff --git a/book/chapters/map.adoc b/book/chapters/map.adoc new file mode 100644 index 00000000..c41fec9a --- /dev/null +++ b/book/chapters/map.adoc @@ -0,0 +1,411 @@ += Map + +A map is a data structure to store pairs of data: *key* and *value*. In an array, you can only store values. The array’s key is always the position index. However, in a *Map* the key can be whatever you want. + +IMPORTANT: Map is a data structure that _maps_ *keys* to *values*. + +Many languages have maps already built-in. JavaScript/Node has `Map`: + +.JavaScript Built-in Map Usage +[source, javascript] +---- +include::{codedir}/data-structures/maps/map.js[tag=snippet, indent=0] +---- + +The attractive part of Maps is that they are very performant usually *O(1)* or *O(log n)* depending on the implementation. We can implement the maps using two different techniques: + +* *HashMap*: it’s a map implementation using an *array* and *hash function*. The job of the hash function is to convert the key into an index that contains the matching data. Optimized HashMap can have an average runtime of *O(1)*. +* *TreeMap*: it’s a map implementation that uses a self-balanced Binary Search Tree (red-black tree). The BST nodes store the key, and the value and nodes are sorted by key guaranteeing an *O(log n)* look up. + +== HashMap vs TreeMap + +Here are the key differences: + +* `HashMap` is more time-efficient. A `TreeMap` is more space-efficient. +* `TreeMap` search complexity is *O(log n)*, while an optimized `HashMap` is *O(1)* on average.  
+* `HashMap`’s keys are in insertion order (or random in some implementations). `TreeMap`’s keys are always sorted. +* `TreeMap` offers some statistical data for free such as: get minimum, get maximum, median, find ranges of keys. `HashMap` doesn’t. +* `TreeMap` has a guarantee always an *O(log n)*, while `HashMap`s has an amortized time of *O(1)* but in the rare case of a rehash, it would take an *O(n)*. + +== How hash maps work? + +A HashMap is composed of two things: 1) a hash function and 2) a bucket array to store values. + +Before going into the implementation details let’s give an overview of how it works. Let’s say we want to keep a tally of things: + +.HashMap example +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=snippet, indent=0] +---- + +How are the keys mapped to their values? +Using a hash function. Here’s an illustration: + +.HashMap representation. Keys are mapped to values using a hash function. +image:image41.png[image,width=528,height=299] + + +.This is the main idea: +1. We use a *hash function* to transform the keys (e.g., dog, cat, rat, …) into an array index. This array is called *bucket*. +2. The bucket holds the values (linked list in case of collisions). + +In the illustration, we have a bucket size of 10. In bucket 0, we have a collision. Both `cat` and `art` keys map to the same bucket even thought their hash codes are different. + +In a HashMap, a *collision* is when different keys lead to the same index. They are nasty for performance since it can reduce the search time from *O(1)* to *O(n)*. + +Having a big bucket size can avoid a collision but also can waste too much memory. We are going to build an _optimized_ HashMap that re-sizes itself when it is getting full. This avoids collisions and doesn’t spend too much memory upfront. Let’s start with the hash function. + +=== Designing an optimized hash function + +To minimize collisions, we need to create an excellent hash function. 
+
+IMPORTANT: A *perfect* hash function is one that assigns a unique array index for every different key.
+
+It’s not practical and memory-wise wasteful to have a perfect hash function, so we are going to shoot for a cost-effective hash function instead.
+
+.To recap:
+- A hash function converts keys into array indices.
+- A hash function is composed of two parts:
+1. *Hash Code*: maps any key into an integer (unbounded)
+2. *Compression function*: maps an arbitrary integer to an integer in the range of [0… BUCKET_SIZE -1].
+
+Before doing a great hash function, let's see what a lousy hash function looks like. 😉
+
+==== Analysing collisions on bad hash code functions
+
+The goal of a hash code function is to convert any value given into a positive integer — a common way to accomplish this is by summing each string’s Unicode value.
+
+.Naïve hashing function implementation
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=naiveHashCode, indent=0]
+----
+
+
+This function uses `codePointAt` to get the Unicode value of a character. E.g., `a` has a value of 97, `A` is 65, even https://en.wikipedia.org/wiki/Emoji#Unicode_blocks[emojis have codes]; “[big]#😁#” is `128513`.
+
+.JavaScript built-in `string.charCodeAt` vs. `string.codePointAt`
+****
+The `charCodeAt()` method returns an integer between `0` and `65535` representing the UTF-16 code unit at the given index. However, it doesn’t play nice with Unicode, so it’s better to use `codePointAt` instead.
+
+The `codePointAt()` method returns a non-negative integer that is the Unicode code point value.
+****
+With this function, we can convert some keys to numbers as follows:
+
+.Hashing examples
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=naiveHashCodeExamples, indent=0]
+----
+
+Notice that `rat` and `art` have the same hash code! These are collisions that we need to solve.
+ +Collisions happened because we are just summing the character's codes and are not taking the order into account nor the type. We can do better by offsetting the character value based on their position in the string. We can also add the object type, so number `10` produce different output than string `'10'`. + +.Hashing function implementation that offset character value based on the position +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=hashCodeOffset, indent=0] +---- + +Since Unicode uses 20 bits, we can offset each character by 20 bits based on the position. + +.JavaScript built-in `BigInt` +**** +BigInt allows to operate beyond the maximum safe limit of integers (Number.MAX_SAFE_INTEGER => 9,007,199,254,740,991). BigInt uses the suffix n, e.g. 1n + 3n === 4n. +**** + +As you can imagine the output is a humongous number! We are using `BigInt` that doesn’t overflow. + +.Verifying there's not hashing code duplicates +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=hashCodeOffsetExample, indent=0] +---- + +As you can see We don’t have duplicates if the keys have different content or type. However, we need to represent these unbounded integers. We do that using *compression function* they can be as simple as `% BUCKET_SIZE`. + +However, there’s an issue with the last implementation. It doesn’t matter how humongous is the number if we at the end use the modulus to get an array index. The part of the hash code that truly matters is the last bits. + +.Look at this example with a bucket size of 4. +[source, javascript] +---- +10 % 4 //↪️ 2 +20 % 4 //↪️ 0 +30 % 4 //↪️ 2 +40 % 4 //↪️ 0 +50 % 4 //↪️ 2 +---- + +We get many collisions. [big]#😱# + +Based on statistical data, using a prime number as the modulus produce fewer collisions. 
+ +.Let’s see what happens if the bucket size is a prime number: +[source, javascript] +---- +10 % 7 //↪️ 3 +20 % 7 //↪️ 6 +30 % 7 //↪️ 2 +40 % 7 //↪️ 4 +50 % 7 //↪️ 1 +---- + +Now it’s more evenly distributed!! [big]#😎👍# + +.So, to sum up: +* Bucket size should always be a *prime number*, so data is distributed more evenly and minimized collisions. +* Hash code doesn’t have to be too big. At the end what matters is the few last digits. + +Let’s design a better HashMap with what we learned. + +==== Implementing an optimized hash function + +Take a look at the following function: + +.Optimal Hash function +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=hashFunction, indent=0] +---- + +Is somewhat similar to what we did before, in the sense that we use each letter’s Unicode is used to compute the hash. The difference is: + +1. We are using the XOR bitwise operation (^) to produce an *avalanche effect*, where a small change in two strings produces completely different hash codes. E.g. + +.Hash Code example using FVN1a +[source, javascript] +---- +hashCode('cat') //↪️ 4201630708 +hashCode('cats') //↪️ 3304940933 +---- + +.Fowler/Noll/Vo (FNV) Hash +**** +It is a non-cryptographic hash function designed to be fast while maintaining a low collision rate. The high dispersion of the FNV hashes makes them well suited for hashing nearly identical strings such as URLs, keys, IP addresses, zip codes, and others. +**** + +We are using the FVN-1a prime number (16777619) and offset (2166136261) to reduce collisions even further. If you are curious where these numbers come from check out this https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function[link]. + +FVN-1a hash function is a good trade-off between speed and collision prevention. + +Now that we have a proper hash function. Let’s move on with the rest of the HashMap implementation. 
+ +== Implementing a HashMap in JavaScript + +Let’s start by creating a class and its constructor to initialize the hash map. We are going to have an array called *buckets* to hold all the data as below: + +.HashMap's constructor +[source, javascript] +---- +class HashMap { +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=constructorPartial, indent=2] + this.buckets = new Array(this.initialCapacity); + this.size = 0; + this.collisions = 0; + } + +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=getLoadFactor, indent=2] +} +---- + +Notice that we are also keeping track of collisions (just for benchmarking purposes) and a load factor. *The load factor* measures how full the hash map is. We don’t want to be fuller than 75%. If the HashMap is getting too full, then we are going to fix it doing a *rehash* (more on that later). + +=== Inserting elements in a HashMap + +To insert values into a HashMap, we first convert the *key* into *an array index* using the hash function. Each bucket of the array will have an object `{key, value}`. + +There are multiple scenarios for inserting key/values in a HashMap: + +1. Key doesn’t exist yet, so we create the new key/value pair. +2. Key already exists, then we will replace the value. +3. Key doesn’t exist, but the bucket already has other data, this is a collision! We push the new element to the bucket. + +In code, it looks like this: + +.HashMap's set method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=set, indent=0] +---- + +Notice, that we are using a function called `getEntry` to check if the key already exists. It gets the index of the bucket corresponding to the key and then checks if the entry with the given key exists. We are going to implement this function in a bit. + +=== Getting values out of a HashMap + +For getting values out of the Map, we do something similar to inserting. We convert the key into an index using the hash function. 
+ +.HashMap's getEntry method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=getEntry, indent=0] +---- +<1> Convert key to an array index. +<2> If the bucket is empty create a new linked list +<3> Use Linked list's <> method to find value on the bucket. +<4> Return bucket and entry if found. + +With the help of the `getEntry` method, we can do the `HashMap.get` and `HashMap.has` methods: + +.HashMap's get method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=get, indent=0] +---- + +and also, + +.HashMap's has method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=has, indent=0] +---- + +For `HashMap.has` we only care if the value exists or not, while that for `HashMap.get` we want to return the value or `undefined` if it doesn’t exist. + +=== Deleting from a HashMap + +Removing items from a HashMap is not too different from what we did before: + +.HashMap's delete method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=delete, indent=0] +---- + +If the bucket doesn’t exist or is empty, we don't have to do anything else. If the value exists, we use the +https://github.com/amejiarosario/dsa.js[`LinkedList.remove` ] +method. + +=== Rehashing the HashMap + +Rehashing is a technique to minimize collisions when a hash map is getting full. It doubles the size of the map and recomputes all the hash codes and insert data in the new bucket. + +When we increase the map size, we try to find the next prime. We explained that keeping the bucket size a prime number is beneficial for minimizing collisions. 
+ +.HashMap's rehash method +[source, javascript] +---- +include::{codedir}/data-structures/maps/hash-maps/hash-map.js[tag=rehash, indent=0] +---- + +In the +https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/primes.js[prime.js] file you can find the implementation for finding the next prime. Also, you can see the full HashMap implementation on this file: https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/hashmap.js[hashmap.js] + +== HashMap time complexity + +Hash Map it’s very optimal for searching values by key in constant time *O(1)*. However, searching by value is not any better than an array since we have to visit every value *O(n)*. + +.Time complexity for a Hash Map +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| Hash Map (naïve) ^|O(n) ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| Hash Map (optimized) ^|O(1)* ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time. + +As you can notice we have amortized times since, in the unfortunate case of a rehash, it will take O(n) while it resizes. After that, it will be on average *O(1)*. + +== Implementing a TreeMap + +Implementing a Map with a tree, TreeMap, has a couple of advantages over a HashMap: + +* Keys are always sorted. +* Statistical data can be easily obtained like the median, highest, lowest key. +* Collisions are not a concern so in the worst case is still *O(log n)*. +* Trees are more space efficient and don’t need to allocate memory beforehand (e.g. `HashMap`’s initial capacity) nor you have to rehash when is getting full. + +Ok, now that you know the advantages, let’s implement it! +For a full comparison read the <> section again. + +Let’s get started with the essential functions. They have the same interface as the `HashMap` (but the implementation is different). 
+ +.TreeMap class overview +[source, javascript] +---- +class TreeMap { + constructor(){} + set(key, value) {} + get(key) {} + has(key) {} + delete(key) {} +} +---- + +=== Inserting values into a TreeMap + +For inserting a value on a TreeMap, we first need to inialize the tree: + +.TreeMap constructor +[source, javascript] +---- +include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=constructor, indent=0] +---- + +The tree can be an instance of any Binary Search Tree that we implemented so far. However, for better performance, it should be a self-balanced tree like a https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/trees/red-black-tree.js[Red-Black Tree] or https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/trees/avl-tree.js[AVL Tree]. + + +Let's implement the method to add values to the tree. + +.TreeMap `add` method and `size` attribute +[source, javascript] +---- +include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=set, indent=0] +---- + +Adding values is very easy (once we have the underlying tree implementation). + +=== Getting values out of a TreeMap + +When We search by key in a tree map, it takes *O(log n)*. This is the implementation: + +.TreeMap `get` and `has` method +[source, javascript] +---- +include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=get, indent=0] +---- + +One side effect of storing keys in a tree is that they don't come up in insertion order. Instead, they ordered by value. + +.TreeMap iterators +[source, javascript] +---- +include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=iterators, indent=0] +---- +<1> We implemented the default iterator using the in-order traversal. That's useful for getting the keys sorted. + +.JavaScript Iterators and Generators +**** +Generators are useful for producing values that can you can iterate in a `for...of` loop. Generators use the `function*` syntax which expects to have a `yield` with a value. 
+****
+
+=== Deleting values from a TreeMap
+
+Removing elements from TreeMap is simple.
+
+.TreeMap `delete` method
+[source, javascript]
+----
+include::{codedir}/data-structures/maps/tree-maps/tree-map.js[tag=delete, indent=0]
+----
+
+The BST implementation does all the heavy lifting.
+
+That’s it! To see the full file in context, click here: https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/maps/tree-maps/tree-map.js[https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/maps/tree-maps/tree-map.js]
+
+
+== TreeMap Time complexity vs HashMap
+
+As we discussed so far, there are trade-offs between the implementations.
+
+.Time complexity for different Maps implementations
+|===
+.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity
+^|_Index/Key_ ^|_Value_
+| Hash Map (naïve) ^|O(n) ^|O(n) ^|O(n) ^|O(n) ^|O(n)
+| Hash Map (optimized) ^|O(1)* ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)*
+| Tree Map (Red-Black Tree) ^|O(log n) ^|O(n) ^|O(log n) ^|O(log n) ^|O(log n)
+|===
+{empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*.
diff --git a/book/chapters/merge-sort.adoc b/book/chapters/merge-sort.adoc
new file mode 100644
index 00000000..91c53609
--- /dev/null
+++ b/book/chapters/merge-sort.adoc
@@ -0,0 +1,61 @@
+= Merge Sort
+
+Merge sort is an efficient sorting algorithm that uses the "divide and conquer" paradigm to accomplish its task faster. It uses auxiliary memory in the process of sorting.
+
+Merge sort algorithm splits the array in halves until 2 or less elements are left. It sorts these two elements and then merges back all halves until the whole array is sorted.
+
+== Merge Sort Implementation
+
+Merge sort implementation is as follows:
+
+.Merge Sort Algorithm
+. Split the array recursively in half until two or fewer elements are left in each part.
+. Sort each of these small parts (a piece with two or fewer elements is trivial to sort).
+. Merge the sorted pieces back together in pairs.
+. Compare the front elements of the two pieces being merged.
+.. Take the smaller of the two elements and append it to the merged result.
+. Repeat the merging until the whole array is combined and sorted.
+
+
+Let's convert these words into code!
+
+.Merge Sort implementation in JavaScript (mergeSort)
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/merge-sort.js[tag=sort, indent=0]
+----
+<1> Convert any kind of iterable (array, sets, etc.) into an array
+
+As you can see this function is just a wrapper to transform things to an array. The heavy lifting is done in `splitSort` as you can see below.
+
+.Merge Sort implementation in JavaScript (splitSort)
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/merge-sort.js[tag=splitSort, indent=0]
+----
+<1> Recursively divide the array in half until two or fewer elements are left.
+<2> Sort two or fewer elements.
+<3> Merge back the sorted halves in ascending order.
+
+Let's now take a look at the merge function:
+
+.Merge Sort implementation in JavaScript (merge)
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/merge-sort.js[tag=merge, indent=0]
+----
+<1> We need to keep track of 3 array indices (mergedArray, a1 and a2).
+<2> If `array1` has the lowest current value, we insert it into the merged array; if not, we insert `array2`.
+<3> The end result is array1 and array2 combined in ascending order (sorted).
+
+Merge sort has an _O(n log n)_ running time. For more details about how to extract the runtime go to <>.
+
+== Merge Sort Properties
+
+- Time Complexity: [big]#✅# <> _O(n log n)_
+- Space Complexity: [big]#⚠️# <> _O(n)_
+- <>: [big]#✅# Yes
+- <>: [big]#⛔️️️️️# No, it requires auxiliary memory O(n).
+- <>: [big]#️️️️️️️⛔️️️️️# No, new elements will require sorting the whole array.
+- <>: [big]#️️️️️️️⛔️️️️️# No, mostly sorted array takes the same time O(n log n). +- Recursive: Yes diff --git a/book/chapters/non-linear-data-structures-intro-advanced.adoc b/book/chapters/non-linear-data-structures-intro-advanced.adoc new file mode 100644 index 00000000..33917a8d --- /dev/null +++ b/book/chapters/non-linear-data-structures-intro-advanced.adoc @@ -0,0 +1,8 @@ +[partintro] +-- +Nulla aute exercitation adipisicing exercitation sunt nostrud sunt Lorem amet aute proident sit deserunt. Ex ullamco velit eu quis aliqua aliquip quis ullamco esse eiusmod. Enim dolor ex do adipisicing ullamco ipsum. Ad anim magna pariatur enim enim excepteur est cupidatat qui tempor cillum. + +Minim elit Lorem commodo labore amet culpa amet. Culpa ut fugiat est velit. Consectetur deserunt occaecat do cupidatat adipisicing sunt ullamco ut anim. Ut anim aute culpa minim nisi qui. Incididunt mollit quis veniam reprehenderit ad tempor voluptate aliquip ut ut. + +Magna consectetur anim laborum excepteur laborum. Ea magna tempor officia in elit nisi tempor proident aute. Tempor quis tempor sit culpa aliquip. +-- diff --git a/book/chapters/non-linear-data-structures-intro.adoc b/book/chapters/non-linear-data-structures-intro.adoc new file mode 100644 index 00000000..fe3b8209 --- /dev/null +++ b/book/chapters/non-linear-data-structures-intro.adoc @@ -0,0 +1,12 @@ +[partintro] +-- +Non-Linear data structures are everywhere whether you realize it or not. They are used in databases, Web (HTML DOM tree), search algorithms, finding the best route to get home and so on. We are going to learn the basic concepts and when to choose one over the other. + +.In this chapter we are going to learn: +- Exciting <> data structure applications +- Searching efficiently with a <> data structures. +- One of the most versatile data structure of all <>. +- Keeping dups out with a <>. + +By the end of this section, you will know the data structures trade-offs and when to use one over the other. 
+-- diff --git a/book/chapters/output.adoc b/book/chapters/output.adoc new file mode 100644 index 00000000..ad7fd278 --- /dev/null +++ b/book/chapters/output.adoc @@ -0,0 +1,515 @@ +[[_Toc525822218]]Learning Fast Sorting Algorithms + +Introduction. + +* _______ +Topic 1 +_______ +* _______ +Topic 2 +_______ +* _______ +Topic 3 +_______ + += Avoiding Slow Sorting Algorithms + +Iterate and expand on the sub-topic. + +== Selection Sort + +Body text + +== Bubble Sort + +Body text + +== Insertion Sort + +Body text + += Understanding Efficient Sorting Algorithms + +Iterate and expand on the sub-topic. +https://en.wikipedia.org/wiki/Sorting_algorithm[https://en.wikipedia.org/wiki/Sorting_algorithm#Comparison_of_algorithms] + +== Merge Sort + +Stable but uses additional memory, Block merge sort uses constant memory +https://en.wikipedia.org/wiki/Block_sort + +The entire input must be iterated through, and this must occur O(log(n)) +times (the input can only be halved O(log(n)) times). n items iterated +log(n) times gives O(n log(n)). + +== Quicksort + +Body text + +A binary search tree is a dynamic version of what happens during +quicksort. + +== Tim Sort + +Stable but use additional memory + +== Heapsort + +Body text + +== Radix Sort + +t's been proven that no comparison sort can operate faster than this. +Only sorts that rely on a special property of the input such as radix +sort can beat this complexity. The constant factors of mergesort are +typically not that great though so algorithms with worse complexity can +often take less time. +https://softwareengineering.stackexchange.com/a/297161/106607 + +A trie is a dynamic version of what happens during radix sort. + += Summary + +Body text + +5 + +[[_Toc525822222]]Searching Efficiently + +Introduction. + +* _______ +Topic 1 +_______ +* _______ +Topic 2 +_______ +* _______ +Topic 3 +_______ + += Linear Search + +Iterate and expand on the sub-topic. 
+ +== Linear Search + +Body text + +== Binary Search + +Body text + +== Sub-topic + +Body text + += Searching in a Graph + +Iterate and expand on the sub-topic. + +== Depth First Search (DFS) + +Body text + +== Breadth First Search (BFS) + +Body text + +== Sub-topic + +Body text + += Shortest Path with Dijkstra + +Iterate and expand on the sub-topic. + +== Sub-topic + +Body text + +== Sub-topic + +Body text + +== Sub-topic + +Body text + += Summary + +Body text + +5 + +[[_Toc525822227]]Balancing Binary Search Trees for Max Performance + +Introduction. + +* _______ +Topic 1 +_______ +* _______ +Topic 2 +_______ +* _______ +Topic 3 +_______ + += Tree Rotations + +Iterate and expand on the sub-topic. + +== Left Rotation + +Body text + +== Right Rotation + +Body text + +== Left-Right Rotation + +Body text + +== Right-Left Rotation + +Body text + += AVL Tree + +Iterate and expand on the sub-topic. + +== Insertion + +Body text + +== Search by Value + +Body text + +== Deletion + +Body text + += Summary + +Body text + +0 + +[[_Toc525822231]]Algorithmic Thinking + +Introduction. Firstly, address your headings. Next introduce _yourself_ +to the chapter. Start with the topic. What is it. Tell them why it’s +useful. Now explain your chapter structure. What key milestones will hit +throughout the chapter. + +Reiterate the chapter structure with bullet points: + +* _______ +Topic 1 +_______ +* _______ +Topic 2 +_______ +* _______ +Topic 3 +_______ + += Algorithmic Paradigms + +Write your heading. Your headings should generally always try to tell +the reader what they will be _doing_ with the section. A useful device +are “gerund” words. These are –ing words, like “Implementing”, +“Building, “Creating”, “Programming”, “Testing. + +Iterate and expand on the sub-topic. Explain what the sub-topic is. +Where does it fit in to the wider topic? Explain the key steps/subtopics +the reader will perform. 
+ +Towards the end, outline any prerequisites the reader will need – will +they need anything new installed? Will they want any specific files or +programmes open? + +== Brute Force + +Body text. Now outline the key steps needed to perform the topic. + +Linear search + +== Greedy + +Body text, + +A Dijkstra Algorithm - finding shortest path to all graph vertices + +== Divide and Conquer + +Binary Search, +https://github.com/trekhleb/javascript-algorithms#algorithms-by-paradigm + +B Merge Sort + +B Quicksort + +B Tree Depth-First Search (DFS) + +B Graph Depth-First Search (DFS) + +== Dynamic Programming + +Binary Search, + += Topic + +Iterate and expand on the sub-topic. + +== Sub-topic + +Body text + +== Sub-topic + +Body text + +== Sub-topic + +Body text + += Topic + +Iterate and expand on the sub-topic. + +== Sub-topic + +Body text + +== Sub-topic + +Body text + +== Sub-topic + +Body text + += Summary + +Body text + +0 + +[[_Toc525822236]]Stepping up your game with Advanced Data Structures + +Introduction. + +* _______ +Topic 1 +_______ +* _______ +Topic 2 +_______ +* _______ +Topic 3 +_______ + += Heap + +Iterate and expand on the sub-topic. + +== Insert + +Body text + +== Heapify + +Body text + +== Find max/min + +Body text + +== Extract max/min + +Body text + +== Increase Key + +Body text + +== Delete + +Body text + +== Merge + +Body text + += Tries + +Iterate and expand on the sub-topic. +https://github.com/trekhleb/javascript-algorithms/tree/master/src/data-structures/trie + +Why Trie? :- + +1. With Trie, we can insert and find strings in O(L) time where L +represent the length of a single word. This is obviously faster that +BST. This is also faster than Hashing because of the ways it is +implemented. We do not need to compute any hash function. No collision +handling is required (like we do in open addressing and separate +chaining) +2. 
Another advantage of Trie is that we can easily print all words in
+alphabetical order, which is not easily possible with hashing.
+3. We can efficiently do prefix search (or auto-complete) with Trie.
+
+Issues with Trie :-
+
+The main disadvantage of tries is that they need a lot of memory for
+storing the strings. For each node we have too many node pointers (equal
+to the number of characters of the alphabet). If space is a concern, then
+a Ternary Search Tree can be preferred for dictionary implementations. In
+a Ternary Search Tree, the time complexity of a search operation is O(h) where h
+is the height of the tree. Ternary Search Trees also support other
+operations supported by Trie like prefix search, alphabetical order
+printing and nearest neighbor search.
+
+https://thenextcode.wordpress.com/2015/04/12/trie-vs-bst-vs-hashtable/
+
+https://en.wikipedia.org/wiki/Deterministic_acyclic_finite_state_automaton
+
+http://jayant7k.blogspot.com/2011/06/data-structures-trie.html
+
+The final conclusion regarding the trie data structure is that tries are
+faster but require huge memory for storing the strings.
+ +Binary Tree, BST, Heaps, Tries, … + +Body text +https://en.wikipedia.org/wiki/Heap_(data_structure)[https://en.wikipedia.org/wiki/Heap_(data_structure)#Comparison_of_theoretic_bounds_for_variants] + +== Applications + +Body text + +== Insert word + +Body text + +== Suggesting next characters + +Body text + +== Delete Word + +Body text + +Summary + +Body text + +Code + +_const_ Node = require('./node'); + +_/**_ + +_* Doubly linked list that keeps track of_ + +_* the last and first element_ + +_*/_ + +_class_ LinkedList \{ + +_constructor_() \{ + +this.first = null; // head/root element + +_this_.last = null; _// last element of the list_ + +_this_.size = 0; _// total number of elements in the list_ + +} + +} + +===== Testing.ts + +// code + +Code end + +High 0 + +Highend + +$ curl –-path-as-is http://localhost:3000/../test.txt + +Big O Cheatsheet + +[cols=",,,,,,,,,",options="header",] +|======================================================================= +|Data Structure |Searching by |Inserting at the |Deleting from the +|Space Complexity | | | | | +| |_Index/Key_ |_Value_ |_start_ |_middle_ |_end_ |_start_ |_middle_ +|_end_ | + +|Array |*O(1)* |*O(n)* |*O(n)* |*O(n)* |*O(1)* |*O(n)* |*O(n)* |*O(1)* +|*O(n)* + +|Linked List (singly) |*O(n)* |*O(n)* |*O(1)* |*O(n)* |*O(1)* |*O(1)* +|*O(n)* |*O(n)* |*O(n)* + +|Linked List (doubly) |*O(n)* |*O(n)* |*O(1)* |*O(n)* |*O(1)* |*O(1)* +|*O(n)* |*O(1)* |*O(n)* + +|Stack |- |- |- |- |*O(1)* |- |- |*O(1)* |*O(n)* + +|Queue (w/array) |- |- |*O(n)* |- |- |- |- |*O(1)* |*O(n)* + +|Queue (w/list) |- |- |*O(1)* |- |- |- |- |*O(1)* |*O(n)* +|======================================================================= + +[cols=",,,,,",options="header",] +|======================================================================= +|Data Structure |Searching by |Insert |Delete |Space Complexity | +| |_Index/Key_ |_Value_ | | | + +|Binary Search Tree (unbalanced) |- |*O(n)* |*O(n)* |*O(n)* |*O(n)* + +|Binary Search Tree (balanced: AVL 
tree) |- |*O(log n)* |*O(log n)* +|*O(log n)* |*O(n)* + +|Hash Map (Imperfect) |*O(n)* |*O(n)* |*O(n)* |*O(n)* |*O(n)* + +|Hash Map (optimized) |*O(1)** |*O(n)* |*O(1)** |*O(1)** |*O(n)* + +|Tree Map |*O(log n)* |*O(n)* |*O(log n)* |*O(log n)* |*O(n)* + +|Set (using Hash Map) |- |*O(1)** |*O(1)** |*O(1)** |*O(n)* + +|Set (using Tree Map) |- |*O(log n)* |*O(log n)* |*O(log n)* |*O(n)* +|======================================================================= + +* = Amortized time. E.g. rehashing might affect run time + +image:extracted-media/media/image49.jpeg[image,width=528,height=186] + +Implementing an LRU Cache with HashMap + +Discards the least recently used items first.  + +https://leetcode.com/problems/lru-cache/description/ + +TODO: Compare content with: + +* https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/[https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Stacks] +* https://leetcode.com/explore/learn/ +* https://github.com/trekhleb/javascript-algorithms +* Compare with: Data Structures and Algorithms.pdf by Lydia Hallie +* Cracking code interviews +* Grokking Algorithms +* CS Distilled +* Create poster like: http://bigocheatsheet.com/, http://bigoref.com/, +* Princeton +** https://introcs.cs.princeton.edu/java/11cheatsheet/ diff --git a/book/chapters/output.adoc.zip b/book/chapters/output.adoc.zip new file mode 100644 index 00000000..3900f428 Binary files /dev/null and b/book/chapters/output.adoc.zip differ diff --git a/book/chapters/preface.adoc b/book/chapters/preface.adoc new file mode 100644 index 00000000..347ca5d6 --- /dev/null +++ b/book/chapters/preface.adoc @@ -0,0 +1,35 @@ +[preface] += Preface + +This book is intended for programmers who want to go deeper into understanding the most common data structures and algorithms. 
+Even though you can use them without knowing how they work, it's handy to know when to use one over the other. This book gives you a tool for analyzing trade-offs. When something is slow, you would know how to analyze the code for better performance. + +The concepts in this book can be applied to any programming language. However, instead of doing examples on pseudo-code we are going to use JavaScript to implement the examples. JavaScript is the lingua franca of the web and nowadays is growing its usages in the backend, IOT, and others. + +The following admonitions are used to highlight content + +IMPORTANT: Reword essential concepts. Good for memorizing, tweeting and sharing. + +.Side Note with Title +[NOTE] +==== +Side notes. E.g. provide language specific: BigInt, charCodeAt... +[source,javascript] +---- +function a(test) { + return `${test}`; +} +---- +==== + +Legend: + +NOTE: NOTE + +TIP: TIP + +IMPORTANT: IMPORTANT + +CAUTION: CAUTION + +WARNING: WARNING diff --git a/book/chapters/queue.adoc b/book/chapters/queue.adoc new file mode 100644 index 00000000..1a59e697 --- /dev/null +++ b/book/chapters/queue.adoc @@ -0,0 +1,107 @@ += Queue + +A queue is a linear data structure where the data flows in a *First-In-First-Out* (FIFO) manner. + +.Queue data structure is like a line of people: the First-in, is the First-out +image:image30.png[image,width=528,height=171] + +A queue is like a line of people at the bank, the person that arrived first is the first to go out as well. + +Similar to the stack, we only have two operations (insert and remove). In a Queue, we add elements to the back of the list and remove it from the front. + +We could use an array or a linked list to implement a Queue. However, it is recommended only to use a linked list. Why? An array has a linear runtime _O(n)_ to remove an element from the start while a linked list has constant time _O(1)_. 
+
+.Queue's constructor
+[source, javascript]
+----
+include::{codedir}/data-structures/queues/queue.js[tag=constructor]
+ // ... methods go here ...
+}
+----
+
+We initialize the Queue by creating a linked list. Now, let's add the `enqueue` and `dequeue` methods.
+
+== Insertion
+
+For inserting elements into the queue, also known as *enqueue*, we add items to the back of the list using `addLast`:
+
+.Queue's enqueue
+[source, javascript]
+----
+include::{codedir}/data-structures/queues/queue.js[tag=enqueue, indent=0]
+----
+
+As discussed, this operation has a constant runtime.
+
+== Deletion
+
+For removing elements from a queue, also known as *dequeue*, we remove elements from the front of the list using `removeFirst`:
+
+.Queue's dequeue
+[source, javascript]
+----
+include::{codedir}/data-structures/queues/queue.js[tag=dequeue, indent=0]
+----
+
+As discussed, this operation has a constant runtime.
+
+== Implementation usage
+
+We can use our Queue class as follows:
+
+.Queue usage example
+[source, javascript]
+----
+include::{codedir}/data-structures/queues/queue.js[tag=snippet, indent=0]
+----
+
+You can see that the items are dequeued in the same order they were added, FIFO (first-in, first-out).
+
+== Queue Complexity
+
+As an experiment, we can see in the following table that if we had implemented the Queue using an array, its enqueue time would be _O(n)_ instead of _O(1)_. Check it out:
+
+
+.Time complexity for queue operations
+|===
+.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity
+^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_
+| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n)
+| Queue (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n)
+|===
+
+
+= Summary
+
+In this chapter, we explored the most used linear data structures such as Arrays, Linked Lists, Stacks and Queues.
We implemented them and discussed the runtime of their operations.
+
+To sum up,
+
+.Use Arrays when…
+* You need to access data in random order fast (using an index).
+* Your data is multi-dimensional (e.g., matrix, tensor).
+
+.Use Linked Lists when:
+* You will access your data sequentially.
+* You want to save memory and only allocate memory as you need it.
+* You want constant time to remove/add from extremes of the list.
+
+.Use a Queue when:
+* You need to access your data on a first-come, first-served basis (FIFO).
+* You need to implement a <>
+
+.Use a Stack when:
+* You need to access your data as last-in, first-out (LIFO).
+* You need to implement a <>
+
+.Time Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues)
+|===
+.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity
+^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_
+| <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n)
+| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n)
+| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n)
+| <> ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n)
+| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n)
+| <> (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n)
+|===
diff --git a/book/chapters/quick-sort.adoc b/book/chapters/quick-sort.adoc
new file mode 100644
index 00000000..5e81f617
--- /dev/null
+++ b/book/chapters/quick-sort.adoc
@@ -0,0 +1,60 @@
+= Quicksort
+
+Quicksort is an efficient recursive sorting algorithm that uses the "divide and conquer" paradigm to sort faster. It can be implemented in-place so it doesn't require additional memory.
+
+In practice quicksort outperforms efficient sorting algorithms like <>. And, of course, it also outperforms simple sorting algorithms like <>, <> and <>.
+
+Quicksort basically picks a "pivot" element (preferably random) and moves all the elements that are smaller than the pivot to the left and the ones that are bigger to the right. It does this recursively until all the array is sorted.
+
+== Quicksort Implementation
+
+Quicksort implementation uses the divide-and-conquer in the following way:
+
+.Quicksort Algorithm
+. Pick a "pivot" element (at random)
+. Move everything that is lower than the pivot to the left and everything that is bigger than the pivot to the right.
+. Recursively repeat steps 1 and 2 on the sub-arrays on the left and on the right, WITHOUT including the pivot.
+
+Let's convert these words into code!
+
+.Quicksort implementation in JavaScript (QuickSort)
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/quick-sort.js[tag=quickSort, indent=0]
+----
+<1> Partition: picks a pivot and finds the index where the pivot will be when the array is sorted.
+<2> Do the partition of the sub-array at the left of the pivot.
+<3> Do the partition of the sub-array at the right of the pivot.
+<4> Only do the partition when there's something to divide.
+
+The real heavy-lifting is done in the partition function. Let's implement that:
+
+.Quicksort implementation in JavaScript (partition)
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/quick-sort.js[tag=partition, indent=0]
+----
+<1> Make the rightmost element the pivot.
+<2> This is the placeholder for the final pivot index. We start at low, and as we move all the lower elements to the left we will get the final place where the pivot should be.
+<3> Move one element at a time comparing it to the pivot value.
+<4> If the current element's value is less than the pivot, then increment the pivot index (the pivot should be placed after all the lower values). We also swap the value before incrementing so that the current element, which is lower than the pivot, ends up on the pivot's left side.
+
+Quicksort has an average _O(n log n)_ running time.
For more details about how to extract the runtime go to <>.
+
+== Quicksort Properties
+
+- Time Complexity: [big]#✅# <> _O(n log n)_
+- Space Complexity: [big]#⚠️# <> _O(n)_
+- <>: [big]#✅# Yes
+- <>: [big]#✅# Yes
+- <>: [big]#⛔️# No, a mostly sorted array takes the same time O(n log n).
+- <>: [big]#⛔️# No, the pivot element can be chosen at random.
+- Recursive: Yes
+
+
+// Resources:
+// https://www.khanacademy.org/computing/computer-science/algorithms/quick-sort/a/linear-time-partitioning
+// https://www.khanacademy.org/computing/computer-science/algorithms/quick-sort/a/overview-of-quicksort
+// https://algs4.cs.princeton.edu/23quicksort/
+// https://twitter.com/mathias/status/1036626116654637057?lang=en
+// https://www.toptal.com/developers/sorting-algorithms/quick-sort
diff --git a/book/chapters/red-black-tree.adoc b/book/chapters/red-black-tree.adoc
new file mode 100644
index 00000000..40c414b2
--- /dev/null
+++ b/book/chapters/red-black-tree.adoc
@@ -0,0 +1,3 @@
+= Red-Black Tree
+
+Nisi ex aliqua minim commodo cupidatat proident sint fugiat commodo irure. Duis quis ullamco ut veniam pariatur cillum voluptate irure. Irure aliqua elit cupidatat exercitation eiusmod et duis mollit proident reprehenderit ad. Aute aliquip cillum nostrud irure quis. Sint reprehenderit voluptate adipisicing amet ut aliquip. Eiusmod laborum nisi eu irure est consectetur ut ex sit cupidatat non.
diff --git a/book/chapters/sample.adoc b/book/chapters/sample.adoc
new file mode 100644
index 00000000..3d8cffb4
--- /dev/null
+++ b/book/chapters/sample.adoc
@@ -0,0 +1,298 @@
+= Sample Section
+
+
+:leveloffset: +1
+
+// ------------------
+
+
+= Sample Title 1
+
+Provident architecto soluta commodi odit accusamus non molestias necessitatibus, culpa possimus repudiandae, ex sit officiis, sint hic doloribus harum vero quisquam aspernatur.
+
+== Sample Title 2
+
+Optio ab voluptate impedit, iusto explicabo tempore?
Ipsam eaque accusamus mollitia accusantium quod aperiam. Sit cum tempora quod! Placeat assumenda adipisci eius? + +=== Sample Title 3 + +Quisquam, vero facere voluptatem impedit optio fuga accusamus non dignissimos, exercitationem culpa error debitis, molestiae corporis? Repudiandae eum dolor quae nemo reiciendis. + +==== Sample Title 4 + +Laboris commodo labore anim ea. Nostrud culpa Lorem enim labore esse qui enim incididunt sunt eiusmod cupidatat veniam enim irure. Culpa velit duis duis esse amet adipisicing fugiat dolore do minim exercitation. Nostrud magna id nostrud nostrud minim cupidatat. Sunt amet qui amet deserunt commodo Lorem. + +===== Sample title 5 + +Irure nisi laboris amet do sit Lorem do. Aliqua esse ex in dolore nulla. Aute deserunt nostrud eiusmod fugiat aliquip proident ad eiusmod incididunt est in nisi deserunt Lorem. + +====== Sample title 6 + +Et nisi fugiat in culpa id voluptate incididunt anim commodo. Irure non dolor velit irure non incididunt nisi laborum minim. Elit duis consectetur aliqua laborum tempor et nulla. Nulla dolore magna dolor occaecat velit magna sint nulla. Proident veniam officia in nisi cillum ut deserunt consequat mollit laborum. + +======= Sample title 7 + +Ullamco ipsum consequat consequat in pariatur ad tempor nisi minim deserunt sunt ex. Minim enim irure quis aliqua anim nisi amet aliquip labore excepteur nisi amet commodo mollit. Est dolor eu commodo Lorem cillum. Elit deserunt cillum excepteur proident aliquip sunt incididunt. Deserunt voluptate enim incididunt incididunt pariatur. + + +== Code + +.Subsets in a Set +[source, javascript] +---- +include::{codedir}/runtimes/07-sub-sets.js[tag=snippet] +---- +<1> Base case is empty element. +<2> For each element from the input append it to the results array. +<3> The new results array will be what it was before + the duplicated with the appended element. + +//.The way this algorithm generates all subsets is: +//1. The base case is an empty element (line 13). E.g. 
[''] +//2. For each element from the input append it to the results array (line 16) +//3. The new results array will be what it was before + the duplicated with the appended element (line 17) + +== Side notes + +.JavaScript built-in `BigInt` +**** +BigInt allows to operate beyond the maximum safe limit of integers (Number.MAX_SAFE_INTEGER => 9,007,199,254,740,991). BigInt uses the suffix n, e.g. 1n + 3n === 4n. +**** + +== Emojis + +[big]#💸# +[big]#⏱# +[big]*🍯🐝* +[big]#💀# +[big]#😱# +[big]#😎👍# +[big]#🐢# +[big]#🚀# +[big]*🤯* +[big]#👎# + +- Space Complexity: [big]#⚠️# <> _O(n)_ +- <>: [big]#✅# Yes +- Time Complexity: [big]#⛔️# <> _O(n^2^)_ + +.How to explain dynamic programming to kids? 👶 +---- +test [big]*🤯* +---- + +== Images + +.CPU operations vs. Algorithm runtime as the input size grows +image:image5.png[CPU time needed vs. Algorithm runtime as the input size increases] + +== Quotes + +Lorem, ipsum dolor sit amet consectetur adipisicing elit. Perspiciatis doloremque fuga nobis tempora saepe sed iste quod quia blanditiis dolorem alias, accusantium quas nihil ullam assumenda nostrum similique ad itaque? + +[quote, H. J. Harrington] +Measurement is the first step that leads to control and eventually to improvement. If you can’t measure something, you can’t understand it. If you can’t understand it, you can’t control it. If you can’t manage it, you can’t improve it. 
+ +== Tables + +.Time and Space Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) +| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) +| <> ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n) +| <> (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. + +.Time complexity for a Graph data structure +|=== +.2+.^s| Data Structure 2+^s| Vertices 2+^s| Edges .2+^.^s| Space Complexity +^|_Add_ ^|_Remove_ ^|_Add_ ^|_Remove_ +| Graph (adj. matrix) ^| O(\|V\|^2^) ^| O(\|V\|^2^) ^|O(1) ^|O(1) ^|O(\|V\|^2^) +| Graph (adj. list w/array) ^| O(1) ^| O(\|V\| + \|E\|)) ^|O(1) ^|O(\|V\| + \|E\|) ^|O(\|V\| + \|E\|) +| Graph (adj. list w/HashSet) ^| O(1) ^| O(\|V\|)) ^|O(1) ^|O(\|V\|) ^|O(\|V\| + \|E\|) +|=== + + +.Most common algorithmic running times and their examples +[cols="2,2,5",options="header"] +|=== +|Big O Notation +|Name +|Example(s) + +|O(1) +|<> +|#<>, #<> + +|O(log n) +|<> +|<> + +|O(n) +|<> +|<> + +|O(n log n) +|<> +|<> + +|O(n^2^) +|<> +|<> + +|O(n^3^) +|<> +|<> + +|O(2^n^) +|<> +|<> + +|O(n!) +|<> +|<> +|=== + +== Admonitions + +The following admonitions are used to highlight content + +IMPORTANT: Reword essential concepts. Good for memorizing, tweeting and sharing. + +.Side Note with Title +[NOTE] +==== +Side notes. E.g. provide language specific: BigInt, charCodeAt... 
+[source,javascript] +---- +function a(test) { + return `${test}`; +} +---- +==== + +=== Legend + +==== Note + +NOTE: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Odio laudantium et consequuntur, eveniet numquam voluptatibus molestias nostrum reprehenderit blanditiis enim asperiores consequatur dolore tempore laboriosam! At aliquam mollitia aspernatur magnam. + +==== Tip + +TIP: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Id nesciunt quaerat sint provident beatae. Assumenda necessitatibus ea non illum ipsa eveniet! Odio, blanditiis debitis harum porro autem ut fugiat deserunt. + +==== Important + +IMPORTANT: Lorem ipsum dolor sit amet consectetur adipisicing elit. Sequi maiores aperiam quasi error facilis ducimus quis vero architecto soluta! Fuga alias aspernatur voluptate voluptas veniam maxime eligendi nemo neque excepturi. + + +==== Caution + +CAUTION: Lorem ipsum dolor sit amet consectetur adipisicing elit. Sed deserunt error alias quod, modi ex, repudiandae voluptatem atque saepe, vero eius vel numquam aperiam neque incidunt eum nobis earum nostrum. + +==== Warning + +WARNING: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Eaque quam delectus consequatur omnis nostrum, minus consectetur animi quod adipisci, architecto similique quasi voluptatem voluptas repudiandae minima. Fuga incidunt maiores magnam! + +== HTML-only content (iframe) + +Later, in the 2nd part we are going to explore non-linear data structures like Graphs and Trees. +ifdef::backend-html5[] +If you want to have a general overview of each one, take a look at the following interactive diagram: ++++ + ++++ +endif::[] + + +== Graphviz Diagrams + +=== Graphviz with png + +[graphviz, dfs-graph, png] +.... 
+digraph G { + + node [fillcolor="#F8E71C" style=filled shape=circle] 0; + node [fillcolor="#F5A623"] 1; + node [fillcolor="#B8E986"] 2; + node [fillcolor="#BD10E0"] 3; + node [fillcolor="#50E3C2"] 4; + node [fillcolor="#4A90E2"] 5; + // node [fillcolor="#FF5252"] 6; + + 0 -> 5 + 0 -> 4 + 0 -> 1 + 1 -> 4 + 1 -> 3 + 2 -> 1 + 3 -> 4 + 3 -> 2 + + label="DFS" + + { rank=same; 3, 1 } + { rank=same; 0, 4 } + +} +.... + +=== Graphviz with svg + +[graphviz, Recursive Fibonacci call tree with dp, svg] +.... +graph G { + "fib(5)" -- { "fib(4)" } + "fib(4)" -- { "fib(3)" } + "fib(3)" -- { "fib(2)" } + "fib(2)" -- { "fib(1)", "fib(0)" } +} +.... + +[graphviz, Recursive Fibonacci call tree with dp, svg] +.... +digraph g { + node [shape = record,height=.1]; + + art[label = " A| R| T"]; + art1[label = " A| R| T"]; + art2[label = " A| R| T", color="red"]; + atr[label = " A| T| R", color="red"]; + rat[label = " R| A| T"]; + rat1[label = " R| A| T", color="red"]; + rta[label = " R| T| A", color="red"]; + tra[label = " T| R| A"]; + tra1[label = " T| R| A", color="red"]; + tar[label = " T| A| R", color="red"]; + + "art":f0 -> "art1":f0 [ label = "1. swap A/A"]; + "art1":f0 -> "art2":f0 [ label = "2. swap R/R"]; + "art2":f2 -> "art1":f1 [ label = "3", color="grey"]; + "art1":f2 -> "atr":f0 [ label = "4. swap R/T"]; + "atr":f2 -> "art1":f2 [ label = "5", color="grey"]; + "art1":f1 -> "art":f0 [ label = "6", color="grey"]; + + "art":f1 -> "rat":f0 [ label = "7. swap A/R"]; + "rat":f0 -> "rat1":f0 [ label = "8. swap A/A"]; + "rat1":f2 -> "rat":f1 [ label = "9", color="grey"]; + "rat":f2 -> "rta":f0 [ label = "10. swap A/T"]; + + "art":f2 -> "tra":f0 [ label = "swap A/T"]; + "tra":f0 -> "tra1":f0 [ label = "swap R/R"]; + "tra":f2 -> "tar":f0 [ label = "swap R/A"]; + +} +.... 
+
+
+
+// ------------------
+
+:leveloffset: -1
diff --git a/book/chapters/selection-sort.adoc b/book/chapters/selection-sort.adoc
new file mode 100644
index 00000000..f4e0eba8
--- /dev/null
+++ b/book/chapters/selection-sort.adoc
@@ -0,0 +1,71 @@
+= Selection Sort
+
+The selection sort is a simple sorting algorithm. As its name indicates, it chooses the lowest element from the list and moves it where it should be.
+
+TIP: selection sort is an in-place sorting algorithm; it should be used when auxiliary memory is limited.
+
+.Selection sort algorithm
+. Start with the element in position 0.
+. Find the minimum element in the rest of the array. If a new minimum is found, swap them.
+. Repeat with the element in position 1 and so on until the last one.
+
+image:selection-sort.gif[]
+
+== Selection sort implementation
+For implementing the selection sort we need 2 indexes.
+
+.Selection sort
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/selection-sort.js[tag=sort, indent=0]
+----
+
+One index is for the position in question (selection/outer) and another one for finding the minimum in the rest of the array (element/inner).
+
+The swap function is implemented as follows.
+
+.Swap function
+[source, javascript]
+----
+include::{codedir}/algorithms/sorting/sorting-common.js[tag=swap, indent=0]
+----
+
+It uses JavaScript ES6 destructuring of arrays.
+
+.JavaScript Array destructuring
+****
+*Assignment separate from declaration*
+
+A variable can be assigned its values using the destructuring syntax.
+
+[source, js]
+----
+let a, b;
+
+[a, b] = [1, 2];
+console.log(a); //↪️ 1
+console.log(b); //↪️ 2
+----
+
+*Swapping variables*
+
+Two variables' values can be swapped in one destructuring expression.
+
+[source, js]
+----
+[a, b] = [b, a];
+console.log(a); //↪️ 2
+console.log(b); //↪️ 1
+----
+
+Without destructuring assignment, swapping two values requires a temporary variable.
+****
+
+== Selection Sort Properties
+
+- <>: [big]#✅# Yes
+- <>: [big]#✅# Yes
+- <>: [big]#✅# Yes
+- <>: [big]#✅# Yes
+- Time Complexity: [big]#⛔️# <> _O(n^2^)_
+- Space Complexity: [big]#✅# <> _O(1)_
diff --git a/book/chapters/set.adoc b/book/chapters/set.adoc
new file mode 100644
index 00000000..d06a2341
--- /dev/null
+++ b/book/chapters/set.adoc
@@ -0,0 +1,219 @@
+= Set
+
+A set is a data structure where duplicated entries are not allowed. A set is like an array with only unique values.
+
+NOTE: JavaScript already has a built-in Set data structure.
+
+Take a look at the following
+example:
+
+.Set usage example (using JavaScript built-in Set)
+[source, javascript]
+----
+const set = new Set();
+
+set.add(1); //↪️ Set [ 1 ]
+set.add(1); //↪️ Set [ 1 ]
+set.add(2); //↪️ Set [ 1, 2 ]
+set.add(3); //↪️ Set [ 1, 2, 3 ]
+set.has(1); //↪️ true
+set.delete(1); //↪️ removes 1 from the set
+set.has(1); //↪️ false, 1 has been removed
+set.size; //↪️ 2, we just removed one value
+console.log(set); //↪️ Set(2) {2, 3}
+----
+
+As you can see, even if we insert the same value multiple times, it only gets added once.
+
+Can you think of a way to implement it?
+
+TIP: A hint... it should perform all operations in *O(1)** or at most *O(log n)*
+
+If we use a `map`, we can accomplish this. However, maps use a key/value pair. If we only use the keys, we can avoid duplicates, since in a `map` you can only have each key once.
+
+As you might remember from the <> chapter, there are two ways of implementing a `map` and both can be used to create a `set`. Let's explore what the differences between the two implementations are.
+
+== HashSet vs TreeSet
+
+We can implement a `map` using a *balanced BST* and using a *hash function*. If we use them to implement a `Set`, then we would have a `HashSet` and a `TreeSet` respectively.
+
+* `TreeSet`, would return the values sorted in ascending order.
+* `HashSet`, would return the values in insertion order.
+* Operations on a `HashSet` would take on average O(1) and in the worst case (rehash is due), it would take O(n). +* Operation on a `TreeSet` is always O(log n). + +Let’s implement both! + +== Implementing a TreeSet + +We are to use a balanced BST (Red-Black Tree) to implement TreeSet. + +.TreeSet's constructor method and size attribute +[source, javascript] +---- +include::{codedir}/data-structures/sets/tree-set.js[tag=constructor] +} +---- +<1> Converts an array or any iterable data structure to a set. + +A common use case for Sets is to remove duplicated values from an array. We can do that by passing them in the constructor as follows: + +.Removing duplicates from an Array using a Set +[source, javascript] +---- +set = new TreeSet([1, 2, 3, 2, 1]); +expect(set.size).toBe(3); +expect(Array.from(set.keys())).toEqual([1, 2, 3]); +---- + +Ok, now let’s implement the add method. + +=== Adding elements to a TreeSet + +For adding values to the set, we `Tree.add` method. + +.TreeSet's constructor method and size attribute +[source, javascript] +---- +include::{codedir}/data-structures/sets/tree-set.js[tag=add,indent=0] +---- + +Our <> can hold duplicated values. It has a multiplicity tally to keep track of duplicates. However, we don’t dupe in a set. For that, we check if the value is already in the tree. +Don’t worry about adding extra lookups. The +`Tree.has` is also very performant *O(log n)*. + +=== Searching for values in a TreeSet + +Again, we rely on the Tree implementation to do the heavy lifting: + +.TreeSet's `has` method +[source, javascript] +---- +include::{codedir}/data-structures/sets/tree-set.js[tag=has, indent=0] +---- + +=== Deleting elements from a TreeSet + +We delete the elements from the TreeSet using the remove method of the BST. + +.TreeSet's `delete` method +[source, javascript] +---- +include::{codedir}/data-structures/sets/tree-set.js[tag=delete, indent=0] +---- + +Voilà! That’s it! 
+ +=== Converting TreeSet to Array + +A common use case for a Set is to convert it to an array or use in an iterator (for loops, forEach, …). Let’s provide the method for that: + +.TreeSet's iterator +[source, javascript] +---- +include::{codedir}/data-structures/sets/tree-set.js[tag=iterator, indent=0] +---- + +We are using the `inOrderTraversal` method of the BST to go each key in an +ascending order. + +.JavaScript Built-in `Symbol` iterator +**** +The `Symbol.iterator` built-in symbol specifies the default iterator for +an object. Used by `for...of`, `Array.from` and others. +**** + +Now we can convert from set to array and vice versa easily. For +instance: + +.TreeSet's iterator +[source, javascript] +---- +const array = [1, 1, 2, 3, 5]; + +// array to set +const set = new TreeSet(array); + +// set to array +Array.from(set); //↪️ (4) [1, 2, 3, 5] +---- + +No more duplicates in our array! + +Check out our https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/sets/tree-set.js[GitHub repo for the full TreeSet implementation]. + +Let’s now, implement a `HashSet`. + +== Implementing a HashSet + +The *HashSet* is the set implementation using a HashMap as its underlying data structure. + +The HashSet interface will be the same as the built-in `Set` or our previously implemented `TreeSet`. + +.HashSet's constructor method and size attribute +[source, javascript] +---- +include::{codedir}/data-structures/sets/hash-set.js[tag=constructor] +} +---- + +This constructor is useful for converting an array to set and initializing the `HashMap`. + +=== Inserting values to a HashSet + +To insert items in a HashSet we use the `set` method of the `HashMap`: + +.HashSet's `add` method +[source, javascript] +---- +include::{codedir}/data-structures/sets/hash-set.js[tag=add, indent=0] +} +---- + +`HashMap` stores key/value pairs, but for this, we only need the key, and we ignore the value. 
+ +=== Finding values in a HashSet + +We use the method `has` to check if a value is on the `Set` or not. + +.HashSet's `has` method +[source, javascript] +---- +include::{codedir}/data-structures/sets/hash-set.js[tag=has, indent=0] +---- + +Internally, the `HashMap` will convert the key into an array index using a hash function. If there’s something in the array index bucket, it will return +true, and if it’s empty, it will be false. + +=== Deleting values from a HashSet + +For deleting a value from a hashSet we use the HashMap’s delete method: + +.HashSet's `delete` method +[source, javascript] +---- +include::{codedir}/data-structures/sets/hash-set.js[tag=delete, indent=0] +---- + +This method has an average runtime of *O(1)*. + +== HashSet vs HashMap Time Complexity + +We can say that `HashMap` in on average more performant O(1) vs. O(log n). However, if a +rehash happens, it will take *O(n)* instead of *O(1)*. A `TreeSet` is always *O(log n)*. + +.Time complexity HashSet vs TreeSet +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| HashSet ^|- ^|O(n) ^|O(1)* ^|O(1)* ^|O(1)* +| TreeSet ^|- ^|O(n) ^|O(log n) ^|O(log n) ^|O(log n) +|=== +{empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. + +To recap, HashSet and TreeSet will keep data without duplicates. The +difference besides runtime is that: + +.TreeSet vs HashSet +* HashSet keeps data in insertion order +* TreeSet keeps data sorted in ascending order. diff --git a/book/chapters/sorting-intro.adoc b/book/chapters/sorting-intro.adoc new file mode 100644 index 00000000..eb888aea --- /dev/null +++ b/book/chapters/sorting-intro.adoc @@ -0,0 +1,19 @@ += Sorting + +Excepteur ad occaecat ex dolor reprehenderit esse occaecat exercitation Lorem. Consectetur laboris qui aliquip nisi cillum ea. Laborum Lorem ullamco tempor nisi Lorem qui tempor. 
+ +== Stable + +Does not change the relative order of elements with equal keys. + +== Online + +Can sort a list as it receives it. + +== In-place + +only requires a constant amount O(1) of additional memory space. Does not use any other auxiliary memory. + +== Adaptive + +Efficient for data sets that are already substantially sorted. diff --git a/book/chapters/stack.adoc b/book/chapters/stack.adoc new file mode 100644 index 00000000..ee14d4d1 --- /dev/null +++ b/book/chapters/stack.adoc @@ -0,0 +1,75 @@ += Stack + +The stack is a data structure that restricts the way you add and remove data. It only allows you to insert and retrieve in a *Last-In-First-Out* (LIFO) fashion. + +An analogy is to think the stack is a rod and the data are discs. You can only take out the last one you put in. + +.Stack data structure is like a stack of disks: the last element in is the first element out +image:image29.png[image,width=240,height=238] + +// #Change image from https://www.khanacademy.org/computing/computer-science/algorithms/towers-of-hanoi/a/towers-of-hanoi[Khan Academy]# + +As you can see in the image above, If you insert the disks in the order `5`, `4`, `3`, `2`, `1`. Then you can remove them on `1`, `2`, `3`, `4`, `5`. + +The stack inserts items to the end of the collection and also removes from the end. Both, an array and linked list would do it in constant time. However, since we don’t need the Array’s random access, a linked list makes more sense. + +.Stack's constructor +[source, javascript] +---- +include::{codedir}/data-structures/stacks/stack.js[tag=constructor] + // ... methods goes here ... +} +---- + +As you can see in the stack constructor, we are using a linked list as the underlying data structure. + +Let's now develop the insert and remove operations in a stack. + +== Insertion + +We can insert into a stack using the linked list’s `addLast` method. 
+ +.Stack's add +[source, javascript] +---- +include::{codedir}/data-structures/stacks/stack.js[tag=add, indent=0] +---- + +We are returning `this`, in case we want to chain multiple add commands. + +== Deletion + +Deleting is straightforward as well. + +.Stack's remove +[source, javascript] +---- +include::{codedir}/data-structures/stacks/stack.js[tag=remove, indent=0] +---- + +This time we used the linked list’s `removeLast` method. That’s all we need for a stack implementation. Check out the full implementation https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/stacks/stack.js[here] + +== Implementation Usage + +We can use our stack implementation as follows: + +.Stack usage example +[source, javascript] +---- +include::{codedir}/data-structures/stacks/stack.js[tag=snippet, indent=0] +---- + +As you can see if we add new items they will be the first to go out to honor LIFO. + +== Stack Complexity + +Implementing the stack with an array and linked list would lead to the same time complexity: + +.Time complexity for the stack operations +|=== +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ +| Stack ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) +|=== + +It's not very common to search for values on a stack (other Data Structures are better suited for this). Stacks especially useful for implementing <>. diff --git a/book/chapters/tim-sort.adoc b/book/chapters/tim-sort.adoc new file mode 100644 index 00000000..e37061db --- /dev/null +++ b/book/chapters/tim-sort.adoc @@ -0,0 +1,3 @@ += Tim Sort + +Ea duis sunt consequat sit mollit elit proident sint. Deserunt et fugiat duis pariatur deserunt officia mollit elit consectetur pariatur nisi. Sit non enim quis ut quis. Consequat commodo consequat veniam labore excepteur incididunt amet in eiusmod enim laborum veniam incididunt. 
Adipisicing fugiat mollit tempor sint ipsum eiusmod excepteur est. diff --git a/book/chapters/timsort.adoc b/book/chapters/timsort.adoc new file mode 100644 index 00000000..088124e4 --- /dev/null +++ b/book/chapters/timsort.adoc @@ -0,0 +1,57 @@ += tim Sort + +tim sort is an efficient recursive sorting algorithm that uses the "divide and conquer" paradigm to sort faster. It can be implemented in-place, so it doesn't require additional memory. + +In practice, timsort outperforms efficient sorting algorithms like <>. And, of course, it also outperforms simple sorting algorithms like <>, <> and <>. + +tim sort basically picks a "pivot" element (preferably random) and moves all the elements that are smaller than the pivot to the left and the ones that are bigger to the right. It does this recursively until the whole array is sorted. + +== tim Sort Implementation + +tim sort implementation uses divide-and-conquer in the following way: + +.tim Sort Algorithm +. Pick a "pivot" element (at random) +. Move everything that is lower than the pivot to the left and everything that is bigger than the pivot to the right. +. Recursively repeat steps 1 and 2 on the sub-arrays on the left and on the right, WITHOUT including the pivot. + +Let's convert these words into code! + +.tim Sort implementation in JavaScript (timSort) +[source, javascript] +---- +include::{codedir}/algorithms/sorting/tim-sort.js[tag=timSort, indent=0] +---- +<1> Partition: picks a pivot and finds the index where the pivot will be when the array is sorted. +<2> Do the partition of the sub-array at the left of the pivot. +<3> Do the partition of the sub-array at the right of the pivot. +<4> Only do the partition when there's something to divide. + +The real heavy-lifting is done in the partition function. 
Let's implement that: + +.tim Sort implementation in JavaScript (partition) +[source, javascript] +---- +include::{codedir}/algorithms/sorting/tim-sort.js[tag=partition, indent=0] +---- +<1> Make the rightmost element as the pivot. +<2> This is the place holder for the final pivot index. We start in low and as we move all the lower elements to the left we will get the final place where the pivot should be. +<3> Move one element at a time comparing it to the pivot value. +<4> If the current element value is less than the pivot, then increment pivot index (pivot should be place after all the lower values). We also swap the value before incrementing because current element that is lower than the pivot to be at its left side. + +Merge sort has a _O(n log n)_ running time. For more details about the how to extract the runtime go to <>. + +== tim Sort Properties + +- Time Complexity: [big]#✅# <> _O(n log n)_ +- Space Complexity: [big]#⚠️# <> _O(n)_ +- <>: [big]#✅# Yes +- <>: [big]#✅# Yes +- <>: [big]#️️️️️️️⛔️️️️️# No, mostly sorted array takes the same time O(n log n). +- <>: [big]#️️️️️️️⛔️️️️️# No, the pivot element can be choose at random. +- Recursive: Yes + + +// Resources: +// https://twitter.com/mathias/status/1036626116654637057?lang=en +// http://cr.openjdk.java.net/~martin/webrevs/openjdk7/timsort/raw_files/new/src/share/classes/java/util/TimSort.java diff --git a/book/chapters/tree--avl.adoc b/book/chapters/tree--avl.adoc new file mode 100644 index 00000000..0a1267de --- /dev/null +++ b/book/chapters/tree--avl.adoc @@ -0,0 +1,60 @@ += AVL Tree + +AVL Tree is named after their inventors (**A**delson-**V**elsky and **L**andis). +This self-balancing tree keep track of subtree sizes to know if a rebalance is needed or not. +We can compare the size of the left and right subtrees using a balance factor. 
+ +[NOTE] +==== + +The *balanced factor* on each node is calculated recurviely as follows: + +---- +Balance Factor = (left subtree height) - (right subtree height) +---- + +==== + +The implementation will got into the BST node. +We will need two methods to calculate the left and right subtree, and with those we can get the balance factor. + +.Balance Factor methods on the BST node +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-tree-node.js[tag=avl, indent=0] +---- + + +== Implementing AVL Tree + +Implementing an AVL Tree is not too hard, since it builds upon what we did in the Binary Search Tree. + +.AVL Tree class +[source, javascript] +---- +include::{codedir}/data-structures/trees/avl-tree.js[tag=AvlTree] +---- + +As you can see, AVL tree inherits from the BST class. +The insert and remove operations works the same as in the BST, except that at the end we call `balanceUptream`. +This function makes balance the tree after every change if is needed. Let's see how it's implemented. + +.Balance Upstream for AVL tree +[source, javascript] +---- +include::{codedir}/data-structures/trees/avl-tree.js[tag=balanceUptream] +---- + +This function recurively goes from the modified node to the root checking if each node in between is balanced. +Now, let's examine how does the balancing works on AVL tree. + +.Balance method for AVL tree +[source, javascript] +---- +include::{codedir}/data-structures/trees/avl-tree.js[tag=balance] +---- + +The first thing we do is to see if one subtree is longer than the other. +If so, then we check the children balance to determine if need a single or double rotation and in which direction. + +You can review <> in case you want a refresher. 
diff --git a/book/chapters/tree--binary-search-tree.adoc b/book/chapters/tree--binary-search-tree.adoc new file mode 100644 index 00000000..d4ad9e90 --- /dev/null +++ b/book/chapters/tree--binary-search-tree.adoc @@ -0,0 +1,172 @@ += Binary Search Tree + +.The Binary Search Tree (BST) is a tree data structure that keeps the following constraints: +* Each node must have at most two children. Usually referred to as "left" and "right". +* All trees must a have a "root" node. +* The order of nodes values must be: left child < parent < right child. +* Nodes might need re-ordering after each insert/delete operation to keep the `left < parent < right` constraint. + +== Implementing a Binary Search Tree + +The first step is to implement the Binary Tree Node, which can hold 0, 1 or 2 children. + +.Binary Tree Node's constructor +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-tree-node.js[tag=snippet, indent=0] + } +} +---- + +Does this look familiar to you? It’s almost like the linked list node, but instead of having `next` and `previous`, it has `left` and `right`. That guarantees that we have at most two children. + +We also added the `meta` object to hold some metadata about the node, like duplicity, color (for red-black trees), or any other data needed for future algorithms. + +We implemented the node, now let’s layout other methods that we can implement for a BST: + +.Binary Search Tree's class +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=snippet, indent=0] + + add(value) { /* ... */ } + find(value) { /* ... */ } + remove(value) { /* ... */ } + getMax() { /* ... */ } + getMin() { /* ... */ } +} +---- + +With the methods `add` and `remove` we have to guarantee that our tree always has one root element from where we can navigate left or right based on the value that we are looking for. 
Let's implement the `add` method first: + +=== Inserting new elements in a BST + +.For inserting an element, in a BST, we have the following scenarios: +1. If the tree is empty (root element is null), we add the newly created node as root, and we are done! +2. If the root is not null, start from the root, then compare the node’s value against the new element. If the new item’s value is higher than the node’s, we move to the right child, otherwise to the left. We check each node recursively until we find an empty spot where we can put the new element and keep the rule `left < parent < right`. +3. If we insert the same value multiple times, we don’t want duplicates. So, we can keep track of multiples using a duplicity counter. + +For instance, let’s say that we want to insert the values 19, 21, 10, 2, 8 in a BST: + +.Inserting values on a BST. +image:image36.png[image,width=528,height=329] + +In the last box of the image above, when we are inserting node 18, we start by the root (19). Since 18 is less than 19, then we move left. Node 18 is greater than 10, so we move right. There’s an empty spot, and we place it there. Let’s code it up: + +.Binary Search Tree's class +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=add, indent=0] +---- +<1> We are using a helper function `findNodeAndParent` to iterate through the tree finding a node with current value “found” and its parent (implementation on the next section). +<2> We are taking care of duplicates. Instead of inserting duplicates we are keeping a multiplicity tally. We have to decrease it when removing nodes. + +=== Finding a value in a BST + +We can implement the find method using the helper `findNodeAndParent` as follows: + +.Binary Search Tree's find methods +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=find, indent=0] +---- + +`findNodeAndParent` is a recursive function that goes to the left child or right depending on the value. 
However, if the value already exists, it will return it in `found` variable. + +=== Removing elements from a BST + +Deleting a node from a BST have three cases. + +.The node is a +1. leaf +2. parent with one child +3. parent with two children/root. + +==== Removing a leaf (Node with 0 children) + +Deleting a leaf is the easiest, we look for their parent and set the child to null. + +.Removing node without children from a BST. +image:image37.png[image,width=528,height=200] + + +Node 18, will be hanging around until the garbage collector is run. However, there’s no node referencing to it so it won’t be reachable from the tree anymore. + +==== Removing a parent (Node with 1 children) + +Removing a parent is not as easy since you need to find new parents for its children. + +.Removing node with 1 children from a BST. +image:image38.png[image,width=528,height=192] + + +In the example, we removed node `10` from the tree, so its child (node 2) needs a new parent. We made node 19 the new parent for node 2. + +==== Removing a full parent (Node with 2 children) or root + +Removing a parent of two children is the trickiest of all cases because we need to find new parents for two children. (This sentence sounds tragic out of context 😂) + +.Removing node with two children from a BST. +image:image39.png[image,width=528,height=404] + + +In the example, we delete the root node 19. This leaves the two orphans (node 10 and node 21). There are no more parents because node 19 was the *root* element. One way to solve this problem is to *combine* the left subtree (Node 10 and descendants) into the right subtree (node 21). The final result is node 21 is the new root. + +What would happen if node 21 had a left child (e.g., node 20)? Well, we would move node 10 and its descendants' bellow node 20. 
+ +==== Implementing removing elements from a BST + +All the described scenarios removing nodes with zero, one and two children can be sum up on this code: + +.Binary Search Tree's remove method +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=remove, indent=0] +---- +<1> Try to find if the value exists on the tree. +<2> If the value doesn’t exist we are done! +<3> Create new subtree without the value to delete +<4> Check the multiplicity (duplicates) and decrement the count in case we have multiple nodes with the same value +<5> If the `nodeToRemove` was the root, then we move the removed node’s children as the new root. +<6> If it was not the root, then we go to the deleted node’s parent and put their children there. + +We compute `removedNodeChildren`, which is the resulting subtree after combining the children of the deleted node. + +The method to combine subtrees is the following: + +.BST's combine subtrees +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=combine, indent=0] +---- + +Take a look at the code above and the example. You will see how to remove node `30` and combine both children subtree and keeping the BST rules. Also, this method uses a helper to get the left-most node. We can implement it like this: + +.Binary Search Tree's get the leftmost node +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=leftMost, indent=0] +---- + +That’s all we need to remove elements from a BST. Check out the complete BST implementation https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/trees/binary-search-tree.js[here]. + +== Differentiating a balanced and non-balanced Tree + +As we insert and remove nodes from a BST we could end up like the tree on the left: + +.Balanced vs. Unbalanced Tree. +image:image40.png[image,width=454,height=201] + +The tree on the left is unbalanced. 
It looks like a Linked List and has the same runtime! Searching for an element would be *O(n)*, yikes! However, on a balanced tree, the search time is *O(log n)*, which is pretty good! That’s why we always want to keep the tree balanced. In further chapters, we are going to explore how to keep a tree balanced after each insert/delete. + +== Tree Complexity + +We can sum up the tree operations using Big O notation: + +.Time complexity for a Binary Search Tree (BST) +|=== +.2+.^s| Data Structure 2+^s| Searching By .2+^.^s| Insert .2+^.^s| Delete .2+^.^s| Space Complexity +^|_Index/Key_ ^|_Value_ +| BST (**un**balanced) ^|- ^|O(n) ^|O(n) ^|O(n) ^|O(n) +| BST (balanced) ^|- ^|O(log n) ^|O(log n) ^|O(log n) ^|O(n) +|=== diff --git a/book/chapters/tree--binary-tree-traversal.adoc b/book/chapters/tree--binary-tree-traversal.adoc new file mode 100644 index 00000000..9b8d7ce6 --- /dev/null +++ b/book/chapters/tree--binary-tree-traversal.adoc @@ -0,0 +1,86 @@ += Binary Tree Traversal + +As mentioned before, there are different ways to visit all the nodes or search for a value in a binary tree. On this section we are going to focus on depth-first tree traversal. The implementations are recursive since it's more elegant and concise. Let's explore them. + +== In Order Traversal + +If you tree happens to be a binary search tree (BST), then could use "in order" traversal to get the values sorted in ascending order. To accomplish this, you have to visit the nodes in a `left-root-right` order. + +If we have the following tree: +---- + 10 + / \ + 5 30 + / / \ + 4 15 40 + / +3 +---- + +In-order traverval will return `3, 4, 5, 10, 15, 30, 40`. + +Check out the implementation: + +.In-order traversal implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=inOrderTraversal, indent=0] +---- + +This function goes recursively to the leftmost element and then yield that node, then we go to the right child (if any) and repeat the process. 
This will get us the values ordered. + +== Pre Order Traversal + +Pre-order traveral visits nodes in this order `root-left-right` recursively. + +.Usage of pre-order traversal: +- Create a copy of the tree. +- Get prefix expression on of an expression tree used in the https://en.wikipedia.org/wiki/Polish_notation[polish notation]. + + +.Pre-order traversal implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=preOrderTraversal, indent=0] +---- + +If we have the following tree: +---- + 10 + / \ + 5 30 + / / \ + 4 15 40 + / +3 +---- + +Pre-order traverval will return `10, 5, 4, 3, 30, 15, 40`. + +== Post-order Traversal + +Post-order traveral goes to each node in this order `left-right-root` recursively. + +.Usages of the post-order tree traveral +- Traversal is used to delete the tree because you visit the children before removing the parent. +- Get the postfix expression of an expression tree used in the http://en.wikipedia.org/wiki/Reverse_Polish_notation[reverse polish notation]. + +.Post-order traversal implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=postOrderTraversal, indent=0] +---- + + +If we have the following tree: +---- + 10 + / \ + 5 30 + / / \ + 4 15 40 + / +3 +---- + +Post-order traverval will return `3, 4, 5, 15, 40, 30, 10`. diff --git a/book/chapters/tree--search.adoc b/book/chapters/tree--search.adoc new file mode 100644 index 00000000..daedd965 --- /dev/null +++ b/book/chapters/tree--search.adoc @@ -0,0 +1,121 @@ += Tree Search & Traversal + +So far we covered, how to insert/delete/search values in a binary search tree (BST). +However, not all binary trees are BST, so there are other ways to look for values or visit all nodes in a certain order. + +If we have the following tree: +---- + 10 + / \ + 5 30 + / / \ + 4 15 40 + / +3 +---- + +Depending on what traversal methods we used we will have a different visiting order. 
+ +.Tree traversal methods +- Breadth-first traversal (a.k.a level order traversal): `10, 5, 30, 4, 15, 40, 3` +- Depth-first traversal +** In-order (left-root-right): `3, 4, 5, 10, 15, 30, 40` +** Pre-order (root-left-right): `10, 5, 4, 3, 30, 15, 40` +** Post-order (left-right-root): `3, 4, 5, 15, 40, 30, 10` + +Why do we care? Well, there are certain problems that you can solve more optimally using one or another traversal method. For instance, to get the size of a subtree, finding maximums/minimums, and so on. + +Let's cover Breadth-first search (BFS) and Depth-first search (DFS). + +[Breadth First Search] +== Breadth-First Search for Binary Tree + +Breadth-first search goes wide (breadth) before going deep. Hence, the name. In other words, it goes level by level. It visits all the immediate nodes or children and then moves on to the children's children. +Let's see how we can implement it! + +.Breadth-First Search (BFS) Implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=bfs,indent=0] +---- + +As you see, the BFS uses a <> data structure. We enqueue all the children of the current node and then dequeue them as we visit them. + +Note the asterisk (`*`) in front of the function means that this function is a generator that yields values. + +.JavaScript Generators +**** + +JavaScript generators were added as part of ES6; they allow processing possibly expensive operations one by one. You can convert any function into a generator by adding the asterisk in front and `yield`ing a value. + +Then you can use `next()` to get the value and also `done` to know if it's the last value. 
Here are some examples: + +[source, javascript] +---- +function* dummyIdMaker() { + yield 0; + yield 1; + yield 2; +} + +const generator = dummyIdMaker() + +// getting values +console.log(generator.next()); // ↪️ {value: 0, done: false} +console.log(generator.next()); // ↪️ {value: 1, done: false} +console.log(generator.next()); // ↪️ {value: 2, done: false} +console.log(generator.next()); // ↪️ {value: undefined, done: true} + +// iterating generator values with for..of loops +for(const n of dummyIdMaker()) { + console.log(n); +} + +// converting a generator to an array +console.log(Array.from(dummyIdMaker())); // [0, 1, 2] +---- + +**** + + +== Depth-First Search for Binary Tree + +Depth-First search goes deep before going wide. It means, that starting for the root it goes as deep as it can until it found a leaf node (node without children), then it visits all the remaing nodes that were in the path. + +.Depth-First Search (DFS) Implementation with a Stack +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-search-tree.js[tag=dfs,indent=0] +---- + +This is a iterative implementation of a DFS using an <>. +It's almost identical to the BFS but instead of using a <> we usa a Stack. +We can also implement it as recursive functions are we are going to see in the <> section. + +== Depth-First Search vs. Breadth-First Search + +We can see visually the difference on how the DFS and BFS search for nodes: + +.Depth-First Search vs. Breadth-First Search +image:depth-first-search-dfs-breadth-first-search-bfs.jpg[] + +As you can see the DFS in two iterations is already at one of the farthest node from the root while BFS search nearby nodes first. + +.Use DFS when: +- The node you are looking for is likely to be *far* from the root. + +.Use BFS when: +- The node you are looking for is *nearby* the root. 
+ +:leveloffset: +1 + +include::tree--binary-tree-traversal.adoc[] + +:leveloffset: -1 + + + + + + + diff --git a/book/chapters/tree--self-balancing-rotations.adoc b/book/chapters/tree--self-balancing-rotations.adoc new file mode 100644 index 00000000..f4adfb2b --- /dev/null +++ b/book/chapters/tree--self-balancing-rotations.adoc @@ -0,0 +1,141 @@ += Self-balancing Binary Search Trees + +Binary Search Trees (BST) are a great data structure to find elements very fast _O(log n)_. +However, when the BST's branches have different sizes, then the performance suffers. +In the worst case, all nodes can go to one side (e.g. right) and then the search time would be linear. +At this point, searching for an element won't be any better on that tree than on an array or linked list. Yikes! + +Self-balanced trees will automatically balance the tree when an element is inserted to keep search performance. +We balance a tree by making the height (distance from a node to the root) of any leaf on the tree as similar as possible. + +.From unbalanced BST to balanced BST +[source, javascript] +---- +1 2 + \ / \ + 2 => 1 3 + \ + 3 +---- + +In the example above: +- Unbalanced BST: the height of node `3` is 2 and the height of node `2` is 1. +- Balanced BST: the height of node `3` is 1 and the height of node `2` is 1. Much better! + +As you might notice, we balanced the tree in the example by doing a rotation. +To be more specific, we rotated node `1` to the left to balance the tree. +Let's examine all the possible rotations we can do to balance a tree. + +== Tree Rotations + +We can do single rotations left and right and also we can do double rotations. +Let's go one by one. + +=== Single Right Rotation + +Right rotation moves a node on the right as a child of another node. + +Take a look at the `@example` in the code below. +As you can see we have an unbalanced tree `4-3-2-1`. +We want to balance the tree, for that we need to do a right rotation of node 3. +So, node 3 is moved as the right child of its previous child. 
+ +.Single right rotation implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-rotations.js[tag=rightRotation] +---- + +.In the `rightRotation` we identify 3 nodes: +- `node` this is the node we want to rotate to the right. E.g., `node 3` +- `newParent` this is the new parent after the rotation. E.g., `node 2` +- `grandparent` this the current's node parent. E.g. `node 4`. + +The `swapParentChild` as it name says, swap the children. +For our example, it swaps `node 4`'s left children from `node 3` to `node 2`. + +Take a look at the implementation + +.Swap Parent and Child Implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-rotations.js[tag=swapParentChild] +---- + +After `swapParentChild`, we have the following: +---- + 4 + / + 2 - 3* + / + 1 +---- + +Still not quite what we want. +So, `newParent.setRightAndUpdateParent(node)` will make `node 3` the right child of `node 2`. +Finally, we remove left child of `node 3` to be `null`. + +---- + 4 + / + 2 + / \ + 1 3* +---- + +Check out the <> implementation again. It should make more sense to you now. + +This rotation is also known as `RR rotation`. + + +=== Single Left Rotation + +Left rotation is similar to the `rightRotation` we explained above. + +.Single left rotation implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-rotations.js[tag=leftRotation] +---- + +As you can see, this function is just the opposite of `rightRotation`. Where ever we used the right now we use the left here and vice versa. +This rotation is also known as `LL rotation`. + +If you are curious about the `setRightAndUpdateParent` and `setLeftAndUpdateParent`. 
Here's the implementation: + +.Set and update parent implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/binary-tree-node.js[tag=setAndUpdateParent] +---- + +You can also checkout the full +https://github.com/amejiarosario/dsa.js/blob/adfd8a660bbe0a7068fd7881aff9f51bdb9f92ae/src/data-structures/trees/binary-tree-node.js#L20[binary tree node implementation]. + +=== Left Right Rotation + +This time are we going to do a double rotation. + +.Left-Right rotation implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-rotations.js[tag=leftRightRotation] +---- + +As you can see we do a left and then a right rotation. This is also called `LR rotation` + +=== Right Left Rotation + +Very similar to `leftRightRotation`. The difference is that we rotate right and then left. + +.Right-Left rotation implementation +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-rotations.js[tag=rightLeftRotation] +---- + +This rotation is also refered as `RL rotation`. + +== Self-balancing trees implementations + +So far, we have study how to make tree rotations which are the basis for self-balancing trees. There are different implementations of self-balancing trees such a Red-Black Tree and AVL Tree. diff --git a/book/chapters/tree.adoc b/book/chapters/tree.adoc new file mode 100644 index 00000000..441f2516 --- /dev/null +++ b/book/chapters/tree.adoc @@ -0,0 +1,98 @@ += Tree + +A tree is a non-linear data structure where a node can have zero or more connections. The topmost node in a tree is called *root*. The linked nodes to the root are called *children* or *descendants*. + +.Tree Data Structure: root node and descendants. +image:image31.jpg[image,width=404,height=240] + +As you can see in the picture above, this data structure resembles an inverted tree hence the name. It starts with a *root* node and *branch* off with its descendants, and finally *leaves*. 
+ +== Implementing a Tree + +Implementing a tree is not too hard. It’s similar to a <>. The main difference is that instead of having `next` and `previous` links, we have an infinite number of linked nodes (children/descendants). + +.Tree's node constructor +[source, javascript] +---- +include::{codedir}/data-structures/trees/tree-node.js[tag=snippet] +---- + +Simple! Right? But there are some constraints that you have to keep at all times. + +.Tree data structures constraints +1. *Loops*: You have to be careful *not* to make a circular loop. Otherwise, this wouldn’t be a tree anymore but a <>! E.g., Node A has B as a child, then Node B lists Node A as its descendant, forming a loop. ‍️ +2. *Parents*: A node with more than two parents. Again, if that happens, it is no longer a tree but a <>. +3. *Root*: a tree must have only one root. Two non-connected parts are not a tree. <> can have non-connected portions and doesn’t have a root. + +== Basic concepts + +.Here’s a summary of the basic concepts: +* The topmost node is called *root*. +* A node’s immediate linked nodes are called *children*. +* A *leaf* or *terminal node* is a node without any descendant or children. +* A node’s immediate ancestor is called *parent*. Yep, and like a family tree, a node can have *uncles* and *siblings*, and *grandparents*. +* *Internal nodes* are all nodes except for the leaf nodes and the root node. +* The connection/link between nodes is called *edge*. +* The *height of a _tree_* is the distance (edge count) from the farthest leaf to the root. +* The *height of a _node_* is obtained by counting the edges between the _node_ and the most distant leaf. For instance, from the image above: + +** Node A has a height of 3. +** Node G has a height of 1. +** Node I has a height of 0. + +* The *depth of a tree* is the distance (edge count) from the root to the farthest leaf.
+ +.Tree anatomy +image:image31.jpg[image] + +== Types of Binary Trees + +There are different kinds of trees depending on the restrictions. E.g., trees that have two children or fewer are called *binary trees*, while trees with at most three children are called *ternary trees*. Since binary trees are the most common, we are going to cover them here and others in another chapter. + +=== Binary Tree + +The binary tree restricts the nodes to have at most two children. Trees, in general, can have 3, 4, 23 or more, but not binary trees. + +.Binary tree has at most 2 children while non-binary trees can have more. +image:image32.png[image,width=321,height=193] + +Binary trees are one of the most common types and are used to build other data structures and applications. + +.Binary Tree Applications +- <> +- <> +- Priority Queues +- <> + + +=== Binary Search Tree (BST) + +The Binary Search Tree (BST) is a specialization of the binary tree. BST has the same restriction as a binary tree; each node has at most two children. However, there’s another restriction: the values are ordered. It means the left child’s value has to be less than or equal to the parent’s. In turn, the right child’s value has to be bigger than the parent’s. + +> BST: left ≤ parent < right + +.BST or ordered binary tree vs. non-BST. +image:image33.png[image,width=348,height=189] + + +=== Binary Heap + +The heap (max-heap) is a type of binary tree where the children's values are lower than the parent's. As opposed to the BST, the left child doesn’t have to be smaller than the right child. + +.Heap vs BST +image:image34.png[image,width=325,height=176] + + +The (max) heap has the maximum value in the root, while BST doesn’t. + +There are two kinds of heaps: min-heap and max-heap. +For a *max-heap*, the root has the highest value. The heap guarantees that as you move away from the root, the values get smaller. The opposite is true for a *min-heap*.
In a min-heap, the lowest value is at the root, and as you go down to the descendants, the values keep increasing. + +.Max-heap keeps the highest value at the top while min-heap keeps the lowest at the root. +image:image35.png[image,width=258,height=169] + + +.Heap vs. Binary Search Tree +**** +Heap is better at finding max or min values in constant time *O(1)*, while a balanced BST is good at finding any element in *O(log n)*. Heaps are often used to implement priority queues while BST is used when you need every value sorted. +**** diff --git a/book/chapters/trie.adoc b/book/chapters/trie.adoc new file mode 100644 index 00000000..6c310e76 --- /dev/null +++ b/book/chapters/trie.adoc @@ -0,0 +1,3 @@ += Trie + +Aute ad cupidatat cillum enim deserunt. Reprehenderit eiusmod non do eiusmod duis culpa ipsum consequat tempor magna elit. Fugiat est eu proident incididunt adipisicing. Enim tempor aute ad quis officia enim enim pariatur do commodo labore sunt minim. diff --git a/book/fonts/EmojiSymbols-Regular.woff b/book/fonts/EmojiSymbols-Regular.woff new file mode 100755 index 00000000..f07716fd Binary files /dev/null and b/book/fonts/EmojiSymbols-Regular.woff differ diff --git a/book/fonts/FiraCode-Bold.ttf b/book/fonts/FiraCode-Bold.ttf new file mode 100644 index 00000000..0d78eef3 Binary files /dev/null and b/book/fonts/FiraCode-Bold.ttf differ diff --git a/book/fonts/FiraCode-Light.ttf b/book/fonts/FiraCode-Light.ttf new file mode 100644 index 00000000..18b9e9a0 Binary files /dev/null and b/book/fonts/FiraCode-Light.ttf differ diff --git a/book/fonts/FiraCode-Medium.ttf b/book/fonts/FiraCode-Medium.ttf new file mode 100644 index 00000000..d1066320 Binary files /dev/null and b/book/fonts/FiraCode-Medium.ttf differ diff --git a/book/fonts/FiraCode-Regular.ttf b/book/fonts/FiraCode-Regular.ttf new file mode 100644 index 00000000..0fb08171 Binary files /dev/null and b/book/fonts/FiraCode-Regular.ttf differ diff --git a/book/fonts/FiraCode-Retina.ttf
b/book/fonts/FiraCode-Retina.ttf new file mode 100644 index 00000000..5bbb74be Binary files /dev/null and b/book/fonts/FiraCode-Retina.ttf differ diff --git a/book/fonts/FiraCode_1.206.zip b/book/fonts/FiraCode_1.206.zip new file mode 100644 index 00000000..7854b71f Binary files /dev/null and b/book/fonts/FiraCode_1.206.zip differ diff --git a/book/fonts/IBMPlexMono-Bold.ttf b/book/fonts/IBMPlexMono-Bold.ttf new file mode 100755 index 00000000..c05ba515 Binary files /dev/null and b/book/fonts/IBMPlexMono-Bold.ttf differ diff --git a/book/fonts/IBMPlexMono-BoldItalic.ttf b/book/fonts/IBMPlexMono-BoldItalic.ttf new file mode 100755 index 00000000..a088dbdf Binary files /dev/null and b/book/fonts/IBMPlexMono-BoldItalic.ttf differ diff --git a/book/fonts/IBMPlexMono-Italic.ttf b/book/fonts/IBMPlexMono-Italic.ttf new file mode 100755 index 00000000..fbf75b7c Binary files /dev/null and b/book/fonts/IBMPlexMono-Italic.ttf differ diff --git a/book/fonts/IBMPlexMono-Regular.ttf b/book/fonts/IBMPlexMono-Regular.ttf new file mode 100755 index 00000000..74d3681f Binary files /dev/null and b/book/fonts/IBMPlexMono-Regular.ttf differ diff --git a/book/fonts/NotoColorEmoji.ttf b/book/fonts/NotoColorEmoji.ttf new file mode 100644 index 00000000..69cf21a1 Binary files /dev/null and b/book/fonts/NotoColorEmoji.ttf differ diff --git a/book/fonts/NotoEmoji-Regular.ttf b/book/fonts/NotoEmoji-Regular.ttf new file mode 100644 index 00000000..19b7badf Binary files /dev/null and b/book/fonts/NotoEmoji-Regular.ttf differ diff --git a/book/fonts/NotoSans-Bold.ttf b/book/fonts/NotoSans-Bold.ttf new file mode 100755 index 00000000..987da8c9 Binary files /dev/null and b/book/fonts/NotoSans-Bold.ttf differ diff --git a/book/fonts/NotoSans-BoldItalic.ttf b/book/fonts/NotoSans-BoldItalic.ttf new file mode 100755 index 00000000..8233baf3 Binary files /dev/null and b/book/fonts/NotoSans-BoldItalic.ttf differ diff --git a/book/fonts/NotoSans-Italic.ttf b/book/fonts/NotoSans-Italic.ttf new file mode 
100755 index 00000000..14c0e3c9 Binary files /dev/null and b/book/fonts/NotoSans-Italic.ttf differ diff --git a/book/fonts/NotoSans-Regular.ttf b/book/fonts/NotoSans-Regular.ttf new file mode 100755 index 00000000..79a438a5 Binary files /dev/null and b/book/fonts/NotoSans-Regular.ttf differ diff --git a/book/fonts/NotoSerif-Bold.ttf b/book/fonts/NotoSerif-Bold.ttf new file mode 100755 index 00000000..252aa1e8 Binary files /dev/null and b/book/fonts/NotoSerif-Bold.ttf differ diff --git a/book/fonts/NotoSerif-BoldItalic.ttf b/book/fonts/NotoSerif-BoldItalic.ttf new file mode 100755 index 00000000..92694c63 Binary files /dev/null and b/book/fonts/NotoSerif-BoldItalic.ttf differ diff --git a/book/fonts/NotoSerif-Italic.ttf b/book/fonts/NotoSerif-Italic.ttf new file mode 100755 index 00000000..3306f4f9 Binary files /dev/null and b/book/fonts/NotoSerif-Italic.ttf differ diff --git a/book/fonts/NotoSerif-Regular.ttf b/book/fonts/NotoSerif-Regular.ttf new file mode 100755 index 00000000..828d61f0 Binary files /dev/null and b/book/fonts/NotoSerif-Regular.ttf differ diff --git a/book/fonts/OpenSansEmoji.ttf b/book/fonts/OpenSansEmoji.ttf new file mode 100644 index 00000000..57d86a62 Binary files /dev/null and b/book/fonts/OpenSansEmoji.ttf differ diff --git a/book/fonts/RobotoCondensed-Bold.ttf b/book/fonts/RobotoCondensed-Bold.ttf new file mode 100755 index 00000000..8c7a08be Binary files /dev/null and b/book/fonts/RobotoCondensed-Bold.ttf differ diff --git a/book/fonts/RobotoCondensed-BoldItalic.ttf b/book/fonts/RobotoCondensed-BoldItalic.ttf new file mode 100755 index 00000000..83e8a93b Binary files /dev/null and b/book/fonts/RobotoCondensed-BoldItalic.ttf differ diff --git a/book/fonts/RobotoCondensed-Italic.ttf b/book/fonts/RobotoCondensed-Italic.ttf new file mode 100755 index 00000000..a7fcb3f7 Binary files /dev/null and b/book/fonts/RobotoCondensed-Italic.ttf differ diff --git a/book/fonts/RobotoCondensed-Regular.ttf b/book/fonts/RobotoCondensed-Regular.ttf new file 
mode 100755 index 00000000..533e3999 Binary files /dev/null and b/book/fonts/RobotoCondensed-Regular.ttf differ diff --git a/book/fonts/RobotoMono-Bold.ttf b/book/fonts/RobotoMono-Bold.ttf new file mode 100755 index 00000000..61842afd Binary files /dev/null and b/book/fonts/RobotoMono-Bold.ttf differ diff --git a/book/fonts/RobotoMono-BoldItalic.ttf b/book/fonts/RobotoMono-BoldItalic.ttf new file mode 100755 index 00000000..a06919c1 Binary files /dev/null and b/book/fonts/RobotoMono-BoldItalic.ttf differ diff --git a/book/fonts/RobotoMono-Italic.ttf b/book/fonts/RobotoMono-Italic.ttf new file mode 100755 index 00000000..6d29a69f Binary files /dev/null and b/book/fonts/RobotoMono-Italic.ttf differ diff --git a/book/fonts/RobotoMono-Regular.ttf b/book/fonts/RobotoMono-Regular.ttf new file mode 100755 index 00000000..f7b4a9b3 Binary files /dev/null and b/book/fonts/RobotoMono-Regular.ttf differ diff --git a/book/fonts/RobotoSlab-Bold.ttf b/book/fonts/RobotoSlab-Bold.ttf new file mode 100755 index 00000000..0eefcd0c Binary files /dev/null and b/book/fonts/RobotoSlab-Bold.ttf differ diff --git a/book/fonts/RobotoSlab-Light.ttf b/book/fonts/RobotoSlab-Light.ttf new file mode 100755 index 00000000..825e64d6 Binary files /dev/null and b/book/fonts/RobotoSlab-Light.ttf differ diff --git a/book/fonts/RobotoSlab-Regular.ttf b/book/fonts/RobotoSlab-Regular.ttf new file mode 100755 index 00000000..a904b6af Binary files /dev/null and b/book/fonts/RobotoSlab-Regular.ttf differ diff --git a/book/fonts/RobotoSlab-Thin.ttf b/book/fonts/RobotoSlab-Thin.ttf new file mode 100755 index 00000000..b8138265 Binary files /dev/null and b/book/fonts/RobotoSlab-Thin.ttf differ diff --git a/book/fonts/SourceCodePro-Bold.ttf b/book/fonts/SourceCodePro-Bold.ttf new file mode 100755 index 00000000..2e545fee Binary files /dev/null and b/book/fonts/SourceCodePro-Bold.ttf differ diff --git a/book/fonts/SourceCodePro-Light.ttf b/book/fonts/SourceCodePro-Light.ttf new file mode 100755 index 
00000000..8f95a47c Binary files /dev/null and b/book/fonts/SourceCodePro-Light.ttf differ diff --git a/book/fonts/SourceCodePro-Medium.ttf b/book/fonts/SourceCodePro-Medium.ttf new file mode 100755 index 00000000..b7471baa Binary files /dev/null and b/book/fonts/SourceCodePro-Medium.ttf differ diff --git a/book/fonts/SourceCodePro-Regular.ttf b/book/fonts/SourceCodePro-Regular.ttf new file mode 100755 index 00000000..fa1f90b9 Binary files /dev/null and b/book/fonts/SourceCodePro-Regular.ttf differ diff --git a/book/fonts/Symbola.ttf b/book/fonts/Symbola.ttf new file mode 100755 index 00000000..e863bd63 Binary files /dev/null and b/book/fonts/Symbola.ttf differ diff --git a/book/fonts/Symbola_hint.ttf b/book/fonts/Symbola_hint.ttf new file mode 100755 index 00000000..59e428fe Binary files /dev/null and b/book/fonts/Symbola_hint.ttf differ diff --git a/book/fonts/UbuntuMono-Bold.ttf b/book/fonts/UbuntuMono-Bold.ttf new file mode 100755 index 00000000..cdfb14b5 Binary files /dev/null and b/book/fonts/UbuntuMono-Bold.ttf differ diff --git a/book/fonts/UbuntuMono-BoldItalic.ttf b/book/fonts/UbuntuMono-BoldItalic.ttf new file mode 100755 index 00000000..c08c8778 Binary files /dev/null and b/book/fonts/UbuntuMono-BoldItalic.ttf differ diff --git a/book/fonts/UbuntuMono-Italic.ttf b/book/fonts/UbuntuMono-Italic.ttf new file mode 100755 index 00000000..b47ee3b1 Binary files /dev/null and b/book/fonts/UbuntuMono-Italic.ttf differ diff --git a/book/fonts/UbuntuMono-Regular.ttf b/book/fonts/UbuntuMono-Regular.ttf new file mode 100755 index 00000000..4b3bd7cd Binary files /dev/null and b/book/fonts/UbuntuMono-Regular.ttf differ diff --git a/book/fonts/emojione-android.ttf b/book/fonts/emojione-android.ttf new file mode 100755 index 00000000..4cd640d0 Binary files /dev/null and b/book/fonts/emojione-android.ttf differ diff --git a/book/fonts/emojione-apple.ttf b/book/fonts/emojione-apple.ttf new file mode 100755 index 00000000..a06616ff Binary files /dev/null and 
b/book/fonts/emojione-apple.ttf differ diff --git a/book/fonts/literata-bold-italic.ttf b/book/fonts/literata-bold-italic.ttf new file mode 100755 index 00000000..19ea4f07 Binary files /dev/null and b/book/fonts/literata-bold-italic.ttf differ diff --git a/book/fonts/literata-bold.ttf b/book/fonts/literata-bold.ttf new file mode 100755 index 00000000..7d1b6a27 Binary files /dev/null and b/book/fonts/literata-bold.ttf differ diff --git a/book/fonts/literata-italic.ttf b/book/fonts/literata-italic.ttf new file mode 100755 index 00000000..8ce6c3e5 Binary files /dev/null and b/book/fonts/literata-italic.ttf differ diff --git a/book/fonts/literata-regular.ttf b/book/fonts/literata-regular.ttf new file mode 100755 index 00000000..3c7bf132 Binary files /dev/null and b/book/fonts/literata-regular.ttf differ diff --git a/book/fonts/mplus1mn-bold-ascii.ttf b/book/fonts/mplus1mn-bold-ascii.ttf new file mode 100644 index 00000000..726bcc46 Binary files /dev/null and b/book/fonts/mplus1mn-bold-ascii.ttf differ diff --git a/book/fonts/mplus1mn-bold_italic-ascii.ttf b/book/fonts/mplus1mn-bold_italic-ascii.ttf new file mode 100644 index 00000000..c91d944a Binary files /dev/null and b/book/fonts/mplus1mn-bold_italic-ascii.ttf differ diff --git a/book/fonts/mplus1mn-italic-ascii.ttf b/book/fonts/mplus1mn-italic-ascii.ttf new file mode 100644 index 00000000..77c16844 Binary files /dev/null and b/book/fonts/mplus1mn-italic-ascii.ttf differ diff --git a/book/fonts/mplus1mn-regular-ascii-conums.ttf b/book/fonts/mplus1mn-regular-ascii-conums.ttf new file mode 100644 index 00000000..5645bbeb Binary files /dev/null and b/book/fonts/mplus1mn-regular-ascii-conums.ttf differ diff --git a/book/fonts/mplus1p-regular-fallback.ttf b/book/fonts/mplus1p-regular-fallback.ttf new file mode 100644 index 00000000..693454ac Binary files /dev/null and b/book/fonts/mplus1p-regular-fallback.ttf differ diff --git a/book/fonts/notoserif-bold-subset.ttf b/book/fonts/notoserif-bold-subset.ttf new file mode 
100644 index 00000000..bbebf88c Binary files /dev/null and b/book/fonts/notoserif-bold-subset.ttf differ diff --git a/book/fonts/notoserif-bold_italic-subset.ttf b/book/fonts/notoserif-bold_italic-subset.ttf new file mode 100644 index 00000000..010b0255 Binary files /dev/null and b/book/fonts/notoserif-bold_italic-subset.ttf differ diff --git a/book/fonts/notoserif-italic-subset.ttf b/book/fonts/notoserif-italic-subset.ttf new file mode 100644 index 00000000..5c42a524 Binary files /dev/null and b/book/fonts/notoserif-italic-subset.ttf differ diff --git a/book/fonts/notoserif-regular-subset.ttf b/book/fonts/notoserif-regular-subset.ttf new file mode 100644 index 00000000..e9d54ea5 Binary files /dev/null and b/book/fonts/notoserif-regular-subset.ttf differ diff --git a/book/images/Find largest sum.svg b/book/images/Find largest sum.svg new file mode 100644 index 00000000..595fe27a --- /dev/null +++ b/book/images/Find largest sum.svg @@ -0,0 +1,86 @@ + + + + + + +G + +Optimal vs. Greedy path + + +5 + +5 + + + +3 + +3 + + + +5--3 + + + + +7 + +7 + + + +5--7 + + + + +87 + +87 + + + +3--87 + + + + +1 + +1 + + + +3--1 + + + + +2 + +2 + + + +7--2 + + + + +4 + +4 + + + +7--4 + + + + diff --git a/book/images/Recursive Fibonacci call tree with dp.svg b/book/images/Recursive Fibonacci call tree with dp.svg new file mode 100644 index 00000000..6cdb56a2 --- /dev/null +++ b/book/images/Recursive Fibonacci call tree with dp.svg @@ -0,0 +1,239 @@ + + + + + + +g + + + +art + +A* + +R + +T + + + +art1 + +A + +R* + +T + + + +art:f0->art1:f0 + + +1. swap A/A + + + +rat + +R + +A* + +T + + + +art:f1->rat:f0 + + +7. swap A/R + + + +tra + +T + +R* + +A + + + +art:f2->tra:f0 + + +13. swap A/T + + + +art1:f1->art:f0 + + +6. + + + +art2 + +A + +R + +T + + + +art1:f0->art2:f0 + + +2. swap R/R + + + +atr + +A + +T + +R + + + +art1:f2->atr:f0 + + +4. swap R/T + + + +art2:f2->art1:f1 + + +3. + + + +atr:f2->art1:f2 + + +5. + + + +rat:f2->art:f2 + + +12. 
+ + + +rat1 + +R + +A + +T + + + +rat:f0->rat1:f0 + + +8. swap A/A + + + +rta + +R + +T + +A + + + +rat:f2->rta:f0 + + +10. swap A/T + + + +rat1:f2->rat:f1 + + +9. + + + +rta:f2->rat:f2 + + +11. + + + +tra:f2->art:f2 + + +18. + + + +tra1 + +T + +R + +A + + + +tra:f0->tra1:f0 + + +14. swap R/R + + + +tar + +T + +A + +R + + + +tra:f2->tar:f0 + + +16. swap R/A + + + +tra1:f2->tra:f2 + + +15. + + + +tar:f2->tra:f2 + + +17. + + + diff --git a/book/images/Recursive fibonacci call tree.png b/book/images/Recursive fibonacci call tree.png new file mode 100644 index 00000000..9e18a2ab Binary files /dev/null and b/book/images/Recursive fibonacci call tree.png differ diff --git a/book/images/Recursive fibonacci call tree.svg b/book/images/Recursive fibonacci call tree.svg new file mode 100644 index 00000000..43875cdc --- /dev/null +++ b/book/images/Recursive fibonacci call tree.svg @@ -0,0 +1,74 @@ + + + + + + +G + + + +fib(5) + +fib(5) + + + +fib(4) + +fib(4) + + + +fib(5)--fib(4) + + + + +fib(3) + +fib(3) + + + +fib(4)--fib(3) + + + + +fib(2) + +fib(2) + + + +fib(3)--fib(2) + + + + +fib(1) + +fib(1) + + + +fib(2)--fib(1) + + + + +fib(0) + +fib(0) + + + +fib(2)--fib(0) + + + + diff --git a/book/images/Sudoku_solved_by_bactracking.gif b/book/images/Sudoku_solved_by_bactracking.gif new file mode 100644 index 00000000..cf62c1dd Binary files /dev/null and b/book/images/Sudoku_solved_by_bactracking.gif differ diff --git a/book/images/cover-Data Structures & Algorithms (1).png b/book/images/cover-Data Structures & Algorithms (1).png new file mode 100644 index 00000000..ae30909e Binary files /dev/null and b/book/images/cover-Data Structures & Algorithms (1).png differ diff --git a/book/images/cover-Data Structures & Algorithms.png b/book/images/cover-Data Structures & Algorithms.png new file mode 100644 index 00000000..d50b5f5f Binary files /dev/null and b/book/images/cover-Data Structures & Algorithms.png differ diff --git a/book/images/cover-dsa2.png b/book/images/cover-dsa2.png 
new file mode 100644 index 00000000..a08eefa3 Binary files /dev/null and b/book/images/cover-dsa2.png differ diff --git a/book/images/cover.png b/book/images/cover.png new file mode 100644 index 00000000..e274c975 Binary files /dev/null and b/book/images/cover.png differ diff --git a/book/images/cover1.png b/book/images/cover1.png new file mode 100644 index 00000000..233d8df1 Binary files /dev/null and b/book/images/cover1.png differ diff --git a/book/images/cover2.png b/book/images/cover2.png new file mode 100644 index 00000000..8d622368 Binary files /dev/null and b/book/images/cover2.png differ diff --git a/book/images/depth-first-search-dfs-breadth-first-search-bfs.jpg b/book/images/depth-first-search-dfs-breadth-first-search-bfs.jpg new file mode 100644 index 00000000..738392f2 Binary files /dev/null and b/book/images/depth-first-search-dfs-breadth-first-search-bfs.jpg differ diff --git a/book/images/dfs-graph.png b/book/images/dfs-graph.png new file mode 100644 index 00000000..48d3749d Binary files /dev/null and b/book/images/dfs-graph.png differ diff --git a/book/images/diagram-state.png b/book/images/diagram-state.png new file mode 100644 index 00000000..f5fcd5f7 Binary files /dev/null and b/book/images/diagram-state.png differ diff --git a/book/images/greedy-search-path-example.gif b/book/images/greedy-search-path-example.gif new file mode 100644 index 00000000..7f45d147 Binary files /dev/null and b/book/images/greedy-search-path-example.gif differ diff --git a/book/images/image1.png b/book/images/image1.png new file mode 100644 index 00000000..05d4f0b1 Binary files /dev/null and b/book/images/image1.png differ diff --git a/book/images/image10.png b/book/images/image10.png new file mode 100644 index 00000000..5bdf016a Binary files /dev/null and b/book/images/image10.png differ diff --git a/book/images/image11.png b/book/images/image11.png new file mode 100644 index 00000000..6377ac85 Binary files /dev/null and b/book/images/image11.png differ diff --git 
a/book/images/image12.png b/book/images/image12.png new file mode 100644 index 00000000..bafb7379 Binary files /dev/null and b/book/images/image12.png differ diff --git a/book/images/image13.png b/book/images/image13.png new file mode 100644 index 00000000..80520673 Binary files /dev/null and b/book/images/image13.png differ diff --git a/book/images/image14.png b/book/images/image14.png new file mode 100644 index 00000000..fef6fb82 Binary files /dev/null and b/book/images/image14.png differ diff --git a/book/images/image15.png b/book/images/image15.png new file mode 100644 index 00000000..53e67523 Binary files /dev/null and b/book/images/image15.png differ diff --git a/book/images/image16.png b/book/images/image16.png new file mode 100644 index 00000000..86bbbe41 Binary files /dev/null and b/book/images/image16.png differ diff --git a/book/images/image17.png b/book/images/image17.png new file mode 100644 index 00000000..8ae6a99a Binary files /dev/null and b/book/images/image17.png differ diff --git a/book/images/image18.png b/book/images/image18.png new file mode 100644 index 00000000..17548a2c Binary files /dev/null and b/book/images/image18.png differ diff --git a/book/images/image19.png b/book/images/image19.png new file mode 100644 index 00000000..6c15973f Binary files /dev/null and b/book/images/image19.png differ diff --git a/book/images/image20.png b/book/images/image20.png new file mode 100644 index 00000000..1200afd4 Binary files /dev/null and b/book/images/image20.png differ diff --git a/book/images/image21.png b/book/images/image21.png new file mode 100644 index 00000000..c6a526a6 Binary files /dev/null and b/book/images/image21.png differ diff --git a/book/images/image22.png b/book/images/image22.png new file mode 100644 index 00000000..5924e1ed Binary files /dev/null and b/book/images/image22.png differ diff --git a/book/images/image23.png b/book/images/image23.png new file mode 100644 index 00000000..71e82784 Binary files /dev/null and 
b/book/images/image23.png differ diff --git a/book/images/image24.png b/book/images/image24.png new file mode 100644 index 00000000..6761e893 Binary files /dev/null and b/book/images/image24.png differ diff --git a/book/images/image25.png b/book/images/image25.png new file mode 100644 index 00000000..e9747b95 Binary files /dev/null and b/book/images/image25.png differ diff --git a/book/images/image26.png b/book/images/image26.png new file mode 100644 index 00000000..12f0a0c5 Binary files /dev/null and b/book/images/image26.png differ diff --git a/book/images/image27.png b/book/images/image27.png new file mode 100644 index 00000000..7e5823d5 Binary files /dev/null and b/book/images/image27.png differ diff --git a/book/images/image28.png b/book/images/image28.png new file mode 100644 index 00000000..d713d6a6 Binary files /dev/null and b/book/images/image28.png differ diff --git a/book/images/image29.png b/book/images/image29.png new file mode 100644 index 00000000..829fdc7d Binary files /dev/null and b/book/images/image29.png differ diff --git a/book/images/image3.png b/book/images/image3.png new file mode 100644 index 00000000..372c1eb1 Binary files /dev/null and b/book/images/image3.png differ diff --git a/book/images/image30.png b/book/images/image30.png new file mode 100644 index 00000000..e04978b2 Binary files /dev/null and b/book/images/image30.png differ diff --git a/book/images/image31.jpg b/book/images/image31.jpg new file mode 100644 index 00000000..13768500 Binary files /dev/null and b/book/images/image31.jpg differ diff --git a/book/images/image32.png b/book/images/image32.png new file mode 100644 index 00000000..b45612b5 Binary files /dev/null and b/book/images/image32.png differ diff --git a/book/images/image33.png b/book/images/image33.png new file mode 100644 index 00000000..d1d0546a Binary files /dev/null and b/book/images/image33.png differ diff --git a/book/images/image34.png b/book/images/image34.png new file mode 100644 index 00000000..d2ebaf7b 
Binary files /dev/null and b/book/images/image34.png differ diff --git a/book/images/image35.png b/book/images/image35.png new file mode 100644 index 00000000..a986b77b Binary files /dev/null and b/book/images/image35.png differ diff --git a/book/images/image36.png b/book/images/image36.png new file mode 100644 index 00000000..c40f7a27 Binary files /dev/null and b/book/images/image36.png differ diff --git a/book/images/image37.png b/book/images/image37.png new file mode 100644 index 00000000..85d17a12 Binary files /dev/null and b/book/images/image37.png differ diff --git a/book/images/image38.png b/book/images/image38.png new file mode 100644 index 00000000..a79edaec Binary files /dev/null and b/book/images/image38.png differ diff --git a/book/images/image39.png b/book/images/image39.png new file mode 100644 index 00000000..7f580967 Binary files /dev/null and b/book/images/image39.png differ diff --git a/book/images/image4.png b/book/images/image4.png new file mode 100644 index 00000000..8fb3081d Binary files /dev/null and b/book/images/image4.png differ diff --git a/book/images/image40.png b/book/images/image40.png new file mode 100644 index 00000000..8c325dc4 Binary files /dev/null and b/book/images/image40.png differ diff --git a/book/images/image41.png b/book/images/image41.png new file mode 100644 index 00000000..d88eb382 Binary files /dev/null and b/book/images/image41.png differ diff --git a/book/images/image42.png b/book/images/image42.png new file mode 100644 index 00000000..bf68cec5 Binary files /dev/null and b/book/images/image42.png differ diff --git a/book/images/image43.jpg b/book/images/image43.jpg new file mode 100644 index 00000000..e255f881 Binary files /dev/null and b/book/images/image43.jpg differ diff --git a/book/images/image44.jpg b/book/images/image44.jpg new file mode 100644 index 00000000..3a52cfa8 Binary files /dev/null and b/book/images/image44.jpg differ diff --git a/book/images/image45.emf b/book/images/image45.emf new file mode 100644 
index 00000000..82109b7f Binary files /dev/null and b/book/images/image45.emf differ diff --git a/book/images/image45.png b/book/images/image45.png new file mode 100644 index 00000000..6c036662 Binary files /dev/null and b/book/images/image45.png differ diff --git a/book/images/image46.png b/book/images/image46.png new file mode 100644 index 00000000..ebcc245a Binary files /dev/null and b/book/images/image46.png differ diff --git a/book/images/image47.png b/book/images/image47.png new file mode 100644 index 00000000..7cb466ed Binary files /dev/null and b/book/images/image47.png differ diff --git a/book/images/image48.png b/book/images/image48.png new file mode 100644 index 00000000..838e2d7f Binary files /dev/null and b/book/images/image48.png differ diff --git a/book/images/image49.jpeg b/book/images/image49.jpeg new file mode 100644 index 00000000..40c4445c Binary files /dev/null and b/book/images/image49.jpeg differ diff --git a/book/images/image5.png b/book/images/image5.png new file mode 100644 index 00000000..b6585830 Binary files /dev/null and b/book/images/image5.png differ diff --git a/book/images/image6.png b/book/images/image6.png new file mode 100644 index 00000000..00173269 Binary files /dev/null and b/book/images/image6.png differ diff --git a/book/images/image7.png b/book/images/image7.png new file mode 100644 index 00000000..7c38519a Binary files /dev/null and b/book/images/image7.png differ diff --git a/book/images/image8.png b/book/images/image8.png new file mode 100644 index 00000000..051de9c4 Binary files /dev/null and b/book/images/image8.png differ diff --git a/book/images/image9.png b/book/images/image9.png new file mode 100644 index 00000000..55558cb3 Binary files /dev/null and b/book/images/image9.png differ diff --git a/book/images/logo-sq2.png b/book/images/logo-sq2.png new file mode 100644 index 00000000..57d2a4b8 Binary files /dev/null and b/book/images/logo-sq2.png differ diff --git a/book/images/logo.png b/book/images/logo.png new 
file mode 100644 index 00000000..bb5e3831 Binary files /dev/null and b/book/images/logo.png differ diff --git a/book/images/logo1.png b/book/images/logo1.png new file mode 100644 index 00000000..92bec33b Binary files /dev/null and b/book/images/logo1.png differ diff --git a/book/images/logo3.png b/book/images/logo3.png new file mode 100644 index 00000000..24d81b8b Binary files /dev/null and b/book/images/logo3.png differ diff --git a/book/images/logo4.png b/book/images/logo4.png new file mode 100644 index 00000000..37dd0b3f Binary files /dev/null and b/book/images/logo4.png differ diff --git a/book/images/quicksort.gif b/book/images/quicksort.gif new file mode 100644 index 00000000..c1071051 Binary files /dev/null and b/book/images/quicksort.gif differ diff --git a/book/images/selection-sort.gif b/book/images/selection-sort.gif new file mode 100644 index 00000000..b441c4eb Binary files /dev/null and b/book/images/selection-sort.gif differ diff --git a/book/sample.adoc b/book/sample.adoc new file mode 100644 index 00000000..83abe88b --- /dev/null +++ b/book/sample.adoc @@ -0,0 +1,41 @@ +include::_conf/variables.adoc[] + += {doctitle} + +// remove numbering from titles, and sub-titles e.g. 
1.1 +:sectnums!: + +// Copyright © 2018 Adrian Mejia (g) +include::chapters/colophon.adoc[] + +// Abstract and Dedication MUST have a level-0 heading in EPUB and Kindle +// but level-1 in PDF and HTML +ifndef::backend-epub3[:leveloffset: +1] +include::chapters/dedication.adoc[] +ifndef::backend-epub3[:leveloffset: -1] + +// (g) +include::chapters/preface.adoc[] + +// add sections to chapters +:sectnums: + + +//----------------------------------- +// TODO: commment out sample on final +//----------------------------------- + +include::chapters/sample.adoc[] + +//----------------------------------- +// TODO: end remove ------ +//----------------------------------- + + +// --- end algorithms --- + + +// +// end chapters +// +include::chapters/epigraph.adoc[] diff --git a/src/exercises/01-arrays/check-permutation.js b/lab/exercises/01-arrays/check-permutation.js similarity index 100% rename from src/exercises/01-arrays/check-permutation.js rename to lab/exercises/01-arrays/check-permutation.js diff --git a/src/exercises/01-arrays/one-away.js b/lab/exercises/01-arrays/one-away.js similarity index 100% rename from src/exercises/01-arrays/one-away.js rename to lab/exercises/01-arrays/one-away.js diff --git a/src/exercises/01-arrays/palindrome-permutation.js b/lab/exercises/01-arrays/palindrome-permutation.js similarity index 100% rename from src/exercises/01-arrays/palindrome-permutation.js rename to lab/exercises/01-arrays/palindrome-permutation.js diff --git a/src/exercises/01-arrays/permutation.js b/lab/exercises/01-arrays/permutation.js similarity index 100% rename from src/exercises/01-arrays/permutation.js rename to lab/exercises/01-arrays/permutation.js diff --git a/src/exercises/01-arrays/rotate-matrix.js b/lab/exercises/01-arrays/rotate-matrix.js similarity index 100% rename from src/exercises/01-arrays/rotate-matrix.js rename to lab/exercises/01-arrays/rotate-matrix.js diff --git a/src/exercises/01-arrays/rotate-string.js 
b/lab/exercises/01-arrays/rotate-string.js similarity index 100% rename from src/exercises/01-arrays/rotate-string.js rename to lab/exercises/01-arrays/rotate-string.js diff --git a/src/exercises/01-arrays/stringCompression.js b/lab/exercises/01-arrays/stringCompression.js similarity index 100% rename from src/exercises/01-arrays/stringCompression.js rename to lab/exercises/01-arrays/stringCompression.js diff --git a/src/exercises/01-arrays/urlify.js b/lab/exercises/01-arrays/urlify.js similarity index 100% rename from src/exercises/01-arrays/urlify.js rename to lab/exercises/01-arrays/urlify.js diff --git a/src/exercises/01-arrays/zero-matrix.js b/lab/exercises/01-arrays/zero-matrix.js similarity index 100% rename from src/exercises/01-arrays/zero-matrix.js rename to lab/exercises/01-arrays/zero-matrix.js diff --git a/src/exercises/02-linked-list/delete-middle-node.js b/lab/exercises/02-linked-list/delete-middle-node.js similarity index 100% rename from src/exercises/02-linked-list/delete-middle-node.js rename to lab/exercises/02-linked-list/delete-middle-node.js diff --git a/src/exercises/02-linked-list/intersection.js b/lab/exercises/02-linked-list/intersection.js similarity index 100% rename from src/exercises/02-linked-list/intersection.js rename to lab/exercises/02-linked-list/intersection.js diff --git a/src/exercises/02-linked-list/intersection.spec.js b/lab/exercises/02-linked-list/intersection.spec.js similarity index 100% rename from src/exercises/02-linked-list/intersection.spec.js rename to lab/exercises/02-linked-list/intersection.spec.js diff --git a/src/exercises/02-linked-list/kth-to-last.js b/lab/exercises/02-linked-list/kth-to-last.js similarity index 100% rename from src/exercises/02-linked-list/kth-to-last.js rename to lab/exercises/02-linked-list/kth-to-last.js diff --git a/src/exercises/02-linked-list/linkedlist.js b/lab/exercises/02-linked-list/linkedlist.js similarity index 100% rename from src/exercises/02-linked-list/linkedlist.js rename 
to lab/exercises/02-linked-list/linkedlist.js diff --git a/src/exercises/02-linked-list/linkedlist.spec.js b/lab/exercises/02-linked-list/linkedlist.spec.js similarity index 100% rename from src/exercises/02-linked-list/linkedlist.spec.js rename to lab/exercises/02-linked-list/linkedlist.spec.js diff --git a/src/exercises/02-linked-list/loop-detection.js b/lab/exercises/02-linked-list/loop-detection.js similarity index 100% rename from src/exercises/02-linked-list/loop-detection.js rename to lab/exercises/02-linked-list/loop-detection.js diff --git a/src/exercises/02-linked-list/loop-detection.spec.js b/lab/exercises/02-linked-list/loop-detection.spec.js similarity index 100% rename from src/exercises/02-linked-list/loop-detection.spec.js rename to lab/exercises/02-linked-list/loop-detection.spec.js diff --git a/src/exercises/02-linked-list/node.js b/lab/exercises/02-linked-list/node.js similarity index 100% rename from src/exercises/02-linked-list/node.js rename to lab/exercises/02-linked-list/node.js diff --git a/src/exercises/02-linked-list/palindrome.js b/lab/exercises/02-linked-list/palindrome.js similarity index 100% rename from src/exercises/02-linked-list/palindrome.js rename to lab/exercises/02-linked-list/palindrome.js diff --git a/src/exercises/02-linked-list/palindrome.spec.js b/lab/exercises/02-linked-list/palindrome.spec.js similarity index 100% rename from src/exercises/02-linked-list/palindrome.spec.js rename to lab/exercises/02-linked-list/palindrome.spec.js diff --git a/src/exercises/02-linked-list/partition.js b/lab/exercises/02-linked-list/partition.js similarity index 100% rename from src/exercises/02-linked-list/partition.js rename to lab/exercises/02-linked-list/partition.js diff --git a/src/exercises/02-linked-list/remove-dups.js b/lab/exercises/02-linked-list/remove-dups.js similarity index 100% rename from src/exercises/02-linked-list/remove-dups.js rename to lab/exercises/02-linked-list/remove-dups.js diff --git 
a/src/exercises/02-linked-list/sum-lists.js b/lab/exercises/02-linked-list/sum-lists.js similarity index 100% rename from src/exercises/02-linked-list/sum-lists.js rename to lab/exercises/02-linked-list/sum-lists.js diff --git a/src/exercises/03-stacks-and-queues/animal-shelter.js b/lab/exercises/03-stacks-and-queues/animal-shelter.js similarity index 100% rename from src/exercises/03-stacks-and-queues/animal-shelter.js rename to lab/exercises/03-stacks-and-queues/animal-shelter.js diff --git a/src/exercises/03-stacks-and-queues/animal-shelter.spec.js b/lab/exercises/03-stacks-and-queues/animal-shelter.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/animal-shelter.spec.js rename to lab/exercises/03-stacks-and-queues/animal-shelter.spec.js diff --git a/src/exercises/03-stacks-and-queues/min-stack.js b/lab/exercises/03-stacks-and-queues/min-stack.js similarity index 100% rename from src/exercises/03-stacks-and-queues/min-stack.js rename to lab/exercises/03-stacks-and-queues/min-stack.js diff --git a/src/exercises/03-stacks-and-queues/min-stack.spec.js b/lab/exercises/03-stacks-and-queues/min-stack.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/min-stack.spec.js rename to lab/exercises/03-stacks-and-queues/min-stack.spec.js diff --git a/src/exercises/03-stacks-and-queues/multi-stack.js b/lab/exercises/03-stacks-and-queues/multi-stack.js similarity index 100% rename from src/exercises/03-stacks-and-queues/multi-stack.js rename to lab/exercises/03-stacks-and-queues/multi-stack.js diff --git a/src/exercises/03-stacks-and-queues/multi-stack.spec.js b/lab/exercises/03-stacks-and-queues/multi-stack.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/multi-stack.spec.js rename to lab/exercises/03-stacks-and-queues/multi-stack.spec.js diff --git a/src/exercises/03-stacks-and-queues/queue-via-stack.js b/lab/exercises/03-stacks-and-queues/queue-via-stack.js similarity index 100% rename from 
src/exercises/03-stacks-and-queues/queue-via-stack.js rename to lab/exercises/03-stacks-and-queues/queue-via-stack.js diff --git a/src/exercises/03-stacks-and-queues/queue-via-stack.spec.js b/lab/exercises/03-stacks-and-queues/queue-via-stack.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/queue-via-stack.spec.js rename to lab/exercises/03-stacks-and-queues/queue-via-stack.spec.js diff --git a/src/exercises/03-stacks-and-queues/queue.js b/lab/exercises/03-stacks-and-queues/queue.js similarity index 100% rename from src/exercises/03-stacks-and-queues/queue.js rename to lab/exercises/03-stacks-and-queues/queue.js diff --git a/src/exercises/03-stacks-and-queues/queue.spec.js b/lab/exercises/03-stacks-and-queues/queue.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/queue.spec.js rename to lab/exercises/03-stacks-and-queues/queue.spec.js diff --git a/src/exercises/03-stacks-and-queues/set-of-stacks.js b/lab/exercises/03-stacks-and-queues/set-of-stacks.js similarity index 100% rename from src/exercises/03-stacks-and-queues/set-of-stacks.js rename to lab/exercises/03-stacks-and-queues/set-of-stacks.js diff --git a/src/exercises/03-stacks-and-queues/set-of-stacks.spec.js b/lab/exercises/03-stacks-and-queues/set-of-stacks.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/set-of-stacks.spec.js rename to lab/exercises/03-stacks-and-queues/set-of-stacks.spec.js diff --git a/src/exercises/03-stacks-and-queues/sort-stack.js b/lab/exercises/03-stacks-and-queues/sort-stack.js similarity index 100% rename from src/exercises/03-stacks-and-queues/sort-stack.js rename to lab/exercises/03-stacks-and-queues/sort-stack.js diff --git a/src/exercises/03-stacks-and-queues/sort-stack.spec.js b/lab/exercises/03-stacks-and-queues/sort-stack.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/sort-stack.spec.js rename to lab/exercises/03-stacks-and-queues/sort-stack.spec.js diff --git 
a/src/exercises/03-stacks-and-queues/stack.js b/lab/exercises/03-stacks-and-queues/stack.js similarity index 100% rename from src/exercises/03-stacks-and-queues/stack.js rename to lab/exercises/03-stacks-and-queues/stack.js diff --git a/src/exercises/03-stacks-and-queues/stack.spec.js b/lab/exercises/03-stacks-and-queues/stack.spec.js similarity index 100% rename from src/exercises/03-stacks-and-queues/stack.spec.js rename to lab/exercises/03-stacks-and-queues/stack.spec.js diff --git a/src/exercises/04-trees-and-graphs/binary-search-tree.js b/lab/exercises/04-trees-and-graphs/binary-search-tree.js similarity index 100% rename from src/exercises/04-trees-and-graphs/binary-search-tree.js rename to lab/exercises/04-trees-and-graphs/binary-search-tree.js diff --git a/src/exercises/04-trees-and-graphs/bst-sequence.js b/lab/exercises/04-trees-and-graphs/bst-sequence.js similarity index 100% rename from src/exercises/04-trees-and-graphs/bst-sequence.js rename to lab/exercises/04-trees-and-graphs/bst-sequence.js diff --git a/src/exercises/04-trees-and-graphs/bst-sequence.spec.js b/lab/exercises/04-trees-and-graphs/bst-sequence.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/bst-sequence.spec.js rename to lab/exercises/04-trees-and-graphs/bst-sequence.spec.js diff --git a/src/exercises/04-trees-and-graphs/build-order.js b/lab/exercises/04-trees-and-graphs/build-order.js similarity index 100% rename from src/exercises/04-trees-and-graphs/build-order.js rename to lab/exercises/04-trees-and-graphs/build-order.js diff --git a/src/exercises/04-trees-and-graphs/build-order.spec.js b/lab/exercises/04-trees-and-graphs/build-order.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/build-order.spec.js rename to lab/exercises/04-trees-and-graphs/build-order.spec.js diff --git a/src/exercises/04-trees-and-graphs/first-common-ancestor.js b/lab/exercises/04-trees-and-graphs/first-common-ancestor.js similarity index 100% rename from 
src/exercises/04-trees-and-graphs/first-common-ancestor.js rename to lab/exercises/04-trees-and-graphs/first-common-ancestor.js diff --git a/src/exercises/04-trees-and-graphs/first-common-ancestor.spec.js b/lab/exercises/04-trees-and-graphs/first-common-ancestor.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/first-common-ancestor.spec.js rename to lab/exercises/04-trees-and-graphs/first-common-ancestor.spec.js diff --git a/src/exercises/04-trees-and-graphs/graph.js b/lab/exercises/04-trees-and-graphs/graph.js similarity index 100% rename from src/exercises/04-trees-and-graphs/graph.js rename to lab/exercises/04-trees-and-graphs/graph.js diff --git a/src/exercises/04-trees-and-graphs/graph.spec.js b/lab/exercises/04-trees-and-graphs/graph.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/graph.spec.js rename to lab/exercises/04-trees-and-graphs/graph.spec.js diff --git a/src/exercises/04-trees-and-graphs/is-balanced.js b/lab/exercises/04-trees-and-graphs/is-balanced.js similarity index 100% rename from src/exercises/04-trees-and-graphs/is-balanced.js rename to lab/exercises/04-trees-and-graphs/is-balanced.js diff --git a/src/exercises/04-trees-and-graphs/is-balanced.spec.js b/lab/exercises/04-trees-and-graphs/is-balanced.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/is-balanced.spec.js rename to lab/exercises/04-trees-and-graphs/is-balanced.spec.js diff --git a/src/exercises/04-trees-and-graphs/is-binary-search-tree.js b/lab/exercises/04-trees-and-graphs/is-binary-search-tree.js similarity index 100% rename from src/exercises/04-trees-and-graphs/is-binary-search-tree.js rename to lab/exercises/04-trees-and-graphs/is-binary-search-tree.js diff --git a/src/exercises/04-trees-and-graphs/is-binary-search-tree.spec.js b/lab/exercises/04-trees-and-graphs/is-binary-search-tree.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/is-binary-search-tree.spec.js rename 
to lab/exercises/04-trees-and-graphs/is-binary-search-tree.spec.js diff --git a/src/exercises/04-trees-and-graphs/list-of-depths.js b/lab/exercises/04-trees-and-graphs/list-of-depths.js similarity index 100% rename from src/exercises/04-trees-and-graphs/list-of-depths.js rename to lab/exercises/04-trees-and-graphs/list-of-depths.js diff --git a/src/exercises/04-trees-and-graphs/list-of-depths.spec.js b/lab/exercises/04-trees-and-graphs/list-of-depths.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/list-of-depths.spec.js rename to lab/exercises/04-trees-and-graphs/list-of-depths.spec.js diff --git a/src/exercises/04-trees-and-graphs/minimal-height-tree.js b/lab/exercises/04-trees-and-graphs/minimal-height-tree.js similarity index 100% rename from src/exercises/04-trees-and-graphs/minimal-height-tree.js rename to lab/exercises/04-trees-and-graphs/minimal-height-tree.js diff --git a/src/exercises/04-trees-and-graphs/minimal-height-tree.spec.js b/lab/exercises/04-trees-and-graphs/minimal-height-tree.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/minimal-height-tree.spec.js rename to lab/exercises/04-trees-and-graphs/minimal-height-tree.spec.js diff --git a/src/exercises/04-trees-and-graphs/routes-between-nodes.js b/lab/exercises/04-trees-and-graphs/routes-between-nodes.js similarity index 100% rename from src/exercises/04-trees-and-graphs/routes-between-nodes.js rename to lab/exercises/04-trees-and-graphs/routes-between-nodes.js diff --git a/src/exercises/04-trees-and-graphs/routes-between-nodes.spec.js b/lab/exercises/04-trees-and-graphs/routes-between-nodes.spec.js similarity index 100% rename from src/exercises/04-trees-and-graphs/routes-between-nodes.spec.js rename to lab/exercises/04-trees-and-graphs/routes-between-nodes.spec.js diff --git a/src/exercises/04-trees-and-graphs/successor.js b/lab/exercises/04-trees-and-graphs/successor.js similarity index 100% rename from 
src/exercises/04-trees-and-graphs/successor.js rename to lab/exercises/04-trees-and-graphs/successor.js diff --git a/src/exercises/05-bits/00-bit-manipulation.js b/lab/exercises/05-bits/00-bit-manipulation.js similarity index 100% rename from src/exercises/05-bits/00-bit-manipulation.js rename to lab/exercises/05-bits/00-bit-manipulation.js diff --git a/src/exercises/05-bits/00-bit-manipulation.spec.js b/lab/exercises/05-bits/00-bit-manipulation.spec.js similarity index 100% rename from src/exercises/05-bits/00-bit-manipulation.spec.js rename to lab/exercises/05-bits/00-bit-manipulation.spec.js diff --git a/src/exercises/05-bits/01-insertion.js b/lab/exercises/05-bits/01-insertion.js similarity index 100% rename from src/exercises/05-bits/01-insertion.js rename to lab/exercises/05-bits/01-insertion.js diff --git a/src/exercises/05-bits/01-intersection.spec.js b/lab/exercises/05-bits/01-intersection.spec.js similarity index 100% rename from src/exercises/05-bits/01-intersection.spec.js rename to lab/exercises/05-bits/01-intersection.spec.js diff --git a/src/exercises/05-bits/02-binary-to-string.js b/lab/exercises/05-bits/02-binary-to-string.js similarity index 100% rename from src/exercises/05-bits/02-binary-to-string.js rename to lab/exercises/05-bits/02-binary-to-string.js diff --git a/src/exercises/05-bits/02-binary-to-string.spec.js b/lab/exercises/05-bits/02-binary-to-string.spec.js similarity index 100% rename from src/exercises/05-bits/02-binary-to-string.spec.js rename to lab/exercises/05-bits/02-binary-to-string.spec.js diff --git a/src/exercises/05-bits/03-flip-bit-to-win.js b/lab/exercises/05-bits/03-flip-bit-to-win.js similarity index 100% rename from src/exercises/05-bits/03-flip-bit-to-win.js rename to lab/exercises/05-bits/03-flip-bit-to-win.js diff --git a/src/exercises/05-bits/03-flip-bit-to-win.spec.js b/lab/exercises/05-bits/03-flip-bit-to-win.spec.js similarity index 100% rename from src/exercises/05-bits/03-flip-bit-to-win.spec.js rename to 
lab/exercises/05-bits/03-flip-bit-to-win.spec.js diff --git a/src/exercises/05-bits/04-next-number.js b/lab/exercises/05-bits/04-next-number.js similarity index 100% rename from src/exercises/05-bits/04-next-number.js rename to lab/exercises/05-bits/04-next-number.js diff --git a/src/exercises/05-bits/04-next-number.spec.js b/lab/exercises/05-bits/04-next-number.spec.js similarity index 100% rename from src/exercises/05-bits/04-next-number.spec.js rename to lab/exercises/05-bits/04-next-number.spec.js diff --git a/src/exercises/05-bits/05-debugger.js b/lab/exercises/05-bits/05-debugger.js similarity index 100% rename from src/exercises/05-bits/05-debugger.js rename to lab/exercises/05-bits/05-debugger.js diff --git a/src/exercises/05-bits/05-debugger.spec.js b/lab/exercises/05-bits/05-debugger.spec.js similarity index 100% rename from src/exercises/05-bits/05-debugger.spec.js rename to lab/exercises/05-bits/05-debugger.spec.js diff --git a/src/exercises/05-bits/06-conversion.js b/lab/exercises/05-bits/06-conversion.js similarity index 100% rename from src/exercises/05-bits/06-conversion.js rename to lab/exercises/05-bits/06-conversion.js diff --git a/src/exercises/05-bits/06-conversion.spec.js b/lab/exercises/05-bits/06-conversion.spec.js similarity index 100% rename from src/exercises/05-bits/06-conversion.spec.js rename to lab/exercises/05-bits/06-conversion.spec.js diff --git a/src/exercises/05-bits/07-pairwise-swap.js b/lab/exercises/05-bits/07-pairwise-swap.js similarity index 100% rename from src/exercises/05-bits/07-pairwise-swap.js rename to lab/exercises/05-bits/07-pairwise-swap.js diff --git a/src/exercises/05-bits/07-pairwise-swap.spec.js b/lab/exercises/05-bits/07-pairwise-swap.spec.js similarity index 100% rename from src/exercises/05-bits/07-pairwise-swap.spec.js rename to lab/exercises/05-bits/07-pairwise-swap.spec.js diff --git a/src/exercises/05-bits/08-draw-line.js b/lab/exercises/05-bits/08-draw-line.js similarity index 100% rename from 
src/exercises/05-bits/08-draw-line.js rename to lab/exercises/05-bits/08-draw-line.js diff --git a/src/exercises/05-bits/08-draw-line.spec.js b/lab/exercises/05-bits/08-draw-line.spec.js similarity index 100% rename from src/exercises/05-bits/08-draw-line.spec.js rename to lab/exercises/05-bits/08-draw-line.spec.js diff --git a/src/exercises/06-teasers/100-lockers.js b/lab/exercises/06-teasers/100-lockers.js similarity index 100% rename from src/exercises/06-teasers/100-lockers.js rename to lab/exercises/06-teasers/100-lockers.js diff --git a/src/exercises/06-teasers/gender-ration-simulator.js b/lab/exercises/06-teasers/gender-ration-simulator.js similarity index 100% rename from src/exercises/06-teasers/gender-ration-simulator.js rename to lab/exercises/06-teasers/gender-ration-simulator.js diff --git a/src/exercises/07-object-oriented-design/call-center.js b/lab/exercises/07-object-oriented-design/call-center.js similarity index 100% rename from src/exercises/07-object-oriented-design/call-center.js rename to lab/exercises/07-object-oriented-design/call-center.js diff --git a/src/exercises/07-object-oriented-design/hash-table.js b/lab/exercises/07-object-oriented-design/hash-table.js similarity index 100% rename from src/exercises/07-object-oriented-design/hash-table.js rename to lab/exercises/07-object-oriented-design/hash-table.js diff --git a/src/exercises/07-object-oriented-design/hash-table.spec.js b/lab/exercises/07-object-oriented-design/hash-table.spec.js similarity index 100% rename from src/exercises/07-object-oriented-design/hash-table.spec.js rename to lab/exercises/07-object-oriented-design/hash-table.spec.js diff --git a/src/exercises/07-object-oriented-design/minesweeper.js b/lab/exercises/07-object-oriented-design/minesweeper.js similarity index 100% rename from src/exercises/07-object-oriented-design/minesweeper.js rename to lab/exercises/07-object-oriented-design/minesweeper.js diff --git 
a/src/exercises/07-object-oriented-design/minesweeper.spec.js b/lab/exercises/07-object-oriented-design/minesweeper.spec.js similarity index 100% rename from src/exercises/07-object-oriented-design/minesweeper.spec.js rename to lab/exercises/07-object-oriented-design/minesweeper.spec.js diff --git a/src/exercises/07-object-oriented-design/othello.js b/lab/exercises/07-object-oriented-design/othello.js similarity index 100% rename from src/exercises/07-object-oriented-design/othello.js rename to lab/exercises/07-object-oriented-design/othello.js diff --git a/src/exercises/07-object-oriented-design/othello.spec.js b/lab/exercises/07-object-oriented-design/othello.spec.js similarity index 100% rename from src/exercises/07-object-oriented-design/othello.spec.js rename to lab/exercises/07-object-oriented-design/othello.spec.js diff --git a/src/exercises/08-dynamic-programming/coins.js b/lab/exercises/08-dynamic-programming/coins.js similarity index 100% rename from src/exercises/08-dynamic-programming/coins.js rename to lab/exercises/08-dynamic-programming/coins.js diff --git a/src/exercises/08-dynamic-programming/coins.spec.js b/lab/exercises/08-dynamic-programming/coins.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/coins.spec.js rename to lab/exercises/08-dynamic-programming/coins.spec.js diff --git a/src/exercises/08-dynamic-programming/count-ways.js b/lab/exercises/08-dynamic-programming/count-ways.js similarity index 100% rename from src/exercises/08-dynamic-programming/count-ways.js rename to lab/exercises/08-dynamic-programming/count-ways.js diff --git a/src/exercises/08-dynamic-programming/count-ways.spec.js b/lab/exercises/08-dynamic-programming/count-ways.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/count-ways.spec.js rename to lab/exercises/08-dynamic-programming/count-ways.spec.js diff --git a/src/exercises/08-dynamic-programming/hanoi-tower.js 
b/lab/exercises/08-dynamic-programming/hanoi-tower.js similarity index 100% rename from src/exercises/08-dynamic-programming/hanoi-tower.js rename to lab/exercises/08-dynamic-programming/hanoi-tower.js diff --git a/src/exercises/08-dynamic-programming/hanoi-tower.spec.js b/lab/exercises/08-dynamic-programming/hanoi-tower.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/hanoi-tower.spec.js rename to lab/exercises/08-dynamic-programming/hanoi-tower.spec.js diff --git a/src/exercises/08-dynamic-programming/magic-index.js b/lab/exercises/08-dynamic-programming/magic-index.js similarity index 100% rename from src/exercises/08-dynamic-programming/magic-index.js rename to lab/exercises/08-dynamic-programming/magic-index.js diff --git a/src/exercises/08-dynamic-programming/magic-index.spec.js b/lab/exercises/08-dynamic-programming/magic-index.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/magic-index.spec.js rename to lab/exercises/08-dynamic-programming/magic-index.spec.js diff --git a/src/exercises/08-dynamic-programming/parenthesis.js b/lab/exercises/08-dynamic-programming/parenthesis.js similarity index 100% rename from src/exercises/08-dynamic-programming/parenthesis.js rename to lab/exercises/08-dynamic-programming/parenthesis.js diff --git a/src/exercises/08-dynamic-programming/parenthesis.spec.js b/lab/exercises/08-dynamic-programming/parenthesis.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/parenthesis.spec.js rename to lab/exercises/08-dynamic-programming/parenthesis.spec.js diff --git a/lab/exercises/08-dynamic-programming/permutations.js b/lab/exercises/08-dynamic-programming/permutations.js new file mode 100644 index 00000000..8815eb91 --- /dev/null +++ b/lab/exercises/08-dynamic-programming/permutations.js @@ -0,0 +1,29 @@ +/** + * 8.7 Permutations without Dups: + * + * Write a method to compute all permutations of + * a string of unique characters. 
+ * + * @param string + * @param prefix + * @param memo + * @returns {*} + */ +function permutations(string = '', prefix = '', memo = {}) { + if (string.length < 2) { + return [prefix + string]; + } else if (string.length === 2) { + return [prefix + string, prefix + string[1] + string[0]]; + } else if (memo[string]) { + return memo[string].map(e => prefix + e); + } + let results = []; + for (let i = 0; i < string.length; i++) { + const letter = string[i]; + results = results.concat(permutations(string.replace(letter, ''), letter, memo)); + } + memo[string] = results; + return results.map(e => prefix + e); +} + +module.exports = permutations; diff --git a/src/exercises/08-dynamic-programming/permutations.spec.js b/lab/exercises/08-dynamic-programming/permutations.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/permutations.spec.js rename to lab/exercises/08-dynamic-programming/permutations.spec.js diff --git a/src/exercises/08-dynamic-programming/power-set.js b/lab/exercises/08-dynamic-programming/power-set.js similarity index 100% rename from src/exercises/08-dynamic-programming/power-set.js rename to lab/exercises/08-dynamic-programming/power-set.js diff --git a/src/exercises/08-dynamic-programming/power-set.spec.js b/lab/exercises/08-dynamic-programming/power-set.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/power-set.spec.js rename to lab/exercises/08-dynamic-programming/power-set.spec.js diff --git a/src/exercises/08-dynamic-programming/recursive-multiply.js b/lab/exercises/08-dynamic-programming/recursive-multiply.js similarity index 100% rename from src/exercises/08-dynamic-programming/recursive-multiply.js rename to lab/exercises/08-dynamic-programming/recursive-multiply.js diff --git a/src/exercises/08-dynamic-programming/robot-path.js b/lab/exercises/08-dynamic-programming/robot-path.js similarity index 100% rename from src/exercises/08-dynamic-programming/robot-path.js rename to 
lab/exercises/08-dynamic-programming/robot-path.js diff --git a/src/exercises/08-dynamic-programming/robot-path.spec.js b/lab/exercises/08-dynamic-programming/robot-path.spec.js similarity index 100% rename from src/exercises/08-dynamic-programming/robot-path.spec.js rename to lab/exercises/08-dynamic-programming/robot-path.spec.js diff --git a/lab/exercises/easy/search-insert-position.js b/lab/exercises/easy/search-insert-position.js new file mode 100644 index 00000000..80cc1606 --- /dev/null +++ b/lab/exercises/easy/search-insert-position.js @@ -0,0 +1,42 @@ +/* eslint-disable */ + +/** + * binary search O(log n) + * @param {number[]} nums + * @param {number} target + * @return {number} + */ +var searchInsert = function(nums, target, start = 0) { + if (!nums.length) return start; + const i = parseInt(nums.length / 2, 10); + + if (nums[i] === target) { + return start + i; + } else if (target > nums[i]) { + const newArray = nums.slice(i + 1); + if (!newArray.length) return start + i + 1; + return searchInsert(newArray, target, start + i + 1); + } else { + return searchInsert(nums.slice(0, i), target, start); + } +}; + +// --- + +const assert = require('assert'); +function test() { + assert.equal(searchInsert([], 0), 0); + assert.equal(searchInsert([], 1), 0); + + assert.equal(searchInsert([1], 0), 0); + assert.equal(searchInsert([1], 2), 1); + + assert.equal(searchInsert([1,3,5,6], 5), 2); + assert.equal(searchInsert([1,3,5,6], 2), 1); + assert.equal(searchInsert([1,3,5,6], 7), 4); + assert.equal(searchInsert([1,3,5,6], 0), 0); + + assert.equal(searchInsert([1,3,5], 5), 2); + +} +test(); diff --git a/src/exercises/easy/two-sum/two-sum.js b/lab/exercises/easy/two-sum/two-sum.js similarity index 100% rename from src/exercises/easy/two-sum/two-sum.js rename to lab/exercises/easy/two-sum/two-sum.js diff --git a/src/exercises/easy/two-sum/two-sum.spec.js b/lab/exercises/easy/two-sum/two-sum.spec.js similarity index 100% rename from 
src/exercises/easy/two-sum/two-sum.spec.js rename to lab/exercises/easy/two-sum/two-sum.spec.js diff --git a/lab/exercises/hard/regular-expression-matching.js b/lab/exercises/hard/regular-expression-matching.js new file mode 100644 index 00000000..c289965a --- /dev/null +++ b/lab/exercises/hard/regular-expression-matching.js @@ -0,0 +1,131 @@ +/* eslint-disable */ + +/** + * https://leetcode.com/articles/regular-expression-matching/ + * Pattern: + * '.' Matches any single character. + * '*' Matches zero or more of the preceding element. + * + * @param {string} s + * @param {string} p + * @return {boolean} + */ +var isMatch = function(s, p) { + let si = s.length - 1; + + for(let pi = p.length - 1; pi > 0; pi--) { + switch(p[pi]) { + case '.': + if (si > 0) { si--; } + else { return false; } + break; + case '*': + const char = p[--pi]; + while(si > 0 && (char === '.' || char === s[si])) { + si--; + } + break; + default: + if (si > 0 && p[pi] === s[si]) { + si--; + } else { + return false; + } + } + } + + return si <= 0; +}; + +const assert = require('assert'); +function test() { + assert.equal(isMatch('aa', 'aa'), true); + assert.equal(isMatch('aa', 'a'), false); + + assert.equal(isMatch('aa', 'a.'), true); + assert.equal(isMatch('aaa', 'a.'), false); + assert.equal(isMatch('aaa', 'a..'), true); + + assert.equal(isMatch('', 'a*'), true); + assert.equal(isMatch('a', 'a*'), true); + assert.equal(isMatch('aa', 'a*'), true); + assert.equal(isMatch('aab', 'a*'), false); + assert.equal(isMatch('aab', 'a*b'), true); + + assert.equal(isMatch('aab', 'c*a*b'), true); + assert.equal(isMatch('caab', 'c*a*b'), true); + + assert.equal(isMatch('abc', '.*'), true); + + assert.equal(isMatch('mississippi', 'mis*is*p*.'), false); + assert.equal(isMatch('mississippi', 'mis*is*ip*.'), true); + + assert.equal(isMatch('ab', '.*..'), true); + assert.equal(isMatch('abcz', '.*z'), true); + assert.equal(isMatch('abcz', '.*x'), false); + assert.equal(isMatch('zabc', 'z.*'), true); + 
assert.equal(isMatch('zabc', 'x.*'), false); + assert.equal(isMatch('zabcz', 'z.*z'), true); + assert.equal(isMatch('zabcx', 'z.*z'), false); +} +test(); + + + +///// + +var isMatch3 = function(s, p) { + let pi = 0; + + for(let si = 0; si < s.length; si++) { + switch(p[pi]) { + case '.': + if (pi < p.length) { pi++; } + else { return false; } + break; + case '*': + const last = p[pi - 1]; + if(last === s[si]) { + break; // break switch + } + pi++; // continue to the default case + default: + if (pi < p.length && p[pi] === s[si]) { + pi++; + continue; + } else { + return false; + } + } + } + + return true; +}; + +var isMatch2 = function(s, p) { + let pi = p.length - 1; + + for(let si = s.length - 1; si > 0; si--) { + switch(p[pi]) { + case '.': + if (pi > 0) { pi--; } + else { return false; } + break; + case '*': + const last = p[--pi]; + while(si > 0 && last === s[si]) { + si--; + } + break; + default: + if (pi > 0 && p[pi] === s[si]) { + pi--; + continue; + } else { + return false; + } + } + } + + return true; +}; diff --git a/lab/exercises/medium/next-permutation.js b/lab/exercises/medium/next-permutation.js new file mode 100644 index 00000000..cad4f4a9 --- /dev/null +++ b/lab/exercises/medium/next-permutation.js @@ -0,0 +1,133 @@ +/* eslint-disable */ + +/** + * Runtime O(10^n) + * https://leetcode.com/problems/next-permutation/ + * @param {number[]} nums + * @return {void} Do not return anything, modify nums in-place instead. 
+ */ +var nextPermutation2 = function(nums) { + const current = parseInt(nums.join(''), 10); + const max = Math.pow(10, nums.length); + + for(let next = current + 1; next < max; next++) { + if(isPermutation(nums, next)) { + setNumbersInArray(nums, next); + return; + } + } +}; + +function isPermutation(array, number) { + return array.sort().join('') === numberToArray(number, array.length).sort().join(''); +} + +function numberToArray(num, pad = 0) { + return num.toString().padStart(pad, "0").split('').map(s => +s) +} + +function setNumbersInArray(to, number) { + const from = numberToArray(number, to.length); + + for(let i = 0; i < to.length; i++) { + to[i] = from[i]; + } +} + + +/* + // swap last with previous and check if is bigger + // if not, swap last with previous - 1 recursively and check if bigger + // sort in asc order the rest and check if bigger + +123 +132 +213 +231 +312 +321 +--- +123 +*/ + +// generate all numbers - discard no matching: O(10^n) +// + + +// starting from last find a bigger number than current + + +function nextPermutation(nums) { + + // try to find next + for(let i = nums.length - 2; i >= 0; i--) { + let smallestBigger; + + for(let j = i + 1; j < nums.length; j++) { + if (nums[j] > nums[i]) { + smallestBigger = Math.min(nums[j], smallestBigger || nums[j]); + } + } + + if (smallestBigger) { + const k = nums.lastIndexOf(smallestBigger); + [nums[i], nums[k]] = [nums[k], nums[i]]; // swap + // sort asc starting from i + sort(nums, i + 1); + return; + } + } + + sort(nums); +} + +/** + * Sort in-place from start on + * @param {*} array + * @param {*} start + */ +function sort(array, start = 0) { + for(let i = start; i < array.length; i++) { + for (let j = i + 1; j < array.length; j++) { + if (array[i] > array[j]) { + [array[i], array[j]] = [array[j], array[i]]; + } + } + } +} + +// ----- +const assert = require('assert'); +function test() { + let a; + + a = [100, 4,3,2]; + sort(a, 1); + assert.deepEqual(a, [100, 2, 3, 4]); + + a = [1, 2, 3]; + 
nextPermutation(a); + assert.deepStrictEqual(a, [1, 3, 2]); + + a = [2, 3, 1]; + nextPermutation(a); + assert.deepStrictEqual(a, [3, 1, 2]); + + a = [3, 2, 1]; + nextPermutation(a); + assert.deepEqual(a, [1, 2, 3]); + + a = [0,0,4,2,1,0]; + nextPermutation(a); + assert.deepStrictEqual(a, [0,1,0,0,2,4]); + + // Time Limit Exceeded on leetcode for O(n^10) algorithm + a = [2,2,7,5,4,3,2,2,1]; + nextPermutation(a); + assert.deepStrictEqual(a, [2,3,1,2,2,2,4,5,7]); + + a = [5,4,7,5,3,2]; + nextPermutation(a); + assert.deepEqual(a, [5,5,2,3,4,7]); +} +test(); diff --git a/lab/exercises/medium/permute.js b/lab/exercises/medium/permute.js new file mode 100644 index 00000000..02b656cf --- /dev/null +++ b/lab/exercises/medium/permute.js @@ -0,0 +1,58 @@ +/* eslint-disable */ + +/** + * https://www.youtube.com/watch?v=AfxHGNRtFac + * https://www.geeksforgeeks.org/write-a-c-program-to-print-all-permutations-of-a-given-string/ + * https://leetcode.com/articles/permutations/ + * @param {number[]} nums + * @return {number[][]} + */ +var permute = function (nums) { + var result = []; + backtrack(nums, result); + return result; +}; + +function backtrack(nums, result, index = 0) { + // console.log(Array(first).fill(" ").join(""), JSON.stringify({nums, first})); + if (index === nums.length - 1) { + // console.log(nums); + result.push(nums.slice()); + } else { + for (let current = index; current < nums.length; current++) { + swap(nums, index, current); + backtrack(nums, result, index + 1); + swap(nums, index, current); + } + } +}; + +function swap(array, i, j) { + [array[i], array[j]] = [array[j], array[i]] +} + +// --- + +const assert = require('assert'); + +function test() { + // assert.deepEqual(permute([1]), [ + // [1] + // ]); + // assert.deepEqual(permute([1, 2]), [ + // [1, 2], + // [2, 1] + // ]); + assert.deepEqual(permute([1, 2, 3]), [ + [1, 2, 3], + [1, 3, 2], + [2, 1, 3], + [2, 3, 1], + [3, 2, 1], + [3, 1, 2] + ]); + + console.log(permute(Array.from('art'))); + +} +test(); 
diff --git a/lab/exercises/medium/top-k-frequent-elements.js b/lab/exercises/medium/top-k-frequent-elements.js new file mode 100644 index 00000000..1e7afaad --- /dev/null +++ b/lab/exercises/medium/top-k-frequent-elements.js @@ -0,0 +1,105 @@ +/* eslint-disable */ + +/** + * @param {number[]} nums + * @param {number} k + * @return {number[]} + */ +var topKFrequent = function(nums, k) { + const count = getCount(nums); // O(n) + const frequenciesToNumbers = invertMapKeyValues(count); // O(n) + + // print the last k most frequent + const descendingFrequencies = Array.from(frequenciesToNumbers.keys()).sort((a, b) => b - a); + const valuesByFrequencencies = descendingFrequencies.reduce((array, key) => { + return array.concat(frequenciesToNumbers.get(key)); + },[]); + + // console.log(JSON.stringify({freqToNum: Array.from(freqToNum), f, v})); + + return valuesByFrequencencies.slice(0, k); +}; + +/** + * Get repeated number count + * Runtime: O(n) + * @param {array} nums + * @returns {Map} counts + */ +function getCount(nums) { + const counts = new Map(); + + for(let n of nums) { + if(counts.has(n)) { + counts.set(n, 1 + counts.get(n)); + } else { + counts.set(n, 1); + } + } + + return counts; +} + +/** + * Invert key and values in a Map. 
+ * Similar to _.invert (https://lodash.com/docs/4.17.11#invert) + * @param {*} map + */ +function invertMapKeyValues(map) { + const inverted = new Map(); + + for(let [key, value] of map.entries()) { + if (inverted.has(value)) { + inverted.set(value, inverted.get(value).concat(key)); + } else { + inverted.set(value, [key]); + } + } + + return inverted; +} + +/* +< O(n log n) - linear or log + +*/ + +// hashmap + +// --- + +const assert = require('assert'); +function test() { + assert.deepEqual(topKFrequent([1], 1), [1]); + assert.deepEqual(topKFrequent([1,1,1,2,2,3], 2), [1, 2]); + assert.deepEqual(topKFrequent([3, 1, 4, 4, 5, 2, 6, 1], 2), [1, 4]); + assert.deepEqual(topKFrequent([7, 10, 11, 5, 2, 5, 5, 7, 11, 8, 9], 4).sort(), [5, 11, 7, 10].sort()); + assert.deepEqual(topKFrequent([ + 5,1,-1,-8,-7,8,-5,0,1,10,8,0,-4,3,-1,-1,4,-5,4,-3,0,2,2,2,4,-2,-4,8,-7,-7,2,-8,0,-8,10,8,-8,-2,-9,4,-7,6,6,-1,4,2,8,-3,5,-9,-3,6,-8,-5,5,10,2,-5,-1,-5,1,-3,7,0,8,-2,-3,-1,-5,4,7,-9,0,2,10,4,4,-4,-1,-1,6,-8,-9,-1,9,-9,3,5,1,6,-1,-2,4,2,4,-6,4,4,5,-5 + ], 7).sort(), [ + 4,-1,2,-5,-8,8,0 + ].sort()); +} +test(); + + +/* +{ + freqToNum: Map { + 12 => [4], + 10 => [-1], + 8 => [2], + 7 => [-5], + 6 => [-8, 8, 0], + 5 => [5, -3, -9, 6], + 4 => [1, -7, 10, -2], + 3 => [-4], + 2 => [3, 7], + 1 => [9, -6] + }, + f: [8, 7, 6, 5, 4, 3, 2, 12, 10, 1], + v: [2, -5, -8, 8, 0, 5, -3, -9, 6, 1, -7, 10, -2, -4, 3, 7, 4, -1, 9, -6] +} + +other solutions: https://leetcode.com/submissions/detail/202508293/ +*/ diff --git a/lab/exercises/medium/top-k-frequent-elements2.js b/lab/exercises/medium/top-k-frequent-elements2.js new file mode 100644 index 00000000..505884d3 --- /dev/null +++ b/lab/exercises/medium/top-k-frequent-elements2.js @@ -0,0 +1,105 @@ +/* eslint-disable */ + + +/** + * @param {number[]} nums + * @param {number} k + * @return {number[]} + */ +var topKFrequent2 = function (nums, k) { + + let newObj = {}; + + for (let n of nums) { + if (newObj[n]) { + newObj[n]++; + } else { + newObj[n] 
= 1; + } + } + + return Object.keys(newObj).sort((a, b) => newObj[b] - newObj[a]).splice(0, k); +}; + + +/** + * @param {number[]} nums + * @param {number} k + * @return {number[]} + */ + +var topKFrequent = function (nums, k) { + var obj = {}; + nums.forEach(function (item) { + if (!obj[item]) + obj[item] = 1; + else + obj[item]++; + }); + + var arr = []; + + for (var key in obj) { + arr.push({ + key: key, + value: obj[key] + }); + } + + arr.sort(function (a, b) { + return b.value - a.value; + }); + + var ans = []; + for (var i = 0; i < k; i++) + ans.push(+arr[i].key); + + + // console.log(JSON.stringify({ + // obj, + // arr, + // ans + // })) + + return ans; +}; + +// --- + +const assert = require('assert'); + +function test() { + assert.deepEqual(topKFrequent([1], 1), [1]); + assert.deepEqual(topKFrequent([1, 1, 1, 2, 2, 3], 2), [1, 2]); + assert.deepEqual(topKFrequent([3, 1, 4, 4, 5, 2, 6, 1], 2), [1, 4]); + // assert.deepEqual(topKFrequent([7, 10, 11, 5, 2, 5, 5, 7, 11, 8, 9], 4).sort(), [5, 11, 7, 10].sort()); + // assert.deepEqual(topKFrequent([7, 10, 11, 5, 2, 5, 5, 7, 11, 8, 9], 4).sort(), [5, 7, 11, 2].sort()); // also + assert.deepEqual(topKFrequent([ + 5, 1, -1, -8, -7, 8, -5, 0, 1, 10, 8, 0, -4, 3, -1, -1, 4, -5, 4, -3, 0, 2, 2, 2, 4, -2, -4, 8, -7, -7, 2, -8, 0, -8, 10, 8, -8, -2, -9, 4, -7, 6, 6, -1, 4, 2, 8, -3, 5, -9, -3, 6, -8, -5, 5, 10, 2, -5, -1, -5, 1, -3, 7, 0, 8, -2, -3, -1, -5, 4, 7, -9, 0, 2, 10, 4, 4, -4, -1, -1, 6, -8, -9, -1, 9, -9, 3, 5, 1, 6, -1, -2, 4, 2, 4, -6, 4, 4, 5, -5 + ], 7).sort(), [ + 4, -1, 2, -5, -8, 8, 0 + ].sort()); +} +test(); + + +/* +{ + freqToNum: Map { + 12 => [4], + 10 => [-1], + 8 => [2], + 7 => [-5], + 6 => [-8, 8, 0], + 5 => [5, -3, -9, 6], + 4 => [1, -7, 10, -2], + 3 => [-4], + 2 => [3, 7], + 1 => [9, -6] + }, + f: [8, 7, 6, 5, 4, 3, 2, 12, 10, 1], + v: [2, -5, -8, 8, 0, 5, -3, -9, 6, 1, -7, 10, -2, -4, 3, 7, 4, -1, 9, -6] +} + +other solutions: https://leetcode.com/submissions/detail/202508293/ +*/ diff --git 
a/lab/exercises/medium/zigzag-conversion.js b/lab/exercises/medium/zigzag-conversion.js new file mode 100644 index 00000000..08558f52 --- /dev/null +++ b/lab/exercises/medium/zigzag-conversion.js @@ -0,0 +1,101 @@ +/* eslint-disable */ +// 2d-array; time: O(n); space: O(n^2) + +/** + * @param {string} s + * @param {number} numRows + * @return {string} + */ +var convert = function(s, numRows) { + let data = { + col: 0, + row: 0, + index: 0, + table: [], + word: s, + numRows + } + + while (data.index < s.length) { + data = goDown(data); + data = goDiagonal(data); + } + + console.table(data.table); + return data.table.map(row => row.join('')).join(''); +}; + +function goDown({ col, row, index, table, word, numRows }) { + // console.log('goDown', { col, row, index}) + // console.table(table); + let i; + for(i = 0; row < numRows; i++) { + table[row] = table[row] || []; + table[row][col] = word[index + i]; + row++; + } + return { col, row: (row - 1), index: (index + i), table, word, numRows }; +} + +function goDiagonal({ col, row, index, table, word, numRows }) { + // console.log('goDiagonal', { col, row, index}) + // console.table(table); + let i; + if (row) { + for(i = 0; row !=0; i++) { + row--; + col++; + table[row] = table[row] || []; + table[row][col] = word[index + i]; + } + row++; + index += i; + } else { + col++; + } + + return { col, row, index, table, word, numRows }; +} + +const assert = require('assert'); +function test() { + assert.equal(convert('PAYPALISHIRING', 1), 'PAYPALISHIRING'); + assert.equal(convert('PAYPALISHIRING', 2), 'PYAIHRNAPLSIIG'); + assert.equal(convert('PAYPALISHIRING', 3), 'PAHNAPLSIIGYIR'); + assert.equal(convert('PAYPALISHIRING', 4), 'PINALSIGYAHRPI'); +} +test(); + +/* +char* convert(char* s, int numRows) { + if (numRows == 1) { + return s; + } + int len = strlen(s); + char *buf = (char *) malloc(sizeof(char) * (len + 1)); + int i = 0; + for (int row = 0; row < numRows; row++) { + int step = (numRows - 1) * 2; + int j = 0; + while (j 
+ row < len) { + buf[i++] = s[j + row]; + j += step; + if (row > 0 && row < numRows - 1 && j - row < len) { + buf[i++] = s[j - row]; + } + } + } + buf[len] = '\0'; + return buf; +} +*/ + +// function convert(s, numRows) { +// if (numRows === 1) { return s; } + +// let buffer; +// let i = 0; +// for(let row = 0; row < numRows; row++) { +// const step = (numRows - 1 ) * 2; // Characters in row 0 are located at indexes +// } +// } diff --git a/lab/timsort.java b/lab/timsort.java new file mode 100644 index 00000000..1d4e710a --- /dev/null +++ b/lab/timsort.java @@ -0,0 +1,928 @@ +/* + * Copyright 2009 Google Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ + +package java.util; + +/** + * A stable, adaptive, iterative mergesort that requires far fewer than + * n lg(n) comparisons when running on partially sorted arrays, while + * offering performance comparable to a traditional mergesort when run + * on random arrays. Like all proper mergesorts, this sort is stable and + * runs O(n log n) time (worst case). In the worst case, this sort requires + * temporary storage space for n/2 object references; in the best case, + * it requires only a small constant amount of space. + * + * This implementation was adapted from Tim Peters's list sort for + * Python, which is described in detail here: + * + * http://svn.python.org/projects/python/trunk/Objects/listsort.txt + * + * Tim's C code may be found here: + * + * http://svn.python.org/projects/python/trunk/Objects/listobject.c + * + * The underlying techniques are described in this paper (and may have + * even earlier origins): + * + * "Optimistic Sorting and Information Theoretic Complexity" + * Peter McIlroy + * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), + * pp 467-474, Austin, Texas, 25-27 January 1993. + * + * While the API to this class consists solely of static methods, it is + * (privately) instantiable; a TimSort instance holds the state of an ongoing + * sort, assuming the input array is large enough to warrant the full-blown + * TimSort. Small arrays are sorted in place, using a binary insertion sort. + * + * @author Josh Bloch + */ +class TimSort { + /** + * This is the minimum sized sequence that will be merged. Shorter + * sequences will be lengthened by calling binarySort. If the entire + * array is less than this length, no merges will be performed. + * + * This constant should be a power of two. It was 64 in Tim Peter's C + * implementation, but 32 was empirically determined to work better in + * this implementation. 
In the unlikely event that you set this constant + * to be a number that's not a power of two, you'll need to change the + * {@link #minRunLength} computation. + * + * If you decrease this constant, you must change the stackLen + * computation in the TimSort constructor, or you risk an + * ArrayOutOfBounds exception. See listsort.txt for a discussion + * of the minimum stack length required as a function of the length + * of the array being sorted and the minimum merge sequence length. + */ + private static final int MIN_MERGE = 32; + + /** + * The array being sorted. + */ + private final T[] a; + + /** + * The comparator for this sort. + */ + private final Comparator c; + + /** + * When we get into galloping mode, we stay there until both runs win less + * often than MIN_GALLOP consecutive times. + */ + private static final int MIN_GALLOP = 7; + + /** + * This controls when we get *into* galloping mode. It is initialized + * to MIN_GALLOP. The mergeLo and mergeHi methods nudge it higher for + * random data, and lower for highly structured data. + */ + private int minGallop = MIN_GALLOP; + + /** + * Maximum initial size of tmp array, which is used for merging. The array + * can grow to accommodate demand. + * + * Unlike Tim's original C version, we do not allocate this much storage + * when sorting smaller arrays. This change was required for performance. + */ + private static final int INITIAL_TMP_STORAGE_LENGTH = 256; + + /** + * Temp storage for merges. + */ + private T[] tmp; // Actual runtime type will be Object[], regardless of T + + /** + * A stack of pending runs yet to be merged. Run i starts at + * address base[i] and extends for len[i] elements. It's always + * true (so long as the indices are in bounds) that: + * + * runBase[i] + runLen[i] == runBase[i + 1] + * + * so we could cut the storage for this, but it's a minor amount, + * and keeping all the info explicit simplifies the code. 
+ */ + private int stackSize = 0; // Number of pending runs on stack + private final int[] runBase; + private final int[] runLen; + + /** + * Creates a TimSort instance to maintain the state of an ongoing sort. + * + * @param a the array to be sorted + * @param c the comparator to determine the order of the sort + */ + private TimSort(T[] a, Comparator c) { + this.a = a; + this.c = c; + + // Allocate temp storage (which may be increased later if necessary) + int len = a.length; + @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"}) + T[] newArray = (T[]) new Object[len < 2 * INITIAL_TMP_STORAGE_LENGTH ? + len >>> 1 : INITIAL_TMP_STORAGE_LENGTH]; + tmp = newArray; + + /* + * Allocate runs-to-be-merged stack (which cannot be expanded). The + * stack length requirements are described in listsort.txt. The C + * version always uses the same stack length (85), but this was + * measured to be too expensive when sorting "mid-sized" arrays (e.g., + * 100 elements) in Java. Therefore, we use smaller (but sufficiently + * large) stack lengths for smaller arrays. The "magic numbers" in the + * computation below must be changed if MIN_MERGE is decreased. See + * the MIN_MERGE declaration above for more information. + */ + int stackLen = (len < 120 ? 5 : + len < 1542 ? 10 : + len < 119151 ? 19 : 40); + runBase = new int[stackLen]; + runLen = new int[stackLen]; + } + + /* + * The next two methods (which are package private and static) constitute + * the entire API of this class. Each of these methods obeys the contract + * of the public method with the same signature in java.util.Arrays. 
+ */ + + static void sort(T[] a, Comparator c) { + sort(a, 0, a.length, c); + } + + static void sort(T[] a, int lo, int hi, Comparator c) { + if (c == null) { + Arrays.sort(a, lo, hi); + return; + } + + rangeCheck(a.length, lo, hi); + int nRemaining = hi - lo; + if (nRemaining < 2) + return; // Arrays of size 0 and 1 are always sorted + + // If array is small, do a "mini-TimSort" with no merges + if (nRemaining < MIN_MERGE) { + int initRunLen = countRunAndMakeAscending(a, lo, hi, c); + binarySort(a, lo, hi, lo + initRunLen, c); + return; + } + + /** + * March over the array once, left to right, finding natural runs, + * extending short natural runs to minRun elements, and merging runs + * to maintain stack invariant. + */ + TimSort ts = new TimSort(a, c); + int minRun = minRunLength(nRemaining); + do { + // Identify next run + int runLen = countRunAndMakeAscending(a, lo, hi, c); + + // If run is short, extend to min(minRun, nRemaining) + if (runLen < minRun) { + int force = nRemaining <= minRun ? nRemaining : minRun; + binarySort(a, lo, lo + force, lo + runLen, c); + runLen = force; + } + + // Push run onto pending-run stack, and maybe merge + ts.pushRun(lo, runLen); + ts.mergeCollapse(); + + // Advance to find next run + lo += runLen; + nRemaining -= runLen; + } while (nRemaining != 0); + + // Merge all remaining runs to complete sort + assert lo == hi; + ts.mergeForceCollapse(); + assert ts.stackSize == 1; + } + + /** + * Sorts the specified portion of the specified array using a binary + * insertion sort. This is the best method for sorting small numbers + * of elements. It requires O(n log n) compares, but O(n^2) data + * movement (worst case). + * + * If the initial part of the specified range is already sorted, + * this method can take advantage of it: the method assumes that the + * elements from index {@code lo}, inclusive, to {@code start}, + * exclusive are already sorted. 
+ * + * @param a the array in which a range is to be sorted + * @param lo the index of the first element in the range to be sorted + * @param hi the index after the last element in the range to be sorted + * @param start the index of the first element in the range that is + * not already known to be sorted (@code lo <= start <= hi} + * @param c comparator to used for the sort + */ + @SuppressWarnings("fallthrough") + private static void binarySort(T[] a, int lo, int hi, int start, + Comparator c) { + assert lo <= start && start <= hi; + if (start == lo) + start++; + for ( ; start < hi; start++) { + T pivot = a[start]; + + // Set left (and right) to the index where a[start] (pivot) belongs + int left = lo; + int right = start; + assert left <= right; + /* + * Invariants: + * pivot >= all in [lo, left). + * pivot < all in [right, start). + */ + while (left < right) { + int mid = (left + right) >>> 1; + if (c.compare(pivot, a[mid]) < 0) + right = mid; + else + left = mid + 1; + } + assert left == right; + + /* + * The invariants still hold: pivot >= all in [lo, left) and + * pivot < all in [left, start), so pivot belongs at left. Note + * that if there are elements equal to pivot, left points to the + * first slot after them -- that's why this sort is stable. + * Slide elements over to make room to make room for pivot. + */ + int n = start - left; // The number of elements to move + // Switch is just an optimization for arraycopy in default case + switch(n) { + case 2: a[left + 2] = a[left + 1]; + case 1: a[left + 1] = a[left]; + break; + default: System.arraycopy(a, left, a, left + 1, n); + } + a[left] = pivot; + } + } + + /** + * Returns the length of the run beginning at the specified position in + * the specified array and reverses the run if it is descending (ensuring + * that the run will always be ascending when the method returns). + * + * A run is the longest ascending sequence with: + * + * a[lo] <= a[lo + 1] <= a[lo + 2] <= ... 
+ * + * or the longest descending sequence with: + * + * a[lo] > a[lo + 1] > a[lo + 2] > ... + * + * For its intended use in a stable mergesort, the strictness of the + * definition of "descending" is needed so that the call can safely + * reverse a descending sequence without violating stability. + * + * @param a the array in which a run is to be counted and possibly reversed + * @param lo index of the first element in the run + * @param hi index after the last element that may be contained in the run. + It is required that @code{lo < hi}. + * @param c the comparator to used for the sort + * @return the length of the run beginning at the specified position in + * the specified array + */ + private static int countRunAndMakeAscending(T[] a, int lo, int hi, + Comparator c) { + assert lo < hi; + int runHi = lo + 1; + if (runHi == hi) + return 1; + + // Find end of run, and reverse range if descending + if (c.compare(a[runHi++], a[lo]) < 0) { // Descending + while(runHi < hi && c.compare(a[runHi], a[runHi - 1]) < 0) + runHi++; + reverseRange(a, lo, runHi); + } else { // Ascending + while (runHi < hi && c.compare(a[runHi], a[runHi - 1]) >= 0) + runHi++; + } + + return runHi - lo; + } + + /** + * Reverse the specified range of the specified array. + * + * @param a the array in which a range is to be reversed + * @param lo the index of the first element in the range to be reversed + * @param hi the index after the last element in the range to be reversed + */ + private static void reverseRange(Object[] a, int lo, int hi) { + hi--; + while (lo < hi) { + Object t = a[lo]; + a[lo++] = a[hi]; + a[hi--] = t; + } + } + + /** + * Returns the minimum acceptable run length for an array of the specified + * length. Natural runs shorter than this will be extended with + * {@link #binarySort}. + * + * Roughly speaking, the computation is: + * + * If n < MIN_MERGE, return n (it's too small to bother with fancy stuff). + * Else if n is an exact power of 2, return MIN_MERGE/2. 
+ * Else return an int k, MIN_MERGE/2 <= k <= MIN_MERGE, such that n/k + * is close to, but strictly less than, an exact power of 2. + * + * For the rationale, see listsort.txt. + * + * @param n the length of the array to be sorted + * @return the length of the minimum run to be merged + */ + private static int minRunLength(int n) { + assert n >= 0; + int r = 0; // Becomes 1 if any 1 bits are shifted off + while (n >= MIN_MERGE) { + r |= (n & 1); + n >>= 1; + } + return n + r; + } + + /** + * Pushes the specified run onto the pending-run stack. + * + * @param runBase index of the first element in the run + * @param runLen the number of elements in the run + */ + private void pushRun(int runBase, int runLen) { + this.runBase[stackSize] = runBase; + this.runLen[stackSize] = runLen; + stackSize++; + } + + /** + * Examines the stack of runs waiting to be merged and merges adjacent runs + * until the stack invariants are reestablished: + * + * 1. runLen[i - 3] > runLen[i - 2] + runLen[i - 1] + * 2. runLen[i - 2] > runLen[i - 1] + * + * This method is called each time a new run is pushed onto the stack, + * so the invariants are guaranteed to hold for i < stackSize upon + * entry to the method. + */ + private void mergeCollapse() { + while (stackSize > 1) { + int n = stackSize - 2; + if (n > 0 && runLen[n-1] <= runLen[n] + runLen[n+1]) { + if (runLen[n - 1] < runLen[n + 1]) + n--; + mergeAt(n); + } else if (runLen[n] <= runLen[n + 1]) { + mergeAt(n); + } else { + break; // Invariant is established + } + } + } + + /** + * Merges all runs on the stack until only one remains. This method is + * called once, to complete the sort. + */ + private void mergeForceCollapse() { + while (stackSize > 1) { + int n = stackSize - 2; + if (n > 0 && runLen[n - 1] < runLen[n + 1]) + n--; + mergeAt(n); + } + } + + /** + * Merges the two runs at stack indices i and i+1. Run i must be + * the penultimate or antepenultimate run on the stack. 
In other words, + * i must be equal to stackSize-2 or stackSize-3. + * + * @param i stack index of the first of the two runs to merge + */ + private void mergeAt(int i) { + assert stackSize >= 2; + assert i >= 0; + assert i == stackSize - 2 || i == stackSize - 3; + + int base1 = runBase[i]; + int len1 = runLen[i]; + int base2 = runBase[i + 1]; + int len2 = runLen[i + 1]; + assert len1 > 0 && len2 > 0; + assert base1 + len1 == base2; + + /* + * Record the length of the combined runs; if i is the 3rd-last + * run now, also slide over the last run (which isn't involved + * in this merge). The current run (i+1) goes away in any case. + */ + runLen[i] = len1 + len2; + if (i == stackSize - 3) { + runBase[i + 1] = runBase[i + 2]; + runLen[i + 1] = runLen[i + 2]; + } + stackSize--; + + /* + * Find where the first element of run2 goes in run1. Prior elements + * in run1 can be ignored (because they're already in place). + */ + int k = gallopRight(a[base2], a, base1, len1, 0, c); + assert k >= 0; + base1 += k; + len1 -= k; + if (len1 == 0) + return; + + /* + * Find where the last element of run1 goes in run2. Subsequent elements + * in run2 can be ignored (because they're already in place). + */ + len2 = gallopLeft(a[base1 + len1 - 1], a, base2, len2, len2 - 1, c); + assert len2 >= 0; + if (len2 == 0) + return; + + // Merge remaining runs, using tmp array with min(len1, len2) elements + if (len1 <= len2) + mergeLo(base1, len1, base2, len2); + else + mergeHi(base1, len1, base2, len2); + } + + /** + * Locates the position at which to insert the specified key into the + * specified sorted range; if the range contains an element equal to key, + * returns the index of the leftmost equal element. + * + * @param key the key whose insertion point to search for + * @param a the array in which to search + * @param base the index of the first element in the range + * @param len the length of the range; must be > 0 + * @param hint the index at which to begin the search, 0 <= hint < n. 
+ * The closer hint is to the result, the faster this method will run. + * @param c the comparator used to order the range, and to search + * @return the int k, 0 <= k <= n such that a[b + k - 1] < key <= a[b + k], + * pretending that a[b - 1] is minus infinity and a[b + n] is infinity. + * In other words, key belongs at index b + k; or in other words, + * the first k elements of a should precede key, and the last n - k + * should follow it. + */ + private static int gallopLeft(T key, T[] a, int base, int len, int hint, + Comparator c) { + assert len > 0 && hint >= 0 && hint < len; + int lastOfs = 0; + int ofs = 1; + if (c.compare(key, a[base + hint]) > 0) { + // Gallop right until a[base+hint+lastOfs] < key <= a[base+hint+ofs] + int maxOfs = len - hint; + while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) > 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to base + lastOfs += hint; + ofs += hint; + } else { // key <= a[base + hint] + // Gallop left until a[base+hint-ofs] < key <= a[base+hint-lastOfs] + final int maxOfs = hint + 1; + while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) <= 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to base + int tmp = lastOfs; + lastOfs = hint - ofs; + ofs = hint - tmp; + } + assert -1 <= lastOfs && lastOfs < ofs && ofs <= len; + + /* + * Now a[base+lastOfs] < key <= a[base+ofs], so key belongs somewhere + * to the right of lastOfs but no farther right than ofs. Do a binary + * search, with invariant a[base + lastOfs - 1] < key <= a[base + ofs]. 
+ */ + lastOfs++; + while (lastOfs < ofs) { + int m = lastOfs + ((ofs - lastOfs) >>> 1); + + if (c.compare(key, a[base + m]) > 0) + lastOfs = m + 1; // a[base + m] < key + else + ofs = m; // key <= a[base + m] + } + assert lastOfs == ofs; // so a[base + ofs - 1] < key <= a[base + ofs] + return ofs; + } + + /** + * Like gallopLeft, except that if the range contains an element equal to + * key, gallopRight returns the index after the rightmost equal element. + * + * @param key the key whose insertion point to search for + * @param a the array in which to search + * @param base the index of the first element in the range + * @param len the length of the range; must be > 0 + * @param hint the index at which to begin the search, 0 <= hint < n. + * The closer hint is to the result, the faster this method will run. + * @param c the comparator used to order the range, and to search + * @return the int k, 0 <= k <= n such that a[b + k - 1] <= key < a[b + k] + */ + private static int gallopRight(T key, T[] a, int base, int len, + int hint, Comparator c) { + assert len > 0 && hint >= 0 && hint < len; + + int ofs = 1; + int lastOfs = 0; + if (c.compare(key, a[base + hint]) < 0) { + // Gallop left until a[b+hint - ofs] <= key < a[b+hint - lastOfs] + int maxOfs = hint + 1; + while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) < 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to b + int tmp = lastOfs; + lastOfs = hint - ofs; + ofs = hint - tmp; + } else { // a[b + hint] <= key + // Gallop right until a[b+hint + lastOfs] <= key < a[b+hint + ofs] + int maxOfs = len - hint; + while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) >= 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to b + lastOfs += hint; + ofs += hint; + } + assert -1 <= lastOfs 
&& lastOfs < ofs && ofs <= len; + + /* + * Now a[b + lastOfs] <= key < a[b + ofs], so key belongs somewhere to + * the right of lastOfs but no farther right than ofs. Do a binary + * search, with invariant a[b + lastOfs - 1] <= key < a[b + ofs]. + */ + lastOfs++; + while (lastOfs < ofs) { + int m = lastOfs + ((ofs - lastOfs) >>> 1); + + if (c.compare(key, a[base + m]) < 0) + ofs = m; // key < a[b + m] + else + lastOfs = m + 1; // a[b + m] <= key + } + assert lastOfs == ofs; // so a[b + ofs - 1] <= key < a[b + ofs] + return ofs; + } + + /** + * Merges two adjacent runs in place, in a stable fashion. The first + * element of the first run must be greater than the first element of the + * second run (a[base1] > a[base2]), and the last element of the first run + * (a[base1 + len1-1]) must be greater than all elements of the second run. + * + * For performance, this method should be called only when len1 <= len2; + * its twin, mergeHi should be called if len1 >= len2. (Either method + * may be called if len1 == len2.) 
+ * + * @param base1 index of first element in first run to be merged + * @param len1 length of first run to be merged (must be > 0) + * @param base2 index of first element in second run to be merged + * (must be aBase + aLen) + * @param len2 length of second run to be merged (must be > 0) + */ + private void mergeLo(int base1, int len1, int base2, int len2) { + assert len1 > 0 && len2 > 0 && base1 + len1 == base2; + + // Copy first run into temp array + T[] a = this.a; // For performance + T[] tmp = ensureCapacity(len1); + System.arraycopy(a, base1, tmp, 0, len1); + + int cursor1 = 0; // Indexes into tmp array + int cursor2 = base2; // Indexes int a + int dest = base1; // Indexes int a + + // Move first element of second run and deal with degenerate cases + a[dest++] = a[cursor2++]; + if (--len2 == 0) { + System.arraycopy(tmp, cursor1, a, dest, len1); + return; + } + if (len1 == 1) { + System.arraycopy(a, cursor2, a, dest, len2); + a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge + return; + } + + Comparator c = this.c; // Use local variable for performance + int minGallop = this.minGallop; // " " " " " + outer: + while (true) { + int count1 = 0; // Number of times in a row that first run won + int count2 = 0; // Number of times in a row that second run won + + /* + * Do the straightforward thing until (if ever) one run starts + * winning consistently. + */ + do { + assert len1 > 1 && len2 > 0; + if (c.compare(a[cursor2], tmp[cursor1]) < 0) { + a[dest++] = a[cursor2++]; + count2++; + count1 = 0; + if (--len2 == 0) + break outer; + } else { + a[dest++] = tmp[cursor1++]; + count1++; + count2 = 0; + if (--len1 == 1) + break outer; + } + } while ((count1 | count2) < minGallop); + + /* + * One run is winning so consistently that galloping may be a + * huge win. So try that, and continue galloping until (if ever) + * neither run appears to be winning consistently anymore. 
+ */ + do { + assert len1 > 1 && len2 > 0; + count1 = gallopRight(a[cursor2], tmp, cursor1, len1, 0, c); + if (count1 != 0) { + System.arraycopy(tmp, cursor1, a, dest, count1); + dest += count1; + cursor1 += count1; + len1 -= count1; + if (len1 <= 1) // len1 == 1 || len1 == 0 + break outer; + } + a[dest++] = a[cursor2++]; + if (--len2 == 0) + break outer; + + count2 = gallopLeft(tmp[cursor1], a, cursor2, len2, 0, c); + if (count2 != 0) { + System.arraycopy(a, cursor2, a, dest, count2); + dest += count2; + cursor2 += count2; + len2 -= count2; + if (len2 == 0) + break outer; + } + a[dest++] = tmp[cursor1++]; + if (--len1 == 1) + break outer; + minGallop--; + } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); + if (minGallop < 0) + minGallop = 0; + minGallop += 2; // Penalize for leaving gallop mode + } // End of "outer" loop + this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field + + if (len1 == 1) { + assert len2 > 0; + System.arraycopy(a, cursor2, a, dest, len2); + a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge + } else if (len1 == 0) { + throw new IllegalArgumentException( + "Comparison method violates its general contract!"); + } else { + assert len2 == 0; + assert len1 > 1; + System.arraycopy(tmp, cursor1, a, dest, len1); + } + } + + /** + * Like mergeLo, except that this method should be called only if + * len1 >= len2; mergeLo should be called if len1 <= len2. (Either method + * may be called if len1 == len2.) 
+ * + * @param base1 index of first element in first run to be merged + * @param len1 length of first run to be merged (must be > 0) + * @param base2 index of first element in second run to be merged + * (must be aBase + aLen) + * @param len2 length of second run to be merged (must be > 0) + */ + private void mergeHi(int base1, int len1, int base2, int len2) { + assert len1 > 0 && len2 > 0 && base1 + len1 == base2; + + // Copy second run into temp array + T[] a = this.a; // For performance + T[] tmp = ensureCapacity(len2); + System.arraycopy(a, base2, tmp, 0, len2); + + int cursor1 = base1 + len1 - 1; // Indexes into a + int cursor2 = len2 - 1; // Indexes into tmp array + int dest = base2 + len2 - 1; // Indexes into a + + // Move last element of first run and deal with degenerate cases + a[dest--] = a[cursor1--]; + if (--len1 == 0) { + System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2); + return; + } + if (len2 == 1) { + dest -= len1; + cursor1 -= len1; + System.arraycopy(a, cursor1 + 1, a, dest + 1, len1); + a[dest] = tmp[cursor2]; + return; + } + + Comparator c = this.c; // Use local variable for performance + int minGallop = this.minGallop; // " " " " " + outer: + while (true) { + int count1 = 0; // Number of times in a row that first run won + int count2 = 0; // Number of times in a row that second run won + + /* + * Do the straightforward thing until (if ever) one run + * appears to win consistently. + */ + do { + assert len1 > 0 && len2 > 1; + if (c.compare(tmp[cursor2], a[cursor1]) < 0) { + a[dest--] = a[cursor1--]; + count1++; + count2 = 0; + if (--len1 == 0) + break outer; + } else { + a[dest--] = tmp[cursor2--]; + count2++; + count1 = 0; + if (--len2 == 1) + break outer; + } + } while ((count1 | count2) < minGallop); + + /* + * One run is winning so consistently that galloping may be a + * huge win. So try that, and continue galloping until (if ever) + * neither run appears to be winning consistently anymore. 
+ */ + do { + assert len1 > 0 && len2 > 1; + count1 = len1 - gallopRight(tmp[cursor2], a, base1, len1, len1 - 1, c); + if (count1 != 0) { + dest -= count1; + cursor1 -= count1; + len1 -= count1; + System.arraycopy(a, cursor1 + 1, a, dest + 1, count1); + if (len1 == 0) + break outer; + } + a[dest--] = tmp[cursor2--]; + if (--len2 == 1) + break outer; + + count2 = len2 - gallopLeft(a[cursor1], tmp, 0, len2, len2 - 1, c); + if (count2 != 0) { + dest -= count2; + cursor2 -= count2; + len2 -= count2; + System.arraycopy(tmp, cursor2 + 1, a, dest + 1, count2); + if (len2 <= 1) // len2 == 1 || len2 == 0 + break outer; + } + a[dest--] = a[cursor1--]; + if (--len1 == 0) + break outer; + minGallop--; + } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); + if (minGallop < 0) + minGallop = 0; + minGallop += 2; // Penalize for leaving gallop mode + } // End of "outer" loop + this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field + + if (len2 == 1) { + assert len1 > 0; + dest -= len1; + cursor1 -= len1; + System.arraycopy(a, cursor1 + 1, a, dest + 1, len1); + a[dest] = tmp[cursor2]; // Move first elt of run2 to front of merge + } else if (len2 == 0) { + throw new IllegalArgumentException( + "Comparison method violates its general contract!"); + } else { + assert len1 == 0; + assert len2 > 0; + System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2); + } + } + + /** + * Ensures that the external array tmp has at least the specified + * number of elements, increasing its size if necessary. The size + * increases exponentially to ensure amortized linear time complexity. 
+ * + * @param minCapacity the minimum required capacity of the tmp array + * @return tmp, whether or not it grew + */ + private T[] ensureCapacity(int minCapacity) { + if (tmp.length < minCapacity) { + // Compute smallest power of 2 > minCapacity + int newSize = minCapacity; + newSize |= newSize >> 1; + newSize |= newSize >> 2; + newSize |= newSize >> 4; + newSize |= newSize >> 8; + newSize |= newSize >> 16; + newSize++; + + if (newSize < 0) // Not bloody likely! + newSize = minCapacity; + else + newSize = Math.min(newSize, a.length >>> 1); + + @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"}) + T[] newArray = (T[]) new Object[newSize]; + tmp = newArray; + } + return tmp; + } + + /** + * Checks that fromIndex and toIndex are in range, and throws an + * appropriate exception if they aren't. + * + * @param arrayLen the length of the array + * @param fromIndex the index of the first element of the range + * @param toIndex the index after the last element of the range + * @throws IllegalArgumentException if fromIndex > toIndex + * @throws ArrayIndexOutOfBoundsException if fromIndex < 0 + * or toIndex > arrayLen + */ + private static void rangeCheck(int arrayLen, int fromIndex, int toIndex) { + if (fromIndex > toIndex) + throw new IllegalArgumentException("fromIndex(" + fromIndex + + ") > toIndex(" + toIndex+")"); + if (fromIndex < 0) + throw new ArrayIndexOutOfBoundsException(fromIndex); + if (toIndex > arrayLen) + throw new ArrayIndexOutOfBoundsException(toIndex); + } +} diff --git a/lab/timsort.py b/lab/timsort.py new file mode 100644 index 00000000..6d6074e8 --- /dev/null +++ b/lab/timsort.py @@ -0,0 +1,679 @@ +""" +Intro +----- +This describes an adaptive, stable, natural mergesort, modestly called +timsort (hey, I earned it ). It has supernatural performance on many +kinds of partially ordered arrays (less than lg(N!) comparisons needed, and +as few as N-1), yet as fast as Python's previous highly tuned samplesort +hybrid on random arrays. 
+ +In a nutshell, the main routine marches over the array once, left to right, +alternately identifying the next run, then merging it into the previous +runs "intelligently". Everything else is complication for speed, and some +hard-won measure of memory efficiency. + + +Comparison with Python's Samplesort Hybrid +------------------------------------------ ++ timsort can require a temp array containing as many as N//2 pointers, + which means as many as 2*N extra bytes on 32-bit boxes. It can be + expected to require a temp array this large when sorting random data; on + data with significant structure, it may get away without using any extra + heap memory. This appears to be the strongest argument against it, but + compared to the size of an object, 2 temp bytes worst-case (also expected- + case for random data) doesn't scare me much. + + It turns out that Perl is moving to a stable mergesort, and the code for + that appears always to require a temp array with room for at least N + pointers. (Note that I wouldn't want to do that even if space weren't an + issue; I believe its efforts at memory frugality also save timsort + significant pointer-copying costs, and allow it to have a smaller working + set.) + ++ Across about four hours of generating random arrays, and sorting them + under both methods, samplesort required about 1.5% more comparisons + (the program is at the end of this file). + ++ In real life, this may be faster or slower on random arrays than + samplesort was, depending on platform quirks. Since it does fewer + comparisons on average, it can be expected to do better the more + expensive a comparison function is. OTOH, it does more data movement + (pointer copying) than samplesort, and that may negate its small + comparison advantage (depending on platform quirks) unless comparison + is very expensive. + ++ On arrays with many kinds of pre-existing order, this blows samplesort out + of the water. 
It's significantly faster than samplesort even on some + cases samplesort was special-casing the snot out of. I believe that lists + very often do have exploitable partial order in real life, and this is the + strongest argument in favor of timsort (indeed, samplesort's special cases + for extreme partial order are appreciated by real users, and timsort goes + much deeper than those, in particular naturally covering every case where + someone has suggested "and it would be cool if list.sort() had a special + case for this too ... and for that ..."). + ++ Here are exact comparison counts across all the tests in sortperf.py, + when run with arguments "15 20 1". + + Column Key: + *sort: random data + \sort: descending data + /sort: ascending data + 3sort: ascending, then 3 random exchanges + +sort: ascending, then 10 random at the end + ~sort: many duplicates + =sort: all equal + !sort: worst case scenario + + First the trivial cases, trivial for samplesort because it special-cased + them, and trivial for timsort because it naturally works on runs. Within + an "n" block, the first line gives the # of compares done by samplesort, + the second line by timsort, and the third line is the percentage by + which the samplesort count exceeds the timsort count: + + n \sort /sort =sort +------- ------ ------ ------ + 32768 32768 32767 32767 samplesort + 32767 32767 32767 timsort + 0.00% 0.00% 0.00% (samplesort - timsort) / timsort + + 65536 65536 65535 65535 + 65535 65535 65535 + 0.00% 0.00% 0.00% + + 131072 131072 131071 131071 + 131071 131071 131071 + 0.00% 0.00% 0.00% + + 262144 262144 262143 262143 + 262143 262143 262143 + 0.00% 0.00% 0.00% + + 524288 524288 524287 524287 + 524287 524287 524287 + 0.00% 0.00% 0.00% + +1048576 1048576 1048575 1048575 + 1048575 1048575 1048575 + 0.00% 0.00% 0.00% + + The algorithms are effectively identical in these cases, except that + timsort does one less compare in \sort. + + Now for the more interesting cases. lg(n!) 
is the information-theoretic + limit for the best any comparison-based sorting algorithm can do on + average (across all permutations). When a method gets significantly + below that, it's either astronomically lucky, or is finding exploitable + structure in the data. + + n lg(n!) *sort 3sort +sort %sort ~sort !sort +------- ------- ------ ------- ------- ------ ------- -------- + 32768 444255 453096 453614 32908 452871 130491 469141 old + 448885 33016 33007 50426 182083 65534 new + 0.94% 1273.92% -0.30% 798.09% -28.33% 615.87% %ch from new + + 65536 954037 972699 981940 65686 973104 260029 1004607 + 962991 65821 65808 101667 364341 131070 + 1.01% 1391.83% -0.19% 857.15% -28.63% 666.47% + + 131072 2039137 2101881 2091491 131232 2092894 554790 2161379 + 2057533 131410 131361 206193 728871 262142 + 2.16% 1491.58% -0.10% 915.02% -23.88% 724.51% + + 262144 4340409 4464460 4403233 262314 4445884 1107842 4584560 + 4377402 262437 262459 416347 1457945 524286 + 1.99% 1577.82% -0.06% 967.83% -24.01% 774.44% + + 524288 9205096 9453356 9408463 524468 9441930 2218577 9692015 + 9278734 524580 524633 837947 2916107 1048574 + 1.88% 1693.52% -0.03% 1026.79% -23.92% 824.30% + +1048576 19458756 19950272 19838588 1048766 19912134 4430649 20434212 + 19606028 1048958 1048941 1694896 5832445 2097150 + 1.76% 1791.27% -0.02% 1074.83% -24.03% 874.38% + + Discussion of cases: + + *sort: There's no structure in random data to exploit, so the theoretical + limit is lg(n!). Both methods get close to that, and timsort is hugging + it (indeed, in a *marginal* sense, it's a spectacular improvement -- + there's only about 1% left before hitting the wall, and timsort knows + darned well it's doing compares that won't pay on random data -- but so + does the samplesort hybrid). For contrast, Hoare's original random-pivot + quicksort does about 39% more compares than the limit, and the median-of-3 + variant about 19% more. 
+ + 3sort, %sort, and !sort: No contest; there's structure in this data, but + not of the specific kinds samplesort special-cases. Note that structure + in !sort wasn't put there on purpose -- it was crafted as a worst case for + a previous quicksort implementation. That timsort nails it came as a + surprise to me (although it's obvious in retrospect). + + +sort: samplesort special-cases this data, and does a few less compares + than timsort. However, timsort runs this case significantly faster on all + boxes we have timings for, because timsort is in the business of merging + runs efficiently, while samplesort does much more data movement in this + (for it) special case. + + ~sort: samplesort's special cases for large masses of equal elements are + extremely effective on ~sort's specific data pattern, and timsort just + isn't going to get close to that, despite that it's clearly getting a + great deal of benefit out of the duplicates (the # of compares is much less + than lg(n!)). ~sort has a perfectly uniform distribution of just 4 + distinct values, and as the distribution gets more skewed, samplesort's + equal-element gimmicks become less effective, while timsort's adaptive + strategies find more to exploit; in a database supplied by Kevin Altis, a + sort on its highly skewed "on which stock exchange does this company's + stock trade?" field ran over twice as fast under timsort. + + However, despite that timsort does many more comparisons on ~sort, and + that on several platforms ~sort runs highly significantly slower under + timsort, on other platforms ~sort runs highly significantly faster under + timsort. No other kind of data has shown this wild x-platform behavior, + and we don't have an explanation for it. The only thing I can think of + that could transform what "should be" highly significant slowdowns into + highly significant speedups on some boxes are catastrophic cache effects + in samplesort. 
+ + But timsort "should be" slower than samplesort on ~sort, so it's hard + to count that it isn't on some boxes as a strike against it . + ++ Here's the highwater mark for the number of heap-based temp slots (4 + bytes each on this box) needed by each test, again with arguments + "15 20 1": + + 2**i *sort \sort /sort 3sort +sort %sort ~sort =sort !sort + 32768 16384 0 0 6256 0 10821 12288 0 16383 + 65536 32766 0 0 21652 0 31276 24576 0 32767 + 131072 65534 0 0 17258 0 58112 49152 0 65535 + 262144 131072 0 0 35660 0 123561 98304 0 131071 + 524288 262142 0 0 31302 0 212057 196608 0 262143 +1048576 524286 0 0 312438 0 484942 393216 0 524287 + + Discussion: The tests that end up doing (close to) perfectly balanced + merges (*sort, !sort) need all N//2 temp slots (or almost all). ~sort + also ends up doing balanced merges, but systematically benefits a lot from + the preliminary pre-merge searches described under "Merge Memory" later. + %sort approaches having a balanced merge at the end because the random + selection of elements to replace is expected to produce an out-of-order + element near the midpoint. \sort, /sort, =sort are the trivial one-run + cases, needing no merging at all. +sort ends up having one very long run + and one very short, and so gets all the temp space it needs from the small + temparray member of the MergeState struct (note that the same would be + true if the new random elements were prefixed to the sorted list instead, + but not if they appeared "in the middle"). 3sort approaches N//3 temp + slots twice, but the run lengths that remain after 3 random exchanges + clearly has very high variance. + + +A detailed description of timsort follows. + +Runs +---- +count_run() returns the # of elements in the next run. A run is either +"ascending", which means non-decreasing: + + a0 <= a1 <= a2 <= ... + +or "descending", which means strictly decreasing: + + a0 > a1 > a2 > ... 
+ +Note that a run is always at least 2 long, unless we start at the array's +last element. + +The definition of descending is strict, because the main routine reverses +a descending run in-place, transforming a descending run into an ascending +run. Reversal is done via the obvious fast "swap elements starting at each +end, and converge at the middle" method, and that can violate stability if +the slice contains any equal elements. Using a strict definition of +descending ensures that a descending run contains distinct elements. + +If an array is random, it's very unlikely we'll see long runs. If a natural +run contains less than minrun elements (see next section), the main loop +artificially boosts it to minrun elements, via a stable binary insertion sort +applied to the right number of array elements following the short natural +run. In a random array, *all* runs are likely to be minrun long as a +result. This has two primary good effects: + +1. Random data strongly tends then toward perfectly balanced (both runs have + the same length) merges, which is the most efficient way to proceed when + data is random. + +2. Because runs are never very short, the rest of the code doesn't make + heroic efforts to shave a few cycles off per-merge overheads. For + example, reasonable use of function calls is made, rather than trying to + inline everything. Since there are no more than N/minrun runs to begin + with, a few "extra" function calls per merge is barely measurable. + + +Computing minrun +---------------- +If N < 64, minrun is N. IOW, binary insertion sort is used for the whole +array then; it's hard to beat that given the overheads of trying something +fancier. + +When N is a power of 2, testing on random data showed that minrun values of +16, 32, 64 and 128 worked about equally well. At 256 the data-movement cost +in binary insertion sort clearly hurt, and at 8 the increase in the number +of function calls clearly hurt. 
Picking *some* power of 2 is important +here, so that the merges end up perfectly balanced (see next section). We +pick 32 as a good value in the sweet range; picking a value at the low end +allows the adaptive gimmicks more opportunity to exploit shorter natural +runs. + +Because sortperf.py only tries powers of 2, it took a long time to notice +that 32 isn't a good choice for the general case! Consider N=2112: + +>>> divmod(2112, 32) +(66, 0) +>>> + +If the data is randomly ordered, we're very likely to end up with 66 runs +each of length 32. The first 64 of these trigger a sequence of perfectly +balanced merges (see next section), leaving runs of lengths 2048 and 64 to +merge at the end. The adaptive gimmicks can do that with fewer than 2048+64 +compares, but it's still more compares than necessary, and-- mergesort's +bugaboo relative to samplesort --a lot more data movement (O(N) copies just +to get 64 elements into place). + +If we take minrun=33 in this case, then we're very likely to end up with 64 +runs each of length 33, and then all merges are perfectly balanced. Better! + +What we want to avoid is picking minrun such that in + + q, r = divmod(N, minrun) + +q is a power of 2 and r>0 (then the last merge only gets r elements into +place, and r < minrun is small compared to N), or q a little larger than a +power of 2 regardless of r (then we've got a case similar to "2112", again +leaving too little work for the last merge to do). + +Instead we pick a minrun in range(32, 65) such that N/minrun is exactly a +power of 2, or if that isn't possible, is close to, but strictly less than, +a power of 2. This is easier to do than it may sound: take the first 6 +bits of N, and add 1 if any of the remaining bits are set. In fact, that +rule covers every case in this section, including small N and exact powers +of 2; merge_compute_minrun() is a deceptively simple function. 
+ + +The Merge Pattern +----------------- +In order to exploit regularities in the data, we're merging on natural +run lengths, and they can become wildly unbalanced. That's a Good Thing +for this sort! It means we have to find a way to manage an assortment of +potentially very different run lengths, though. + +Stability constrains permissible merging patterns. For example, if we have +3 consecutive runs of lengths + + A:10000 B:20000 C:10000 + +we dare not merge A with C first, because if A, B and C happen to contain +a common element, it would get out of order wrt its occurrence(s) in B. The +merging must be done as (A+B)+C or A+(B+C) instead. + +So merging is always done on two consecutive runs at a time, and in-place, +although this may require some temp memory (more on that later). + +When a run is identified, its base address and length are pushed on a stack +in the MergeState struct. merge_collapse() is then called to see whether it +should merge it with preceding run(s). We would like to delay merging as +long as possible in order to exploit patterns that may come up later, but we +like even more to do merging as soon as possible to exploit that the run just +found is still high in the memory hierarchy. We also can't delay merging +"too long" because it consumes memory to remember the runs that are still +unmerged, and the stack has a fixed size. + +What turned out to be a good compromise maintains two invariants on the +stack entries, where A, B and C are the lengths of the three righmost not-yet +merged slices: + +1. A > B+C +2. B > C + +Note that, by induction, #2 implies the lengths of pending runs form a +decreasing sequence. #1 implies that, reading the lengths right to left, +the pending-run lengths grow at least as fast as the Fibonacci numbers. +Therefore the stack can never grow larger than about log_base_phi(N) entries, +where phi = (1+sqrt(5))/2 ~= 1.618. Thus a small # of stack slots suffice +for very large arrays. 
+ +If A <= B+C, the smaller of A and C is merged with B (ties favor C, for the +freshness-in-cache reason), and the new run replaces the A,B or B,C entries; +e.g., if the last 3 entries are + + A:30 B:20 C:10 + +then B is merged with C, leaving + + A:30 BC:30 + +on the stack. Or if they were + + A:500 B:400: C:1000 + +then A is merged with B, leaving + + AB:900 C:1000 + +on the stack. + +In both examples, the stack configuration after the merge still violates +invariant #2, and merge_collapse() goes on to continue merging runs until +both invariants are satisfied. As an extreme case, suppose we didn't do the +minrun gimmick, and natural runs were of lengths 128, 64, 32, 16, 8, 4, 2, +and 2. Nothing would get merged until the final 2 was seen, and that would +trigger 7 perfectly balanced merges. + +The thrust of these rules when they trigger merging is to balance the run +lengths as closely as possible, while keeping a low bound on the number of +runs we have to remember. This is maximally effective for random data, +where all runs are likely to be of (artificially forced) length minrun, and +then we get a sequence of perfectly balanced merges (with, perhaps, some +oddballs at the end). + +OTOH, one reason this sort is so good for partly ordered data has to do +with wildly unbalanced run lengths. + + +Merge Memory +------------ +Merging adjacent runs of lengths A and B in-place is very difficult. +Theoretical constructions are known that can do it, but they're too difficult +and slow for practical use. But if we have temp memory equal to min(A, B), +it's easy. + +If A is smaller (function merge_lo), copy A to a temp array, leave B alone, +and then we can do the obvious merge algorithm left to right, from the temp +area and B, starting the stores into where A used to live. There's always a +free area in the original area comprising a number of elements equal to the +number not yet merged from the temp array (trivially true at the start; +proceed by induction). 
The only tricky bit is that if a comparison raises an +exception, we have to remember to copy the remaining elements back in from +the temp area, lest the array end up with duplicate entries from B. But +that's exactly the same thing we need to do if we reach the end of B first, +so the exit code is pleasantly common to both the normal and error cases. + +If B is smaller (function merge_hi, which is merge_lo's "mirror image"), +much the same, except that we need to merge right to left, copying B into a +temp array and starting the stores at the right end of where B used to live. + +A refinement: When we're about to merge adjacent runs A and B, we first do +a form of binary search (more on that later) to see where B[0] should end up +in A. Elements in A preceding that point are already in their final +positions, effectively shrinking the size of A. Likewise we also search to +see where A[-1] should end up in B, and elements of B after that point can +also be ignored. This cuts the amount of temp memory needed by the same +amount. + +These preliminary searches may not pay off, and can be expected *not* to +repay their cost if the data is random. But they can win huge in all of +time, copying, and memory savings when they do pay, so this is one of the +"per-merge overheads" mentioned above that we're happy to endure because +there is at most one very short run. It's generally true in this algorithm +that we're willing to gamble a little to win a lot, even though the net +expectation is negative for random data. + + +Merge Algorithms +---------------- +merge_lo() and merge_hi() are where the bulk of the time is spent. merge_lo +deals with runs where A <= B, and merge_hi where A > B. They don't know +whether the data is clustered or uniform, but a lovely thing about merging +is that many kinds of clustering "reveal themselves" by how many times in a +row the winning merge element comes from the same run. We'll only discuss +merge_lo here; merge_hi is exactly analogous. 
+ +Merging begins in the usual, obvious way, comparing the first element of A +to the first of B, and moving B[0] to the merge area if it's less than A[0], +else moving A[0] to the merge area. Call that the "one pair at a time" +mode. The only twist here is keeping track of how many times in a row "the +winner" comes from the same run. + +If that count reaches MIN_GALLOP, we switch to "galloping mode". Here +we *search* B for where A[0] belongs, and move over all the B's before +that point in one chunk to the merge area, then move A[0] to the merge +area. Then we search A for where B[0] belongs, and similarly move a +slice of A in one chunk. Then back to searching B for where A[0] belongs, +etc. We stay in galloping mode until both searches find slices to copy +less than MIN_GALLOP elements long, at which point we go back to one-pair- +at-a-time mode. + +A refinement: The MergeState struct contains the value of min_gallop that +controls when we enter galloping mode, initialized to MIN_GALLOP. +merge_lo() and merge_hi() adjust this higher when galloping isn't paying +off, and lower when it is. + + +Galloping +--------- +Still without loss of generality, assume A is the shorter run. In galloping +mode, we first look for A[0] in B. We do this via "galloping", comparing +A[0] in turn to B[0], B[1], B[3], B[7], ..., B[2**j - 1], ..., until finding +the k such that B[2**(k-1) - 1] < A[0] <= B[2**k - 1]. This takes at most +roughly lg(B) comparisons, and, unlike a straight binary search, favors +finding the right spot early in B (more on that later). + +After finding such a k, the region of uncertainty is reduced to 2**(k-1) - 1 +consecutive elements, and a straight binary search requires exactly k-1 +additional comparisons to nail it. Then we copy all the B's up to that +point in one chunk, and then copy A[0]. Note that no matter where A[0] +belongs in B, the combination of galloping + binary search finds it in no +more than about 2*lg(B) comparisons. 
+ +If we did a straight binary search, we could find it in no more than +ceiling(lg(B+1)) comparisons -- but straight binary search takes that many +comparisons no matter where A[0] belongs. Straight binary search thus loses +to galloping unless the run is quite long, and we simply can't guess +whether it is in advance. + +If data is random and runs have the same length, A[0] belongs at B[0] half +the time, at B[1] a quarter of the time, and so on: a consecutive winning +sub-run in B of length k occurs with probability 1/2**(k+1). So long +winning sub-runs are extremely unlikely in random data, and guessing that a +winning sub-run is going to be long is a dangerous game. + +OTOH, if data is lopsided or lumpy or contains many duplicates, long +stretches of winning sub-runs are very likely, and cutting the number of +comparisons needed to find one from O(B) to O(log B) is a huge win. + +Galloping compromises by getting out fast if there isn't a long winning +sub-run, yet finding such very efficiently when they exist. + +I first learned about the galloping strategy in a related context; see: + + "Adaptive Set Intersections, Unions, and Differences" (2000) + Erik D. Demaine, Alejandro López-Ortiz, J. Ian Munro + +and its followup(s). An earlier paper called the same strategy +"exponential search": + + "Optimistic Sorting and Information Theoretic Complexity" + Peter McIlroy + SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), pp + 467-474, Austin, Texas, 25-27 January 1993. + +and it probably dates back to an earlier paper by Bentley and Yao. The +McIlroy paper in particular has good analysis of a mergesort that's +probably strongly related to this one in its galloping strategy. + + +Galloping with a Broken Leg +--------------------------- +So why don't we always gallop? Because it can lose, on two counts: + +1. While we're willing to endure small per-merge overheads, per-comparison + overheads are a different story. 
Calling Yet Another Function per + comparison is expensive, and gallop_left() and gallop_right() are + too long-winded for sane inlining. + +2. Galloping can-- alas --require more comparisons than linear one-at-time + search, depending on the data. + +#2 requires details. If A[0] belongs before B[0], galloping requires 1 +compare to determine that, same as linear search, except it costs more +to call the gallop function. If A[0] belongs right before B[1], galloping +requires 2 compares, again same as linear search. On the third compare, +galloping checks A[0] against B[3], and if it's <=, requires one more +compare to determine whether A[0] belongs at B[2] or B[3]. That's a total +of 4 compares, but if A[0] does belong at B[2], linear search would have +discovered that in only 3 compares, and that's a huge loss! Really. It's +an increase of 33% in the number of compares needed, and comparisons are +expensive in Python. + +index in B where # compares linear # gallop # binary gallop +A[0] belongs search needs compares compares total +---------------- ----------------- -------- -------- ------ + 0 1 1 0 1 + + 1 2 2 0 2 + + 2 3 3 1 4 + 3 4 3 1 4 + + 4 5 4 2 6 + 5 6 4 2 6 + 6 7 4 2 6 + 7 8 4 2 6 + + 8 9 5 3 8 + 9 10 5 3 8 + 10 11 5 3 8 + 11 12 5 3 8 + ... + +In general, if A[0] belongs at B[i], linear search requires i+1 comparisons +to determine that, and galloping a total of 2*floor(lg(i))+2 comparisons. +The advantage of galloping is unbounded as i grows, but it doesn't win at +all until i=6. Before then, it loses twice (at i=2 and i=4), and ties +at the other values. At and after i=6, galloping always wins. + +We can't guess in advance when it's going to win, though, so we do one pair +at a time until the evidence seems strong that galloping may pay. MIN_GALLOP +is 7, and that's pretty strong evidence. However, if the data is random, it +simply will trigger galloping mode purely by luck every now and again, and +it's quite likely to hit one of the losing cases next. 
On the other hand, +in cases like ~sort, galloping always pays, and MIN_GALLOP is larger than it +"should be" then. So the MergeState struct keeps a min_gallop variable +that merge_lo and merge_hi adjust: the longer we stay in galloping mode, +the smaller min_gallop gets, making it easier to transition back to +galloping mode (if we ever leave it in the current merge, and at the +start of the next merge). But whenever the gallop loop doesn't pay, +min_gallop is increased by one, making it harder to transition back +to galloping mode (and again both within a merge and across merges). For +random data, this all but eliminates the gallop penalty: min_gallop grows +large enough that we almost never get into galloping mode. And for cases +like ~sort, min_gallop can fall to as low as 1. This seems to work well, +but in all it's a minor improvement over using a fixed MIN_GALLOP value. + + +Galloping Complication +---------------------- +The description above was for merge_lo. merge_hi has to merge "from the +other end", and really needs to gallop starting at the last element in a run +instead of the first. Galloping from the first still works, but does more +comparisons than it should (this is significant -- I timed it both ways). +For this reason, the gallop_left() and gallop_right() functions have a +"hint" argument, which is the index at which galloping should begin. So +galloping can actually start at any index, and proceed at offsets of 1, 3, +7, 15, ... or -1, -3, -7, -15, ... from the starting index. + +In the code as I type it's always called with either 0 or n-1 (where n is +the # of elements in a run). It's tempting to try to do something fancier, +melding galloping with some form of interpolation search; for example, if +we're merging a run of length 1 with a run of length 10000, index 5000 is +probably a better guess at the final result than either 0 or 9999. 
But +it's unclear how to generalize that intuition usefully, and merging of +wildly unbalanced runs already enjoys excellent performance. + +~sort is a good example of when balanced runs could benefit from a better +hint value: to the extent possible, this would like to use a starting +offset equal to the previous value of acount/bcount. Doing so saves about +10% of the compares in ~sort. However, doing so is also a mixed bag, +hurting other cases. + + +Comparing Average # of Compares on Random Arrays +------------------------------------------------ +[NOTE: This was done when the new algorithm used about 0.1% more compares + on random data than does its current incarnation.] + +Here list.sort() is samplesort, and list.msort() this sort: + +""" +import random +from time import clock as now + +def fill(n): + from random import random + return [random() for i in xrange(n)] + +def mycmp(x, y): + global ncmp + ncmp += 1 + return cmp(x, y) + +def timeit(values, method): + global ncmp + X = values[:] + bound = getattr(X, method) + ncmp = 0 + t1 = now() + bound(mycmp) + t2 = now() + return t2-t1, ncmp + +format = "%5s %9.2f %11d" +f2 = "%5s %9.2f %11.2f" + +def drive(): + count = sst = sscmp = mst = mscmp = nelts = 0 + while True: + n = random.randrange(100000) + nelts += n + x = fill(n) + + t, c = timeit(x, 'sort') + sst += t + sscmp += c + + t, c = timeit(x, 'msort') + mst += t + mscmp += c + + count += 1 + if count % 10: + continue + + print "count", count, "nelts", nelts + print format % ("sort", sst, sscmp) + print format % ("msort", mst, mscmp) + print f2 % ("", (sst-mst)*1e2/mst, (sscmp-mscmp)*1e2/mscmp) + +drive() +""" + +I ran this on Windows and kept using the computer lightly while it was +running. time.clock() is wall-clock time on Windows, with better than +microsecond resolution. samplesort started with a 1.52% #-of-comparisons +disadvantage, fell quickly to 1.48%, and then fluctuated within that small +range. 
Here's the last chunk of output before I killed the job: + +count 2630 nelts 130906543 + sort 6110.80 1937887573 +msort 6002.78 1909389381 + 1.80 1.49 + +We've done nearly 2 billion comparisons apiece at Python speed there, and +that's enough . + +For random arrays of size 2 (yes, there are only 2 interesting ones), +samplesort has a 50%(!) comparison disadvantage. This is a consequence of +samplesort special-casing at most one ascending run at the start, then +falling back to the general case if it doesn't find an ascending run +immediately. The consequence is that it ends up using two compares to sort +[2, 1]. Gratifyingly, timsort doesn't do any special-casing, so had to be +taught how to deal with mixtures of ascending and descending runs +efficiently in all cases. +""" diff --git a/notes.md b/notes.md new file mode 100644 index 00000000..8cef0099 --- /dev/null +++ b/notes.md @@ -0,0 +1,52 @@ +# Roadmap +- [ ] PDF: callouts and emojis are not showing correctly +- [x] Writeup on balancing trees +- [ ] `BinaryTree` implementation on its own. So far, we only have BST. +- [ ] TreeSet should be able to store objects. Does it need a comparator? on BST in case node's values are not just numbers but also objects. +- [ ] Refactor LinkedList.remove(). It's doing to much maybe it can be refactor in terms of removeByPosition and indexOf +- [ ] More algorithm and datastructres! Greedy, Divide and Conquer etc. +- [ ] Algorithms visualizations like https://bost.ocks.org/mike/algorithms/ +- [ ] sorting algorithms needs a comparator. So, it can sort objects as well. Replace `Array.sort` for `mergesort` in `src/algorithms/knapsack-fractional.js` + +# Troubleshooting +Some notes while working on this project + +## Tests +Running one test without changing file +```sh +jest -t '#findNodeAndParent' +``` + +Running one test changing code +```js +it.only('should return with an element and its parent', () => { +// ... 
+}); +``` + +## English Words + +Getting some (200k+) English words are useful for testing and benchmarking. + +```sh +cat /usr/share/dict/words > benchmarks/dict.txt +``` + +## ESLint + + Disabling ESLints +```js +somthing(t) => 1 // eslint-disable-line no-unused-vars +// eslint-disable-next-line no-use-before-define +const thing = new Thing(); + +/*eslint-disable */ +//suppress all warnings between comments +alert('foo'); +/*eslint-enable */ + +/* eslint-disable no-alert, no-console */ +alert('foo'); +console.log('bar'); +/* eslint-enable no-alert */ +``` diff --git a/package-lock.json b/package-lock.json index 4c49ee2d..0bc00a35 100644 --- a/package-lock.json +++ b/package-lock.json @@ -482,6 +482,15 @@ "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", "dev": true }, + "asciidoctor.js": { + "version": "1.5.6", + "resolved": "https://registry.npmjs.org/asciidoctor.js/-/asciidoctor.js-1.5.6.tgz", + "integrity": "sha512-GFBpOZiRih8jW8HEqlwU6ywUOE0XHso1cPTG/4YtNbR+0DxZ9H3aQ7JMTBeSsc82pIiUR9MtWXAHIZ6L3IOeFg==", + "dev": true, + "requires": { + "opal-runtime": "1.0.3" + } + }, "asn1": { "version": "0.2.4", "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", @@ -4281,6 +4290,31 @@ "mimic-fn": "^1.0.0" } }, + "opal-runtime": { + "version": "1.0.3", + "resolved": "http://registry.npmjs.org/opal-runtime/-/opal-runtime-1.0.3.tgz", + "integrity": "sha512-bUcaUjep2qZ1GnctgvQ8AsgQ+U0/uu4vaDDLIkkj6Hk4RxfgKW+qaHE0Kd6WPTBcIy/sjjy8zgDYsRMkYMIi4g==", + "dev": true, + "requires": { + "glob": "6.0.4", + "xmlhttprequest": "1.8.0" + }, + "dependencies": { + "glob": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/glob/-/glob-6.0.4.tgz", + "integrity": "sha1-DwiGD2oVUSey+t1PnOJLGqtuTSI=", + "dev": true, + "requires": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, "optimist": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", 
@@ -5654,6 +5688,15 @@ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", "dev": true }, + "textlint-plugin-asciidoctor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/textlint-plugin-asciidoctor/-/textlint-plugin-asciidoctor-1.0.2.tgz", + "integrity": "sha512-J2PKkk2Kct5Ebug4hpXaA4OiFI+I6MWGaEb3yMwZ6CDtY3xcBKtMZhR1WX0+I5xgfrGgKiUGlde3FR0oS/32FQ==", + "dev": true, + "requires": { + "asciidoctor.js": "^1.5.6-preview.4" + } + }, "throat": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/throat/-/throat-4.1.0.tgz", @@ -6131,6 +6174,12 @@ "integrity": "sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==", "dev": true }, + "xmlhttprequest": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz", + "integrity": "sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw=", + "dev": true + }, "y18n": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", diff --git a/package.json b/package.json index 118f9922..e65d3b64 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { - "name": "algorithms.js", - "version": "1.0.0", - "description": "Algorithms in JS", + "name": "dsa.js", + "version": "0.0.1", + "description": "Data Structures & Algorithms in JS", "main": "./src/data-structures/graphs/graph.js", "dependencies": { "lodash": "4.17.10" @@ -12,15 +12,16 @@ "eslint-config-airbnb-base": "12.1.0", "eslint-plugin-import": "2.12.0", "eslint-plugin-jest": "21.17.0", - "jest": "23.6.0" + "jest": "23.6.0", + "textlint-plugin-asciidoctor": "1.0.2" }, "scripts": { - "test": "jest src/data-structures # jest # mocha src/**/*spec.js # jasmine JASMINE_CONFIG_PATH=jasmine.json # node jasmine-runner.js", - "watch": "jest src/data-structures --watch --coverage", - "coverage": "jest src/data-structures --coverage && open coverage/lcov-report/index.html", + "test": "jest src/ # jest # mocha src/**/*spec.js # jasmine JASMINE_CONFIG_PATH=jasmine.json 
# node jasmine-runner.js", + "watch": "jest src/ --watch --coverage", + "coverage": "jest src/ --coverage && open coverage/lcov-report/index.html", "linter": "npx eslint --fix -f codeframe src/" }, - "keywords": [], - "author": "", + "keywords": ["algorithms", "data structures", "javascript", "array", "linked lists", "binary search trees"], + "author": "Adrian Mejia ", "license": "ISC" } diff --git a/src/algorithms/combination-sum.js b/src/algorithms/combination-sum.js new file mode 100644 index 00000000..8e1cb024 --- /dev/null +++ b/src/algorithms/combination-sum.js @@ -0,0 +1,53 @@ +/** + * @param {number[]} candidates + * @param {number} target + * @return {number[][]} + */ +function combinationSum( + candidates, + target, + solution = [], + current = [], + index = 0, +) { + if (target < 0) { + // By adding another candidate we've gone below zero. + // This would mean that the last candidate was not acceptable. + return solution; + } + + if (target === 0) { + // If after adding the previous candidate our remaining sum + // became zero - we need to save the current combination since it is one + // of the answers we're looking for. + solution.push(current.slice()); + + return solution; + } + + // If we haven't reached zero yet let's continue to add all + // possible candidates that are left. + for (let candidateIndex = index; candidateIndex < candidates.length; candidateIndex += 1) { + const currentCandidate = candidates[candidateIndex]; + + // Let's try to add another candidate. + current.push(currentCandidate); + + // Explore further option with current candidate being added. + combinationSum( + candidates, + target - currentCandidate, + solution, + current, + candidateIndex, + ); + + // BACKTRACKING. + // Let's get back, exclude current candidate and try another ones later. 
+ current.pop(); + } + + return solution; +} + +module.exports = combinationSum; diff --git a/src/algorithms/combination-sum.spec.js b/src/algorithms/combination-sum.spec.js new file mode 100644 index 00000000..1d345760 --- /dev/null +++ b/src/algorithms/combination-sum.spec.js @@ -0,0 +1,33 @@ +const combinationSum = require('./combination-sum.js'); + +describe('combinationSum', () => { + it('should return empty', () => { + expect(combinationSum([], 0)).toEqual([[]]); + }); + + it('should find solution for one item', () => { + expect(combinationSum([1], 1)).toEqual([[1]]); + }); + + it('should use multiple times one value', () => { + expect(combinationSum([1], 2)).toEqual([[1, 1]]); + }); + + it('should not find solution', () => { + expect(combinationSum([2], 1)).toEqual([]); + }); + + it('should find solution using two values', () => { + expect(combinationSum([1, 2], 3)).toEqual([ + [1, 1, 1], + [1, 2], + ]); + }); + + it('should move on with next index', () => { + expect(combinationSum([1, 10, 2], 3)).toEqual([ + [1, 1, 1], + [1, 2], + ]); + }); +}); diff --git a/src/algorithms/fibanacci-dynamic-programming.js b/src/algorithms/fibanacci-dynamic-programming.js new file mode 100644 index 00000000..ed5a2850 --- /dev/null +++ b/src/algorithms/fibanacci-dynamic-programming.js @@ -0,0 +1,22 @@ +// tag::snippet[] +/** + * Get Fibonacci number on the n-th position. 
+ * @param {integer} n position on the sequence
+ * @returns {integer} n-th number
+ */
+function fib(n, memo = new Map()) {
+  if (n < 0) return 0;
+  if (n < 2) return n;
+
+  if (memo.has(n)) {
+    return memo.get(n);
+  }
+
+  const result = fib(n - 1, memo) + fib(n - 2, memo);
+  memo.set(n, result);
+
+  return result;
+}
+// end::snippet[]
+
+module.exports = fib;
diff --git a/src/algorithms/fibonacci-recursive.js b/src/algorithms/fibonacci-recursive.js
new file mode 100644
index 00000000..6735ecf7
--- /dev/null
+++ b/src/algorithms/fibonacci-recursive.js
@@ -0,0 +1,15 @@
+// tag::snippet[]
+/**
+ * Get Fibonacci number on the n-th position.
+ * @param {integer} n position on the sequence
+ * @returns {integer} n-th number
+ */
+function fib(n) {
+  if (n < 0) return 0;
+  if (n < 2) return n;
+
+  return fib(n - 1) + fib(n - 2);
+}
+// end::snippet[]
+
+module.exports = fib;
diff --git a/src/algorithms/fibonacci.js b/src/algorithms/fibonacci.js
new file mode 100644
index 00000000..c94f528f
--- /dev/null
+++ b/src/algorithms/fibonacci.js
@@ -0,0 +1,24 @@
+// tag::snippet[]
+/**
+ * Get Fibonacci number on the n-th position.
+ * @param {integer} n position on the sequence + * @returns {integer} n-th number + */ +function getFibonacci(n) { + if (n < 0) return 0; + if (n < 2) return n; + + let prev = 0; + let result = 1; + + for (let i = 1; i < n; i++) { + const temp = result; + result += prev; + prev = temp; + } + + return result; +} +// end::snippet[] + +module.exports = getFibonacci; diff --git a/src/algorithms/fibonacci.spec.js b/src/algorithms/fibonacci.spec.js new file mode 100644 index 00000000..e9cae4cf --- /dev/null +++ b/src/algorithms/fibonacci.spec.js @@ -0,0 +1,32 @@ +const implementations = [ + 'fibonacci', + 'fibonacci-recursive', + 'fibanacci-dynamic-programming', +]; + +implementations.forEach((fileName) => { + const getFibonacci = require(`./${fileName}`); // eslint-disable-line + + describe(`#getFibonacci (file: ${fileName})`, () => { + it('should return first two fib numbers', () => { + expect(getFibonacci(0)).toBe(0); + expect(getFibonacci(1)).toBe(1); + }); + + it('should calculate 3rd fib number', () => { + expect(getFibonacci(2)).toBe(1); + }); + + it('should calculate 4rd fib number', () => { + expect(getFibonacci(3)).toBe(2); + }); + + it('should calculate 13th fib number', () => { + expect(getFibonacci(12)).toBe(144); + }); + + it('should return 0 for negative numbers', () => { + expect(getFibonacci(-12)).toBe(0); + }); + }); +}); diff --git a/src/algorithms/knapsack-fractional.js b/src/algorithms/knapsack-fractional.js new file mode 100644 index 00000000..237dece2 --- /dev/null +++ b/src/algorithms/knapsack-fractional.js @@ -0,0 +1,34 @@ +// tag::snippet[] +/** + * Solves Bounded Knapsack Problem (BKP) + * You can take fractions or whole part of items. 
+ * @param {Array} input array of objects with the shape {value, weight} + * @param {Number} max maximum weight for knapsack + */ +function solveFractionalKnapsack(input, max) { + let weight = 0; + let value = 0; + const items = []; + + // sort by value/weight ratio + input.sort((a, b) => a.value/a.weight - b.value/b.weight); // eslint-disable-line + + while (input.length && weight < max) { + const bestRatioItem = input.pop(); + + if (weight + bestRatioItem.weight <= max) { + bestRatioItem.proportion = 1; // take item as a whole + } else { // take a fraction of the item + bestRatioItem.proportion = (max - weight) / bestRatioItem.weight; + } + + items.push(bestRatioItem); + weight += bestRatioItem.proportion * bestRatioItem.weight; + value += bestRatioItem.proportion * bestRatioItem.value; + } + + return { weight, value, items }; +} +// end::snippet[] + +module.exports = solveFractionalKnapsack; diff --git a/src/algorithms/knapsack-fractional.spec.js b/src/algorithms/knapsack-fractional.spec.js new file mode 100644 index 00000000..cac397ad --- /dev/null +++ b/src/algorithms/knapsack-fractional.spec.js @@ -0,0 +1,87 @@ +const solveFractionalKnapsack = require('./knapsack-fractional'); + +describe('solveFractionalKnapsack', () => { + it('should solve take fractional items', () => { + const maxWeight = 7; + const items = [ + { value: 1, weight: 1 }, + { value: 4, weight: 3 }, + { value: 5, weight: 4 }, + { value: 7, weight: 5 }, + ]; + + const knapsack = solveFractionalKnapsack(items, maxWeight); + + expect(knapsack.weight).toBeCloseTo(5 + ((2 / 3) * 3)); + expect(knapsack.value).toBeCloseTo(7 + ((2 / 3) * 4)); + expect(knapsack.items.length).toEqual(2); + expect(knapsack.items).toEqual(expect.arrayContaining([ + { value: 7, weight: 5, proportion: 1 }, + { value: 4, weight: 3, proportion: (2 / 3) }, + ])); + }); + + it('should solve whole items', () => { + const maxWeight = 9; + const items = [ + { value: 1, weight: 1 }, // 1/1 = 1 + { value: 4, weight: 3 }, // 4/3 = 
1.33 ✅ + { value: 5, weight: 4 }, // 5/4 = 1.25 + { value: 7, weight: 5 }, // 7/5 = 1.4 ✅ + { value: 6, weight: 1 }, // 6 ✅ + ]; + + const knapsack = solveFractionalKnapsack(items, maxWeight); + + expect(knapsack.items).toEqual(expect.arrayContaining([ + { value: 6, weight: 1, proportion: 1 }, + { value: 7, weight: 5, proportion: 1 }, + { value: 4, weight: 3, proportion: 1 }, + ])); + expect(knapsack.weight).toBeCloseTo(1 + 5 + 3); + expect(knapsack.value).toBeCloseTo(6 + 7 + 4); + expect(knapsack.items.length).toEqual(3); + }); + + it('should take none if max is 0', () => { + const maxWeight = 0; + const items = [ + { value: 1, weight: 1 }, // 1/1 = 1 + ]; + const knapsack = solveFractionalKnapsack(items, maxWeight); + expect(knapsack.items.length).toEqual(0); + expect(knapsack.weight).toBeCloseTo(0); + expect(knapsack.value).toBeCloseTo(0); + }); + + it('should take all if capacity allows it', () => { + const maxWeight = 10; + const items = [ + { value: 1, weight: 1 }, // 1/1 = 1 + ]; + const knapsack = solveFractionalKnapsack(items, maxWeight); + expect(knapsack.items.length).toEqual(1); + expect(knapsack.weight).toBeCloseTo(1); + expect(knapsack.value).toBeCloseTo(1); + }); + + it('should solve take fractional items with non-integer max weight', () => { + const maxWeight = 7.5; + const items = [ + { value: 1, weight: 1 }, + { value: 4, weight: 3 }, + { value: 5, weight: 4 }, + { value: 7, weight: 5 }, + ]; + + const knapsack = solveFractionalKnapsack(items, maxWeight); + + expect(knapsack.weight).toBeCloseTo(7.5); + expect(knapsack.value).toBeCloseTo(7 + ((2.5 / 3) * 4)); + expect(knapsack.items.length).toEqual(2); + expect(knapsack.items).toEqual(expect.arrayContaining([ + { value: 7, weight: 5, proportion: 1 }, + { value: 4, weight: 3, proportion: (2.5 / 3) }, + ])); + }); +}); diff --git a/src/algorithms/permutations-backtracking.js b/src/algorithms/permutations-backtracking.js new file mode 100644 index 00000000..f43292e6 --- /dev/null +++ 
b/src/algorithms/permutations-backtracking.js @@ -0,0 +1,41 @@ +/** + * swap in-place between two elements in an array + * @param {Array} array array to operate on + * @param {Number} index1 + * @param {Number} index2 + */ +function swap(array, index1, index2) { + [array[index1], array[index2]] = [array[index2], array[index1]]; +} + +// tag::snippet[] +/** + * Find all permutations (without duplicates) of a word/array + * + * @param {String|Array} word given string or array + * @param {Array} solution (used by recursion) array with solutions + * @param {Number} start (used by recursion) index to start + * @returns {String[]} all permutations + * + * @example + * permutations('ab') // ['ab', 'ba'] + * permutations([1, 2, 3]) // ['123', '132', '213', '231', '321', '312'] + */ +function permutations(word = '', solution = [], start = 0) { + const array = Array.isArray(word) ? word : Array.from(word); + + if (start === array.length - 1) { // <4> + solution.push(array.join('')); + } else { + for (let index = start; index < array.length; index++) { // <1> + swap(array, start, index); // <2> + permutations(array, solution, start + 1); // <3> + swap(array, start, index); // backtrack // <5> + } + } + + return solution; +} +// end::snippet[] + +module.exports = permutations; diff --git a/src/algorithms/permutations-backtracking.spec.js b/src/algorithms/permutations-backtracking.spec.js new file mode 100644 index 00000000..da03f765 --- /dev/null +++ b/src/algorithms/permutations-backtracking.spec.js @@ -0,0 +1,34 @@ +const permutations = require('./permutations-backtracking.js'); + +describe('permute', () => { + it('should work with nothing', () => { + expect(permutations()).toEqual([]); + }); + + it('should solve one letter word', () => { + expect(permutations('a')).toEqual(['a']); + }); + + it('should solve two letter word', () => { + expect(permutations('ab')).toEqual(['ab', 'ba']); + }); + + it('should solve another two letter word', () => { + 
expect(permutations('op')).toEqual(['op', 'po']); + }); + + it('should work with numbers too', () => { + expect(permutations([1, 2, 3])).toEqual([ + '123', + '132', + '213', + '231', + '321', + '312', + ]); + }); + + it('should work with art', () => { + expect(permutations('art')).toEqual(['art', 'atr', 'rat', 'rta', 'tra', 'tar']); + }); +}); diff --git a/src/algorithms/sorting/bubble-sort.js b/src/algorithms/sorting/bubble-sort.js new file mode 100644 index 00000000..63564131 --- /dev/null +++ b/src/algorithms/sorting/bubble-sort.js @@ -0,0 +1,25 @@ +const { swap } = require('./sorting-common'); + +// tag::sort[] +/** + * Bubble sort + * Runtime: O(n^2) + * @param {Array|Set} collection elements to be sorted + */ +function bubbleSort(collection) { + const array = Array.from(collection); // <1> + + for (let outer = 0; outer < array.length; outer += 1) { // <2> + for (let inner = outer + 1; inner < array.length; inner += 1) { // <3> + if (array[outer] > array[inner]) { // <4> + swap(array, outer, inner); + } + } + } + + return array; +} +// end::sort[] + + +module.exports = bubbleSort; diff --git a/src/algorithms/sorting/insertion-sort.js b/src/algorithms/sorting/insertion-sort.js new file mode 100644 index 00000000..439eaa37 --- /dev/null +++ b/src/algorithms/sorting/insertion-sort.js @@ -0,0 +1,26 @@ +const { swap } = require('./sorting-common'); + +// tag::sort[] +/** + * Sorting by insertion - start from the 2nd element, + * it tries to find any element (to the left) that could be bigger than the current index. + * It will move all the elements that are bigger and insert the current element where it belongs. 
+ * Runtime: O(n^2) + * @param {Array|Set} collection elements to be sorted + */ +function insertionSort(collection) { + const array = Array.from(collection); + + for (let outer = 0; outer < array.length; outer += 1) { + const insert = array[outer]; + + for (let inner = outer - 1; inner >= 0 && array[inner] > insert; inner -= 1) { + swap(array, inner + 1, inner); + } + } + return array; +} +// end::sort[] + + +module.exports = insertionSort; diff --git a/src/algorithms/sorting/merge-sort.js b/src/algorithms/sorting/merge-sort.js new file mode 100644 index 00000000..27068893 --- /dev/null +++ b/src/algorithms/sorting/merge-sort.js @@ -0,0 +1,61 @@ +// tag::merge[] +/** + * Merge two arrays in ascending order + * @param {Array} array1 + * @param {Array} array2 + */ +function merge(array1, array2 = []) { + const mergedLength = array1.length + array2.length; + const mergedArray = Array(mergedLength); + + for (let index = 0, i1 = 0, i2 = 0; index < mergedLength; index += 1) { // <1> + if (i2 >= array2.length || (i1 < array1.length && array1[i1] <= array2[i2])) { + mergedArray[index] = array1[i1]; // <2> + i1 += 1; + } else { + mergedArray[index] = array2[i2]; // <2> + i2 += 1; + } + } + + return mergedArray; // <3> +} +// end::merge[] + +// tag::splitSort[] +/** + * Split array in half recursively until two or less elements are left. + * Sort these two elements and combine them back using the merge function. + * @param {Array} array + */ +function splitSort(array) { + const size = array.length; + // base case + if (size < 2) { + return array; + } else if (size === 2) { + return array[0] < array[1] ? 
array : [array[1], array[0]]; // <2> + } + + // recursive split in half and merge back + const half = Math.ceil(size / 2); + return merge( // <3> + splitSort(array.slice(0, half)), // <1> + splitSort(array.slice(half)), // <1> + ); +} +// end::splitSort[] + +// tag::sort[] +/** + * Merge sort + * Runtime: O(n log n) + * @param {Array|Set} collection elements to be sorted + */ +function mergeSort(collection) { + const array = Array.from(collection); // <1> + return splitSort(array); +} +// end::sort[] + +module.exports = mergeSort; diff --git a/src/algorithms/sorting/quick-sort.js b/src/algorithms/sorting/quick-sort.js new file mode 100644 index 00000000..3326f2c3 --- /dev/null +++ b/src/algorithms/sorting/quick-sort.js @@ -0,0 +1,65 @@ +const { swap } = require('./sorting-common'); + +// tag::partition[] +/** + * Linear-time Partitioning + * Chooses a pivot and re-arrage the array that + * everything on the left is <= pivot and + * everything on the right is > pivot + * + * Runtime: O(n) + * @param {*} array + * @param {*} low start index + * @param {*} high end index + * @returns {integer} pivot index + */ +function partition(array, low, high) { + const pivotInitialIndex = high; // <1> + let pivotIndex = low; // <2> + + for (let current = low; current < high; current += 1) { // <3> + if (array[current] <= array[pivotInitialIndex]) { // <4> + swap(array, current, pivotIndex); + pivotIndex += 1; + } + } + + swap(array, pivotInitialIndex, pivotIndex); + return pivotIndex; +} +// end::partition[] + + +// tag::quickSort[] +/** + * QuickSort - Efficient in-place recursive sorting algorithm. + * Avg. 
Runtime: O(n log n) | Worst: O(n^2) + * @param {*} array + * @param {*} low + * @param {*} high + */ +function quickSort(array, low = 0, high = array.length - 1) { + if (low < high) { // <4> + const partitionIndex = partition(array, low, high); // <1> + quickSort(array, low, partitionIndex - 1); // <2> + quickSort(array, partitionIndex + 1, high); // <3> + } + return array; +} +// end::quickSort[] + + +// tag::sort[] +/** + * Quick sort + * Runtime: O(n log n) + * @param {Array|Set} collection elements to be sorted + */ +function quickSortWrapper(collection) { + const array = Array.from(collection); // <1> + return quickSort(array); +} +// end::sort[] + + +module.exports = quickSortWrapper; diff --git a/src/algorithms/sorting/selection-sort.js b/src/algorithms/sorting/selection-sort.js new file mode 100644 index 00000000..7957329e --- /dev/null +++ b/src/algorithms/sorting/selection-sort.js @@ -0,0 +1,31 @@ +const { swap } = require('./sorting-common'); + +// tag::sort[] +/** + * Selection sort - start from the first element, + * it tries to find any element (to the right) that could be smaller than the current index. + * If it finds one, it will swap the positions. 
+ * Runtime: O(n^2) + * @param {Array|Set} collection elements to be sorted + */ +function selectionSort(collection) { + const array = Array.from(collection); + + for (let outer = 0; outer < array.length; outer += 1) { + let selection = array[outer]; + + for (let inner = outer + 1; inner < array.length; inner += 1) { + const element = array[inner]; + + if (element < selection) { + swap(array, outer, inner); + selection = array[outer]; + } + } + } + + return array; +} +// end::sort[] + +module.exports = selectionSort; diff --git a/src/algorithms/sorting/sorting-common.js b/src/algorithms/sorting/sorting-common.js new file mode 100644 index 00000000..e07913de --- /dev/null +++ b/src/algorithms/sorting/sorting-common.js @@ -0,0 +1,30 @@ +// tag::swap[] +/** + * Swap array elements in place + * Runtime: O(1) + * @param {array} array to be modified + * @param {integer} from index of the first element + * @param {integer} to index of the 2nd element + */ +function swap(array, from, to) { + [array[from], array[to]] = [array[to], array[from]]; // ES6 array destructing +} +// end::swap[] + +/** + * Move an element in an array *from* a postion *to* another. + * Runtime: O(n) + * @param {array} array + * @param {integer} from index of the element to remove (source) + * @param {integer} to index where the removed element would be move (destination) + */ +function moveElement(array, from, to) { + if (from === to + 1) return; + const [elementToInsert] = array.splice(from, 1); // delete from position + array.splice(to + 1, 0, elementToInsert); // insert element in to the position. 
+} + +module.exports = { + swap, + moveElement, +}; diff --git a/src/algorithms/sorting/sorting.spec.js b/src/algorithms/sorting/sorting.spec.js new file mode 100644 index 00000000..923932f6 --- /dev/null +++ b/src/algorithms/sorting/sorting.spec.js @@ -0,0 +1,47 @@ +const sortingAlgorithms = [ + require('./selection-sort'), + require('./insertion-sort'), + require('./bubble-sort'), + require('./merge-sort'), + require('./quick-sort'), +]; + +sortingAlgorithms.forEach((sort) => { + describe(`Sorting with ${sort.name}`, () => { + it('should work with zero numbers', () => { + expect(sort([])).toEqual([]); + }); + + it('should work with one number', () => { + expect(sort([3])).toEqual([3]); + }); + + it('should sort numbers', () => { + expect(sort([3, 5, 0])).toEqual([0, 3, 5]); + }); + + it('should sort with negative numbers', () => { + expect(sort([3, -5, 0])).toEqual([-5, 0, 3]); + }); + + it('should sort with inverse array', () => { + expect(sort([3, 2, 1])).toEqual([1, 2, 3]); + }); + + it('should sort with with already sorted array', () => { + expect(sort([1, 2, 3])).toEqual([1, 2, 3]); + }); + + it('should sort a set', () => { + expect(sort(new Set([3, 1, 2]))).toEqual([1, 2, 3]); + }); + + it('should sort with duplicated values', () => { + expect(sort([1, 3, 2, 1])).toEqual([1, 1, 2, 3]); + }); + + it('should sort longer arrays', () => { + + }); + }); +}); diff --git a/src/data-structures/arrays/array.js b/src/data-structures/arrays/array.js index cf3130b0..2f36f940 100644 --- a/src/data-structures/arrays/array.js +++ b/src/data-structures/arrays/array.js @@ -33,32 +33,34 @@ array.splice(2, 1); // delete 1 element at position 2 array.pop(); // => array: [2, 5, 1, 9, 6] - +// tag::searchByIndex[] /** * Search for array's element by index * + * @example Given array = [2, 5, 1, 9, 6, 7]; + * searchByIndex(array, 3); //↪️ 9 + * searchByIndex(array, 13); //↪️ -1 * @param {array} array * @param {number} index * @returns {any} value or -1 if not found - * @example 
Given array = [2, 5, 1, 9, 6, 7]; - * searchByIndex(array, 3) => 9 - * searchByIndex(array, 13) => -1 */ function searchByIndex(array, index) { return array[index] || -1; } +// end::searchByIndex[] assert.equal(searchByIndex(array, 3), 9); assert.equal(searchByIndex(array, 13), -1); +// tag::searchByValue[] /** * Search for array's element by value * + * @example Given array = [2, 5, 1, 9, 6, 7]; + * searchByValue(array, 9); //↪️ 3 + * searchByValue(array, 13); //↪️ -1 * @param {array} array * @param {any} value - * @example Given array = [2, 5, 1, 9, 6, 7]; - * searchByValue(array, 9) => 3 - * searchByValue(array, 13) => -1 */ function searchByValue(array, value) { for (let index = 0; index < array.length; index++) { @@ -67,6 +69,7 @@ function searchByValue(array, value) { } return -1; } +// end::searchByValue[] assert.equal(searchByValue(array, 9), 3); assert.equal(searchByValue(array, 13), -1); diff --git a/src/data-structures/graphs/graph.js b/src/data-structures/graphs/graph.js index 22d34a25..5917799c 100644 --- a/src/data-structures/graphs/graph.js +++ b/src/data-structures/graphs/graph.js @@ -2,27 +2,11 @@ const Node = require('./node'); const Stack = require('../stacks/stack'); const Queue = require('../queues/queue'); -const HashMap = require('../maps/hash-maps/hashmap'); +const HashMap = require('../maps/hash-maps/hash-map'); +// tag::constructor[] /** - * Graph that uses an adjacent list - * - * Most common operations: - * - Add vertex - * - Add edge - * - Remove vertex - * - Remove edge - * - Query (query if two vertices are connected) - * - * - Graph search (BFS, DFS) - * - * - Find path (between two vertices) - * - Find all paths (between two vertices) - * - Find shortest paths (between two vertices) - * - * https://repl.it/@amejiarosario/graphpy - * http://www.pythontutor.com/visualize.html#mode=edit - https://goo.gl/Xp7Zpm - * + * Graph data structure implemented with an adjacent list */ class Graph { /** @@ -30,66 +14,70 @@ class Graph { * @param 
{Symbol} edgeDirection either `Graph.DIRECTED` or `Graph.UNDIRECTED` */ constructor(edgeDirection = Graph.DIRECTED) { - // this.nodes = new HashMap(); - this.nodes = new Map(); + this.nodes = new HashMap(); this.edgeDirection = edgeDirection; } + // end::constructor[] + // tag::addVertex[] /** * Add a node to the graph. - * Returns the new node or the existing one if it already exits. - * * Runtime: O(1) - * * @param {any} value node's value + * @returns {Node} the new node or the existing one if it already exits. */ addVertex(value) { - if (this.nodes.has(value)) { + if (this.nodes.has(value)) { // <1> return this.nodes.get(value); } - const vertex = new Node(value); - this.nodes.set(value, vertex); + const vertex = new Node(value); // <2> + this.nodes.set(value, vertex); // <3> return vertex; } + // end::addVertex[] + // tag::removeVertex[] /** * Removes node from graph - * + * It also removes the reference of the deleted node from + * anywhere it was adjacent to. * Runtime: O(|V| + |E|) - * * @param {any} value node's value */ removeVertex(value) { - const current = this.nodes.get(value); + const current = this.nodes.get(value); // <1> if (current) { - Array.from(this.nodes.values()).forEach(node => node.removeAdjacent(current)); + Array.from(this.nodes.values()).forEach(node => node.removeAdjacent(current)); // <2> } - return this.nodes.delete(value); + return this.nodes.delete(value); // <3> } + // end::removeVertex[] + // tag::addEdge[] /** * Create a connection between source node and destination node. * If the graph is undirected it will also create the conneciton from destination to destination. 
* If the nodes doesn't exist then it will create them on the fly - * * Runtime: O(1) - * * @param {any} source * @param {any} destination + * @returns {[Node, Node]} source/destination node pair */ addEdge(source, destination) { - const sourceNode = this.addVertex(source); - const destinationNode = this.addVertex(destination); + const sourceNode = this.addVertex(source); // <1> + const destinationNode = this.addVertex(destination); // <1> - sourceNode.addAdjacent(destinationNode); + sourceNode.addAdjacent(destinationNode); // <2> if (this.edgeDirection === Graph.UNDIRECTED) { - destinationNode.addAdjacent(sourceNode); + destinationNode.addAdjacent(sourceNode); // <3> } return [sourceNode, destinationNode]; } + // end::addEdge[] + // tag::removeEdge[] /** * Remove connection between source node and destination. * If the graph is undirected it will also remove the conneciton from destination to destination. @@ -113,7 +101,9 @@ class Graph { return [sourceNode, destinationNode]; } + // end::removeEdge[] + // tag::areAdjacents[] /** * True if two nodes are adjacent to each other * @param {any} source node's value @@ -129,7 +119,9 @@ class Graph { return false; } + // end::areAdjacents[] + // tag::graphSearch[] /** * Depth-first search * Use a stack to visit nodes (LIFO) @@ -168,6 +160,7 @@ class Graph { } } } + // end::graphSearch[] /** * Return true if two nodes are connected and false if not @@ -255,9 +248,16 @@ class Graph { }); return paths; } +// tag::constructor[] } Graph.UNDIRECTED = Symbol('directed graph'); // one-way edges Graph.DIRECTED = Symbol('undirected graph'); // two-ways edges +// end::constructor[] module.exports = Graph; + +/* + * https://repl.it/@amejiarosario/graphpy + * http://www.pythontutor.com/visualize.html#mode=edit - https://goo.gl/Xp7Zpm + */ diff --git a/src/data-structures/graphs/graph.spec.js b/src/data-structures/graphs/graph.spec.js index 82a4c13f..47d154f2 100644 --- a/src/data-structures/graphs/graph.spec.js +++ 
b/src/data-structures/graphs/graph.spec.js @@ -43,18 +43,18 @@ describe('Graph', () => { it('should add node a as adjacent of b', () => { const [a, b] = graph.addEdge('a', 'b'); - expect(a.adjacents.map(getValues)).toEqual(['b']); - expect(b.adjacents.map(getValues)).toEqual([]); + expect(a.getAdjacents().map(getValues)).toEqual(['b']); + expect(b.getAdjacents().map(getValues)).toEqual([]); graph.addEdge('b', 'a'); - expect(b.adjacents.map(getValues)).toEqual(['a']); + expect(b.getAdjacents().map(getValues)).toEqual(['a']); }); it('should add both connection on undirected graph', () => { graph = new Graph(Graph.UNDIRECTED); const [a, b] = graph.addEdge('a', 'b'); - expect(a.adjacents.map(getValues)).toEqual(['b']); - expect(b.adjacents.map(getValues)).toEqual(['a']); + expect(a.getAdjacents().map(getValues)).toEqual(['b']); + expect(b.getAdjacents().map(getValues)).toEqual(['a']); }); it('should add falsy values', () => { @@ -71,23 +71,23 @@ describe('Graph', () => { it('should remove edges if they exist', () => { const [a, b] = graph.removeEdge('a', 'b'); - expect(a.adjacents.map(getValues)).toEqual([]); - expect(b.adjacents.map(getValues)).toEqual([]); + expect(a.getAdjacents().map(getValues)).toEqual([]); + expect(b.getAdjacents().map(getValues)).toEqual([]); }); it('should remove edges with falsy values', () => { const [a, b] = graph.addEdge(0, false); - expect(a.adjacents.map(getValues)).toEqual([false]); - expect(b.adjacents.map(getValues)).toEqual([]); + expect(a.getAdjacents().map(getValues)).toEqual([false]); + expect(b.getAdjacents().map(getValues)).toEqual([]); graph.removeEdge(0, false); - expect(a.adjacents.map(getValues)).toEqual([]); - expect(b.adjacents.map(getValues)).toEqual([]); + expect(a.getAdjacents().map(getValues)).toEqual([]); + expect(b.getAdjacents().map(getValues)).toEqual([]); }); it('should not create node when removing unexisting target', () => { const [a, c] = graph.removeEdge('a', 'c'); expect(graph.nodes.size).toBe(2); - 
expect(a.adjacents.map(getValues)).toEqual(['b']); + expect(a.getAdjacents().map(getValues)).toEqual(['b']); expect(c).toBe(undefined); }); @@ -133,6 +133,8 @@ describe('Graph', () => { let n4; beforeEach(() => { + // 5 + // /^ // 0 -> 1 <- 2 // ^\-> 4 -> 3 graph.addEdge(0, 1); diff --git a/src/data-structures/graphs/node-1.js b/src/data-structures/graphs/node-1.js new file mode 100644 index 00000000..801abef8 --- /dev/null +++ b/src/data-structures/graphs/node-1.js @@ -0,0 +1,53 @@ +// tag::constructor[] +/** + * Graph node/vertex that hold adjacencies nodes + */ +class Node { + constructor(value) { + this.value = value; + this.adjacents = []; // adjacency list + } + // end::constructor[] + + /** + * Add node to adjacency list + * Runtime: O(1) + * @param {Node} node + */ + addAdjacent(node) { + this.adjacents.push(node); + } + + /** + * Remove node from adjacency list + * Runtime: O(n) + * @param {Node} node + * @returns removed node or `undefined` if node was not found + */ + removeAdjacent(node) { + const index = this.adjacents.indexOf(node); + if (index > -1) { + this.adjacents.splice(index, 1); + return node; + } + return undefined; + } + + /** + * Check if a Node is adjacent to other + * Runtime: O(n) + * @param {Node} node + */ + isAdjacent(node) { + return this.adjacents.indexOf(node) > -1; + } + + /** + * Get all adjacent nodes + */ + getAdjacents() { + return this.adjacents; + } +} + +module.exports = Node; diff --git a/src/data-structures/graphs/node.js b/src/data-structures/graphs/node.js index ebf975c3..9e11b1ee 100644 --- a/src/data-structures/graphs/node.js +++ b/src/data-structures/graphs/node.js @@ -1,57 +1,59 @@ +const HashSet = require('../sets/hash-set'); + +// tag::constructor[] /** * Graph node/vertex that hold adjacencies nodes + * For performance, uses a HashSet instead of array for adjacents. 
*/ class Node { constructor(value) { this.value = value; - this.adjacents = []; // adjacency list + this.adjacents = new HashSet(); // adjacency list } + // end::constructor[] + // tag::addAdjacent[] /** - * Add node to adjacency list - * - * Runtime: O(1) - * - * @param {Node} node - */ + * Add node to adjacency list + * Runtime: O(1) + * @param {Node} node + */ addAdjacent(node) { - this.adjacents.push(node); + this.adjacents.add(node); } + // end::addAdjacent[] + // tag::removeAdjacent[] /** - * Remove node from adjacency list - * - * Runtime: O(n) - * - * @param {Node} node - * @returns removed node or `undefined` if node was not found - */ + * Remove node from adjacency list + * Runtime: O(1) + * @param {Node} node + * @returns removed node or `false` if node was not found + */ removeAdjacent(node) { - const index = this.adjacents.indexOf(node); - if (index > -1) { - this.adjacents.splice(index, 1); - return node; - } - return undefined; + return this.adjacents.delete(node); } + // end::removeAdjacent[] + // tag::isAdjacent[] /** - * Check if a Node is adjacent to other - * - * Runtime: O(n) - * - * @param {Node} node - */ + * Check if a Node is adjacent to other + * Runtime: O(1) + * @param {Node} node + */ isAdjacent(node) { - return this.adjacents.indexOf(node) > -1; + return this.adjacents.has(node); } + // end::isAdjacent[] /** - * Get all adjacent nodes - */ + * Get all adjacent nodes + */ getAdjacents() { - return this.adjacents; + return Array.from(this.adjacents); } + // tag::constructor[] } +// end::constructor[] module.exports = Node; diff --git a/src/data-structures/graphs/node.spec.js b/src/data-structures/graphs/node.spec.js index 7a828b77..16d07d13 100644 --- a/src/data-structures/graphs/node.spec.js +++ b/src/data-structures/graphs/node.spec.js @@ -10,7 +10,7 @@ describe('Node (Graph)', () => { describe('#addAdjacent', () => { it('should add node to adjacent list', () => { node.addAdjacent(new Node(2)); - expect(node.adjacents.map(n => 
n.value)).toEqual([2]); + expect(node.getAdjacents().map(n => n.value)).toEqual([2]); }); }); @@ -31,16 +31,16 @@ describe('Node (Graph)', () => { }); it('should remove node to adjacent list', () => { - expect(a.removeAdjacent(c)).toBe(c); - expect(node.adjacents.map(n => n.value)).toEqual(['b']); + expect(a.removeAdjacent(c)).toBe(true); + expect(node.getAdjacents().map(n => n.value)).toEqual(['b']); - expect(a.removeAdjacent(b)).toBe(b); - expect(node.adjacents.map(n => n.value)).toEqual([]); + expect(a.removeAdjacent(b)).toBe(true); + expect(node.getAdjacents().map(n => n.value)).toEqual([]); }); it('should return undefined if not found', () => { - expect(a.removeAdjacent(d)).toBe(undefined); - expect(node.adjacents.map(n => n.value)).toEqual(['b', 'c']); + expect(a.removeAdjacent(d)).toBe(false); + expect(node.getAdjacents().map(n => n.value)).toEqual(['b', 'c']); }); }); diff --git a/src/data-structures/linked-lists/linked-list.js b/src/data-structures/linked-lists/linked-list.js index 1cf20d04..3197b68b 100644 --- a/src/data-structures/linked-lists/linked-list.js +++ b/src/data-structures/linked-lists/linked-list.js @@ -1,6 +1,7 @@ const Node = require('./node'); const util = require('util'); +// tag::constructor[] /** * Doubly linked list that keeps track of * the last and first element @@ -11,6 +12,7 @@ class LinkedList { this.last = null; // last element of the list this.size = 0; // total number of elements in the list } + // end::constructor[] /** * Alias for size @@ -19,6 +21,7 @@ class LinkedList { return this.size; } + // tag::addFirst[] /** * Adds element to the begining of the list. Similar to Array.unshift * Runtime: O(1) @@ -40,7 +43,9 @@ class LinkedList { return newNode; } + // end::addFirst[] + // tag::addLast[] /** * Adds element to the end of the list (tail). 
Similar to Array.push * Using the element last reference instead of navigating through the list, @@ -65,7 +70,9 @@ class LinkedList { return newNode; } + // end::addLast[] + // tag::addMiddle[] /** * Insert new element at the given position (index) * @@ -74,33 +81,38 @@ class LinkedList { * @returns {Node} new node or 'undefined' if the index is out of bound. */ add(value, position = 0) { - if (position === 0) { + if (position === 0) { // <1> return this.addFirst(value); } - if (position === this.size) { + if (position === this.size) { // <2> return this.addLast(value); } - + // Adding element in the middle const current = this.get(position); if (current) { - const newNode = new Node(value); - newNode.previous = current.previous; - newNode.next = current; + const newNode = new Node(value); // <3> + newNode.previous = current.previous; // <4> + newNode.next = current; // <5> - current.previous.next = newNode; - if (current.next) { current.next.previous = newNode; } + current.previous.next = newNode; // <6> + if (current.next) { current.next.previous = newNode; } // <7> this.size += 1; return newNode; } return undefined; // out of bound index } + // end::addMiddle[] + // tag::searchByValue[] /** * Search by value. It finds first occurrence of * the element matching the value. * Runtime: O(n) + * @example: assuming a linked list with: a -> b -> c + * linkedList.indexOf('b') // ↪️ 1 + * linkedList.indexOf('z') // ↪️ undefined * @param {any} value * @returns {number} return index or undefined */ @@ -112,10 +124,15 @@ class LinkedList { return undefined; }); } + // end::searchByValue[] + // tag::searchByIndex[] /** * Search by index * Runtime: O(n) + * @example: assuming a linked list with: a -> b -> c + * linkedList.get(1) // ↪️ 'b' + * linkedList.get(40) // ↪️ undefined * @param {Number} index position of the element * @returns {Node} element at the specified position in this list. 
*/ @@ -127,24 +144,32 @@ class LinkedList { return undefined; }); } + // end::searchByIndex[] + // tag::find[] /** * Iterate through the list until callback returns thruthy - * @param {Function} callback evaluates node and index - * @returns {any} callbacks's return value + * @example see #get and #indexOf + * @param {Function} callback evaluates current node and index. + * If any value other than undefined it's returned it will stop the search. + * @returns {any} callbacks's return value or undefined */ find(callback) { - for (let current = this.first, position = 0; - current; - position += 1, current = current.next) { - const result = callback(current, position); + for (let current = this.first, position = 0; // <1> + current; // <2> + position += 1, current = current.next) { // <3> + const result = callback(current, position); // <4> + if (result !== undefined) { - return result; + return result; // <5> } } return undefined; // not found } + // end::find[] + + // tag::removeFirst[] /** * Removes element from the start of the list (head/root). * Similar to Array.shift @@ -165,7 +190,9 @@ class LinkedList { } return head && head.value; } + // end::removeFirst[] + // tag::removeLast[] /** * Removes element to the end of the list. Similar to Array.pop * Using the `last.previous` we can reduce the runtime from O(n) to O(1) @@ -186,7 +213,9 @@ class LinkedList { } return tail && tail.value; } + // end::removeLast[] + // tag::removeByPosition[] /** * Removes the element at the specified position in this list. 
* Runtime: O(n) @@ -208,6 +237,7 @@ class LinkedList { return current && current.value; } + // end::removeByPosition[] /** * Removes the first occurrence of the specified elementt @@ -220,6 +250,7 @@ class LinkedList { return this.removeByPosition(parseInt(callbackOrIndex, 10) || 0); } + // find desired position to remove using #find const position = this.find((node, index) => { if (callbackOrIndex(node, index)) { return index; @@ -230,6 +261,7 @@ class LinkedList { if (position !== undefined) { // zero-based position. return this.removeByPosition(position); } + return false; } diff --git a/src/data-structures/linked-lists/node.js b/src/data-structures/linked-lists/node.js index 9b433267..1547a953 100644 --- a/src/data-structures/linked-lists/node.js +++ b/src/data-structures/linked-lists/node.js @@ -1,3 +1,4 @@ +// tag::snippet[] /** * Node with reference to next and previous element */ @@ -8,5 +9,6 @@ class Node { this.previous = null; } } +// end::snippet[] module.exports = Node; diff --git a/src/data-structures/maps/hash-maps/hashmap.js b/src/data-structures/maps/hash-maps/hash-map.js similarity index 81% rename from src/data-structures/maps/hash-maps/hashmap.js rename to src/data-structures/maps/hash-maps/hash-map.js index 03c69fcd..66ae4f22 100644 --- a/src/data-structures/maps/hash-maps/hashmap.js +++ b/src/data-structures/maps/hash-maps/hash-map.js @@ -13,6 +13,7 @@ const { nextPrime } = require('./primes'); * - It may have one null key and multiple null values. */ class HashMap { + // tag::constructorPartial[] /** * Initialize array that holds the values. 
* @param {number} initialCapacity initial size of the array (should be a prime) @@ -22,6 +23,7 @@ constructor(initialCapacity = 19, loadFactor = 0.75) { this.initialCapacity = initialCapacity; this.loadFactor = loadFactor; + // end::constructorPartial[] this.reset(); } @@ -35,10 +37,12 @@ this.buckets = buckets; this.size = size; this.collisions = collisions; + // keyTracker* is used to keep track of the insertion order this.keysTrackerArray = keysTrackerArray; this.keysTrackerIndex = keysTrackerIndex; } + // tag::hashFunction[] /** * Polynomial hash codes are used to hash String typed keys. * It uses FVN-1a hashing algorithm for 32 bits @@ -55,36 +59,41 @@ } return (hash >>> 0) % this.buckets.length; } + // end::hashFunction[] + // tag::getEntry[] /** * Find an entry inside a bucket. * - * The bucket is an array of LinkedList. - * Entries are each of the nodes in the linked list. + * The bucket is an array of Linked Lists. + * Entries are the nodes in the linked list + * containing key/value objects. * * Avg. Runtime: O(1) - * If there are many collisions it could be O(n). + * Usually O(1) but if there are many collisions it could be O(n). 
* * @param {any} key - * @param {function} callback (optional) operation to - * perform once the entry has been found - * @returns {object} object containing the bucket and entry (LinkedList's node's value) + * @returns {object} object containing the bucket and + * entry (LinkedList's node matching value) */ - getEntry(key, callback = () => {}) { - const index = this.hashFunction(key); - this.buckets[index] = this.buckets[index] || new LinkedList(); + getEntry(key) { + const index = this.hashFunction(key); // <1> + this.buckets[index] = this.buckets[index] || new LinkedList(); // <2> const bucket = this.buckets[index]; - const entry = bucket.find(({ value: node }) => { + const entry = bucket.find(({ value: node }) => { // <3> if (key === node.key) { - callback(node); - return node; + return node; // stop search } - return undefined; + return undefined; // continue searching }); - return { bucket, entry }; + + return { bucket, entry }; // <4> } + // end::getEntry[] + + // tag::set[] /** * Insert a key/value pair into the hash map. * If the key is already there replaces its content. @@ -95,9 +104,7 @@ class HashMap { * @returns {HashMap} Return the Map object to allow chaining */ set(key, value) { - const { entry: exists, bucket } = this.getEntry(key, (entry) => { - entry.value = value; // update value if key already exists - }); + const { entry: exists, bucket } = this.getEntry(key); if (!exists) { // add key/value if it doesn't find the key bucket.push({ key, value, order: this.keysTrackerIndex }); @@ -106,10 +113,15 @@ class HashMap { this.size += 1; if (bucket.size > 1) { this.collisions += 1; } if (this.isBeyondloadFactor()) { this.rehash(); } + } else { + // update value if key already exists + exists.value = value; } return this; } + // end::set[] + // tag::get[] /** * Gets the value out of the hash map * Avg. 
Runtime: O(1) @@ -120,7 +132,9 @@ class HashMap { const { entry } = this.getEntry(key); return entry && entry.value; } + // end::get[] + // tag::has[] /** * Search for key and return true if it was found * Avg. Runtime: O(1) @@ -132,7 +146,9 @@ class HashMap { const { entry } = this.getEntry(key); return entry !== undefined; } + // end::has[] + // tag::delete[] /** * Removes the specified element from a Map object. * Avg. Runtime: O(1) @@ -153,10 +169,13 @@ class HashMap { return undefined; }); } + // end::delete[] + // tag::getLoadFactor[] /** * Load factor - measure how full the Map is. * It's ratio between items on the map and total size of buckets + * @returns {number} load factor ratio */ getLoadFactor() { return this.size / this.buckets.length; @@ -164,11 +183,14 @@ class HashMap { /** * Check if a rehash is due + * @returns {boolean} true if is beyond load factor, false otherwise. */ isBeyondloadFactor() { return this.getLoadFactor() > this.loadFactor; } + // end::getLoadFactor[] + // tag::rehash[] /** * Rehash means to create a new Map with a new (higher) * capacity with the purpose of outgrow collisions. @@ -193,6 +215,8 @@ class HashMap { newArrayKeys.length, ); } + // end::rehash[] + /** * Keys for each element in the Map object in insertion order. 
@@ -247,3 +271,14 @@ class HashMap { HashMap.prototype.containsKey = HashMap.prototype.has; module.exports = HashMap; + +/* HashMap usage example +// tag::snippet[] +const hashMap = new HashMap(); + +hashMap.set('cat', 2); +hashMap.set('art', 8); +hashMap.set('rat', 7); +hashMap.set('dog', 1); +// end::snippet[] +*/ diff --git a/src/data-structures/maps/hash-maps/hashmap.spec.js b/src/data-structures/maps/hash-maps/hash-map.spec.js similarity index 99% rename from src/data-structures/maps/hash-maps/hashmap.spec.js rename to src/data-structures/maps/hash-maps/hash-map.spec.js index 9ed7cb6b..5523e1a7 100644 --- a/src/data-structures/maps/hash-maps/hashmap.spec.js +++ b/src/data-structures/maps/hash-maps/hash-map.spec.js @@ -1,4 +1,4 @@ -const HashMap = require('./hashmap'); +const HashMap = require('./hash-map'); // import HashMap from './hash-map'; diff --git a/src/data-structures/maps/hash-maps/hashing.js b/src/data-structures/maps/hash-maps/hashing.js index 7ef252a8..6e317037 100644 --- a/src/data-structures/maps/hash-maps/hashing.js +++ b/src/data-structures/maps/hash-maps/hashing.js @@ -1,3 +1,55 @@ +// tag::naiveHashCode[] +/** + * Naïve implementation of a non-cryptographic hashing function + * @param {any} key key to be converted to a positive integer + * @returns {integer} hash code (numeric representation of the key) + */ +function hashCodeNaive(key) { + return Array.from(key.toString()).reduce((hashCode, char) => { + return hashCode + char.codePointAt(0); + }, 0); +} +// end::naiveHashCode[] + +/* Hash Code examples +// tag::naiveHashCodeExamples[] +hashCode('cat'); //=> 312 (c=99 + a=97 + t=116) +hashCode('dog'); //=> 314 (d=100 + o=111 + g=103) +hashCode('rat'); //=> 327 (r=114 + a=97 + t=116) +hashCode('art'); //=> 327 (a=97 + r=114 + t=116) +hashCode(10); //=> 97 ('1'=49 + '0'=48) +// end::naiveHashCodeExamples[] +*/ + +// tag::hashCodeOffset[] +/** + * Calculates hash code that maps a key (value) to an integer (unbounded). 
+ * It uses a 20 bit offset to avoid Unicode value overlaps + * @param {any} key key to be converted to a positive integer + * @returns {BigInt} returns big integer (unbounded) that maps to the key + */ +function hashCode(key) { + const array = Array.from(`${key}${typeof key}`); + return array.reduce((hashCode, char, position) => { + return hashCode + BigInt(char.codePointAt(0)) * (2n ** (BigInt(position) * 20n)); + }, 0n); +} +// end::hashCodeOffset[] + +/* +// tag::hashCodeOffsetExample[] +hashCode('art') //↪️ 150534821962845809557083360656040988391557528813665n +hashCode(10) === hashCode('10'); //↪️ false +hashCode('10') === hashCode('10string'); //↪️ false +hashCode('art') === hashCode('rat'); //↪️ false +hashCode('😄') === hashCode('😄'); //↪️ true +hashCode('😄') === hashCode('😸'); //↪️ false +// end::hashCodeOffsetExample[] +*/ + + +// ---- Experiments ----- + const primes = [31n, 33n, 37n, 39n, 41n, 101n, 8191n, 131071n, 524287n, 6700417n, 1327144003n, 9007199254740881n]; function doubleToLongBits(number) { @@ -22,7 +74,7 @@ function hashString(key) { }, 0n); } -function hashCode(key) { +function hashCode2(key) { if (typeof(key) === 'number') { return hashNumber(key); } @@ -36,11 +88,11 @@ function hashIndex({key, size = 16} = {}) { const p = 524287n; // prime number larger than size. 
const a = 8191n; // random [1..p-1] const b = 0n; // random [0..p-1] - return ( (a * hashCode(key) + b) % p ) % BigInt(size); + return ( (a * hashCode2(key) + b) % p ) % BigInt(size); } module.exports = { - hashCode, + hashCode: hashCode2, hashIndex } diff --git a/src/data-structures/maps/map.js b/src/data-structures/maps/map.js new file mode 100644 index 00000000..ebcf2c0a --- /dev/null +++ b/src/data-structures/maps/map.js @@ -0,0 +1,18 @@ +/* JavaScript Built-in Map Usage +// tag::snippet[] +const myMap = new Map(); + +// mapping values to keys +myMap.set('string', 'foo'); +myMap.set(1, 'bar'); +myMap.set({}, 'baz'); +const obj1 = {}; +myMap.set(obj1, 'test'); + +// searching values by key +myMap.get(1); //↪️ bar +myMap.get('str'); //↪️ foo +myMap.get({}); //↪️ undefined +myMap.get(obj1); //↪️ test +// end::snippet[] +// */ diff --git a/src/data-structures/maps/map.spec.js b/src/data-structures/maps/map.spec.js index 42c4962d..dea0cdf8 100644 --- a/src/data-structures/maps/map.spec.js +++ b/src/data-structures/maps/map.spec.js @@ -1,4 +1,4 @@ -const HashMap = require('./hash-maps/hashmap'); +const HashMap = require('./hash-maps/hash-map'); const TreeMap = require('./tree-maps/tree-map'); const mapImplementations = [ diff --git a/src/data-structures/maps/tree-maps/tree-map.js b/src/data-structures/maps/tree-maps/tree-map.js index cc96cf25..68562da5 100644 --- a/src/data-structures/maps/tree-maps/tree-map.js +++ b/src/data-structures/maps/tree-maps/tree-map.js @@ -20,13 +20,16 @@ const Tree = require('../../trees/red-black-tree'); // fast insertion * */ class TreeMap { + // tag::constructor[] /** * Initialize tree */ constructor() { this.tree = new Tree(); } + // end::constructor[] + // tag::set[] /** * Insert a key/value pair into the map. * If the key is already there replaces its content. 
@@ -50,7 +53,9 @@ class TreeMap { get size() { return this.tree.size; } + // end::set[] + // tag::get[] /** * Gets the value out of the map * Runtime: O(log n) @@ -72,7 +77,9 @@ class TreeMap { has(key) { return !!this.get(key); } + // end::get[] + // tag::delete[] /** * Removes the specified element from the map. * Runtime: O(log n) @@ -83,12 +90,14 @@ class TreeMap { delete(key) { return this.tree.remove(key); } + // end::delete[] + // tag::iterators[] /** * Default iterator for this map */ * [Symbol.iterator]() { - yield* this.tree.inOrderTraversal(); + yield* this.tree.inOrderTraversal(); // <1> } /** @@ -112,6 +121,7 @@ class TreeMap { yield node.data(); } } + // end::iterators[] /** * Contains the [key, value] pairs for each element in the Map object diff --git a/src/data-structures/queues/queue.js b/src/data-structures/queues/queue.js index 45589aa3..60d38178 100644 --- a/src/data-structures/queues/queue.js +++ b/src/data-structures/queues/queue.js @@ -1,5 +1,6 @@ const LinkedList = require('../linked-lists/linked-list'); +// tag::constructor[] /** * Data structure where add and remove elements in a first-in, first-out (FIFO) */ @@ -7,24 +8,31 @@ class Queue { constructor() { this.items = new LinkedList(); } + // end::constructor[] + // tag::enqueue[] /** * Add element to the queue * Runtime: O(1) * @param {any} item + * @returns {queue} instance to allow chaining. */ enqueue(item) { this.items.addLast(item); + return this; } + // end::enqueue[] + // tag::dequeue[] /** * Remove element from the queue * Runtime: O(1) + * @returns {any} removed value. 
*/ dequeue() { return this.items.removeFirst(); } - + // end::dequeue[] /** * Size of the queue */ @@ -46,3 +54,16 @@ Queue.prototype.remove = Queue.prototype.dequeue; module.exports = Queue; +/* Usage Example: +// tag::snippet[] +const queue = new Queue(); + +queue.enqueue('a'); +queue.enqueue('b'); +queue.dequeue(); //↪️ a +queue.enqueue('c'); +queue.dequeue(); //↪️ b +queue.dequeue(); //↪️ c +// end::snippet[] +// */ + diff --git a/src/data-structures/sets/hash-set.js b/src/data-structures/sets/hash-set.js index 8f3f04fd..3327745b 100644 --- a/src/data-structures/sets/hash-set.js +++ b/src/data-structures/sets/hash-set.js @@ -1,5 +1,5 @@ -const HashMap = require('../maps/hash-maps/hashmap'); - +const HashMap = require('../maps/hash-maps/hash-map'); +// tag::constructor[] /** * Set implemented with our HashMap * Have an average of O(1) time on all operations @@ -15,6 +15,15 @@ class HashMapSet { Array.from(iterable).forEach(element => this.add(element)); } + /** + * Get size of the set + */ + get size() { + return this.hashMap.size; + } + // end::constructor[] + + // tag::add[] /** * Add a new value (duplicates will be added only once) * Avg. Runtime: O(1) @@ -23,7 +32,9 @@ class HashMapSet { add(value) { this.hashMap.set(value); } + // end::add[] + // tag::has[] /** * Check if value is already on the set * Avg. Runtime: O(1) @@ -32,14 +43,9 @@ class HashMapSet { has(value) { return this.hashMap.has(value); } + // end::has[] - /** - * Get size of the set - */ - get size() { - return this.hashMap.size; - } - + // tag::delete[] /** * Delete a value from the set * Avg. 
Runtime: O(1) @@ -48,7 +54,9 @@ class HashMapSet { delete(value) { return this.hashMap.delete(value); } + // end::delete[] + // tag::iterators[] /** * Make this class iterable */ @@ -75,6 +83,7 @@ class HashMapSet { yield [value, value]; } } +// end::iterators[] } module.exports = HashMapSet; diff --git a/src/data-structures/sets/set.spec.js b/src/data-structures/sets/set.spec.js index 72ba05d9..6ed0c49b 100644 --- a/src/data-structures/sets/set.spec.js +++ b/src/data-structures/sets/set.spec.js @@ -58,6 +58,7 @@ setImplementations.forEach((MySet) => { expect(set.delete(2)).toBe(true); expect(Array.from(set.entries())).toEqual([[0, 0], [1, 1], [3, 3]]); expect(set.delete(0)).toBe(true); + expect(Array.from(set.entries())).toEqual([[1, 1], [3, 3]]); expect(Array.from(set)).toEqual([1, 3]); expect(set.size).toBe(2); diff --git a/src/data-structures/sets/tree-set.js b/src/data-structures/sets/tree-set.js index d9255947..3eecb3e2 100644 --- a/src/data-structures/sets/tree-set.js +++ b/src/data-structures/sets/tree-set.js @@ -1,5 +1,9 @@ -// const Tree = require('../trees/avl-tree'); // faster lookups -const Tree = require('../trees/red-black-tree'); // faster insertion +// faster lookups +// const Tree = require('../trees/avl-tree'); + +// faster insertion +// tag::constructor[] +const Tree = require('../trees/red-black-tree'); /** * TreeSet implements a Set (collection of unique values) @@ -8,11 +12,11 @@ const Tree = require('../trees/red-black-tree'); // faster insertion class TreeSet { /** * Initialize tree and accept initial values. 
- * @param {array} iterable initial values (duplicates will be added once) + * @param {array} iterable initial values (new set won't have duplicates) */ constructor(iterable = []) { this.tree = new Tree(); - Array.from(iterable).forEach(value => this.add(value)); + Array.from(iterable).forEach(value => this.add(value)); // <1> } /** @@ -21,7 +25,9 @@ class TreeSet { get size() { return this.tree.size; } + // end::constructor[] + // tag::add[] /** * Add a new value (duplicates will be added only once) * Runtime: O(log n) @@ -32,7 +38,9 @@ class TreeSet { this.tree.add(value); } } + // end::add[] + // tag::has[] /** * Check if value is already on the set * Runtime: O(log n) @@ -42,7 +50,9 @@ class TreeSet { has(value) { return this.tree.has(value); } + // end::has[] + // tag::delete[] /** * Delete a value from the set * Runtime: O(log n) @@ -51,7 +61,9 @@ class TreeSet { delete(value) { return this.tree.remove(value); } + // end::delete[] + // tag::iterator[] /** * Default iterator for this set * @returns {iterator} values in ascending order @@ -61,6 +73,7 @@ class TreeSet { yield node.value; } } + // end::iterator[] /** * Get all the values on the Set diff --git a/src/data-structures/stacks/stack.js b/src/data-structures/stacks/stack.js index ef71ec02..9ab5a357 100644 --- a/src/data-structures/stacks/stack.js +++ b/src/data-structures/stacks/stack.js @@ -1,5 +1,5 @@ const LinkedList = require('../linked-lists/linked-list'); - +// tag::constructor[] /** * Data structure that adds and remove elements in a first-in, first-out (FIFO) fashion */ @@ -7,26 +7,32 @@ class Stack { constructor() { this.items = new LinkedList(); } + // end::constructor[] + // tag::add[] /** - * Add element into the stack. - * Similar to Array.push + * Add element into the stack. Similar to Array.push * Runtime: O(1) * @param {any} item + * @returns {stack} instance to allow chaining. 
*/ add(item) { this.items.addLast(item); return this; } + // end::add[] + // tag::remove[] /** * Remove element from the stack. * Similar to Array.pop * Runtime: O(1) + * @returns {any} removed value. */ remove() { return this.items.removeLast(); } + // end::remove[] /** * Size of the queue @@ -49,3 +55,16 @@ Stack.prototype.pop = Stack.prototype.remove; module.exports = Stack; +/* Usage Example: +// tag::snippet[] +const stack = new Stack(); + +stack.add('a'); +stack.add('b'); +stack.remove(); //↪️ b +stack.add('c'); +stack.remove(); //↪️ c +stack.remove(); //↪️ a +// end::snippet[] +// */ + diff --git a/src/data-structures/trees/avl-tree.js b/src/data-structures/trees/avl-tree.js index 32d81de6..c3b013d2 100644 --- a/src/data-structures/trees/avl-tree.js +++ b/src/data-structures/trees/avl-tree.js @@ -1,12 +1,69 @@ const BinarySearchTree = require('./binary-search-tree'); const { - balanceUptream, + leftRotation, + rightRotation, + leftRightRotation, + rightLeftRotation, } = require('./tree-rotations'); +// tag::balance[] +/** + * Balance tree doing rotations based on balance factor. 
+ * + * Depending on the `node` balance factor and child's factor + * one of this rotation is performed: + * - LL rotations: single left rotation + * - RR rotations: single right rotation + * - LR rotations: double rotation left-right + * - RL rotations: double rotation right-left + * + * @param {TreeNode} node + */ +function balance(node) { + if (node.balanceFactor > 1) { + // left subtree is higher than right subtree + if (node.left.balanceFactor > 0) { + return rightRotation(node); + } else if (node.left.balanceFactor < 0) { + return leftRightRotation(node); + } + } else if (node.balanceFactor < -1) { + // right subtree is higher than left subtree + if (node.right.balanceFactor < 0) { + return leftRotation(node); + } else if (node.right.balanceFactor > 0) { + return rightLeftRotation(node); + } + } + return node; +} +// end::balance[] + +// tag::balanceUptream[] +/** + * Bubbles up balancing nodes a their parents + * + * @param {TreeNode} node + */ +function balanceUptream(node) { + let current = node; + let newParent; + while (current) { + newParent = balance(current); + current = current.parent; + } + return newParent; +} +// end::balanceUptream[] + +// tag::AvlTree[] +/** + * AVL Tree + * It's a self-balanced binary search tree optimized for fast lookups. + */ class AvlTree extends BinarySearchTree { /** * Add node to tree. It self-balance itself. 
- * * @param {any} value node's value */ add(value) { @@ -30,5 +87,6 @@ class AvlTree extends BinarySearchTree { return false; } } +// end::AvlTree[] module.exports = AvlTree; diff --git a/src/data-structures/trees/binary-search-tree-1.js b/src/data-structures/trees/binary-search-tree-1.js deleted file mode 100644 index 4b9b0519..00000000 --- a/src/data-structures/trees/binary-search-tree-1.js +++ /dev/null @@ -1,12 +0,0 @@ -const Node = require('./binary-tree-node'); - -class BinarySearchTree { - constructor() { - this.root = null; - this.size = 0; - } - - add(value) { - - } -} diff --git a/src/data-structures/trees/binary-search-tree.js b/src/data-structures/trees/binary-search-tree.js index 9dfdca19..12b562bc 100644 --- a/src/data-structures/trees/binary-search-tree.js +++ b/src/data-structures/trees/binary-search-tree.js @@ -1,30 +1,34 @@ -const TreeNode = require('./tree-node'); +const BinaryTreeNode = require('./binary-tree-node'); const Queue = require('../queues/queue'); const Stack = require('../stacks/stack'); - +// tag::snippet[] class BinarySearchTree { constructor() { this.root = null; this.size = 0; } + // end::snippet[] + // tag::add[] /** * Insert value on the BST. 
- * If the value is already in the tree, t - * then it increase the multiplicity value - * @param {any} value value to insert in the tree + * + * If the value is already in the tree, + * then it increases the multiplicity value + * @param {any} value node's value to insert in the tree + * @returns {BinaryTreeNode} newly added node */ add(value) { - const newNode = new TreeNode(value); + const newNode = new BinaryTreeNode(value); if (this.root) { - const { found, parent } = this.findNodeAndParent(value); + const { found, parent } = this.findNodeAndParent(value); // <1> if (found) { // duplicated: value already exist on the tree - found.meta.multiplicity = (found.meta.multiplicity || 1) + 1; + found.meta.multiplicity = (found.meta.multiplicity || 1) + 1; // <2> } else if (value < parent.value) { - parent.left = newNode; + parent.setLeftAndUpdateParent(newNode); } else { - parent.right = newNode; + parent.setRightAndUpdateParent(newNode); } } else { this.root = newNode; @@ -33,14 +37,7 @@ class BinarySearchTree { this.size += 1; return newNode; } - - /** - * Return node if it found it or undefined if not - * @param {any} value value to find - */ - find(value) { - return this.findNodeAndParent(value).found; - } + // end::add[] /** * Find if a node is present or not @@ -51,11 +48,23 @@ class BinarySearchTree { return !!this.find(value); } + // tag::find[] + /** + * @param {any} value value to find + * @returns {BinaryTreeNode|null} node if it found it or null if not + */ + find(value) { + return this.findNodeAndParent(value).found; + } + + /** - * Finds the node matching the value. - * If it doesn't find, it returns the leaf where the new value should be added. + * Recursively finds the node matching the value. + * If it doesn't find, it returns the leaf `parent` where the new value should be appended. 
* @param {any} value Node's value to find - * @returns {TreeNode} matching node or the previous node where value should go + * @param {BinaryTreeNode} node first element to start the search (root is default) + * @param {BinaryTreeNode} parent keep track of parent (usually filled by recursion) + * @returns {object} node and its parent like {node, parent} */ findNodeAndParent(value, node = this.root, parent = null) { if (!node || node.value === value) { @@ -65,11 +74,12 @@ class BinarySearchTree { } return this.findNodeAndParent(value, node.right, node); } + // end::find[] /** * Get the node with the max value of subtree: the right-most value. - * @param {TreeNode} node subtree's root - * @returns {TreeNode} right-most node (max value) + * @param {BinaryTreeNode} node subtree's root + * @returns {BinaryTreeNode} right-most node (max value) */ getRightmost(node = this.root) { if (!node || !node.right) { @@ -78,10 +88,11 @@ class BinarySearchTree { return this.getMax(node.right); } + // tag::leftMost[] /** * Get the node with the min value of subtree: the left-most value. 
- * @param {TreeNode} node subtree's root - * @returns {TreeNode} left-most node (min value) + * @param {BinaryTreeNode} node subtree's root + * @returns {BinaryTreeNode} left-most node (min value) */ getLeftmost(node = this.root) { if (!node || !node.left) { @@ -89,35 +100,41 @@ class BinarySearchTree { } return this.getMin(node.left); } + // end::leftMost[] + + // tag::remove[] /** * Remove a node from the tree * @returns {boolean} false if not found and true if it was deleted */ remove(value) { - const { found: nodeToRemove, parent } = this.findNodeAndParent(value); + const { found: nodeToRemove, parent } = this.findNodeAndParent(value); // <1> - if (!nodeToRemove) return false; + if (!nodeToRemove) return false; // <2> // Combine left and right children into one subtree without nodeToRemove - const removedNodeChildren = this.combineLeftIntoRightSubtree(nodeToRemove); + const removedNodeChildren = this.combineLeftIntoRightSubtree(nodeToRemove); // <3> - if (nodeToRemove.meta.multiplicity && nodeToRemove.meta.multiplicity > 1) { + if (nodeToRemove.meta.multiplicity && nodeToRemove.meta.multiplicity > 1) { // <4> nodeToRemove.meta.multiplicity -= 1; // handles duplicated - } else if (nodeToRemove === this.root) { + } else if (nodeToRemove === this.root) { // <5> // Replace (root) node to delete with the combined subtree. this.root = removedNodeChildren; if (this.root) { this.root.parent = null; } // clearing up old parent - } else { - const side = nodeToRemove.isParentLeftChild ? 'left' : 'right'; + } else if (nodeToRemove.isParentLeftChild) { // <6> // Replace node to delete with the combined subtree. - parent[side] = removedNodeChildren; + parent.setLeftAndUpdateParent(removedNodeChildren); + } else { + parent.setRightAndUpdateParent(removedNodeChildren); } this.size -= 1; return true; } + // end::remove[] + // tag::combine[] /** * Combine left into right children into one subtree without given parent node. 
* @@ -134,21 +151,23 @@ class BinarySearchTree { * It takes node 30 left subtree (10 and 15) and put it in the * leftmost node of the right subtree (40, 35, 50). * - * @param {TreeNode} node - * @returns {TreeNode} combined subtree + * @param {BinaryTreeNode} node + * @returns {BinaryTreeNode} combined subtree */ combineLeftIntoRightSubtree(node) { if (node.right) { const leftmost = this.getLeftmost(node.right); - leftmost.left = node.left; + leftmost.setLeftAndUpdateParent(node.left); return node.right; } return node.left; } + // end::combine[] + // tag::bfs[] /** * Breath-first search for a tree (always starting from the root element). - * + * @yields {BinaryTreeNode} */ * bfs() { const queue = new Queue(); @@ -158,14 +177,18 @@ class BinarySearchTree { while (!queue.isEmpty()) { const node = queue.remove(); yield node; - node.descendents.forEach(child => queue.add(child)); + + if (node.left) { queue.add(node.left); } + if (node.right) { queue.add(node.right); } } } + // end::bfs[] + // tag::dfs[] /** * Depth-first search for a tree (always starting from the root element) - * * @see preOrderTraversal Similar results to the pre-order transversal. + * @yields {BinaryTreeNode} */ * dfs() { const stack = new Stack(); @@ -175,47 +198,53 @@ class BinarySearchTree { while (!stack.isEmpty()) { const node = stack.remove(); yield node; - // reverse array, so left gets removed before right - node.descendents.reverse().forEach(child => stack.add(child)); + + if (node.right) { stack.add(node.right); } + if (node.left) { stack.add(node.left); } } } + // end::dfs[] + // tag::inOrderTraversal[] /** * In-order traversal on a tree: left-root-right. 
- * * If the tree is a BST, then the values will be sorted in ascendent order - * - * @param {TreeNode} node first node to start the traversal + * @param {BinaryTreeNode} node first node to start the traversal + * @yields {BinaryTreeNode} */ * inOrderTraversal(node = this.root) { - if (node.left) { yield* this.inOrderTraversal(node.left); } + if (node && node.left) { yield* this.inOrderTraversal(node.left); } yield node; - if (node.right) { yield* this.inOrderTraversal(node.right); } + if (node && node.right) { yield* this.inOrderTraversal(node.right); } } + // end::inOrderTraversal[] + // tag::preOrderTraversal[] /** * Pre-order traversal on a tree: root-left-right. * Similar results to DFS - * - * @param {TreeNode} node first node to start the traversal - * @see dfs similar results to the breath first search + * @param {BinaryTreeNode} node first node to start the traversal + * @yields {BinaryTreeNode} */ * preOrderTraversal(node = this.root) { yield node; if (node.left) { yield* this.preOrderTraversal(node.left); } if (node.right) { yield* this.preOrderTraversal(node.right); } } + // end::preOrderTraversal[] + // tag::postOrderTraversal[] /** * Post-order traversal on a tree: left-right-root. - * - * @param {TreeNode} node first node to start the traversal + * @param {BinaryTreeNode} node first node to start the traversal + * @yields {BinaryTreeNode} */ * postOrderTraversal(node = this.root) { if (node.left) { yield* this.postOrderTraversal(node.left); } if (node.right) { yield* this.postOrderTraversal(node.right); } yield node; } + // end::postOrderTraversal[] /** * Represent Binary Tree as an array. 
diff --git a/src/data-structures/trees/binary-tree-node.js b/src/data-structures/trees/binary-tree-node.js index 61668e78..5008dcee 100644 --- a/src/data-structures/trees/binary-tree-node.js +++ b/src/data-structures/trees/binary-tree-node.js @@ -1,12 +1,180 @@ +const LEFT = Symbol('left'); +const RIGHT = Symbol('right'); + +// tag::snippet[] /** * Binary Tree Node + * */ class BinaryTreeNode { constructor(value) { this.value = value; this.left = null; this.right = null; + this.meta = {}; + // end::snippet[] + this.parent = null; + this.parentSide = null; + } + + // tag::setAndUpdateParent[] + /** + * Set a left node descendents. + * Also, children get associated to parent. + */ + setLeftAndUpdateParent(node) { + this.left = node; + if (node) { + node.parent = this; + node.parentSide = LEFT; + } + } + + /** + * Set a right node descendents. + * Also, children get associated to parent. + */ + setRightAndUpdateParent(node) { + this.right = node; + if (node) { + node.parent = this; + node.parentSide = RIGHT; + } + } + // end::setAndUpdateParent[] + + /** + * Tell if is parent's left or right child + * + * @returns {string} side (left or right) this node is of its parent + */ + get parentChildSide() { + if (this.parent) { + return this.isParentLeftChild ? 'left' : 'right'; + } + + return 'root'; + } + + /** + * Return true if this node is its parent left child + */ + get isParentLeftChild() { + return this.parentSide === LEFT; + } + + /** + * Return true if this node is its parent right child + */ + get isParentRightChild() { + return this.parentSide === RIGHT; + } + + /** + * Node is leaf is it has no descendents + */ + get isLeaf() { + return !this.left && !this.right; + } + + /** + * Get sibling of current node + */ + get sibling() { + const { parent } = this; + if (!parent) return null; + return parent.right === this ? 
parent.left : parent.right; + } + + /** + * Get parent sibling = uncle (duh) + */ + get uncle() { + const { parent } = this; + if (!parent) return null; + return parent.sibling; + } + + get grandparent() { + const { parent } = this; + return parent && parent.parent; + } + + /** + * Get color + */ + get color() { + return this.meta.color; + } + + /** + * Set Color + */ + set color(value) { + this.meta.color = value; + } + + // tag::avl[] + /** + * @returns {Number} left subtree height or 0 if no left child + */ + get leftSubtreeHeight() { + return this.left ? this.left.height + 1 : 0; + } + + /** + * @returns {Number} right subtree height or 0 if no right child + */ + get rightSubtreeHeight() { + return this.right ? this.right.height + 1 : 0; + } + + /** + * Get the max height of the subtrees. + * + * It recursively goes into each children calculating the height + * + * Height: distance from the deepest leaf to this node + */ + get height() { + return Math.max(this.leftSubtreeHeight, this.rightSubtreeHeight); + } + + /** + * Returns the difference the heights on the left and right subtrees + */ + get balanceFactor() { + return this.leftSubtreeHeight - this.rightSubtreeHeight; + } + // end::avl[] + + /** + * Serialize node's values + */ + toValues() { + return { + value: this.value, + left: this.left && this.left.value, + right: this.right && this.right.value, + parent: this.parent && this.parent.value, + parentSide: this.parentSide, + }; + } + + /** + * Get and Set data value + * @param {any} value (optional) if not provided is a getter, otherwise a setter. 
+ */ + data(value) { + if (value === undefined) { + return this.meta.data; + } + this.meta.data = value; + return this; } } +BinaryTreeNode.RIGHT = RIGHT; +BinaryTreeNode.LEFT = LEFT; + module.exports = BinaryTreeNode; diff --git a/src/data-structures/trees/tree-node.spec.js b/src/data-structures/trees/binary-tree-node.spec.js similarity index 77% rename from src/data-structures/trees/tree-node.spec.js rename to src/data-structures/trees/binary-tree-node.spec.js index ca8a5122..4a44b899 100644 --- a/src/data-structures/trees/tree-node.spec.js +++ b/src/data-structures/trees/binary-tree-node.spec.js @@ -1,10 +1,10 @@ -const TreeNode = require('./tree-node'); +const BinaryTreeNode = require('./binary-tree-node'); -describe('Tree Node', () => { +describe('Binary Tree Node', () => { let treeNode; beforeEach(() => { - treeNode = new TreeNode('hola'); + treeNode = new BinaryTreeNode('hola'); }); it('should start with null parent', () => { @@ -25,8 +25,8 @@ describe('Tree Node', () => { it('should set/get left node', () => { expect(treeNode.left).toBe(null); - const newNode = new TreeNode(1); - treeNode.left = newNode; + const newNode = new BinaryTreeNode(1); + treeNode.setLeftAndUpdateParent(newNode); expect(treeNode.left.value).toBe(1); expect(newNode.parent).toBe(treeNode); @@ -36,8 +36,8 @@ describe('Tree Node', () => { it('should set/get right node', () => { expect(treeNode.right).toBe(null); - const newNode = new TreeNode(1); - treeNode.right = newNode; + const newNode = new BinaryTreeNode(1); + treeNode.setRightAndUpdateParent(newNode); expect(treeNode.right.value).toBe(1); expect(newNode.parent).toBe(treeNode); @@ -53,16 +53,16 @@ describe('Tree Node', () => { let s; beforeEach(() => { - g = new TreeNode('grandparent'); - p = new TreeNode('parent'); - u = new TreeNode('uncle'); - c = new TreeNode('child'); - s = new TreeNode('sibling'); - - g.right = p; - g.left = u; - p.right = c; - p.left = s; + g = new BinaryTreeNode('grandparent'); + p = new 
BinaryTreeNode('parent'); + u = new BinaryTreeNode('uncle'); + c = new BinaryTreeNode('child'); + s = new BinaryTreeNode('sibling'); + + g.setRightAndUpdateParent(p); + g.setLeftAndUpdateParent(u); + p.setRightAndUpdateParent(c); + p.setLeftAndUpdateParent(s); }); it('should set heights', () => { diff --git a/src/data-structures/trees/red-black-tree.js b/src/data-structures/trees/red-black-tree.js index d9e9e3ed..28c7e4b4 100644 --- a/src/data-structures/trees/red-black-tree.js +++ b/src/data-structures/trees/red-black-tree.js @@ -1,10 +1,11 @@ const BinarySearchTree = require('./binary-search-tree'); -// const TreeNode = require('./tree-node'); + const RED = Symbol('red'); const BLACK = Symbol('black'); /** * Red-Black Tree + * It's a self-balanced binary search tree optimized for fast insertion. * * Properties: * @@ -89,12 +90,12 @@ class RedBlackTree extends BinarySearchTree { const grandParent = oldParent.parent; if (grandParent) { - // do something + // do nothing } else { this.root = node; node.parent = null; - node.left = oldParent; - oldParent.right = undefined; + node.setLeftAndUpdateParent(oldParent); + oldParent.setRightAndUpdateParent(null); // re-color node.color = BLACK; node.right.color = RED; @@ -124,8 +125,8 @@ class RedBlackTree extends BinarySearchTree { } else { this.root = node; node.parent = null; - node.right = oldParent; - oldParent.left = undefined; + node.setRightAndUpdateParent(oldParent); + oldParent.setLeftAndUpdateParent(null); // re-color node.color = BLACK; node.right.color = RED; diff --git a/src/data-structures/trees/red-black-tree.spec.js b/src/data-structures/trees/red-black-tree.spec.js index ea82afd2..a49e389d 100644 --- a/src/data-structures/trees/red-black-tree.spec.js +++ b/src/data-structures/trees/red-black-tree.spec.js @@ -1,5 +1,6 @@ const RedBlackTree = require('./red-black-tree.js'); -const {RED, BLACK} = RedBlackTree; + +const { RED, BLACK } = RedBlackTree; describe('RedBlackTree', () => { let tree; diff --git 
a/src/data-structures/trees/tree-node-1.js b/src/data-structures/trees/tree-node-1.js deleted file mode 100644 index afca0278..00000000 --- a/src/data-structures/trees/tree-node-1.js +++ /dev/null @@ -1,12 +0,0 @@ -/** - * Binary Tree Node - */ -class TreeNode { - constructor(value) { - this.value = value; - this.left = null; - this.right = null; - } -} - -module.exports = TreeNode; diff --git a/src/data-structures/trees/tree-node.js b/src/data-structures/trees/tree-node.js index 47fe6456..c78624a1 100644 --- a/src/data-structures/trees/tree-node.js +++ b/src/data-structures/trees/tree-node.js @@ -1,170 +1,13 @@ -const LEFT = 0; -const RIGHT = 1; - +// tag::snippet[] +/** + * TreeNode - each node can have zero or more children + */ class TreeNode { constructor(value) { this.value = value; - this.descendents = []; - this.parent = null; - this.parentSide = null; - this.meta = {}; - } - - /** - * Node is leaf is it has no descendents - */ - get isLeaf() { - return !this.descendents.some(child => child); - } - - get left() { - return this.descendents[LEFT] || null; - } - - /** - * Set a left node descendents. - * Also, children get associated to parent. - */ - set left(node) { - this.descendents[LEFT] = node; - if (node) { - node.parent = this; - node.parentSide = LEFT; - } - } - - get right() { - return this.descendents[RIGHT] || null; - } - - /** - * Set a right node descendents. - * Also, children get associated to parent. - */ - set right(node) { - this.descendents[RIGHT] = node; - if (node) { - node.parent = this; - node.parentSide = RIGHT; - } - } - - /** - * Tell if is parent's left or right child - * - * @returns {string} side (left or right) this node is of its parent - */ - get parentChildSide() { - if (this.parent) { - return this.isParentLeftChild ? 
'left' : 'right'; - } - - return 'root'; - } - - /** - * Return true if this node is its parent left child - */ - get isParentLeftChild() { - return this.parentSide === LEFT; - } - - /** - * Return true if this node is its parent right child - */ - get isParentRightChild() { - return this.parentSide === RIGHT; - } - - /** - * Get sibling of current node - */ - get sibling() { - const { parent } = this; - if (!parent) return null; - return parent.right === this ? parent.left : parent.right; - } - - /** - * Get parent sibling = uncle (duh) - */ - get uncle() { - const { parent } = this; - if (!parent) return null; - return parent.sibling; - } - - get grandparent() { - const { parent } = this; - return parent && parent.parent; - } - - // Meta shortcuts - - /** - * Get color - */ - get color() { - return this.meta.color; - } - - /** - * Set Color - */ - set color(value) { - this.meta.color = value; - } - - /** - * Get the max height of the subtrees. - * - * It recursively goes into each children calculating the height - * - * Height: distance from the deepest leaf to this node - */ - get height() { - return Math.max(this.leftSubtreeHeight, this.rightSubtreeHeight); - } - - get leftSubtreeHeight() { - return this.left ? this.left.height + 1 : 0; - } - - get rightSubtreeHeight() { - return this.right ? this.right.height + 1 : 0; - } - - /** - * Returns the difference the heights on the left and right subtrees - */ - get balanceFactor() { - return this.leftSubtreeHeight - this.rightSubtreeHeight; - } - - /** - * Serialize node's values - */ - toValues() { - return { - value: this.value, - left: this.left && this.left.value, - right: this.right && this.right.value, - parent: this.parent && this.parent.value, - parentSide: this.parentSide, - }; - } - - /** - * Get and Set data value - * @param {any} value (optional) if not provided is a getter, otherwise a setter. 
- */ - data(value) { - if (value === undefined) { - return this.meta.data; - } - this.meta.data = value; - return this; + this.descendants = []; } } +// end::snippet[] module.exports = TreeNode; diff --git a/src/data-structures/trees/tree-rotations.js b/src/data-structures/trees/tree-rotations.js index c41d616a..8f2c6338 100644 --- a/src/data-structures/trees/tree-rotations.js +++ b/src/data-structures/trees/tree-rotations.js @@ -1,3 +1,4 @@ +// tag::swapParentChild[] /** * Swap parent's child * @@ -18,15 +19,18 @@ */ function swapParentChild(oldChild, newChild, parent) { if (parent) { - const side = oldChild.isParentRightChild ? 'right' : 'left'; - // this set parent child AND also - parent[side] = newChild; + // this set parent child + const side = oldChild.isParentRightChild ? 'Right' : 'Left'; + parent[`set${side}AndUpdateParent`](newChild); } else { // no parent? so set it to null newChild.parent = null; } } +// end::swapParentChild[] + +// tag::leftRotation[] /** * Single Left Rotation (LL Rotation) * @@ -43,77 +47,90 @@ function swapParentChild(oldChild, newChild, parent) { * @returns {TreeNode} new parent after the rotation */ function leftRotation(node) { - const newParent = node.right; - const grandparent = node.parent; - + const newParent = node.right; // E.g., node 3 + const grandparent = node.parent; // E.g., node 1 + // swap node 1 left child from 2 to 3. swapParentChild(node, newParent, grandparent); - // do LL rotation - newParent.left = node; - node.right = undefined; + // Update node 3 left child to be 2, and + // updates node 2 parent to be node 3 (instead of 1). 
+ newParent.setLeftAndUpdateParent(node); + // remove node 2 left child (previouly was node 3) + node.setRightAndUpdateParent(null); return newParent; } +// end::leftRotation[] +// tag::rightRotation[] /** * Single Right Rotation (RR Rotation) * * @example rotate node 3 to the right * - * 4 4 - * / / - * 3* 2 - * / / \ - * 2 ---| right-rotation(3) |--> 1 3* + * 4 4 + * / / + * 3* 2 + * / / \ + * 2 ---| right-rotation(3) |--> 1 3* * / * 1 * * @param {TreeNode} node - * @returns {TreeNode} new parent after the rotation + * this is the node we want to rotate to the right. (E.g., node 3) + * @returns {TreeNode} new parent after the rotation (E.g., node 2) */ function rightRotation(node) { - const newParent = node.left; - const grandparent = node.parent; + const newParent = node.left; // E.g., node 2 + const grandparent = node.parent; // E.g., node 4 + // swap node 4 left children (node 3) with node 2. swapParentChild(node, newParent, grandparent); - // do RR rotation - newParent.right = node; - node.left = undefined; + // update right child on node 2 to be node 3, + // also make node 2 the new parent of node 3. + newParent.setRightAndUpdateParent(node); + // remove node 3 left child (so it doesn't point to node 2) + node.setLeftAndUpdateParent(null); return newParent; } +// end::rightRotation[] +// tag::leftRightRotation[] /** * Left Right Rotation (LR Rotation) * * @example LR rotation on node 3 - * 4 4 - * / / 4 - * 3 3* / - * / / 2 - * 1* --left-rotation(1)-> 2 --right-rotation(3)-> / \ - * \ / 1 3* + * 4 4 + * / / 4 + * 3 3* / + * / / 2 + * 1* --left-rotation(1)-> 2 --right-rotation(3)-> / \ + * \ / 1 3* * 2 1 * * @param {TreeNode} node + * this is the node we want to rotate to the right. 
E.g., node 3 * @returns {TreeNode} new parent after the rotation */ function leftRightRotation(node) { leftRotation(node.left); return rightRotation(node); } +// end::leftRightRotation[] +// tag::rightLeftRotation[] /** * Right Left Rotation (RL Rotation) * * @example RL rotation on 1 * * 1* 1* - * \ \ 2 - * 3 -right-rotation(3)-> 2 -left-rotation(1)-> / \ - * / \ 1* 3 + * \ \ 2 + * 3 -right-rotation(3)-> 2 -left-rotation(1)-> / \ + * / \ 1* 3 * 2 3 * * @param {TreeNode} node @@ -123,58 +140,11 @@ function rightLeftRotation(node) { rightRotation(node.right); return leftRotation(node); } - -/** - * Balance tree doing rotations based on balance factor. - * - * Depending on the `node` balance factor and child's factor - * one of this rotation is performed: - * - LL rotations: single left rotation - * - RR rotations: single right rotation - * - LR rotations: double rotation left-right - * - RL rotations: double rotation right-left - * - * @param {TreeNode} node - */ -function balance(node) { - if (node.balanceFactor > 1) { - // left subtree is higher than right subtree - if (node.left.balanceFactor > 0) { - return rightRotation(node); - } else if (node.left.balanceFactor < 0) { - return leftRightRotation(node); - } - } else if (node.balanceFactor < -1) { - // right subtree is higher than left subtree - if (node.right.balanceFactor < 0) { - return leftRotation(node); - } else if (node.right.balanceFactor > 0) { - return rightLeftRotation(node); - } - } - return node; -} - -/** - * Bubbles up balancing nodes a their parents - * - * @param {TreeNode} node - */ -function balanceUptream(node) { - let current = node; - let newParent; - while (current) { - newParent = balance(current); - current = current.parent; - } - return newParent; -} +// end::rightLeftRotation[] module.exports = { leftRotation, rightRotation, leftRightRotation, rightLeftRotation, - balance, - balanceUptream, }; diff --git a/src/data-structures/trees/tree-rotations.spec.js 
b/src/data-structures/trees/tree-rotations.spec.js index 4303f981..bb02039f 100644 --- a/src/data-structures/trees/tree-rotations.spec.js +++ b/src/data-structures/trees/tree-rotations.spec.js @@ -1,4 +1,5 @@ -const TreeNode = require('./tree-node'); +const BinaryTreeNode = require('./binary-tree-node'); + const { leftRotation, rightRotation, @@ -13,10 +14,10 @@ describe('Tree rotations', () => { let n4; beforeEach(() => { - n1 = new TreeNode(1); - n2 = new TreeNode(2); - n3 = new TreeNode(3); - n4 = new TreeNode(4); + n1 = new BinaryTreeNode(1); + n2 = new BinaryTreeNode(2); + n3 = new BinaryTreeNode(3); + n4 = new BinaryTreeNode(4); }); describe('#leftRotation (LL Rotation)', () => { @@ -28,8 +29,8 @@ describe('Tree rotations', () => { * \ * 3 */ - n1.right = n2; - n2.right = n3; + n1.setRightAndUpdateParent(n2); + n2.setRightAndUpdateParent(n3); const newParent = leftRotation(n1); expect(newParent.value).toBe(2); @@ -52,9 +53,9 @@ describe('Tree rotations', () => { // 3 // \ // 4 - n1.right = n2; - n2.right = n3; - n3.right = n4; + n1.setRightAndUpdateParent(n2); + n2.setRightAndUpdateParent(n3); + n3.setRightAndUpdateParent(n4); const newParent = leftRotation(n2); @@ -78,9 +79,9 @@ describe('Tree rotations', () => { // 1* // \ // 2 - n4.left = n3; - n3.left = n1; - n1.right = n2; + n4.setLeftAndUpdateParent(n3); + n3.setLeftAndUpdateParent(n1); + n1.setRightAndUpdateParent(n2); const newParent = leftRotation(n1); expect(newParent).toBe(n2); @@ -106,9 +107,9 @@ describe('Tree rotations', () => { // 2 // / // 1 - n4.left = n3; - n3.left = n2; - n2.left = n1; + n4.setLeftAndUpdateParent(n3); + n3.setLeftAndUpdateParent(n2); + n2.setLeftAndUpdateParent(n1); const newParent = rightRotation(n3); @@ -133,8 +134,8 @@ describe('Tree rotations', () => { // 2 // / // 1 - n3.left = n2; - n2.left = n1; + n3.setLeftAndUpdateParent(n2); + n2.setLeftAndUpdateParent(n1); const newParent = rightRotation(n3); @@ -150,8 +151,8 @@ describe('Tree rotations', () => { // 3* // / // 2 
- n1.right = n3; - n3.left = n2; + n1.setRightAndUpdateParent(n3); + n3.setLeftAndUpdateParent(n2); const newParent = rightRotation(n3); @@ -177,9 +178,9 @@ describe('Tree rotations', () => { // 1 // \ // 2 - n4.left = n3; - n3.left = n1; - n1.right = n2; + n4.setLeftAndUpdateParent(n3); + n3.setLeftAndUpdateParent(n1); + n1.setRightAndUpdateParent(n2); const newParent = leftRightRotation(n3); @@ -203,8 +204,8 @@ describe('Tree rotations', () => { // 3 // / // 2 - n1.right = n3; - n3.left = n2; + n1.setRightAndUpdateParent(n3); + n3.setLeftAndUpdateParent(n2); const newParent = rightLeftRotation(n1); expect(newParent).toBe(n2); diff --git a/src/data-structures/zz-mixed-ds/lru-cache-1.js b/src/data-structures/zz-mixed-ds/lru-cache-1.js index 7efb12a7..d6453b5d 100644 --- a/src/data-structures/zz-mixed-ds/lru-cache-1.js +++ b/src/data-structures/zz-mixed-ds/lru-cache-1.js @@ -26,7 +26,7 @@ * * @param {number} capacity */ -let LRUCache = function (capacity) { +const LRUCache = function (capacity) { this.map = new Map(); this.capacity = capacity; this.cache = []; diff --git a/src/data-structures/zz-mixed-ds/lru-cache.js b/src/data-structures/zz-mixed-ds/lru-cache.js index 679727c1..817f3c1b 100644 --- a/src/data-structures/zz-mixed-ds/lru-cache.js +++ b/src/data-structures/zz-mixed-ds/lru-cache.js @@ -27,7 +27,7 @@ * * @param {number} capacity */ -var LRUCache = function(capacity) { +const LRUCache = function (capacity) { this.map = new Map(); this.capacity = capacity; }; @@ -36,13 +36,13 @@ var LRUCache = function(capacity) { * @param {number} key * @return {number} */ -LRUCache.prototype.get = function(key) { -const value = this.map.get(key); -if (value) { - this.moveToTop(key); - return value; -} -return -1; +LRUCache.prototype.get = function (key) { + const value = this.map.get(key); + if (value) { + this.moveToTop(key); + return value; + } + return -1; }; /** @@ -50,23 +50,23 @@ return -1; * @param {number} value * @return {void} */ -LRUCache.prototype.put = 
function(key, value) { -this.map.set(key, value); -this.rotate(key); +LRUCache.prototype.put = function (key, value) { + this.map.set(key, value); + this.rotate(key); }; -LRUCache.prototype.rotate = function(key) { -this.moveToTop(key); -while(this.map.size > this.capacity) { - const it = this.map.keys(); - this.map.delete(it.next().value); -} -} +LRUCache.prototype.rotate = function (key) { + this.moveToTop(key); + while (this.map.size > this.capacity) { + const it = this.map.keys(); + this.map.delete(it.next().value); + } +}; -LRUCache.prototype.moveToTop = function(key) { -if (this.map.has(key)) { - const value = this.map.get(key); - this.map.delete(key); - this.map.set(key, value); -} -} +LRUCache.prototype.moveToTop = function (key) { + if (this.map.has(key)) { + const value = this.map.get(key); + this.map.delete(key); + this.map.set(key, value); + } +}; diff --git a/src/exercises/08-dynamic-programming/permutations.js b/src/exercises/08-dynamic-programming/permutations.js deleted file mode 100644 index 86adc5bd..00000000 --- a/src/exercises/08-dynamic-programming/permutations.js +++ /dev/null @@ -1,27 +0,0 @@ -/** - * 8.7 Permutations without Dups: Write a method to compute all permutations of a string of unique characters. 
- * - * @param string - * @param prefix - * @param memo - * @returns {*} - */ -function permutations(string = '', prefix = '', memo = {}) { - if (string.length < 2) { - return [prefix + string]; - } else if (string.length == 2) { - return [prefix + string, prefix + string[1] + string[0]]; - } else if (memo[string]) { - return memo[string].map((e) => prefix + e); - } else { - let results = []; - for (var i = 0; i < string.length; i++) { - const letter = string[i]; - results = results.concat(permutations(string.replace(letter, ''), letter, memo)); - } - memo[string] = results; - return results.map((e) => prefix + e); - } -} - -module.exports = permutations; diff --git a/src/runtimes/01-is-empty.js b/src/runtimes/01-is-empty.js index d9dc9ff6..f15d9ef8 100644 --- a/src/runtimes/01-is-empty.js +++ b/src/runtimes/01-is-empty.js @@ -14,7 +14,7 @@ function isEmpty2(thing) { return !thing || thing.length < 1 || !Object.keys(thing).length; } - +// tag::isEmpty[] /** * Return true if an array is empty and false otherwise * @param {array|string} thing @@ -28,5 +28,6 @@ function isEmpty2(thing) { function isEmpty(thing) { return !thing || thing.length < 1; } +// end::isEmpty[] module.exports = isEmpty; diff --git a/src/runtimes/02-binary-search.js b/src/runtimes/02-binary-search.js index ec3ad3f1..4a72c859 100644 --- a/src/runtimes/02-binary-search.js +++ b/src/runtimes/02-binary-search.js @@ -1,10 +1,15 @@ +// tag::binarySearchRecursive[] /** * Recursive Binary Search + * Runtime: O(log n) * - * @return index of the found element or -1 if not found + * @example + * binarySearch([1, 2, 3], 2); //↪️ 1 + * binarySearch([1, 2, 3], 31); //↪️ -1 * @param {array} array collection of sorted elements * @param {string|number} search value to search for * @param {number} offset keep track of array's original index + * @return index of the found element or -1 if not found */ function binarySearchRecursive(array, search, offset = 0) { // split array in half @@ -23,6 +28,7 @@ function 
binarySearchRecursive(array, search, offset = 0) { const left = array.slice(0, half); return binarySearchRecursive(left, search, offset); } +// end::binarySearchRecursive[] /** * Iterative Binary Search diff --git a/src/runtimes/03-has-duplicates.js b/src/runtimes/03-has-duplicates.js index e43dcaf7..aafdd86d 100644 --- a/src/runtimes/03-has-duplicates.js +++ b/src/runtimes/03-has-duplicates.js @@ -1,14 +1,15 @@ const assert = require('assert'); +// tag::hasDuplicates[] /** * Finds out if an array has duplicates - * + * Runtime: O(n) + * @example + * hasDuplicates([]); //↪️ false + * hasDuplicates([1, 1]); //↪️ true + * hasDuplicates([1, 2]); //↪️ false * @param {Array} array * @returns {boolean} true if has duplicates, false otherwise - * @example - * hasDuplicates([]) => false - * hasDuplicates([1, 1]) => true - * hasDuplicates([1, 2]) => false */ function hasDuplicates(array) { const words = new Map(); @@ -21,6 +22,7 @@ function hasDuplicates(array) { } return false; } +// end::hasDuplicates[] assert.equal(hasDuplicates([]), false); assert.equal(hasDuplicates([1, 1]), true); diff --git a/src/runtimes/04-merge-sort.js b/src/runtimes/04-merge-sort.js index c59ea601..0279161f 100644 --- a/src/runtimes/04-merge-sort.js +++ b/src/runtimes/04-merge-sort.js @@ -1,12 +1,13 @@ const assert = require('assert'); +// tag::merge[] /** * Merge two arrays in asc order + * @example + * merge([2,5,9], [1,6,7]) => [1, 2, 5, 6, 7, 9] * @param {array} array1 * @param {array} array2 * @returns {array} merged arrays in asc order - * @example - * merge([2,5,9], [1,6,7]) => [1, 2, 5, 6, 7, 9] */ function merge(array1 = [], array2 = []) { const merged = []; @@ -24,14 +25,16 @@ function merge(array1 = [], array2 = []) { } return merged; } +// end::merge[] +// tag::sort[] /** * Sort array in asc order using merge-sort - * @param {array} array * @example * sort([3, 2, 1]) => [1, 2, 3] * sort([3]) => [3] * sort([3, 2]) => [2, 3] + * @param {array} array */ function sort(array = []) { const 
size = array.length; @@ -46,6 +49,7 @@ function sort(array = []) { const mid = size / 2; return merge(sort(array.slice(0, mid)), sort(array.slice(mid))); } +// end::sort[] assert.deepStrictEqual(sort([3, 2, 1]), [1, 2, 3]); assert.deepStrictEqual(sort([3]), [3]); diff --git a/src/runtimes/05-has-duplicates-naive.js b/src/runtimes/05-has-duplicates-naive.js index b8dd64aa..848c9b42 100644 --- a/src/runtimes/05-has-duplicates-naive.js +++ b/src/runtimes/05-has-duplicates-naive.js @@ -1,14 +1,15 @@ const assert = require('assert'); +// tag::hasDuplicates[] /** * Finds out if an array has duplicates - * + * Runtime: O(n^2) + * @example + * hasDuplicates([]); //↪️ false + * hasDuplicates([1, 1]); //↪️ true + * hasDuplicates([1, 2]); //↪️ false * @param {Array} array * @returns {boolean} true if has duplicates, false otherwise - * @example - * hasDuplicates([]) => false - * hasDuplicates([1, 1]) => true - * hasDuplicates([1, 2]) => false */ function hasDuplicates(array) { for (let outter = 0; outter < array.length; outter++) { @@ -21,6 +22,8 @@ function hasDuplicates(array) { return false; } +// end::hasDuplicates[] + assert.equal(hasDuplicates([]), false); assert.equal(hasDuplicates([1, 1]), true); diff --git a/src/runtimes/06-multi-variable-equation-solver.js b/src/runtimes/06-multi-variable-equation-solver.js index 29a2e185..4470eb50 100644 --- a/src/runtimes/06-multi-variable-equation-solver.js +++ b/src/runtimes/06-multi-variable-equation-solver.js @@ -1,16 +1,18 @@ const assert = require('assert'); +// tag::findXYZ[] /** * Brute force approach to find solutions for * this multi-variable equation: * 3x + 9y + 8z = 79 * + * Runtime: O(n^3) + * @example + * findXYZ({ start: -5, end: 5 }) //↪️ [] + * findXYZ({ end: 6 }) //↪️ [{ x: 1, y: 4, z: 5 }, { x: 4, y: 3, z: 5 }] * @param {Number} start where to start searching for solution * @param {Number} end last value to try (exclusive) * @returns {Array} array of objects with solutions e.g. 
[{x:1, y:1, z:1}] - * @example - * findXYZ({ start: -5, end: 5 }) => [] - * findXYZ({ end: 6 }) => [{ x: 1, y: 4, z: 5 }, { x: 4, y: 3, z: 5 }] */ function findXYZ({ start = 0, end = 10 } = {}) { const solutions = []; @@ -25,6 +27,7 @@ function findXYZ({ start = 0, end = 10 } = {}) { } return solutions; } +// end::findXYZ[] assert.equal(findXYZ().length, 9); assert.deepStrictEqual(findXYZ({ start: -5, end: 5 }), []); diff --git a/src/runtimes/07-sub-sets.js b/src/runtimes/07-sub-sets.js index 82ea0af8..f5c24262 100644 --- a/src/runtimes/07-sub-sets.js +++ b/src/runtimes/07-sub-sets.js @@ -1,26 +1,30 @@ const assert = require('assert'); +// tag::snippet[] /** * Finds all distinct subsets of a given set + * Runtime: O(2^n) + * + * @example + * findSubsets('a') //↪️ ['', 'a'] + * findSubsets([1, 'b']) //↪️ ['', '1', 'b', '1b'] + * findSubsets('abc') //↪️ ['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc'] * * @param {string|array} n * @returns {array} all the subsets (including empty and set itself). 
- * @example - * findSubsets('a') => ['', 'a'] - * findSubsets([1, 'b']) => ['', '1', 'b', '1b'] - * findSubsets('abc') => ['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc'] */ function findSubsets(n = '') { const array = Array.from(n); - const base = ['']; + const base = ['']; // <1> const results = array.reduce((previous, element) => { - const previousPlusElement = previous.map(el => `${el}${element}`); - return previous.concat(previousPlusElement); + const previousPlusElement = previous.map(el => `${el}${element}`); // <2> + return previous.concat(previousPlusElement); // <3> }, base); return results; } +// end::snippet[] assert.deepStrictEqual(findSubsets(), ['']); assert.deepStrictEqual(findSubsets('a'), ['', 'a']); diff --git a/src/runtimes/08-permutations.js b/src/runtimes/08-permutations.js index 94e8758f..77ed34c7 100644 --- a/src/runtimes/08-permutations.js +++ b/src/runtimes/08-permutations.js @@ -1,15 +1,16 @@ const assert = require('assert'); +// tag::snippet[] /** * Find all the different permutations a word can have - * - * @param {string} word string or array of chars to find permutations - * @param {string} prefix used internally for recursion - * @returns {array} collection of all the ways the letters can be arranged + * Runtime: O(n!) 
* @example * getPermutations('a') => ['a'] * getPermutations('ab') => ['ab', 'ba'] * getPermutations('mad') => ['mad', 'mda', 'amd', 'adm', 'dma', 'dam'] + * @param {string} word string or array of chars to find permutations + * @param {string} prefix used internally for recursion + * @returns {array} collection of all the ways the letters can be arranged */ function getPermutations(word = '', prefix = '') { if (word.length <= 1) { @@ -20,6 +21,8 @@ function getPermutations(word = '', prefix = '') { return result.concat(getPermutations(reminder, prefix + char)); }, []); } +// end::snippet[] + assert.deepStrictEqual(getPermutations(), ['']); assert.deepStrictEqual(getPermutations('a'), ['a']); diff --git a/src/runtimes/linear/find-max.js b/src/runtimes/linear/find-max.js index 117cb59b..1b1606f6 100644 --- a/src/runtimes/linear/find-max.js +++ b/src/runtimes/linear/find-max.js @@ -4,7 +4,7 @@ function findMax(n) { for (let i = 0; i < n.length; i++) { counter++; - if(max === undefined || max < n[i]) { + if (max === undefined || max < n[i]) { max = n[i]; } } @@ -14,7 +14,8 @@ function findMax(n) { } const assert = require('assert'); + assert.equal(findMax([3, 1, 2]), 3); -assert.equal(findMax([4,5,6,1,9,2,8,3,7]), 9); -assert.equal(findMax([4,2,8,3,7,0,-1]), 8); +assert.equal(findMax([4, 5, 6, 1, 9, 2, 8, 3, 7]), 9); +assert.equal(findMax([4, 2, 8, 3, 7, 0, -1]), 8); diff --git a/src/runtimes/wrapper.js b/src/runtimes/wrapper.js index 367649f6..6aa02e34 100755 --- a/src/runtimes/wrapper.js +++ b/src/runtimes/wrapper.js @@ -41,7 +41,7 @@ function diff(obj1, obj2) { // console.log(process.argv); const args = process.argv.slice(2); const fargs = args.slice(1).map(a => JSON.parse(a)); -console.log({args, fargs}); +console.log({ args, fargs }); // const ELEMENTS = 1e10; // 11,803.013ms // const ELEMENTS = 1e9; // 1,000,000,000-elements: 634.663ms @@ -59,7 +59,7 @@ const chunks = []; process.stdin.on('readable', () => { const chunk = process.stdin.read(); if (chunk !== 
null) { - process.stdout.write(`.`); + process.stdout.write('.'); chunks.push(chunk); } });