From 98ef9042ba9749239def4daa869bc6b5cc15cd30 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Thu, 21 Mar 2019 12:50:41 -0400 Subject: [PATCH 001/326] update lodash --- package-lock.json | 51 ++++++++++++++++++++++++++++++++--------------- package.json | 14 ++++++++++++- 2 files changed, 48 insertions(+), 17 deletions(-) diff --git a/package-lock.json b/package-lock.json index 4c49ee2d..1b99ec30 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { - "name": "algorithms.js", - "version": "1.0.0", + "name": "dsa.js", + "version": "0.0.1", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -2054,7 +2054,8 @@ "ansi-regex": { "version": "2.1.1", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "aproba": { "version": "1.2.0", @@ -2075,12 +2076,14 @@ "balanced-match": { "version": "1.0.0", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "brace-expansion": { "version": "1.1.11", "bundled": true, "dev": true, + "optional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -2095,17 +2098,20 @@ "code-point-at": { "version": "1.1.0", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "concat-map": { "version": "0.0.1", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "console-control-strings": { "version": "1.1.0", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "core-util-is": { "version": "1.0.2", @@ -2222,7 +2228,8 @@ "inherits": { "version": "2.0.3", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "ini": { "version": "1.3.5", @@ -2234,6 +2241,7 @@ "version": "1.0.0", "bundled": true, "dev": true, + "optional": true, "requires": { "number-is-nan": "^1.0.0" } @@ -2248,6 +2256,7 @@ "version": "3.0.4", "bundled": true, "dev": true, + "optional": true, "requires": { "brace-expansion": "^1.1.7" } @@ -2255,12 +2264,14 @@ "minimist": { "version": "0.0.8", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "minipass": { "version": "2.2.4", "bundled": true, "dev": true, + "optional": true, "requires": { "safe-buffer": "^5.1.1", "yallist": "^3.0.0" @@ -2279,6 +2290,7 @@ "version": "0.5.1", "bundled": true, "dev": true, + "optional": true, "requires": { "minimist": "0.0.8" } @@ -2359,7 +2371,8 @@ "number-is-nan": { "version": "1.0.1", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "object-assign": { "version": "4.1.1", @@ -2371,6 +2384,7 @@ "version": "1.4.0", "bundled": true, "dev": true, + "optional": true, "requires": { "wrappy": "1" } @@ -2456,7 +2470,8 @@ "safe-buffer": { "version": "5.1.1", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "safer-buffer": { "version": "2.1.2", @@ -2492,6 +2507,7 @@ "version": "1.0.2", "bundled": true, "dev": true, + "optional": true, "requires": { "code-point-at": "^1.0.0", "is-fullwidth-code-point": "^1.0.0", @@ -2511,6 +2527,7 @@ "version": "3.0.1", "bundled": true, "dev": true, + "optional": true, "requires": { "ansi-regex": "^2.0.0" } @@ -2554,12 +2571,14 @@ "wrappy": { "version": "1.0.2", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "yallist": { "version": "3.0.2", "bundled": true, - "dev": true + "dev": true, + "optional": true } } }, @@ -3875,9 +3894,9 @@ } }, "lodash": { - "version": "4.17.10", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "version": "4.17.11", + "resolved": 
"https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", + "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" }, "lodash.sortby": { "version": "4.7.0", diff --git a/package.json b/package.json index 118f9922..38cc36f8 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "description": "Algorithms in JS", "main": "./src/data-structures/graphs/graph.js", "dependencies": { - "lodash": "4.17.10" + "lodash": "4.17.11" }, "devDependencies": { "benchmark": "2.1.4", @@ -20,7 +20,19 @@ "coverage": "jest src/data-structures --coverage && open coverage/lcov-report/index.html", "linter": "npx eslint --fix -f codeframe src/" }, +<<<<<<< HEAD "keywords": [], "author": "", +======= + "keywords": [ + "algorithms", + "data structures", + "javascript", + "array", + "linked lists", + "binary search trees" + ], + "author": "Adrian Mejia ", +>>>>>>> dde5f7d... update lodash "license": "ISC" } From 57f2b6519e28bcf61810a1454ee0678781be2137 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Thu, 21 Mar 2019 12:54:51 -0400 Subject: [PATCH 002/326] fix merge issue --- package.json | 5 ----- 1 file changed, 5 deletions(-) diff --git a/package.json b/package.json index 38cc36f8..e73067b7 100644 --- a/package.json +++ b/package.json @@ -20,10 +20,6 @@ "coverage": "jest src/data-structures --coverage && open coverage/lcov-report/index.html", "linter": "npx eslint --fix -f codeframe src/" }, -<<<<<<< HEAD - "keywords": [], - "author": "", -======= "keywords": [ "algorithms", "data structures", @@ -33,6 +29,5 @@ "binary search trees" ], "author": "Adrian Mejia ", ->>>>>>> dde5f7d... update lodash "license": "ISC" } From e1feed6c063be316a5ed6efddf040bba5e36f290 Mon Sep 17 00:00:00 2001 From: Ivan J Date: Mon, 25 Mar 2019 10:27:04 +1100 Subject: [PATCH 003/326] Minor correction in Symbol description --- src/data-structures/graphs/graph.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/data-structures/graphs/graph.js b/src/data-structures/graphs/graph.js index 22d34a25..769d01b2 100644 --- a/src/data-structures/graphs/graph.js +++ b/src/data-structures/graphs/graph.js @@ -257,7 +257,7 @@ class Graph { } } -Graph.UNDIRECTED = Symbol('directed graph'); // one-way edges -Graph.DIRECTED = Symbol('undirected graph'); // two-ways edges +Graph.UNDIRECTED = Symbol('undirected graph'); // one-way edges +Graph.DIRECTED = Symbol('directed graph'); // two-ways edges module.exports = Graph; From 266544f95435d1c7a7019c1d7fd965eb48a5a01b Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Sun, 24 Mar 2019 22:57:09 -0400 Subject: [PATCH 004/326] fix readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index fcaf0454..ea07f4e0 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ +# Data Structures and Algorithms in JavaScript +This repository covers the implementation of the classical algorithms and data structures. -# Data Structures and Algorithms in JavaScript + -This repository covers the implementation of the most important algorithms and data structures. -This goes along with [these posts series](https://adrianmejia.com/tags/tutorial-algorithms/) that explain each implementation in details. 
[![Interactive Data Structures](https://user-images.githubusercontent.com/418605/46118890-ba721180-c1d6-11e8-82bc-6a671428b422.png)](https://embed.kumu.io/85f1a4de5fb8430a10a1bf9c5118e015)

From b1493be1b0726e118374dc0503bce48ec96e6b3a Mon Sep 17 00:00:00 2001
From: Adrian Mejia
Date: Sun, 24 Mar 2019 22:57:29 -0400
Subject: [PATCH 005/326] add book cover

---
 book/cover.png | Bin 0 -> 51983 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 book/cover.png

diff --git a/book/cover.png b/book/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..7351961ad3f623c14e0de99a0283a2d42df08b65
GIT binary patch
literal 51983
[51983 bytes of base85-encoded PNG image data omitted]
From: Adrian Mejia
Date: Sun, 24 Mar 2019 23:10:16 -0400
Subject: [PATCH 006/326] Update README.md

---
 README.md | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index ea07f4e0..bf2dee37 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,14 @@
 # Data Structures and Algorithms in JavaScript
-This repository covers the implementation of the classical algorithms and data structures.
+This repository covers the implementation of the classical algorithms and data structures in JavaScript.
 
-
-
-[![Interactive Data Structures](https://user-images.githubusercontent.com/418605/46118890-ba721180-c1d6-11e8-82bc-6a671428b422.png)](https://embed.kumu.io/85f1a4de5fb8430a10a1bf9c5118e015)
-
 ## Data Structures
 
 We are covering the following data structures.
 
+[![Interactive Data Structures](https://user-images.githubusercontent.com/418605/46118890-ba721180-c1d6-11e8-82bc-6a671428b422.png)](https://embed.kumu.io/85f1a4de5fb8430a10a1bf9c5118e015)
+
 ### Linear Data Structures
 1. **Arrays**: Built-in in most languages so not implemented here. [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/arrays/array.js) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Array).
 2. **Linked Lists**: each data node has a link to the next (and previous). [Code](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/linked-lists) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Linked-Lists).
 3. **Queue**: data flows in a "first-in, first-out" (FIFO) manner. [Code](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/queues) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Queues).
 4. **Stacks**: data flows in a "last-in, first-out" (LIFO) manner. [Code](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Stacks) | [Details](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/stacks).
@@ -27,3 +25,24 @@ 1. **Hash Maps**: implements map using a hash function. [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/hash-maps/hashmap.js) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#HashMaps)
 2. **Tree Maps**: implement map using a self-balanced BST. WIP
 3. **Graphs**: data *nodes* that can have a connection or *edge* to zero or more adjacent nodes. Unlike trees, nodes can have multiple parents, loops.
 [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/graphs/graph.js) | [Details](https://adrianmejia.com/blog/2018/05/14/data-structures-for-beginners-graphs-time-complexity-tutorial/)
+
+## Algorithms
+
+We cover the following algorithms and techniques.
+
+- Sorting algorithms
+  - Bubble Sort
+  - Insertion Sort
+  - Selection Sort
+  - Merge Sort
+  - Quicksort
+- Greedy Algorithms
+- Divide and Conquer
+- Dynamic Programming
+- Backtracking algorithms
+
+## Book
+
+You can check out the book that goes deeper into each topic and provides additional illustrations and explanations.
+
+ 

From de6e0513038e0a6a76e21e7453bb98c3d04aa72e Mon Sep 17 00:00:00 2001
From: Adrian Mejia
Date: Sun, 24 Mar 2019 23:15:07 -0400
Subject: [PATCH 007/326] improve readme

---
 README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index bf2dee37..db52127f 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ We are covering the following data structures.
 [![Interactive Data Structures](https://user-images.githubusercontent.com/418605/46118890-ba721180-c1d6-11e8-82bc-6a671428b422.png)](https://embed.kumu.io/85f1a4de5fb8430a10a1bf9c5118e015)
 
 ### Linear Data Structures
-1. **Arrays**: Built-in in most languages so not implemented here. [Code](https://github.com/amejiarosario/algorithms.js/blob/master/src/data-structures/arrays/array.js) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Array).
+1. **Arrays**: Built-in in most languages so not implemented here. [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Array).
 2. **Linked Lists**: each data node has a link to the next (and previous). [Code](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/linked-lists) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Linked-Lists).
 3. **Queue**: data flows in a "first-in, first-out" (FIFO) manner. [Code](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/queues) | [Details](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Queues).
 4. **Stacks**: data flows in a "last-in, first-out" (LIFO) manner. [Code](https://adrianmejia.com/blog/2018/04/28/data-structures-time-complexity-for-beginners-arrays-hashmaps-linked-lists-stacks-queues-tutorial/#Stacks) | [Details](https://github.com/amejiarosario/algorithms.js/tree/master/src/data-structures/stacks).
@@ -31,11 +31,11 @@ We cover the following algorithms and techniques.
- Sorting algorithms - - Bubble Sort - - Insertion Sort - - Selection Sort - - Merge Sort - - Quicksort + - Bubble Sort: [Code](https://github.com/amejiarosario/dsa.js/blob/master/src/algorithms/sorting/bubble-sort.js) + - Insertion Sort: [Code](https://github.com/amejiarosario/dsa.js/blob/master/src/algorithms/sorting/insertion-sort.js) + - Selection Sort: [Code](https://github.com/amejiarosario/dsa.js/blob/master/src/algorithms/sorting/selection-sort.js) + - Merge Sort: [Code](https://github.com/amejiarosario/dsa.js/blob/master/src/algorithms/sorting/merge-sort.js) + - Quicksort: [Code](https://github.com/amejiarosario/dsa.js/blob/master/src/algorithms/sorting/quick-sort.js) - Greedy Algorithms - Divide and Conquer - Dynamic Programming From f12726b1163d5c63ef3a5a7873a90a35022ddd7e Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Sun, 24 Mar 2019 23:34:10 -0400 Subject: [PATCH 008/326] remove files --- lab/timsort.java | 928 ----------------------------------------------- lab/timsort.py | 679 ---------------------------------- 2 files changed, 1607 deletions(-) delete mode 100644 lab/timsort.java delete mode 100644 lab/timsort.py diff --git a/lab/timsort.java b/lab/timsort.java deleted file mode 100644 index 1d4e710a..00000000 --- a/lab/timsort.java +++ /dev/null @@ -1,928 +0,0 @@ -/* - * Copyright 2009 Google Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Sun designates this - * particular file as subject to the "Classpath" exception as provided - * by Sun in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - */ - -package java.util; - -/** - * A stable, adaptive, iterative mergesort that requires far fewer than - * n lg(n) comparisons when running on partially sorted arrays, while - * offering performance comparable to a traditional mergesort when run - * on random arrays. Like all proper mergesorts, this sort is stable and - * runs O(n log n) time (worst case). In the worst case, this sort requires - * temporary storage space for n/2 object references; in the best case, - * it requires only a small constant amount of space. 
- *
- * This implementation was adapted from Tim Peters's list sort for
- * Python, which is described in detail here:
- *
- *   http://svn.python.org/projects/python/trunk/Objects/listsort.txt
- *
- * Tim's C code may be found here:
- *
- *   http://svn.python.org/projects/python/trunk/Objects/listobject.c
- *
- * The underlying techniques are described in this paper (and may have
- * even earlier origins):
- *
- *  "Optimistic Sorting and Information Theoretic Complexity"
- *  Peter McIlroy
- *  SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
- *  pp 467-474, Austin, Texas, 25-27 January 1993.
- *
- * While the API to this class consists solely of static methods, it is
- * (privately) instantiable; a TimSort instance holds the state of an ongoing
- * sort, assuming the input array is large enough to warrant the full-blown
- * TimSort. Small arrays are sorted in place, using a binary insertion sort.
- *
- * @author Josh Bloch
- */
-class TimSort<T> {
-    /**
-     * This is the minimum sized sequence that will be merged. Shorter
-     * sequences will be lengthened by calling binarySort. If the entire
-     * array is less than this length, no merges will be performed.
-     *
-     * This constant should be a power of two. It was 64 in Tim Peter's C
-     * implementation, but 32 was empirically determined to work better in
-     * this implementation. In the unlikely event that you set this constant
-     * to be a number that's not a power of two, you'll need to change the
-     * {@link #minRunLength} computation.
-     *
-     * If you decrease this constant, you must change the stackLen
-     * computation in the TimSort constructor, or you risk an
-     * ArrayOutOfBounds exception. See listsort.txt for a discussion
-     * of the minimum stack length required as a function of the length
-     * of the array being sorted and the minimum merge sequence length.
-     */
-    private static final int MIN_MERGE = 32;
-
-    /**
-     * The array being sorted.
-     */
-    private final T[] a;
-
-    /**
-     * The comparator for this sort.
-     */
-    private final Comparator<? super T> c;
-
-    /**
-     * When we get into galloping mode, we stay there until both runs win less
-     * often than MIN_GALLOP consecutive times.
-     */
-    private static final int MIN_GALLOP = 7;
-
-    /**
-     * This controls when we get *into* galloping mode. It is initialized
-     * to MIN_GALLOP. The mergeLo and mergeHi methods nudge it higher for
-     * random data, and lower for highly structured data.
-     */
-    private int minGallop = MIN_GALLOP;
-
-    /**
-     * Maximum initial size of tmp array, which is used for merging. The array
-     * can grow to accommodate demand.
-     *
-     * Unlike Tim's original C version, we do not allocate this much storage
-     * when sorting smaller arrays. This change was required for performance.
-     */
-    private static final int INITIAL_TMP_STORAGE_LENGTH = 256;
-
-    /**
-     * Temp storage for merges.
-     */
-    private T[] tmp; // Actual runtime type will be Object[], regardless of T
-
-    /**
-     * A stack of pending runs yet to be merged. Run i starts at
-     * address base[i] and extends for len[i] elements. It's always
-     * true (so long as the indices are in bounds) that:
-     *
-     *     runBase[i] + runLen[i] == runBase[i + 1]
-     *
-     * so we could cut the storage for this, but it's a minor amount,
-     * and keeping all the info explicit simplifies the code.
-     */
-    private int stackSize = 0; // Number of pending runs on stack
-    private final int[] runBase;
-    private final int[] runLen;
-
-    /**
-     * Creates a TimSort instance to maintain the state of an ongoing sort.
-     *
-     * @param a the array to be sorted
-     * @param c the comparator to determine the order of the sort
-     */
-    private TimSort(T[] a, Comparator<? super T> c) {
-        this.a = a;
-        this.c = c;
-
-        // Allocate temp storage (which may be increased later if necessary)
-        int len = a.length;
-        @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"})
-        T[] newArray = (T[]) new Object[len < 2 * INITIAL_TMP_STORAGE_LENGTH ?
-                                        len >>> 1 : INITIAL_TMP_STORAGE_LENGTH];
-        tmp = newArray;
-
-        /*
-         * Allocate runs-to-be-merged stack (which cannot be expanded). The
-         * stack length requirements are described in listsort.txt. The C
-         * version always uses the same stack length (85), but this was
-         * measured to be too expensive when sorting "mid-sized" arrays (e.g.,
-         * 100 elements) in Java. Therefore, we use smaller (but sufficiently
-         * large) stack lengths for smaller arrays. The "magic numbers" in the
-         * computation below must be changed if MIN_MERGE is decreased. See
-         * the MIN_MERGE declaration above for more information.
-         */
-        int stackLen = (len < 120 ? 5 :
-                        len < 1542 ? 10 :
-                        len < 119151 ? 19 : 40);
-        runBase = new int[stackLen];
-        runLen = new int[stackLen];
-    }
-
-    /*
-     * The next two methods (which are package private and static) constitute
-     * the entire API of this class. Each of these methods obeys the contract
-     * of the public method with the same signature in java.util.Arrays.
-     */
-
-    static <T> void sort(T[] a, Comparator<? super T> c) {
-        sort(a, 0, a.length, c);
-    }
-
-    static <T> void sort(T[] a, int lo, int hi, Comparator<? super T> c) {
-        if (c == null) {
-            Arrays.sort(a, lo, hi);
-            return;
-        }
-
-        rangeCheck(a.length, lo, hi);
-        int nRemaining = hi - lo;
-        if (nRemaining < 2)
-            return; // Arrays of size 0 and 1 are always sorted
-
-        // If array is small, do a "mini-TimSort" with no merges
-        if (nRemaining < MIN_MERGE) {
-            int initRunLen = countRunAndMakeAscending(a, lo, hi, c);
-            binarySort(a, lo, hi, lo + initRunLen, c);
-            return;
-        }
-
-        /**
-         * March over the array once, left to right, finding natural runs,
-         * extending short natural runs to minRun elements, and merging runs
-         * to maintain stack invariant.
-         */
-        TimSort<T> ts = new TimSort<T>(a, c);
-        int minRun = minRunLength(nRemaining);
-        do {
-            // Identify next run
-            int runLen = countRunAndMakeAscending(a, lo, hi, c);
-
-            // If run is short, extend to min(minRun, nRemaining)
-            if (runLen < minRun) {
-                int force = nRemaining <= minRun ? nRemaining : minRun;
-                binarySort(a, lo, lo + force, lo + runLen, c);
-                runLen = force;
-            }
-
-            // Push run onto pending-run stack, and maybe merge
-            ts.pushRun(lo, runLen);
-            ts.mergeCollapse();
-
-            // Advance to find next run
-            lo += runLen;
-            nRemaining -= runLen;
-        } while (nRemaining != 0);
-
-        // Merge all remaining runs to complete sort
-        assert lo == hi;
-        ts.mergeForceCollapse();
-        assert ts.stackSize == 1;
-    }
-
-    /**
-     * Sorts the specified portion of the specified array using a binary
-     * insertion sort. This is the best method for sorting small numbers
-     * of elements. It requires O(n log n) compares, but O(n^2) data
-     * movement (worst case).
-     *
-     * If the initial part of the specified range is already sorted,
-     * this method can take advantage of it: the method assumes that the
-     * elements from index {@code lo}, inclusive, to {@code start},
-     * exclusive are already sorted.
- * - * @param a the array in which a range is to be sorted - * @param lo the index of the first element in the range to be sorted - * @param hi the index after the last element in the range to be sorted - * @param start the index of the first element in the range that is - * not already known to be sorted (@code lo <= start <= hi} - * @param c comparator to used for the sort - */ - @SuppressWarnings("fallthrough") - private static void binarySort(T[] a, int lo, int hi, int start, - Comparator c) { - assert lo <= start && start <= hi; - if (start == lo) - start++; - for ( ; start < hi; start++) { - T pivot = a[start]; - - // Set left (and right) to the index where a[start] (pivot) belongs - int left = lo; - int right = start; - assert left <= right; - /* - * Invariants: - * pivot >= all in [lo, left). - * pivot < all in [right, start). - */ - while (left < right) { - int mid = (left + right) >>> 1; - if (c.compare(pivot, a[mid]) < 0) - right = mid; - else - left = mid + 1; - } - assert left == right; - - /* - * The invariants still hold: pivot >= all in [lo, left) and - * pivot < all in [left, start), so pivot belongs at left. Note - * that if there are elements equal to pivot, left points to the - * first slot after them -- that's why this sort is stable. - * Slide elements over to make room to make room for pivot. - */ - int n = start - left; // The number of elements to move - // Switch is just an optimization for arraycopy in default case - switch(n) { - case 2: a[left + 2] = a[left + 1]; - case 1: a[left + 1] = a[left]; - break; - default: System.arraycopy(a, left, a, left + 1, n); - } - a[left] = pivot; - } - } - - /** - * Returns the length of the run beginning at the specified position in - * the specified array and reverses the run if it is descending (ensuring - * that the run will always be ascending when the method returns). - * - * A run is the longest ascending sequence with: - * - * a[lo] <= a[lo + 1] <= a[lo + 2] <= ... - * - * or the longest descending sequence with: - * - * a[lo] > a[lo + 1] > a[lo + 2] > ... - * - * For its intended use in a stable mergesort, the strictness of the - * definition of "descending" is needed so that the call can safely - * reverse a descending sequence without violating stability. - * - * @param a the array in which a run is to be counted and possibly reversed - * @param lo index of the first element in the run - * @param hi index after the last element that may be contained in the run. - It is required that @code{lo < hi}. - * @param c the comparator to used for the sort - * @return the length of the run beginning at the specified position in - * the specified array - */ - private static int countRunAndMakeAscending(T[] a, int lo, int hi, - Comparator c) { - assert lo < hi; - int runHi = lo + 1; - if (runHi == hi) - return 1; - - // Find end of run, and reverse range if descending - if (c.compare(a[runHi++], a[lo]) < 0) { // Descending - while(runHi < hi && c.compare(a[runHi], a[runHi - 1]) < 0) - runHi++; - reverseRange(a, lo, runHi); - } else { // Ascending - while (runHi < hi && c.compare(a[runHi], a[runHi - 1]) >= 0) - runHi++; - } - - return runHi - lo; - } - - /** - * Reverse the specified range of the specified array. 
- * - * @param a the array in which a range is to be reversed - * @param lo the index of the first element in the range to be reversed - * @param hi the index after the last element in the range to be reversed - */ - private static void reverseRange(Object[] a, int lo, int hi) { - hi--; - while (lo < hi) { - Object t = a[lo]; - a[lo++] = a[hi]; - a[hi--] = t; - } - } - - /** - * Returns the minimum acceptable run length for an array of the specified - * length. Natural runs shorter than this will be extended with - * {@link #binarySort}. - * - * Roughly speaking, the computation is: - * - * If n < MIN_MERGE, return n (it's too small to bother with fancy stuff). - * Else if n is an exact power of 2, return MIN_MERGE/2. - * Else return an int k, MIN_MERGE/2 <= k <= MIN_MERGE, such that n/k - * is close to, but strictly less than, an exact power of 2. - * - * For the rationale, see listsort.txt. - * - * @param n the length of the array to be sorted - * @return the length of the minimum run to be merged - */ - private static int minRunLength(int n) { - assert n >= 0; - int r = 0; // Becomes 1 if any 1 bits are shifted off - while (n >= MIN_MERGE) { - r |= (n & 1); - n >>= 1; - } - return n + r; - } - - /** - * Pushes the specified run onto the pending-run stack. - * - * @param runBase index of the first element in the run - * @param runLen the number of elements in the run - */ - private void pushRun(int runBase, int runLen) { - this.runBase[stackSize] = runBase; - this.runLen[stackSize] = runLen; - stackSize++; - } - - /** - * Examines the stack of runs waiting to be merged and merges adjacent runs - * until the stack invariants are reestablished: - * - * 1. runLen[i - 3] > runLen[i - 2] + runLen[i - 1] - * 2. runLen[i - 2] > runLen[i - 1] - * - * This method is called each time a new run is pushed onto the stack, - * so the invariants are guaranteed to hold for i < stackSize upon - * entry to the method. - */ - private void mergeCollapse() { - while (stackSize > 1) { - int n = stackSize - 2; - if (n > 0 && runLen[n-1] <= runLen[n] + runLen[n+1]) { - if (runLen[n - 1] < runLen[n + 1]) - n--; - mergeAt(n); - } else if (runLen[n] <= runLen[n + 1]) { - mergeAt(n); - } else { - break; // Invariant is established - } - } - } - - /** - * Merges all runs on the stack until only one remains. This method is - * called once, to complete the sort. - */ - private void mergeForceCollapse() { - while (stackSize > 1) { - int n = stackSize - 2; - if (n > 0 && runLen[n - 1] < runLen[n + 1]) - n--; - mergeAt(n); - } - } - - /** - * Merges the two runs at stack indices i and i+1. Run i must be - * the penultimate or antepenultimate run on the stack. In other words, - * i must be equal to stackSize-2 or stackSize-3. - * - * @param i stack index of the first of the two runs to merge - */ - private void mergeAt(int i) { - assert stackSize >= 2; - assert i >= 0; - assert i == stackSize - 2 || i == stackSize - 3; - - int base1 = runBase[i]; - int len1 = runLen[i]; - int base2 = runBase[i + 1]; - int len2 = runLen[i + 1]; - assert len1 > 0 && len2 > 0; - assert base1 + len1 == base2; - - /* - * Record the length of the combined runs; if i is the 3rd-last - * run now, also slide over the last run (which isn't involved - * in this merge). The current run (i+1) goes away in any case. - */ - runLen[i] = len1 + len2; - if (i == stackSize - 3) { - runBase[i + 1] = runBase[i + 2]; - runLen[i + 1] = runLen[i + 2]; - } - stackSize--; - - /* - * Find where the first element of run2 goes in run1. 
Prior elements - * in run1 can be ignored (because they're already in place). - */ - int k = gallopRight(a[base2], a, base1, len1, 0, c); - assert k >= 0; - base1 += k; - len1 -= k; - if (len1 == 0) - return; - - /* - * Find where the last element of run1 goes in run2. Subsequent elements - * in run2 can be ignored (because they're already in place). - */ - len2 = gallopLeft(a[base1 + len1 - 1], a, base2, len2, len2 - 1, c); - assert len2 >= 0; - if (len2 == 0) - return; - - // Merge remaining runs, using tmp array with min(len1, len2) elements - if (len1 <= len2) - mergeLo(base1, len1, base2, len2); - else - mergeHi(base1, len1, base2, len2); - } - - /** - * Locates the position at which to insert the specified key into the - * specified sorted range; if the range contains an element equal to key, - * returns the index of the leftmost equal element. - * - * @param key the key whose insertion point to search for - * @param a the array in which to search - * @param base the index of the first element in the range - * @param len the length of the range; must be > 0 - * @param hint the index at which to begin the search, 0 <= hint < n. - * The closer hint is to the result, the faster this method will run. - * @param c the comparator used to order the range, and to search - * @return the int k, 0 <= k <= n such that a[b + k - 1] < key <= a[b + k], - * pretending that a[b - 1] is minus infinity and a[b + n] is infinity. - * In other words, key belongs at index b + k; or in other words, - * the first k elements of a should precede key, and the last n - k - * should follow it. - */ - private static int gallopLeft(T key, T[] a, int base, int len, int hint, - Comparator c) { - assert len > 0 && hint >= 0 && hint < len; - int lastOfs = 0; - int ofs = 1; - if (c.compare(key, a[base + hint]) > 0) { - // Gallop right until a[base+hint+lastOfs] < key <= a[base+hint+ofs] - int maxOfs = len - hint; - while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) > 0) { - lastOfs = ofs; - ofs = (ofs << 1) + 1; - if (ofs <= 0) // int overflow - ofs = maxOfs; - } - if (ofs > maxOfs) - ofs = maxOfs; - - // Make offsets relative to base - lastOfs += hint; - ofs += hint; - } else { // key <= a[base + hint] - // Gallop left until a[base+hint-ofs] < key <= a[base+hint-lastOfs] - final int maxOfs = hint + 1; - while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) <= 0) { - lastOfs = ofs; - ofs = (ofs << 1) + 1; - if (ofs <= 0) // int overflow - ofs = maxOfs; - } - if (ofs > maxOfs) - ofs = maxOfs; - - // Make offsets relative to base - int tmp = lastOfs; - lastOfs = hint - ofs; - ofs = hint - tmp; - } - assert -1 <= lastOfs && lastOfs < ofs && ofs <= len; - - /* - * Now a[base+lastOfs] < key <= a[base+ofs], so key belongs somewhere - * to the right of lastOfs but no farther right than ofs. Do a binary - * search, with invariant a[base + lastOfs - 1] < key <= a[base + ofs]. - */ - lastOfs++; - while (lastOfs < ofs) { - int m = lastOfs + ((ofs - lastOfs) >>> 1); - - if (c.compare(key, a[base + m]) > 0) - lastOfs = m + 1; // a[base + m] < key - else - ofs = m; // key <= a[base + m] - } - assert lastOfs == ofs; // so a[base + ofs - 1] < key <= a[base + ofs] - return ofs; - } - - /** - * Like gallopLeft, except that if the range contains an element equal to - * key, gallopRight returns the index after the rightmost equal element. 
- * - * @param key the key whose insertion point to search for - * @param a the array in which to search - * @param base the index of the first element in the range - * @param len the length of the range; must be > 0 - * @param hint the index at which to begin the search, 0 <= hint < n. - * The closer hint is to the result, the faster this method will run. - * @param c the comparator used to order the range, and to search - * @return the int k, 0 <= k <= n such that a[b + k - 1] <= key < a[b + k] - */ - private static int gallopRight(T key, T[] a, int base, int len, - int hint, Comparator c) { - assert len > 0 && hint >= 0 && hint < len; - - int ofs = 1; - int lastOfs = 0; - if (c.compare(key, a[base + hint]) < 0) { - // Gallop left until a[b+hint - ofs] <= key < a[b+hint - lastOfs] - int maxOfs = hint + 1; - while (ofs < maxOfs && c.compare(key, a[base + hint - ofs]) < 0) { - lastOfs = ofs; - ofs = (ofs << 1) + 1; - if (ofs <= 0) // int overflow - ofs = maxOfs; - } - if (ofs > maxOfs) - ofs = maxOfs; - - // Make offsets relative to b - int tmp = lastOfs; - lastOfs = hint - ofs; - ofs = hint - tmp; - } else { // a[b + hint] <= key - // Gallop right until a[b+hint + lastOfs] <= key < a[b+hint + ofs] - int maxOfs = len - hint; - while (ofs < maxOfs && c.compare(key, a[base + hint + ofs]) >= 0) { - lastOfs = ofs; - ofs = (ofs << 1) + 1; - if (ofs <= 0) // int overflow - ofs = maxOfs; - } - if (ofs > maxOfs) - ofs = maxOfs; - - // Make offsets relative to b - lastOfs += hint; - ofs += hint; - } - assert -1 <= lastOfs && lastOfs < ofs && ofs <= len; - - /* - * Now a[b + lastOfs] <= key < a[b + ofs], so key belongs somewhere to - * the right of lastOfs but no farther right than ofs. Do a binary - * search, with invariant a[b + lastOfs - 1] <= key < a[b + ofs]. - */ - lastOfs++; - while (lastOfs < ofs) { - int m = lastOfs + ((ofs - lastOfs) >>> 1); - - if (c.compare(key, a[base + m]) < 0) - ofs = m; // key < a[b + m] - else - lastOfs = m + 1; // a[b + m] <= key - } - assert lastOfs == ofs; // so a[b + ofs - 1] <= key < a[b + ofs] - return ofs; - } - - /** - * Merges two adjacent runs in place, in a stable fashion. The first - * element of the first run must be greater than the first element of the - * second run (a[base1] > a[base2]), and the last element of the first run - * (a[base1 + len1-1]) must be greater than all elements of the second run. - * - * For performance, this method should be called only when len1 <= len2; - * its twin, mergeHi should be called if len1 >= len2. (Either method - * may be called if len1 == len2.) 
- * - * @param base1 index of first element in first run to be merged - * @param len1 length of first run to be merged (must be > 0) - * @param base2 index of first element in second run to be merged - * (must be aBase + aLen) - * @param len2 length of second run to be merged (must be > 0) - */ - private void mergeLo(int base1, int len1, int base2, int len2) { - assert len1 > 0 && len2 > 0 && base1 + len1 == base2; - - // Copy first run into temp array - T[] a = this.a; // For performance - T[] tmp = ensureCapacity(len1); - System.arraycopy(a, base1, tmp, 0, len1); - - int cursor1 = 0; // Indexes into tmp array - int cursor2 = base2; // Indexes int a - int dest = base1; // Indexes int a - - // Move first element of second run and deal with degenerate cases - a[dest++] = a[cursor2++]; - if (--len2 == 0) { - System.arraycopy(tmp, cursor1, a, dest, len1); - return; - } - if (len1 == 1) { - System.arraycopy(a, cursor2, a, dest, len2); - a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge - return; - } - - Comparator c = this.c; // Use local variable for performance - int minGallop = this.minGallop; // " " " " " - outer: - while (true) { - int count1 = 0; // Number of times in a row that first run won - int count2 = 0; // Number of times in a row that second run won - - /* - * Do the straightforward thing until (if ever) one run starts - * winning consistently. - */ - do { - assert len1 > 1 && len2 > 0; - if (c.compare(a[cursor2], tmp[cursor1]) < 0) { - a[dest++] = a[cursor2++]; - count2++; - count1 = 0; - if (--len2 == 0) - break outer; - } else { - a[dest++] = tmp[cursor1++]; - count1++; - count2 = 0; - if (--len1 == 1) - break outer; - } - } while ((count1 | count2) < minGallop); - - /* - * One run is winning so consistently that galloping may be a - * huge win. So try that, and continue galloping until (if ever) - * neither run appears to be winning consistently anymore. - */ - do { - assert len1 > 1 && len2 > 0; - count1 = gallopRight(a[cursor2], tmp, cursor1, len1, 0, c); - if (count1 != 0) { - System.arraycopy(tmp, cursor1, a, dest, count1); - dest += count1; - cursor1 += count1; - len1 -= count1; - if (len1 <= 1) // len1 == 1 || len1 == 0 - break outer; - } - a[dest++] = a[cursor2++]; - if (--len2 == 0) - break outer; - - count2 = gallopLeft(tmp[cursor1], a, cursor2, len2, 0, c); - if (count2 != 0) { - System.arraycopy(a, cursor2, a, dest, count2); - dest += count2; - cursor2 += count2; - len2 -= count2; - if (len2 == 0) - break outer; - } - a[dest++] = tmp[cursor1++]; - if (--len1 == 1) - break outer; - minGallop--; - } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); - if (minGallop < 0) - minGallop = 0; - minGallop += 2; // Penalize for leaving gallop mode - } // End of "outer" loop - this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field - - if (len1 == 1) { - assert len2 > 0; - System.arraycopy(a, cursor2, a, dest, len2); - a[dest + len2] = tmp[cursor1]; // Last elt of run 1 to end of merge - } else if (len1 == 0) { - throw new IllegalArgumentException( - "Comparison method violates its general contract!"); - } else { - assert len2 == 0; - assert len1 > 1; - System.arraycopy(tmp, cursor1, a, dest, len1); - } - } - - /** - * Like mergeLo, except that this method should be called only if - * len1 >= len2; mergeLo should be called if len1 <= len2. (Either method - * may be called if len1 == len2.) 
- * - * @param base1 index of first element in first run to be merged - * @param len1 length of first run to be merged (must be > 0) - * @param base2 index of first element in second run to be merged - * (must be aBase + aLen) - * @param len2 length of second run to be merged (must be > 0) - */ - private void mergeHi(int base1, int len1, int base2, int len2) { - assert len1 > 0 && len2 > 0 && base1 + len1 == base2; - - // Copy second run into temp array - T[] a = this.a; // For performance - T[] tmp = ensureCapacity(len2); - System.arraycopy(a, base2, tmp, 0, len2); - - int cursor1 = base1 + len1 - 1; // Indexes into a - int cursor2 = len2 - 1; // Indexes into tmp array - int dest = base2 + len2 - 1; // Indexes into a - - // Move last element of first run and deal with degenerate cases - a[dest--] = a[cursor1--]; - if (--len1 == 0) { - System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2); - return; - } - if (len2 == 1) { - dest -= len1; - cursor1 -= len1; - System.arraycopy(a, cursor1 + 1, a, dest + 1, len1); - a[dest] = tmp[cursor2]; - return; - } - - Comparator c = this.c; // Use local variable for performance - int minGallop = this.minGallop; // " " " " " - outer: - while (true) { - int count1 = 0; // Number of times in a row that first run won - int count2 = 0; // Number of times in a row that second run won - - /* - * Do the straightforward thing until (if ever) one run - * appears to win consistently. - */ - do { - assert len1 > 0 && len2 > 1; - if (c.compare(tmp[cursor2], a[cursor1]) < 0) { - a[dest--] = a[cursor1--]; - count1++; - count2 = 0; - if (--len1 == 0) - break outer; - } else { - a[dest--] = tmp[cursor2--]; - count2++; - count1 = 0; - if (--len2 == 1) - break outer; - } - } while ((count1 | count2) < minGallop); - - /* - * One run is winning so consistently that galloping may be a - * huge win. So try that, and continue galloping until (if ever) - * neither run appears to be winning consistently anymore. - */ - do { - assert len1 > 0 && len2 > 1; - count1 = len1 - gallopRight(tmp[cursor2], a, base1, len1, len1 - 1, c); - if (count1 != 0) { - dest -= count1; - cursor1 -= count1; - len1 -= count1; - System.arraycopy(a, cursor1 + 1, a, dest + 1, count1); - if (len1 == 0) - break outer; - } - a[dest--] = tmp[cursor2--]; - if (--len2 == 1) - break outer; - - count2 = len2 - gallopLeft(a[cursor1], tmp, 0, len2, len2 - 1, c); - if (count2 != 0) { - dest -= count2; - cursor2 -= count2; - len2 -= count2; - System.arraycopy(tmp, cursor2 + 1, a, dest + 1, count2); - if (len2 <= 1) // len2 == 1 || len2 == 0 - break outer; - } - a[dest--] = a[cursor1--]; - if (--len1 == 0) - break outer; - minGallop--; - } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); - if (minGallop < 0) - minGallop = 0; - minGallop += 2; // Penalize for leaving gallop mode - } // End of "outer" loop - this.minGallop = minGallop < 1 ? 1 : minGallop; // Write back to field - - if (len2 == 1) { - assert len1 > 0; - dest -= len1; - cursor1 -= len1; - System.arraycopy(a, cursor1 + 1, a, dest + 1, len1); - a[dest] = tmp[cursor2]; // Move first elt of run2 to front of merge - } else if (len2 == 0) { - throw new IllegalArgumentException( - "Comparison method violates its general contract!"); - } else { - assert len1 == 0; - assert len2 > 0; - System.arraycopy(tmp, 0, a, dest - (len2 - 1), len2); - } - } - - /** - * Ensures that the external array tmp has at least the specified - * number of elements, increasing its size if necessary. The size - * increases exponentially to ensure amortized linear time complexity. 
- * - * @param minCapacity the minimum required capacity of the tmp array - * @return tmp, whether or not it grew - */ - private T[] ensureCapacity(int minCapacity) { - if (tmp.length < minCapacity) { - // Compute smallest power of 2 > minCapacity - int newSize = minCapacity; - newSize |= newSize >> 1; - newSize |= newSize >> 2; - newSize |= newSize >> 4; - newSize |= newSize >> 8; - newSize |= newSize >> 16; - newSize++; - - if (newSize < 0) // Not bloody likely! - newSize = minCapacity; - else - newSize = Math.min(newSize, a.length >>> 1); - - @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"}) - T[] newArray = (T[]) new Object[newSize]; - tmp = newArray; - } - return tmp; - } - - /** - * Checks that fromIndex and toIndex are in range, and throws an - * appropriate exception if they aren't. - * - * @param arrayLen the length of the array - * @param fromIndex the index of the first element of the range - * @param toIndex the index after the last element of the range - * @throws IllegalArgumentException if fromIndex > toIndex - * @throws ArrayIndexOutOfBoundsException if fromIndex < 0 - * or toIndex > arrayLen - */ - private static void rangeCheck(int arrayLen, int fromIndex, int toIndex) { - if (fromIndex > toIndex) - throw new IllegalArgumentException("fromIndex(" + fromIndex + - ") > toIndex(" + toIndex+")"); - if (fromIndex < 0) - throw new ArrayIndexOutOfBoundsException(fromIndex); - if (toIndex > arrayLen) - throw new ArrayIndexOutOfBoundsException(toIndex); - } -} diff --git a/lab/timsort.py b/lab/timsort.py deleted file mode 100644 index 6d6074e8..00000000 --- a/lab/timsort.py +++ /dev/null @@ -1,679 +0,0 @@ -""" -Intro ------ -This describes an adaptive, stable, natural mergesort, modestly called -timsort (hey, I earned it ). It has supernatural performance on many -kinds of partially ordered arrays (less than lg(N!) comparisons needed, and -as few as N-1), yet as fast as Python's previous highly tuned samplesort -hybrid on random arrays. - -In a nutshell, the main routine marches over the array once, left to right, -alternately identifying the next run, then merging it into the previous -runs "intelligently". Everything else is complication for speed, and some -hard-won measure of memory efficiency. - - -Comparison with Python's Samplesort Hybrid ------------------------------------------- -+ timsort can require a temp array containing as many as N//2 pointers, - which means as many as 2*N extra bytes on 32-bit boxes. It can be - expected to require a temp array this large when sorting random data; on - data with significant structure, it may get away without using any extra - heap memory. This appears to be the strongest argument against it, but - compared to the size of an object, 2 temp bytes worst-case (also expected- - case for random data) doesn't scare me much. - - It turns out that Perl is moving to a stable mergesort, and the code for - that appears always to require a temp array with room for at least N - pointers. (Note that I wouldn't want to do that even if space weren't an - issue; I believe its efforts at memory frugality also save timsort - significant pointer-copying costs, and allow it to have a smaller working - set.) - -+ Across about four hours of generating random arrays, and sorting them - under both methods, samplesort required about 1.5% more comparisons - (the program is at the end of this file). - -+ In real life, this may be faster or slower on random arrays than - samplesort was, depending on platform quirks. 
Since it does fewer - comparisons on average, it can be expected to do better the more - expensive a comparison function is. OTOH, it does more data movement - (pointer copying) than samplesort, and that may negate its small - comparison advantage (depending on platform quirks) unless comparison - is very expensive. - -+ On arrays with many kinds of pre-existing order, this blows samplesort out - of the water. It's significantly faster than samplesort even on some - cases samplesort was special-casing the snot out of. I believe that lists - very often do have exploitable partial order in real life, and this is the - strongest argument in favor of timsort (indeed, samplesort's special cases - for extreme partial order are appreciated by real users, and timsort goes - much deeper than those, in particular naturally covering every case where - someone has suggested "and it would be cool if list.sort() had a special - case for this too ... and for that ..."). - -+ Here are exact comparison counts across all the tests in sortperf.py, - when run with arguments "15 20 1". - - Column Key: - *sort: random data - \sort: descending data - /sort: ascending data - 3sort: ascending, then 3 random exchanges - +sort: ascending, then 10 random at the end - ~sort: many duplicates - =sort: all equal - !sort: worst case scenario - - First the trivial cases, trivial for samplesort because it special-cased - them, and trivial for timsort because it naturally works on runs. Within - an "n" block, the first line gives the # of compares done by samplesort, - the second line by timsort, and the third line is the percentage by - which the samplesort count exceeds the timsort count: - - n \sort /sort =sort -------- ------ ------ ------ - 32768 32768 32767 32767 samplesort - 32767 32767 32767 timsort - 0.00% 0.00% 0.00% (samplesort - timsort) / timsort - - 65536 65536 65535 65535 - 65535 65535 65535 - 0.00% 0.00% 0.00% - - 131072 131072 131071 131071 - 131071 131071 131071 - 0.00% 0.00% 0.00% - - 262144 262144 262143 262143 - 262143 262143 262143 - 0.00% 0.00% 0.00% - - 524288 524288 524287 524287 - 524287 524287 524287 - 0.00% 0.00% 0.00% - -1048576 1048576 1048575 1048575 - 1048575 1048575 1048575 - 0.00% 0.00% 0.00% - - The algorithms are effectively identical in these cases, except that - timsort does one less compare in \sort. - - Now for the more interesting cases. lg(n!) is the information-theoretic - limit for the best any comparison-based sorting algorithm can do on - average (across all permutations). When a method gets significantly - below that, it's either astronomically lucky, or is finding exploitable - structure in the data. - - n lg(n!) 
*sort 3sort +sort %sort ~sort !sort -------- ------- ------ ------- ------- ------ ------- -------- - 32768 444255 453096 453614 32908 452871 130491 469141 old - 448885 33016 33007 50426 182083 65534 new - 0.94% 1273.92% -0.30% 798.09% -28.33% 615.87% %ch from new - - 65536 954037 972699 981940 65686 973104 260029 1004607 - 962991 65821 65808 101667 364341 131070 - 1.01% 1391.83% -0.19% 857.15% -28.63% 666.47% - - 131072 2039137 2101881 2091491 131232 2092894 554790 2161379 - 2057533 131410 131361 206193 728871 262142 - 2.16% 1491.58% -0.10% 915.02% -23.88% 724.51% - - 262144 4340409 4464460 4403233 262314 4445884 1107842 4584560 - 4377402 262437 262459 416347 1457945 524286 - 1.99% 1577.82% -0.06% 967.83% -24.01% 774.44% - - 524288 9205096 9453356 9408463 524468 9441930 2218577 9692015 - 9278734 524580 524633 837947 2916107 1048574 - 1.88% 1693.52% -0.03% 1026.79% -23.92% 824.30% - -1048576 19458756 19950272 19838588 1048766 19912134 4430649 20434212 - 19606028 1048958 1048941 1694896 5832445 2097150 - 1.76% 1791.27% -0.02% 1074.83% -24.03% 874.38% - - Discussion of cases: - - *sort: There's no structure in random data to exploit, so the theoretical - limit is lg(n!). Both methods get close to that, and timsort is hugging - it (indeed, in a *marginal* sense, it's a spectacular improvement -- - there's only about 1% left before hitting the wall, and timsort knows - darned well it's doing compares that won't pay on random data -- but so - does the samplesort hybrid). For contrast, Hoare's original random-pivot - quicksort does about 39% more compares than the limit, and the median-of-3 - variant about 19% more. - - 3sort, %sort, and !sort: No contest; there's structure in this data, but - not of the specific kinds samplesort special-cases. Note that structure - in !sort wasn't put there on purpose -- it was crafted as a worst case for - a previous quicksort implementation. That timsort nails it came as a - surprise to me (although it's obvious in retrospect). - - +sort: samplesort special-cases this data, and does a few less compares - than timsort. However, timsort runs this case significantly faster on all - boxes we have timings for, because timsort is in the business of merging - runs efficiently, while samplesort does much more data movement in this - (for it) special case. - - ~sort: samplesort's special cases for large masses of equal elements are - extremely effective on ~sort's specific data pattern, and timsort just - isn't going to get close to that, despite that it's clearly getting a - great deal of benefit out of the duplicates (the # of compares is much less - than lg(n!)). ~sort has a perfectly uniform distribution of just 4 - distinct values, and as the distribution gets more skewed, samplesort's - equal-element gimmicks become less effective, while timsort's adaptive - strategies find more to exploit; in a database supplied by Kevin Altis, a - sort on its highly skewed "on which stock exchange does this company's - stock trade?" field ran over twice as fast under timsort. - - However, despite that timsort does many more comparisons on ~sort, and - that on several platforms ~sort runs highly significantly slower under - timsort, on other platforms ~sort runs highly significantly faster under - timsort. No other kind of data has shown this wild x-platform behavior, - and we don't have an explanation for it. 
The only thing I can think of - that could transform what "should be" highly significant slowdowns into - highly significant speedups on some boxes are catastrophic cache effects - in samplesort. - - But timsort "should be" slower than samplesort on ~sort, so it's hard - to count that it isn't on some boxes as a strike against it . - -+ Here's the highwater mark for the number of heap-based temp slots (4 - bytes each on this box) needed by each test, again with arguments - "15 20 1": - - 2**i *sort \sort /sort 3sort +sort %sort ~sort =sort !sort - 32768 16384 0 0 6256 0 10821 12288 0 16383 - 65536 32766 0 0 21652 0 31276 24576 0 32767 - 131072 65534 0 0 17258 0 58112 49152 0 65535 - 262144 131072 0 0 35660 0 123561 98304 0 131071 - 524288 262142 0 0 31302 0 212057 196608 0 262143 -1048576 524286 0 0 312438 0 484942 393216 0 524287 - - Discussion: The tests that end up doing (close to) perfectly balanced - merges (*sort, !sort) need all N//2 temp slots (or almost all). ~sort - also ends up doing balanced merges, but systematically benefits a lot from - the preliminary pre-merge searches described under "Merge Memory" later. - %sort approaches having a balanced merge at the end because the random - selection of elements to replace is expected to produce an out-of-order - element near the midpoint. \sort, /sort, =sort are the trivial one-run - cases, needing no merging at all. +sort ends up having one very long run - and one very short, and so gets all the temp space it needs from the small - temparray member of the MergeState struct (note that the same would be - true if the new random elements were prefixed to the sorted list instead, - but not if they appeared "in the middle"). 3sort approaches N//3 temp - slots twice, but the run lengths that remain after 3 random exchanges - clearly has very high variance. - - -A detailed description of timsort follows. - -Runs ----- -count_run() returns the # of elements in the next run. A run is either -"ascending", which means non-decreasing: - - a0 <= a1 <= a2 <= ... - -or "descending", which means strictly decreasing: - - a0 > a1 > a2 > ... - -Note that a run is always at least 2 long, unless we start at the array's -last element. - -The definition of descending is strict, because the main routine reverses -a descending run in-place, transforming a descending run into an ascending -run. Reversal is done via the obvious fast "swap elements starting at each -end, and converge at the middle" method, and that can violate stability if -the slice contains any equal elements. Using a strict definition of -descending ensures that a descending run contains distinct elements. - -If an array is random, it's very unlikely we'll see long runs. If a natural -run contains less than minrun elements (see next section), the main loop -artificially boosts it to minrun elements, via a stable binary insertion sort -applied to the right number of array elements following the short natural -run. In a random array, *all* runs are likely to be minrun long as a -result. This has two primary good effects: - -1. Random data strongly tends then toward perfectly balanced (both runs have - the same length) merges, which is the most efficient way to proceed when - data is random. - -2. Because runs are never very short, the rest of the code doesn't make - heroic efforts to shave a few cycles off per-merge overheads. For - example, reasonable use of function calls is made, rather than trying to - inline everything. 
Since there are no more than N/minrun runs to begin - with, a few "extra" function calls per merge is barely measurable. - - -Computing minrun ----------------- -If N < 64, minrun is N. IOW, binary insertion sort is used for the whole -array then; it's hard to beat that given the overheads of trying something -fancier. - -When N is a power of 2, testing on random data showed that minrun values of -16, 32, 64 and 128 worked about equally well. At 256 the data-movement cost -in binary insertion sort clearly hurt, and at 8 the increase in the number -of function calls clearly hurt. Picking *some* power of 2 is important -here, so that the merges end up perfectly balanced (see next section). We -pick 32 as a good value in the sweet range; picking a value at the low end -allows the adaptive gimmicks more opportunity to exploit shorter natural -runs. - -Because sortperf.py only tries powers of 2, it took a long time to notice -that 32 isn't a good choice for the general case! Consider N=2112: - ->>> divmod(2112, 32) -(66, 0) ->>> - -If the data is randomly ordered, we're very likely to end up with 66 runs -each of length 32. The first 64 of these trigger a sequence of perfectly -balanced merges (see next section), leaving runs of lengths 2048 and 64 to -merge at the end. The adaptive gimmicks can do that with fewer than 2048+64 -compares, but it's still more compares than necessary, and-- mergesort's -bugaboo relative to samplesort --a lot more data movement (O(N) copies just -to get 64 elements into place). - -If we take minrun=33 in this case, then we're very likely to end up with 64 -runs each of length 33, and then all merges are perfectly balanced. Better! - -What we want to avoid is picking minrun such that in - - q, r = divmod(N, minrun) - -q is a power of 2 and r>0 (then the last merge only gets r elements into -place, and r < minrun is small compared to N), or q a little larger than a -power of 2 regardless of r (then we've got a case similar to "2112", again -leaving too little work for the last merge to do). - -Instead we pick a minrun in range(32, 65) such that N/minrun is exactly a -power of 2, or if that isn't possible, is close to, but strictly less than, -a power of 2. This is easier to do than it may sound: take the first 6 -bits of N, and add 1 if any of the remaining bits are set. In fact, that -rule covers every case in this section, including small N and exact powers -of 2; merge_compute_minrun() is a deceptively simple function. - - -The Merge Pattern ------------------ -In order to exploit regularities in the data, we're merging on natural -run lengths, and they can become wildly unbalanced. That's a Good Thing -for this sort! It means we have to find a way to manage an assortment of -potentially very different run lengths, though. - -Stability constrains permissible merging patterns. For example, if we have -3 consecutive runs of lengths - - A:10000 B:20000 C:10000 - -we dare not merge A with C first, because if A, B and C happen to contain -a common element, it would get out of order wrt its occurrence(s) in B. The -merging must be done as (A+B)+C or A+(B+C) instead. - -So merging is always done on two consecutive runs at a time, and in-place, -although this may require some temp memory (more on that later). - -When a run is identified, its base address and length are pushed on a stack -in the MergeState struct. merge_collapse() is then called to see whether it -should merge it with preceding run(s). 
We would like to delay merging as -long as possible in order to exploit patterns that may come up later, but we -like even more to do merging as soon as possible to exploit that the run just -found is still high in the memory hierarchy. We also can't delay merging -"too long" because it consumes memory to remember the runs that are still -unmerged, and the stack has a fixed size. - -What turned out to be a good compromise maintains two invariants on the -stack entries, where A, B and C are the lengths of the three righmost not-yet -merged slices: - -1. A > B+C -2. B > C - -Note that, by induction, #2 implies the lengths of pending runs form a -decreasing sequence. #1 implies that, reading the lengths right to left, -the pending-run lengths grow at least as fast as the Fibonacci numbers. -Therefore the stack can never grow larger than about log_base_phi(N) entries, -where phi = (1+sqrt(5))/2 ~= 1.618. Thus a small # of stack slots suffice -for very large arrays. - -If A <= B+C, the smaller of A and C is merged with B (ties favor C, for the -freshness-in-cache reason), and the new run replaces the A,B or B,C entries; -e.g., if the last 3 entries are - - A:30 B:20 C:10 - -then B is merged with C, leaving - - A:30 BC:30 - -on the stack. Or if they were - - A:500 B:400: C:1000 - -then A is merged with B, leaving - - AB:900 C:1000 - -on the stack. - -In both examples, the stack configuration after the merge still violates -invariant #2, and merge_collapse() goes on to continue merging runs until -both invariants are satisfied. As an extreme case, suppose we didn't do the -minrun gimmick, and natural runs were of lengths 128, 64, 32, 16, 8, 4, 2, -and 2. Nothing would get merged until the final 2 was seen, and that would -trigger 7 perfectly balanced merges. - -The thrust of these rules when they trigger merging is to balance the run -lengths as closely as possible, while keeping a low bound on the number of -runs we have to remember. This is maximally effective for random data, -where all runs are likely to be of (artificially forced) length minrun, and -then we get a sequence of perfectly balanced merges (with, perhaps, some -oddballs at the end). - -OTOH, one reason this sort is so good for partly ordered data has to do -with wildly unbalanced run lengths. - - -Merge Memory ------------- -Merging adjacent runs of lengths A and B in-place is very difficult. -Theoretical constructions are known that can do it, but they're too difficult -and slow for practical use. But if we have temp memory equal to min(A, B), -it's easy. - -If A is smaller (function merge_lo), copy A to a temp array, leave B alone, -and then we can do the obvious merge algorithm left to right, from the temp -area and B, starting the stores into where A used to live. There's always a -free area in the original area comprising a number of elements equal to the -number not yet merged from the temp array (trivially true at the start; -proceed by induction). The only tricky bit is that if a comparison raises an -exception, we have to remember to copy the remaining elements back in from -the temp area, lest the array end up with duplicate entries from B. But -that's exactly the same thing we need to do if we reach the end of B first, -so the exit code is pleasantly common to both the normal and error cases. - -If B is smaller (function merge_hi, which is merge_lo's "mirror image"), -much the same, except that we need to merge right to left, copying B into a -temp array and starting the stores at the right end of where B used to live. 
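The merge_lo strategy described above can be sketched in JavaScript like so (a minimal sketch; the function name, signature, and three-way cmp callback are illustrative, not CPython's code, and galloping plus the comparison-error handling are omitted):

    // Stably merge two adjacent sorted runs, where run A
    // (a[base1 .. base1+len1-1]) is no longer than run B
    // (a[base2 .. base2+len2-1]) and base1 + len1 == base2.
    function mergeLo(a, base1, len1, base2, len2, cmp) {
      const tmp = a.slice(base1, base1 + len1); // temp memory = min(A, B)
      let i = 0;          // next unmerged element of A, now in tmp
      let j = base2;      // next unmerged element of B, still in place
      let dest = base1;   // next free slot in the original array
      while (i < len1 && j < base2 + len2) {
        // <= takes equal elements from A first, preserving stability
        a[dest++] = cmp(tmp[i], a[j]) <= 0 ? tmp[i++] : a[j++];
      }
      while (i < len1) a[dest++] = tmp[i++]; // copy back leftovers from A
    }

If B runs out first, the leftovers of A are copied back from the temp area; if A runs out first, B's remaining elements are already in their final slots — the "pleasantly common" exit code mentioned above.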
- -A refinement: When we're about to merge adjacent runs A and B, we first do -a form of binary search (more on that later) to see where B[0] should end up -in A. Elements in A preceding that point are already in their final -positions, effectively shrinking the size of A. Likewise we also search to -see where A[-1] should end up in B, and elements of B after that point can -also be ignored. This cuts the amount of temp memory needed by the same -amount. - -These preliminary searches may not pay off, and can be expected *not* to -repay their cost if the data is random. But they can win huge in all of -time, copying, and memory savings when they do pay, so this is one of the -"per-merge overheads" mentioned above that we're happy to endure because -there is at most one very short run. It's generally true in this algorithm -that we're willing to gamble a little to win a lot, even though the net -expectation is negative for random data. - - -Merge Algorithms ----------------- -merge_lo() and merge_hi() are where the bulk of the time is spent. merge_lo -deals with runs where A <= B, and merge_hi where A > B. They don't know -whether the data is clustered or uniform, but a lovely thing about merging -is that many kinds of clustering "reveal themselves" by how many times in a -row the winning merge element comes from the same run. We'll only discuss -merge_lo here; merge_hi is exactly analogous. - -Merging begins in the usual, obvious way, comparing the first element of A -to the first of B, and moving B[0] to the merge area if it's less than A[0], -else moving A[0] to the merge area. Call that the "one pair at a time" -mode. The only twist here is keeping track of how many times in a row "the -winner" comes from the same run. - -If that count reaches MIN_GALLOP, we switch to "galloping mode". Here -we *search* B for where A[0] belongs, and move over all the B's before -that point in one chunk to the merge area, then move A[0] to the merge -area. Then we search A for where B[0] belongs, and similarly move a -slice of A in one chunk. Then back to searching B for where A[0] belongs, -etc. We stay in galloping mode until both searches find slices to copy -less than MIN_GALLOP elements long, at which point we go back to one-pair- -at-a-time mode. - -A refinement: The MergeState struct contains the value of min_gallop that -controls when we enter galloping mode, initialized to MIN_GALLOP. -merge_lo() and merge_hi() adjust this higher when galloping isn't paying -off, and lower when it is. - - -Galloping ---------- -Still without loss of generality, assume A is the shorter run. In galloping -mode, we first look for A[0] in B. We do this via "galloping", comparing -A[0] in turn to B[0], B[1], B[3], B[7], ..., B[2**j - 1], ..., until finding -the k such that B[2**(k-1) - 1] < A[0] <= B[2**k - 1]. This takes at most -roughly lg(B) comparisons, and, unlike a straight binary search, favors -finding the right spot early in B (more on that later). - -After finding such a k, the region of uncertainty is reduced to 2**(k-1) - 1 -consecutive elements, and a straight binary search requires exactly k-1 -additional comparisons to nail it. Then we copy all the B's up to that -point in one chunk, and then copy A[0]. Note that no matter where A[0] -belongs in B, the combination of galloping + binary search finds it in no -more than about 2*lg(B) comparisons. 
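As a rough JavaScript sketch of that gallop-plus-binary-search combination (names and signature are illustrative; the real gallop_left also takes a hint index telling it where to start, which is omitted here):

    // Find the leftmost index where key can be inserted into the sorted
    // array b while keeping it ordered; cmp is a three-way comparator.
    function gallopLeft(key, b, cmp) {
      const n = b.length;
      if (n === 0 || cmp(key, b[0]) <= 0) return 0; // belongs at the front
      let lastOfs = 0; // invariant: b[lastOfs] < key
      let ofs = 1;
      // Gallop: probe offsets 1, 3, 7, 15, ... (at most ~lg(n) compares).
      while (ofs < n && cmp(key, b[ofs]) > 0) {
        lastOfs = ofs;
        ofs = ofs * 2 + 1;
      }
      let lo = lastOfs + 1;
      let hi = Math.min(ofs, n);
      // Binary search; invariant: b[lo - 1] < key <= b[hi]
      // (treating b[n] as +infinity).
      while (lo < hi) {
        const mid = lo + ((hi - lo) >> 1);
        if (cmp(key, b[mid]) > 0) lo = mid + 1;
        else hi = mid;
      }
      return lo;
    }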
- -If we did a straight binary search, we could find it in no more than -ceiling(lg(B+1)) comparisons -- but straight binary search takes that many -comparisons no matter where A[0] belongs. Straight binary search thus loses -to galloping unless the run is quite long, and we simply can't guess -whether it is in advance. - -If data is random and runs have the same length, A[0] belongs at B[0] half -the time, at B[1] a quarter of the time, and so on: a consecutive winning -sub-run in B of length k occurs with probability 1/2**(k+1). So long -winning sub-runs are extremely unlikely in random data, and guessing that a -winning sub-run is going to be long is a dangerous game. - -OTOH, if data is lopsided or lumpy or contains many duplicates, long -stretches of winning sub-runs are very likely, and cutting the number of -comparisons needed to find one from O(B) to O(log B) is a huge win. - -Galloping compromises by getting out fast if there isn't a long winning -sub-run, yet finding such very efficiently when they exist. - -I first learned about the galloping strategy in a related context; see: - - "Adaptive Set Intersections, Unions, and Differences" (2000) - Erik D. Demaine, Alejandro López-Ortiz, J. Ian Munro - -and its followup(s). An earlier paper called the same strategy -"exponential search": - - "Optimistic Sorting and Information Theoretic Complexity" - Peter McIlroy - SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), pp - 467-474, Austin, Texas, 25-27 January 1993. - -and it probably dates back to an earlier paper by Bentley and Yao. The -McIlroy paper in particular has good analysis of a mergesort that's -probably strongly related to this one in its galloping strategy. - - -Galloping with a Broken Leg ---------------------------- -So why don't we always gallop? Because it can lose, on two counts: - -1. While we're willing to endure small per-merge overheads, per-comparison - overheads are a different story. Calling Yet Another Function per - comparison is expensive, and gallop_left() and gallop_right() are - too long-winded for sane inlining. - -2. Galloping can-- alas --require more comparisons than linear one-at-time - search, depending on the data. - -#2 requires details. If A[0] belongs before B[0], galloping requires 1 -compare to determine that, same as linear search, except it costs more -to call the gallop function. If A[0] belongs right before B[1], galloping -requires 2 compares, again same as linear search. On the third compare, -galloping checks A[0] against B[3], and if it's <=, requires one more -compare to determine whether A[0] belongs at B[2] or B[3]. That's a total -of 4 compares, but if A[0] does belong at B[2], linear search would have -discovered that in only 3 compares, and that's a huge loss! Really. It's -an increase of 33% in the number of compares needed, and comparisons are -expensive in Python. - -index in B where # compares linear # gallop # binary gallop -A[0] belongs search needs compares compares total ----------------- ----------------- -------- -------- ------ - 0 1 1 0 1 - - 1 2 2 0 2 - - 2 3 3 1 4 - 3 4 3 1 4 - - 4 5 4 2 6 - 5 6 4 2 6 - 6 7 4 2 6 - 7 8 4 2 6 - - 8 9 5 3 8 - 9 10 5 3 8 - 10 11 5 3 8 - 11 12 5 3 8 - ... - -In general, if A[0] belongs at B[i], linear search requires i+1 comparisons -to determine that, and galloping a total of 2*floor(lg(i))+2 comparisons. -The advantage of galloping is unbounded as i grows, but it doesn't win at -all until i=6. Before then, it loses twice (at i=2 and i=4), and ties -at the other values. 
At and after i=6, galloping always wins. - -We can't guess in advance when it's going to win, though, so we do one pair -at a time until the evidence seems strong that galloping may pay. MIN_GALLOP -is 7, and that's pretty strong evidence. However, if the data is random, it -simply will trigger galloping mode purely by luck every now and again, and -it's quite likely to hit one of the losing cases next. On the other hand, -in cases like ~sort, galloping always pays, and MIN_GALLOP is larger than it -"should be" then. So the MergeState struct keeps a min_gallop variable -that merge_lo and merge_hi adjust: the longer we stay in galloping mode, -the smaller min_gallop gets, making it easier to transition back to -galloping mode (if we ever leave it in the current merge, and at the -start of the next merge). But whenever the gallop loop doesn't pay, -min_gallop is increased by one, making it harder to transition back -to galloping mode (and again both within a merge and across merges). For -random data, this all but eliminates the gallop penalty: min_gallop grows -large enough that we almost never get into galloping mode. And for cases -like ~sort, min_gallop can fall to as low as 1. This seems to work well, -but in all it's a minor improvement over using a fixed MIN_GALLOP value. - - -Galloping Complication ----------------------- -The description above was for merge_lo. merge_hi has to merge "from the -other end", and really needs to gallop starting at the last element in a run -instead of the first. Galloping from the first still works, but does more -comparisons than it should (this is significant -- I timed it both ways). -For this reason, the gallop_left() and gallop_right() functions have a -"hint" argument, which is the index at which galloping should begin. So -galloping can actually start at any index, and proceed at offsets of 1, 3, -7, 15, ... or -1, -3, -7, -15, ... from the starting index. - -In the code as I type it's always called with either 0 or n-1 (where n is -the # of elements in a run). It's tempting to try to do something fancier, -melding galloping with some form of interpolation search; for example, if -we're merging a run of length 1 with a run of length 10000, index 5000 is -probably a better guess at the final result than either 0 or 9999. But -it's unclear how to generalize that intuition usefully, and merging of -wildly unbalanced runs already enjoys excellent performance. - -~sort is a good example of when balanced runs could benefit from a better -hint value: to the extent possible, this would like to use a starting -offset equal to the previous value of acount/bcount. Doing so saves about -10% of the compares in ~sort. However, doing so is also a mixed bag, -hurting other cases. - - -Comparing Average # of Compares on Random Arrays ------------------------------------------------- -[NOTE: This was done when the new algorithm used about 0.1% more compares - on random data than does its current incarnation.] 
- -Here list.sort() is samplesort, and list.msort() this sort: - -""" -import random -from time import clock as now - -def fill(n): - from random import random - return [random() for i in xrange(n)] - -def mycmp(x, y): - global ncmp - ncmp += 1 - return cmp(x, y) - -def timeit(values, method): - global ncmp - X = values[:] - bound = getattr(X, method) - ncmp = 0 - t1 = now() - bound(mycmp) - t2 = now() - return t2-t1, ncmp - -format = "%5s %9.2f %11d" -f2 = "%5s %9.2f %11.2f" - -def drive(): - count = sst = sscmp = mst = mscmp = nelts = 0 - while True: - n = random.randrange(100000) - nelts += n - x = fill(n) - - t, c = timeit(x, 'sort') - sst += t - sscmp += c - - t, c = timeit(x, 'msort') - mst += t - mscmp += c - - count += 1 - if count % 10: - continue - - print "count", count, "nelts", nelts - print format % ("sort", sst, sscmp) - print format % ("msort", mst, mscmp) - print f2 % ("", (sst-mst)*1e2/mst, (sscmp-mscmp)*1e2/mscmp) - -drive() -""" - -I ran this on Windows and kept using the computer lightly while it was -running. time.clock() is wall-clock time on Windows, with better than -microsecond resolution. samplesort started with a 1.52% #-of-comparisons -disadvantage, fell quickly to 1.48%, and then fluctuated within that small -range. Here's the last chunk of output before I killed the job: - -count 2630 nelts 130906543 - sort 6110.80 1937887573 -msort 6002.78 1909389381 - 1.80 1.49 - -We've done nearly 2 billion comparisons apiece at Python speed there, and -that's enough . - -For random arrays of size 2 (yes, there are only 2 interesting ones), -samplesort has a 50%(!) comparison disadvantage. This is a consequence of -samplesort special-casing at most one ascending run at the start, then -falling back to the general case if it doesn't find an ascending run -immediately. The consequence is that it ends up using two compares to sort -[2, 1]. Gratifyingly, timsort doesn't do any special-casing, so had to be -taught how to deal with mixtures of ascending and descending runs -efficiently in all cases. -""" From 3935fe1663a01766c0207e88857d4b7faf04f8ce Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Mon, 25 Mar 2019 17:34:03 -0400 Subject: [PATCH 009/326] bubble sort fixed --- book/chapters/algorithms-intro.adoc | 11 +++++---- book/chapters/bubble-sort.adoc | 34 +++++++++++++-------------- book/chapters/insertion-sort.adoc | 4 +++- book/chapters/merge-sort.adoc | 5 ++-- book/chapters/quick-sort.adoc | 3 ++- book/chapters/selection-sort.adoc | 4 +++- book/chapters/sorting-intro.adoc | 8 +++---- book/chapters/sorting-properties.adoc | 28 ++++++++++++---------- src/data-structures/graphs/graph.js | 1 - 9 files changed, 53 insertions(+), 45 deletions(-) diff --git a/book/chapters/algorithms-intro.adoc b/book/chapters/algorithms-intro.adoc index 7c8d0acb..d44ad6e6 100644 --- a/book/chapters/algorithms-intro.adoc +++ b/book/chapters/algorithms-intro.adoc @@ -1,16 +1,17 @@ -In this part of the book, we are going to cover algorithms in more details. We will provide algorithmic tools for classic problems that will help you improve your problem-solving skills. +In this part of the book, we are going to cover examples of classical algorithms in more details. Also, we will provide algorithmic tools for improving your problem-solving skills. IMPORTANT: There's not a single approach to solve all problems but knowing well-known techniques can help you build your own faster. 
We are going to start with <> // and searching algorithms,
-then you are going to learn some techniques for solving problems.
+such as <>, <> and some others.
+Later, you are going to learn some algorithmic paradigms that will help you identify common patterns and solve problems from different angles.

.We are going to discuss the following techniques for solving algorithms problems:
- <>: makes greedy choices using heuristics to find the best solution without looking back.
-- <>: technique for solving problems with _overlapping subproblems_. It uses _memoization_ to avoid duplicated work.
+- <>: a technique for speeding up recursive algorithms when there are many _overlapping subproblems_. It uses _memoization_ to avoid duplicating work.
- <>: _divide_ problems into smaller pieces, _conquer_ each subproblem and then _join_ the results.
-- <>: search _all (or some)_ possible paths. However it stops and _go back_ as soon as some contraint is broken.
-- _Brute Force_: generate all possible solutions. (Use it as the last resort or as starting point to optimize it with other techniques).
+- <>: searches _all (or some)_ possible paths. However, it stops and _goes back_ as soon as it notices the current solution is not working.
+- _Brute Force_: generates all possible solutions and tries each of them. (Use it as a last resort or as the starting point to optimize with other techniques).
diff --git a/book/chapters/bubble-sort.adoc b/book/chapters/bubble-sort.adoc
index d7453bf2..b77e47c9 100644
--- a/book/chapters/bubble-sort.adoc
+++ b/book/chapters/bubble-sort.adoc
@@ -1,28 +1,29 @@
 = Bubble Sort
-
-((Bubble sort)) is a simple sorting algorithm that "bubbles up" the biggest values to the right side of the array.
-It's also call _((sinking sort))_ because the largest values sinks to the right side of the array.
-This algorithm is adaptive which means that if the array is already sorted it will take only _O(n)_ to "sort".
-However, if the array is quite out of order it will require _O(n^2^)_ to sort.
+(((Bubble Sort)))
+(((Sorting, Bubble Sort)))
+(((Sinking Sort)))
+(((Sorting, Sinking Sort)))
+Bubble sort is a simple sorting algorithm that "bubbles up" the biggest values to the right side of the array.
+It's also called _sinking sort_ because the most significant values "sink" to the right side of the array.
+This algorithm is adaptive, which means that if the array is already sorted, it will take only _O(n)_ to "sort".
+However, if the array is entirely out of order, it will require _O(n^2^)_ to sort.

 == Bubble Sort Implementation

-Bubble sort can be implemented in any programming language. Let's implement it first and explain it.
-
 .Bubble Sort implementation in JavaScript
 [source, javascript]
 ----
 include::{codedir}/algorithms/sorting/bubble-sort.js[tag=sort, indent=0]
 ----
-<1> Convert any kind of iterable (array, sets, etc.) into an array or if is already and array it clones it so the input is not modified.
-<2> Starting from 0 compare current and next element
+<1> Convert any iterable (array, set, etc.) into an array; if it's already an array, clone it so the input is not modified.
+<2> Starting from index 0, compare the current and the next element.
 <3> If they are out of order, swap the pair
-<4> Repeat pair comparison `n - 1` times, until the largest element is on the right side.
-<5> (optimization) If there was no swap means that the array is already sorted and no more work is needed. This make this sorting adaptive, if everything is sorted it will be only one pass: _O(n)_.
-<6> Each step moves the largest element from `0` to `n - i` to the right side. So, we need to do this `n - 1` to sort the array in case most elements needs to be swapped.
+<4> Repeat the pair comparison until the last element that has already bubbled up to the right side, `array.length - i`.
+<5> (optimization) If there was no swap, the array is already sorted and no more work is needed. This check makes this sorting _adaptive_: if everything is sorted, it takes only a single _O(n)_ pass.
+<6> Each step moves the largest element from where it was to the right side. So, we need to do this `n - 1` times to sort the array in case most elements need to be swapped.

-The swap function is implemented as follows.
+The `swap` function is implemented as follows.

.Swap function
[source, javascript]
----
include::{codedir}/algorithms/sorting/sorting-common.js[tag=swap, indent=0]
----

It uses JavaScript ES6 array destructuring.

****
*Assignment separate from declaration*

-A variable can be assign to its values using the destructing syntax.
+A variable can be assigned its values using the destructuring syntax.

[source, javascript]
----
let a, b;
[a, b] = [1, 2];
console.log(a); //↪️ 1
console.log(b); //️↪️ 2
----

*Swapping variables*

-Two variables values can be swapped in one destructuring expression.
+Two variables' values can be swapped in one line using a destructuring expression.

[source, javascript]
----
[a, b] = [b, a];
console.log(a); //↪️ 2
console.log(b); //️↪️ 1
----

-Without destructuring assignment, swapping two values requires a temporary variable.
+Without the destructuring assignment, swapping two values requires a temporary variable.
****
-
Bubble sort has a <> running time, as you might infer from the nested for-loop.

== Bubble Sort Properties
diff --git a/book/chapters/insertion-sort.adoc b/book/chapters/insertion-sort.adoc
index 4606c26a..49be157e 100644
--- a/book/chapters/insertion-sort.adoc
+++ b/book/chapters/insertion-sort.adoc
@@ -1,6 +1,8 @@
 = Insertion Sort

-((Insertion sort)) is a simple sorting algorithm. It is one of the most natural way of sorting. If you are given some cards that's probably how you are going to sort them.
+(((Sorting, Insertion Sort)))
+(((Insertion Sort)))
+Insertion sort is a simple sorting algorithm. It is one of the most natural ways of sorting. If you are given some cards, that's probably how you are going to sort them.

 // Good illustration of sorting a deck of cards: https://www.khanacademy.org/computing/computer-science/algorithms/insertion-sort/a/insertion-sort
diff --git a/book/chapters/merge-sort.adoc b/book/chapters/merge-sort.adoc
index 27157d95..5fe563b3 100644
--- a/book/chapters/merge-sort.adoc
+++ b/book/chapters/merge-sort.adoc
@@ -1,6 +1,7 @@
 = Merge Sort
-
-((Merge Sort)) is an efficient sorting algorithm that uses <> paradigm to accomplish its task faster. It uses auxiliary memory in the process of sorting.
+(((Sorting, Merge Sort)))
+(((Merge Sort)))
+Merge Sort is an efficient sorting algorithm that uses the <> paradigm to accomplish its task faster. It uses auxiliary memory in the process of sorting.

 indexterm:[Divide and Conquer]
 The merge sort algorithm splits the array in halves until two or fewer elements are left. It sorts these two elements and then merges back all halves until the whole array is sorted.
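To see the whole split/merge flow at a glance, here is a compact JavaScript sketch (an illustration only; the book's actual implementation lives in `src/algorithms/sorting/merge-sort.js` and differs in details such as input handling):

[source, javascript]
----
// A simplified merge sort: split until the base case, then merge halves.
function mergeSort(array) {
  if (array.length < 2) return array; // 0 or 1 element is already sorted
  const middle = Math.floor(array.length / 2);
  const left = mergeSort(array.slice(0, middle));  // divide
  const right = mergeSort(array.slice(middle));
  return merge(left, right);                       // join sorted halves
}

function merge(left, right) {
  const merged = [];
  let i = 0;
  let j = 0;
  while (i < left.length && j < right.length) {
    // <= takes from the left run on ties, keeping the sort stable
    merged.push(left[i] <= right[j] ? left[i++] : right[j++]);
  }
  return merged.concat(left.slice(i), right.slice(j));
}
----

Note how the auxiliary memory shows up in the sketch: `merge` builds a new array instead of sorting in place.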
diff --git a/book/chapters/quick-sort.adoc b/book/chapters/quick-sort.adoc index 73abcd5f..9aa9ba4b 100644 --- a/book/chapters/quick-sort.adoc +++ b/book/chapters/quick-sort.adoc @@ -1,5 +1,6 @@ = Quicksort - +(((Sorting, QuickSort))) +(((QuickSort))) Quicksort is an efficient recursive sorting algorithm that uses <> paradigm to sort faster. It can be implemented in-place so it doesn't require additonal memory. indexterm:[Divide and Conquer] diff --git a/book/chapters/selection-sort.adoc b/book/chapters/selection-sort.adoc index 4297bddc..77eb0170 100644 --- a/book/chapters/selection-sort.adoc +++ b/book/chapters/selection-sort.adoc @@ -1,6 +1,8 @@ = Selection Sort -The ((selection sort)) is a simple sorting algorithm. As its name indicates, it chooses the lowest element from the list and move it where it should be. +(((Sorting, Selection Sort))) +(((Selection Sort))) +The selection sort is a simple sorting algorithm. As its name indicates, it chooses the lowest element from the list and move it where it should be. .Selection sort algorithm . Start with the element in position 0. diff --git a/book/chapters/sorting-intro.adoc b/book/chapters/sorting-intro.adoc index 050650ee..aca38ddc 100644 --- a/book/chapters/sorting-intro.adoc +++ b/book/chapters/sorting-intro.adoc @@ -1,15 +1,15 @@ = Sorting Algorithms Sorting is one of the most common solutions when we want to extract some insights about a collection of data. -We can sort to get the maximum or minimun value and many algorithmic problems involves sorting data first. +We can sort to get the maximum or minimum value and many algorithmic problems involves sorting data first. -.We are going to explore 3 basic sorting algorithms _O(n^2^)_ which have low overhead: +.We are going to explore three basic sorting algorithms _O(n^2^)_ which have low overhead: - <> - <> - <> -.Efficient sorting algorithms _O(n log n)_: +.and then discuss efficient sorting algorithms _O(n log n)_ such as: - <> - <> -Before we dive into the most well-known sorting algorithms, let's discuess about the sorting properties. +Before we dive into the most well-known sorting algorithms, let's discuss the sorting properties. diff --git a/book/chapters/sorting-properties.adoc b/book/chapters/sorting-properties.adoc index e0fe0fa3..0220d011 100644 --- a/book/chapters/sorting-properties.adoc +++ b/book/chapters/sorting-properties.adoc @@ -1,12 +1,14 @@ = Sorting Properties -Sorting implementations with the same time complexity might manipulate the data differently. We want to understand these differences so we can be aware of the side-effects it will have on data or extra resources. For instance, some solutions will need auxiliary memory to store temporary data while sorting while other can do it in place. +Sorting implementations with the same time complexity might manipulate the data differently. We want to understand these differences so we can be aware of the side-effects it will have on data or extra resources they will require. For instance, some solutions will need auxiliary memory to store temporary data while sorting while others can do it in place. -== Stable +Sorting properties are stable, adaptive, online and in-place. Let's go one by one. -An ((stable sorting)) algorithms keeps the relative order of items with the same comparison criteria. +== Stable +(((Sorting, stable))) +An ((stable sorting)) algorithms keep the relative order of items with the same comparison criteria. -This specially useful when you want to sort on multiple phases. 
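As a runnable aside, a stable sort lets you build multi-key ordering out of consecutive single-key passes; `Array#sort` is guaranteed stable since ES2019. The data below is made up and smaller than the book's `users` example.

.Two-phase sorting with a stable sort (illustrative)
[source, javascript]
----
const users = [
  { name: 'Bob', age: 32 },
  { name: 'Ana', age: 23 },
  { name: 'Al', age: 32 },
];

users.sort((a, b) => a.name.localeCompare(b.name)); // phase 1: by name
users.sort((a, b) => a.age - b.age);                // phase 2: by age
// ↪️ Ana (23), Al (32), Bob (32): ties on `age` keep their `name` order
----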
+This especially useful when you want to sort on multiple phases. .Let's say you have the following data: [source, javascript] @@ -30,9 +32,9 @@ const users = [ ]; ---- -Then, here comes the important part, if you sort by `age` you might get two different results. +Then, here comes the _critical_ part, if you sort by `age` you might get (at least two) different results. -.If the sorting algorithm is *stable*, it should keep the items with the same age ordered by `name`: +.If the sorting algorithm is *stable*; it should keep the items with the same age ordered by `name`: [source, javascript] ---- [ @@ -54,18 +56,18 @@ Then, here comes the important part, if you sort by `age` you might get two diff ]; ---- -Both results are correctly sorted by `age`, however, having a stable sorting is better if you want to keep the relative possition of keys with the same value. +Both results are sorted by `age`; however, having a stable sorting is better if you want to keep the relative position of data with the same value. == In-place - -An ((in-place sorting)) algorithm would a _space complexity_ of O(1). In other words, it does not use any other auxiliary memory because it moves the items in the collection itself. -This is specially useful for memory constraint enviroments like robotics or embedded systems in appliances. +(((Sorting, in-place))) +An ((in-place sorting)) algorithm would have a _space complexity_ of O(1). In other words, it does not use any other auxiliary memory because it moves the items in the collection itself. +No requiring extra memory for sorting is especially useful for memory constraint environments like robotics, smart devices, or embedded systems in appliances. == Online - +(((Sorting, online))) It can sort a list as it receives it. -((Online sorting)) algorithms doesn't have to re-sort the whole collection for every new item added. +((Online sorting)) algorithms don't have to re-sort the whole collection for every new item added. == Adaptive - +(((Sorting, adaptive))) Algorithms with ((adaptive sorting)) run faster, close to _O(n)_, on an already sorted (or partially sorted) collection. diff --git a/src/data-structures/graphs/graph.js b/src/data-structures/graphs/graph.js index c799d5bf..04095aa5 100644 --- a/src/data-structures/graphs/graph.js +++ b/src/data-structures/graphs/graph.js @@ -248,7 +248,6 @@ class Graph { }); return paths; } -// tag::constructor[] } Graph.UNDIRECTED = Symbol('undirected graph'); // one-way edges From 5e6ebba8a196cf4c5bfc732f11441619c710fe01 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Mon, 25 Mar 2019 18:18:44 -0400 Subject: [PATCH 010/326] merge sort fixes --- book/chapters/bubble-sort.adoc | 11 ++++------ book/chapters/insertion-sort.adoc | 21 +++++++------------ book/chapters/merge-sort.adoc | 31 ++++++++-------------------- src/algorithms/sorting/merge-sort.js | 6 +++--- 4 files changed, 23 insertions(+), 46 deletions(-) diff --git a/book/chapters/bubble-sort.adoc b/book/chapters/bubble-sort.adoc index b77e47c9..ada2cb21 100644 --- a/book/chapters/bubble-sort.adoc +++ b/book/chapters/bubble-sort.adoc @@ -5,7 +5,7 @@ (((Sorting, Sinking Sort))) Bubble sort is a simple sorting algorithm that "bubbles up" the biggest values to the right side of the array. It's also called _sinking sort_ because the most significant values "sink" to the right side of the array. -This algorithm is adaptive which means that if the array is already sorted, it will take only _O(n)_ to "sort". 
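The adaptive behavior comes from a one-line check: if a full pass makes no swap, the array is already sorted and we can stop early. A minimal sketch of that optimization follows; it is an illustrative stand-in for the `include::` file, not the book's exact code.

.Adaptive bubble sort sketch (illustrative)
[source, javascript]
----
function bubbleSort(iterable) {
  const array = Array.from(iterable);              // clone, don't mutate the input
  for (let i = 1; i < array.length; i += 1) {
    let swapped = false;
    for (let current = 0; current < array.length - i; current += 1) {
      if (array[current] > array[current + 1]) {   // out of order? swap the pair
        [array[current], array[current + 1]] = [array[current + 1], array[current]];
        swapped = true;
      }
    }
    if (!swapped) break; // no swaps ⇒ already sorted ⇒ a single O(n) pass
  }
  return array;
}

bubbleSort([4, 1, 3]); // ↪️ [1, 3, 4]
----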
+This algorithm is adaptive, which means that if the array is already sorted, it will take only _O(n)_ to "sort". However, if the array is entirely out of order, it will require _O(n^2^)_ to sort. == Bubble Sort Implementation @@ -19,13 +19,10 @@ include::{codedir}/algorithms/sorting/bubble-sort.js[tag=sort, indent=0] <2> Starting from index 0 compare current and next element <3> If they are out of order, swap the pair <4> Repeat pair comparison until the last element that has been bubbled up to the right side `array.length - i`. -<5> (optimization) If there was no swap means that the array is already sorted and no more work is needed. This single pass makes this sorting _adaptive_ if everything is sorted it will be only one _O(n)_ operations. +<5> (optimization) If there were no swaps, this means that the array is sorted. This single pass makes this sorting _adaptive_, and it will only require _O(n)_ operations. <6> Each step moves the largest element from where it was to the right side. So, we need to do this `n - 1` times to sort the array in case most elements need to be swapped. - -The `swap` function is implemented as follows. - -.Swap function +.The `swap` function is implemented as follows: [source, javascript] ---- include::{codedir}/algorithms/sorting/sorting-common.js[tag=swap, indent=0] @@ -50,7 +47,7 @@ console.log(b); //️↪️ 2 *Swapping variables* -Two variables' values can be swapped in one line using destructuring expression. +Two variables' values can be swapped in one line using the destructuring expression. [source, javascript] ---- diff --git a/book/chapters/insertion-sort.adoc b/book/chapters/insertion-sort.adoc index 49be157e..be19d684 100644 --- a/book/chapters/insertion-sort.adoc +++ b/book/chapters/insertion-sort.adoc @@ -2,34 +2,27 @@ (((Sorting, Insertion Sort))) (((Insertion Sort))) -Insertion sort is a simple sorting algorithm. It is one of the most natural way of sorting. If you are given some cards that's probably how you are going to sort them. +Insertion sort is a simple sorting algorithm. It is one of the most natural ways of sorting. If I give you some cards to sort you will probably use this algorithm without knowing. // Good illustration on of sorting a deck of cards: https://www.khanacademy.org/computing/computer-science/algorithms/insertion-sort/a/insertion-sort == Insertion Sort Implementation +Insertion sort does the following: It starts from the 2nd element, and it tries to find anything to the left that could be bigger than the current item. + It will swap all the elements with higher value + and insert the current element where it belongs. + .Insertion sort [source, javascript] ---- include::{codedir}/algorithms/sorting/insertion-sort.js[tag=sort, indent=0] ---- -<1> Convert to array or clone the array. +<1> Convert to an array or clone the array. <2> Start with the 2nd element. Everything on the left is considered sorted. -<3> Compare current element (2nd) to the previous one. If is bigger swap them, if not move to the next one. +<3> Compare current element (2nd) to the previous one. If `left - 1` is bigger, it will swap places. If not, it will continue checking the next one to the left. <4> We check every element on the left side and swap any of them that are out of order -// .Swap function -// [source, javascript] -// ---- -// include::{codedir}/algorithms/insertion-sort.js[tag=swap, indent=0] -// ---- - -// .This is how it works: -// . Visit 2nd. If the current element is smaller than the previous one move to the place it should be. -// . 
Visit next element and do the same thing. The left side is always sorted and the right is not (yet). -// . Repeat for the rest of the array. - == Insertion Sort Properties - <>: [big]#✅# Yes diff --git a/book/chapters/merge-sort.adoc b/book/chapters/merge-sort.adoc index 5fe563b3..d4a33b0a 100644 --- a/book/chapters/merge-sort.adoc +++ b/book/chapters/merge-sort.adoc @@ -1,28 +1,15 @@ = Merge Sort (((Sorting, Merge Sort))) (((Merge Sort))) -Merge Sort is an efficient sorting algorithm that uses <> paradigm to accomplish its task faster. It uses auxiliary memory in the process of sorting. +Merge Sort is an efficient sorting algorithm that uses <> paradigm to accomplish its task faster. However, It uses auxiliary memory in the process of sorting. indexterm:[Divide and Conquer] -Merge sort algorithm splits the array in halves until 2 or less elements are left. It sorts these two elements and then merge back all halves until the whole array is sorted. +Merge sort algorithm splits the array into halves until 2 or fewer elements are left. It sorts these two elements and then merges back all halves until the whole collection is sorted. image:image11.png[Mergesort visualization,width=500,height=600] == Merge Sort Implementation -Merge sort implementation is as follows - -.Merge Sort Algorithm -. It moves one element at a time (from left to right). Everything on the left of the current element is already sorted, while everything to the right is not. -. Start with the first element and make it the current element. -. Compare elements to right of the current element. -. Merge up big values to the right of the array. -.. Swap elements if the previous element is bigger than the previous one. -. Move the current pointer to the next element and repeat for the rest of the array - - -Let's convert these words into code! - .Merge Sort implementation in JavaScript (mergeSort) [source, javascript] ---- @@ -30,29 +17,29 @@ include::{codedir}/algorithms/sorting/merge-sort.js[tag=sort, indent=0] ---- <1> Convert any kind of iterable (array, sets, etc.) into an array -As you can see this function is just a wrapper to transform things to array. The heavy lifting is done in `splitSort` as you can see below. +As you can see this function is just a wrapper to transform things into an array. The heavy lifting is done in `splitSort` as you can see below. .Merge Sort implementation in JavaScript (splitSort) [source, javascript] ---- include::{codedir}/algorithms/sorting/merge-sort.js[tag=splitSort, indent=0] ---- -<1> Sort two elements manually. +<1> Base case: Sort two or less items manually. <2> Recursively divide the array in half until two or less elements are left. <3> Merge back the sorted halves in ascending order. -Let's now take a look at the merge function: +Let's take a look at the merge function: .Merge Sort implementation in JavaScript (merge) [source, javascript] ---- include::{codedir}/algorithms/sorting/merge-sort.js[tag=merge, indent=0] ---- -<1> We need to keep track of 3 arrays indices (mergedArray, a1 and a2). -<2> If `array1` has the lowest current value, we insert it into the merged array if not we then insert `array2`. -<3> End result is array1 and array2 combined in ascending order (sorted). +<1> We need to keep track of 3 arrays indices: `index` which keeps track of the combined array position, `i1` which is the `array1` index and `a2` for `array2`. +<2> If `array1` current element (`i1`) has the lowest value, we insert it into the `mergedArray` if not we then insert `array2` element. 
+<3> `mergedArray` is `array1` and `array2` combined in ascending order (sorted). -Merge sort has a _O(n log n)_ running time. For more details about the how to extract the runtime go to <>. +Merge sort has an _O(n log n)_ running time. For more details about how to extract the runtime go to <> section. == Merge Sort Properties diff --git a/src/algorithms/sorting/merge-sort.js b/src/algorithms/sorting/merge-sort.js index ad0f67a9..a4a08882 100644 --- a/src/algorithms/sorting/merge-sort.js +++ b/src/algorithms/sorting/merge-sort.js @@ -2,8 +2,8 @@ /** * Merge two arrays in ascending order * - * @param {array} array1 - * @param {array} array2 + * @param {array} array1 sorted array 1 + * @param {array} array2 sorted array 2 * @returns {array} merged arrays in asc order * * @example @@ -15,7 +15,7 @@ function merge(array1, array2 = []) { // merge elements on a and b in asc order. Run-time O(a + b) for (let index = 0, i1 = 0, i2 = 0; - index < mergedLength; index += 1) { // <1> + index < mergedLength; index++) { // <1> if (i2 >= array2.length || (i1 < array1.length && array1[i1] <= array2[i2])) { mergedArray[index] = array1[i1]; // <2> From 09af83dd43fe49fa69827a91945c20758448e2ae Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Mon, 25 Mar 2019 20:01:59 -0400 Subject: [PATCH 011/326] quicksort fixes --- book/chapters/quick-sort.adoc | 51 ++++++++++++++++++------ src/algorithms/sorting/quick-sort.js | 20 +++++----- src/algorithms/sorting/sorting-common.js | 21 +++++----- 3 files changed, 62 insertions(+), 30 deletions(-) diff --git a/book/chapters/quick-sort.adoc b/book/chapters/quick-sort.adoc index 9aa9ba4b..aa9e2b8b 100644 --- a/book/chapters/quick-sort.adoc +++ b/book/chapters/quick-sort.adoc @@ -1,21 +1,21 @@ = Quicksort (((Sorting, QuickSort))) (((QuickSort))) -Quicksort is an efficient recursive sorting algorithm that uses <> paradigm to sort faster. It can be implemented in-place so it doesn't require additonal memory. +Quicksort is an efficient recursive sorting algorithm that uses <> paradigm to sort faster. It can be implemented in-place, so it doesn't require additonal memory. indexterm:[Divide and Conquer] -In practice quicksort outperforms efficient sorting algorithms like <>. And, of course, It also outperforms simple sorting algorithms like <>, <> and <>. +In practice, quicksort outperforms other sorting algorithms like <>. And, of course, It also outperforms simple sorting algorithms like <>, <> and <>. -Quicksort basically picks a "pivot" element (preferably random) and move all the elements that are smaller than the pivot to the right and the ones that are bigger to the left. It does this recursively until all the array is sorted. +Quicksort picks a "pivot" element (preferably random) and move all the parts that are smaller than the pivot to the right and the ones that are bigger to the left. It does this recursively until all the array is sorted. == Quicksort Implementation Quicksort implementation uses the divide-and-conquer in the following way: .Quicksort Algorithm -. Pick a "pivot" element (at random) -. Move everything that is lower than the pivot to the left and everything that is bigger than the pivot to the right. -. Recursively repeat step 1 and 2, the sub-arrays on the left and on the right WITHOUT including the pivot. +. Pick a "pivot" element (at random). +. Move everything lower than the pivot to the left and everything more significant than the pivot to the right. +. 
Recursively repeat step #1 and #2 in the sub-arrays on the left and on the right WITHOUT including the pivot. Let's convert these words into code! @@ -29,19 +29,45 @@ include::{codedir}/algorithms/sorting/quick-sort.js[tag=quickSort, indent=0] <3> Do the partition of the sub-array at the right of the pivot. <4> Only do the partition when there's something to divide. -The real heavy-lifting is don in the partion function. Let's implement that: +The `partition` function does the real heavy-lifting. 🏋️‍♀️ .Quicksort implementation in JavaScript (partition) [source, javascript] ---- include::{codedir}/algorithms/sorting/Quick-sort.js[tag=partition, indent=0] ---- -<1> Make the rightmost element as the pivot. -<2> This is the place holder for the final pivot index. We start in low and as we move all the lower elements to the left we will get the final place where the pivot should be. -<3> Move one element at a time comparing it to the pivot value. -<4> If the current element value is less than the pivot, then increment pivot index (pivot should be place after all the lower values). We also swap the value before incrementing because current element that is lower than the pivot to be at its left side. +<1> Take the leftmost element as the pivot. +<2> `pivotFinalIndex` is the placeholder for the final position where the pivot will be placed when the array is sorted. +<3> Check all values other than the pivot to see if any value is smaller than the pivot. +<4> If the `current` element's value is less than the pivot, then increment `pivotFinalIndex` to make room on the left side. +<5> We also swap the smaller element to the left side since it's smaller than the pivot. +<6> Finally, we move the pivot to its final position. Everything to the left is smaller than the pivot and everything to the right is bigger. -Merge sort has a _O(n log n)_ running time. For more details about the how to extract the runtime go to <>. +*What would happen if use Quicksort for an array in reverse order?* + +E.g. `[10, 7, 5, 4, 2, 1]`, if we always choose the first element as the pivot, we would have to swap everything to the left of `10`. + +So in the first partition we would have `[7, 5, 4, 2, 1, 10]`. +Then, we take `7` would be the next pivot and we have to swap everything to the left. +This is the worst-case for this quicksort since it will perform O(n^2^) work. +If instead of partitioning by the first we do it by the middle element (or even better at random) we would have better peformance. That's why we usually shuffle the array to avoid edge cases. + +[source, javascript] +---- +include::{codedir}/algorithms/sorting/Quick-sort.js[tag=sort, indent=0] +---- +<1> Convert to array (or clone array). If you want to modify the input directly remove this line. +<2> Shuffle array to avoid edge cases (desc order arrays) + +And you can see the implementation of `shuffle` below: + +.Shuffling an array +[source, javascript] +---- +include::{codedir}/algorithms/sorting/sorting-common.js[tag=shuffle, indent=0] +---- + +With the optimization, Quicksort has a _O(n log n)_ running time. Similar to the merge sort we divide the array into halves each time. For more details about the how to extract the runtime go to <>. 
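A note on the shuffle: the classic unbiased way to randomize an array is the Fisher-Yates shuffle, which draws each random index only from the not-yet-visited portion so that every permutation is equally likely. A sketch (the function name is illustrative):

.Fisher-Yates shuffle sketch (illustrative)
[source, javascript]
----
function shuffleFisherYates(array) {
  for (let index = array.length - 1; index > 0; index -= 1) {
    const newIndex = Math.floor(Math.random() * (index + 1)); // 0..index inclusive
    [array[index], array[newIndex]] = [array[newIndex], array[index]]; // swap in place
  }
  return array;
}
----

Any such randomization removes the adversarial descending-order input, which is all quicksort needs to keep its expected _O(n log n)_ runtime.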
== Quicksort Properties @@ -60,3 +86,4 @@ indexterm:[Space Complexity, Linear] // https://algs4.cs.princeton.edu/23quicksort/ // https://twitter.com/mathias/status/1036626116654637057?lang=en // https://www.toptal.com/developers/sorting-algorithms/quick-sort +// https://stackoverflow.com/q/19255999/684957 // Is Quicksort “adaptive” and “online”? diff --git a/src/algorithms/sorting/quick-sort.js b/src/algorithms/sorting/quick-sort.js index e84be147..2d6f9927 100644 --- a/src/algorithms/sorting/quick-sort.js +++ b/src/algorithms/sorting/quick-sort.js @@ -1,4 +1,4 @@ -const { swap } = require('./sorting-common'); +const { swap, shuffle } = require('./sorting-common'); // tag::partition[] /** @@ -14,18 +14,18 @@ const { swap } = require('./sorting-common'); * @returns {integer} pivot index */ function partition(array, low, high) { - const pivotInitialIndex = high; // <1> - let pivotIndex = low; // <2> + const pivotIndex = low; // <1> + let pivotFinalIndex = pivotIndex; // <2> - for (let current = low; current < high; current += 1) { // <3> - if (array[current] <= array[pivotInitialIndex]) { // <4> - swap(array, current, pivotIndex); - pivotIndex += 1; + for (let current = pivotIndex + 1; current <= high; current++) { + if (array[current] < array[pivotIndex]) { // <3> + pivotFinalIndex += 1; // <4> + swap(array, current, pivotFinalIndex); // <5> } } - swap(array, pivotInitialIndex, pivotIndex); - return pivotIndex; + swap(array, pivotIndex, pivotFinalIndex); // <6> + return pivotFinalIndex; } // end::partition[] @@ -54,9 +54,11 @@ function quickSort(array, low = 0, high = array.length - 1) { * Quick sort * Runtime: O(n log n) * @param {Array|Set} collection elements to be sorted + * @returns {Array} sorted array */ function quickSortWrapper(collection) { const array = Array.from(collection); // <1> + shuffle(array); // <2> return quickSort(array); } // end::sort[] diff --git a/src/algorithms/sorting/sorting-common.js b/src/algorithms/sorting/sorting-common.js index 819dda7b..ca0402b1 100644 --- a/src/algorithms/sorting/sorting-common.js +++ b/src/algorithms/sorting/sorting-common.js @@ -12,20 +12,23 @@ function swap(array, from, to) { } // end::swap[] +// tag::shuffle[] /** - * Move an element in an array *from* a postion *to* another. + * Shuffle items in an array in-place * Runtime: O(n) - * @param {array} array - * @param {integer} from index of the element to remove (source) - * @param {integer} to index where the removed element would be move (destination) + * @param {*} array */ -function moveElement(array, from, to) { - if (from === to + 1) return; - const [elementToInsert] = array.splice(from, 1); // delete from position - array.splice(to + 1, 0, elementToInsert); // insert element in to the position. 
+function shuffle(array) { + const { length } = array; + for (let index = 0; index < length; index++) { + const newIndex = Math.floor(Math.random() * length); + swap(array, index, newIndex); + } + return array; } +// end::shuffle[] module.exports = { swap, - moveElement, + shuffle, }; From a1bae50766aaca14343b38073ff9c9e54cf1e533 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Tue, 26 Mar 2019 05:53:24 -0400 Subject: [PATCH 012/326] greedy and d and c --- book/chapters/backtracking.adoc | 3 +- book/chapters/chapter4.adoc | 12 ++++-- .../divide-and-conquer--fibonacci.adoc | 21 +++++------ book/chapters/divide-and-conquer--intro.adoc | 21 ++++++----- .../dynamic-programming--fibonacci.adoc | 10 ++--- book/chapters/dynamic-programming--intro.adoc | 6 ++- book/chapters/greedy-algorithms--intro.adoc | 24 ++++++------ .../greedy-algorithms--knapsack-problem.adoc | 12 +++--- book/chapters/quick-sort.adoc | 10 ++--- book/chapters/selection-sort.adoc | 37 ++++++++++++------- .../fibanacci-dynamic-programming.js | 1 + src/algorithms/knapsack-fractional.js | 3 +- src/algorithms/sorting/selection-sort.js | 6 +-- 13 files changed, 94 insertions(+), 72 deletions(-) diff --git a/book/chapters/backtracking.adoc b/book/chapters/backtracking.adoc index 790662d4..347e88cd 100644 --- a/book/chapters/backtracking.adoc +++ b/book/chapters/backtracking.adoc @@ -1,5 +1,6 @@ = Backtracking - +(((Backtracking))) +(((Algorithmic Techniques, Backtracking))) Backtracking algorithms are used to find *all (or some)* solutions that satisfy a contraint. Bracktracking builds a solution step by step using recursion. diff --git a/book/chapters/chapter4.adoc b/book/chapters/chapter4.adoc index 70cdfbb1..70c1770a 100644 --- a/book/chapters/chapter4.adoc +++ b/book/chapters/chapter4.adoc @@ -14,20 +14,23 @@ include::sorting-properties.adoc[] // Slow Sorting - +<<< include::bubble-sort.adoc[] +<<< include::insertion-sort.adoc[] +<<< include::selection-sort.adoc[] // include::insertion-selection-bubble-sort.adoc[] // Fast Sorting - +<<< include::merge-sort.adoc[] +<<< include::quick-sort.adoc[] :leveloffset: -1 @@ -36,13 +39,16 @@ include::quick-sort.adoc[] // // Algorithms Techniques // - +<<< include::divide-and-conquer.adoc[] +<<< include::dynamic-programming.adoc[] +<<< include::greedy-algorithms.adoc[] +<<< include::backtracking.adoc[] // --- end algorithms --- diff --git a/book/chapters/divide-and-conquer--fibonacci.adoc b/book/chapters/divide-and-conquer--fibonacci.adoc index 6649ff09..fab9577e 100644 --- a/book/chapters/divide-and-conquer--fibonacci.adoc +++ b/book/chapters/divide-and-conquer--fibonacci.adoc @@ -1,11 +1,10 @@ -= Recursive Fibonacci Numers - -To illustrate how we can solve a problem using divide and conquer, let's write a program to find the n-th fibonacci number. -indexterm:[Fibonacci] += Recursive Fibonacci Numbers +(((Fibonacci))) +To illustrate how we can solve a problem using divide and conquer, let's write a program to find the n-th Fibonacci number. .Fibonacci Numbers **** -Fibancci sequence is a serie of numbers that starts with `0, 1`, the next values are calculated as the sum of the previous two. So, we have: +Fibonacci sequence is a series of numbers that starts with `0, 1`; the next values are calculated as the sum of the previous two. So, we have: `0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...` **** @@ -28,7 +27,7 @@ The implementation above does the job, but what's the runtime? For that, let's take a look at the job performed calculating the `fib(5)` number. 
Since `fib(5) = fib(4) + fib(3)`, we need to find the answer for `fib(4)` and `fib(3)`. We do that recursively until we reach the base cases of `fib(1)` and `fib(0)`. If we represent the calls in a tree, we would have the following: // http://bit.ly/2UmwzZV -[graphviz, Recursive fibonacci call tree, png] +[graphviz, Recursive Fibonacci call tree, png] .... graph G { "fib(5)" -- { "fib(4)", "fib(3)" } @@ -54,13 +53,13 @@ graph G { In the diagram, we see the two recursive calls needed to compute each number. So if we follow the _O(branches^depth^)_ we get O(2^n^). [big]#🐢# -NOTE: Fibonacci is not a perfect binary tree since some nodes only have one children instead of two. The exact runtime for recursive Fibonacci is _O(1.6^n^)_ (still exponential time complexity). +NOTE: Fibonacci is not a perfect binary tree since some nodes only have one child instead of two. The exact runtime for recursive Fibonacci is _O(1.6^n^)_ (still exponential time complexity). -An exponential time complexity is pretty bad. Can we do better? +Exponential time complexity is pretty bad. Can we do better? -In the call tree you can notice that every element in red and with asterisks `*` it's called more than once. We are repeating calculations too many times! +In the call tree, you can notice that every element in red and with asterisks `*` it's called more than once. We are repeating calculations too many times! [quote, Dynamic Programming] -Those who cannot rememeber the past are condemned to repated it. +Those who cannot remember the past are condemned to repeat it. -For these cases when subproblems repeat themselves, we can optimize them using dynamic programming. +For these cases when subproblems repeat themselves, we can optimize them using <>. Let's do that in the next section. diff --git a/book/chapters/divide-and-conquer--intro.adoc b/book/chapters/divide-and-conquer--intro.adoc index 343292ac..5876599f 100644 --- a/book/chapters/divide-and-conquer--intro.adoc +++ b/book/chapters/divide-and-conquer--intro.adoc @@ -1,16 +1,17 @@ -Divide and conquer is an strategy for solving algorithmic problems. -It splits the input into manageble parts recursively and finally join solved pieces to form the end result. -indexterm:[Divide and Conquer] +(((Divide and Conquer))) +(((Algorithmic Techniques, Divide and Conquer))) +Divide and conquer is a strategy for solving algorithmic problems. +It splits the input into manageable parts recursively and finally joins solved pieces to form the solution. -We have already done some divide and conquer algorithms. This list will refresh you the memory. +We have already implemented some algorithms using the divide and conquer technique. .Examples of divide and conquer algorithms: -- <>: *divides* the input into pairs, sort them and them *join* all the pieces in ascending order. -- <>: *splits* the data by a random number called "pivot", then move everything smaller than the pivot to the left and anything bigger to the right. Repeat the process on the left and right side. Note: since this works in place doesn't need a join part. -- <>: find a value in a sorted collection by *spliting* the data in half until it finds the value. -- <>: *Take out* the first element from the input and solve permutation for the reminder of the data recursively, then *join* results and append the elements that were take out. +- <>: *divides* the input into pairs, sort them, and them *join* all the pieces in ascending order. 
+- <>: *splits* the data by a random number called "pivot", then move everything smaller than the pivot to the left and anything more significant to the right. Repeat the process on the left and right side. Note: since this works in place doesn't need a "join" part. +- <>: find a value in a sorted collection by *splitting* the data in half until it sees the value. +- <>: *Take out* the first element from the input and solve permutation for the remainder of the data recursively, then *join* results and append the items that were taken out. -.In general, the divide and conquer algorithms have the following receipe: +.In general, the divide and conquer algorithms have the following pattern: 1. *Divide* data into subproblems. 2. *Conquer* each subproblem. -3. *Combine* solutions. +3. *Combine* results. diff --git a/book/chapters/dynamic-programming--fibonacci.adoc b/book/chapters/dynamic-programming--fibonacci.adoc index 26066210..906aa18b 100644 --- a/book/chapters/dynamic-programming--fibonacci.adoc +++ b/book/chapters/dynamic-programming--fibonacci.adoc @@ -1,9 +1,8 @@ = Fibonacci Sequence with Dynamic Programming - +(((Fibonacci))) Let's solve the same Fibonacci problem but this time with dynamic programming. When we have recursive functions doing duplicated work is the perfect place for a dynamic programming optimization. We can save (or cache) the results of previous operations and speed up future computations. -indexterm:[Fibonacci] .Recursive Fibonacci Implemenation using Dynamic Programming [source, javascript] @@ -11,7 +10,7 @@ indexterm:[Fibonacci] include::{codedir}/algorithms/fibanacci-dynamic-programming.js[tag=snippet,indent=0] ---- -This implementation checks if we already calculated the value in the past, if not it will save it for later use. +This implementation checks if we already calculated the value, if so it will save it for later use. [graphviz, Recursive Fibonacci call tree with dp, svg] .... @@ -23,7 +22,8 @@ graph G { } .... -This looks pretty linear now. It's runtime _O(n)_! +This graph looks pretty linear now. It's runtime _O(n)_! indexterm:[Runtime, Linear] -TIP: Saving previous results for later is a technique called "memoization" and is very common to optimize recursive algorithms with exponential time complexity. +(((Memoization))) +TIP: Saving previous results for later is a technique called "memoization". This is very common to optimize recursive algorithms with overlapping subproblems. It can make exponential algorithms linear! diff --git a/book/chapters/dynamic-programming--intro.adoc b/book/chapters/dynamic-programming--intro.adoc index 115cb03c..3323beb5 100644 --- a/book/chapters/dynamic-programming--intro.adoc +++ b/book/chapters/dynamic-programming--intro.adoc @@ -1,4 +1,6 @@ -Dynamic programming (dp) is a way to solve algorithmic problems with *overlapping subproblems*. Algorithms using dp find the base case and building a solution from the ground-up. They also _keep track_ of previous answers to avoid re-computing the same operations. +(((Dynamic Programming))) +(((Algorithmic Techniques, Dynamic Programming))) +Dynamic programming (dp) is a way to solve algorithmic problems with *overlapping subproblems*. Algorithms using dp find the base case and building a solution from the ground-up. Dp _keep track_ of previous results to avoid re-computing the same operations. 
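The "keep track of previous results" idea can be packaged as a tiny helper. Below is a sketch; the `memoize` name and the cache-key scheme are illustrative, not a fixed API.

.Memoization helper sketch (illustrative)
[source, javascript]
----
function memoize(fn) {
  const cache = new Map();                            // previous answers, keyed by arguments
  return (...args) => {
    const key = args.join(',');
    if (!cache.has(key)) cache.set(key, fn(...args)); // compute each subproblem only once
    return cache.get(key);
  };
}

// Recursive calls go through the memoized binding, so overlapping
// subproblems hit the cache and the exponential blowup disappears.
const fib = memoize(n => (n < 2 ? n : fib(n - 1) + fib(n - 2)));
fib(50); // ↪️ 12586269025, computed in linear time
----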
// https://twitter.com/amejiarosario/status/1103050924933726208 // https://www.quora.com/How-should-I-explain-dynamic-programming-to-a-4-year-old/answer/Jonathan-Paulson @@ -18,6 +20,6 @@ $$*$$*_Write down 1+1+1+1+1+1+1+1+1+1_*$$*$$ --{sp} $$*$$*_Quickly_*$$*$$ Eleven! ---{sp} Why you get the result so quickly? Ah, you got it faster by adding one to the memorized previous result. So Dynamic Programming is a fancy way of saying: "remembering past results to save time later" +--{sp} Why you get the result so quickly? Ah, you got it faster by adding one to the memorized previous answer. So Dynamic Programming is a fancy way of saying: "remembering past solutions to save time later." **** diff --git a/book/chapters/greedy-algorithms--intro.adoc b/book/chapters/greedy-algorithms--intro.adoc index fe21fdf0..b0a2789d 100644 --- a/book/chapters/greedy-algorithms--intro.adoc +++ b/book/chapters/greedy-algorithms--intro.adoc @@ -1,9 +1,11 @@ -Greedy algorithms are designed to find solution by going one step at time and using heuristics to determine the best choice. +(((Greedy Algorithms))) +(((Algorithmic Techniques, Greedy Algorithms))) +Greedy algorithms are designed to find a solution by going one step at a time and using heuristics to determine the best choice. They are quick but not always lead to most optimum results since it might not take into consideration all the options to give a solution. -A good example of a greedy algorithms that doesn't work well is finding the largest sum on a tree. +An excellent example of a greedy algorithm that doesn't work well is finding the largest sum on a tree. -[graphviz, Find largest sum, svg] +[graphviz, Find the largest sum, svg] .... graph G { 5 -- 3 [color="#B8E986", penwidth=2] @@ -17,27 +19,27 @@ graph G { } .... -The greedy algorithm will start at the root and say "Which number is bigger 3 or 7?" Then go with 7 and later 4. As you can see in the diagram, the largest sum would be the path `7 - 3 - 87`. A greedy algorithm never go back on it's options. This makes it different from dynamic programming which exhaustive and it's gurantee to find the best option. +Let's say the greedy heuristics are set to take the more significant value. The greedy algorithm will start at the root and say "Which number is bigger 3 or 7?" Then go with 7 and later 4. As you can see in the diagram, the most significant sum would be the path `7 - 3 - 87`. A greedy algorithm never goes back on its options. This greedy choice makes it different from dynamic programming which exhaustive and it's guaranteed to find the best option. However, when they work well, they are usually faster than other options. -Greedy algorithms are well suited when a local optimal solution is also a global optimal solution. +Greedy algorithms are well suited when an optimal local solution is also a globally optimal solution. [TIP] ==== -Greedy algorithms makes the choice that looks best in the moment based on a heuristic such as smallest, largest, best ratio, and so on. -This algorithm only give one shot at finding the solution and never goes back to consider other options. +Greedy algorithms make the choice that looks best at the moment based on a heuristic such as smallest, largest, best ratio, and so on. +This algorithm only gives one shot at finding the solution and never goes back to consider other options. ==== -Don't get the wrong idea some greedy algorithms works very well if they are designed correctly. 
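To see the greedy pitfall in running code, here is a sketch of the tree example above. The node values roughly follow the figure, but treat them as illustrative.

.Greedy choice vs. exhaustive search (illustrative)
[source, javascript]
----
const tree = { value: 5, children: [
  { value: 3, children: [{ value: 87, children: [] }, { value: 2, children: [] }] },
  { value: 7, children: [{ value: 4, children: [] }, { value: 2, children: [] }] },
] };

function greedyPathSum(node) {          // heuristic: always take the bigger child
  let sum = node.value;
  for (let n = node; n.children.length;) {
    n = n.children.reduce((a, b) => (a.value > b.value ? a : b));
    sum += n.value;
  }
  return sum;
}

function bestPathSum(node) {            // exhaustive: checks every root-to-leaf path
  if (!node.children.length) return node.value;
  return node.value + Math.max(...node.children.map(bestPathSum));
}

greedyPathSum(tree); // ↪️ 16 (5 → 7 → 4): never looks back
bestPathSum(tree);   // ↪️ 95 (5 → 3 → 87): the sum the greedy choice missed
----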
+Don't get the wrong idea; some greedy algorithms work very well if they are designed correctly. -.Some examples greedy algorithms that works well: +.Some examples of greedy algorithms that works well: - <>: we select the best (minimum value) remove it from the input and then select the next minimum until everything is processed. -- <>: the "merge" uses a greedy algorithm, where it combine two sorted arrays by looking at their current values and choosing the best (minimum) at every time. +- <>: the "merge" uses a greedy algorithm, where it combines two sorted arrays by looking at their current values and choosing the best (minimum) at every time. indexterm:[Merge Sort] .In general, we can follow these steps to design Greedy Algorithms: 1. Take a sample from the input data (usually in a data structure like array/list, tree, graph). 2. Greedy choice: use a heuristic function that will choose the best candidate. E.g., Largest/smallest number, best ratio, etc. -3. Reduce the processed input and pepeat step #1 and #2 until all data is gone. +3. Reduce the processed input and repeat step #1 and #2 until all data is gone. 4. Return solution. 5. Check correctness with different examples and edge cases. diff --git a/book/chapters/greedy-algorithms--knapsack-problem.adoc b/book/chapters/greedy-algorithms--knapsack-problem.adoc index 51e16cef..eb6f8a8f 100644 --- a/book/chapters/greedy-algorithms--knapsack-problem.adoc +++ b/book/chapters/greedy-algorithms--knapsack-problem.adoc @@ -2,7 +2,7 @@ We are going to use the "Fractional Knapsack Problem" to learn how to design greedy algorithms. The problem is the following: -> You are going to steal legumes (rice, beans, chickpeas, lentils) and you only brought a knapsack. What proportion of items can you choose to to get the highest loot without exceeding the maximum weight of the bag? +> You are going to resell legumes (rice, beans, chickpeas, lentils) and you only brought a knapsack. What proportion of items can you choose to get the highest loot without exceeding the maximum weight of the bag? Let's say we have the following items available. @@ -19,18 +19,17 @@ const items = [ const maxWeight = 7; ---- -So, we have 4 items that we can take. We can't take them all because the total weight is `13` and the maximum we can carry is `7`. We can't just take the first one because with value `1` because obviosly is not the best profit. +So, we have four items that we can choose from. We can't take them all because the total weight is `13` and the maximum we can carry is `7`. We can't just take the first one because with value `1` because is not the best profit. How would you solve this problem? - First, we have to define what parameters are we going to use to make our *greedy choice*. This some ideas: -- We can take items with the *largest* value in hopes to get maximize profit. Based on that we can make take the last item and first having a total weight of 7 and total value of 8. +- We can take items with the *largest* value in hopes to maximize profit. Based on that we can make take the last item and first having a total weight of 7 and a total cost of 8. -- Also, we could take items *smallest* weight so we can fit as much as possible. Let's analyze both options. So we can take the first 2 items for a total value of 5 and total weight of 4. This is worst! [big]#👎# +- Also, we could take items *smallest* weight so we can fit as much as possible. Let's analyze both options. So we can choose the first two items for a total value of 5 and a total weight of 4. 
This is worse than picking the largest value! [big]#👎# -- One last idea, we can take items based on the *best* value/weight ratio and take fractions of an item to fill up the knapsack to maximum weight. In that case, we can take the last item and 2/3 of the 2nd item. We get a total value of `9.67` and total weight of `7`. +- One last idea, we can take items based on the *best* value/weight ratio and take fractions of an article to fill up the knapsack to maximum weight. In that case, we can buy the last item in full and 2/3 of the 2nd item. We get a total value of `9.67` and a total weight of `7`. This heuristics seems to be the most profitable. [big]#👍# .Items value/weight ratio ---- @@ -42,7 +41,6 @@ First, we have to define what parameters are we going to use to make our *greedy Let's implement this algorithm! - .Factional Knapsack Problem Implementation [source, javascript] ---- diff --git a/book/chapters/quick-sort.adoc b/book/chapters/quick-sort.adoc index aa9e2b8b..7d59576e 100644 --- a/book/chapters/quick-sort.adoc +++ b/book/chapters/quick-sort.adoc @@ -1,7 +1,7 @@ = Quicksort (((Sorting, QuickSort))) (((QuickSort))) -Quicksort is an efficient recursive sorting algorithm that uses <> paradigm to sort faster. It can be implemented in-place, so it doesn't require additonal memory. +Quicksort is an efficient recursive sorting algorithm that uses <> paradigm to sort faster. It can be implemented in-place, so it doesn't require additional memory. indexterm:[Divide and Conquer] In practice, quicksort outperforms other sorting algorithms like <>. And, of course, It also outperforms simple sorting algorithms like <>, <> and <>. @@ -41,7 +41,7 @@ include::{codedir}/algorithms/sorting/Quick-sort.js[tag=partition, indent=0] <3> Check all values other than the pivot to see if any value is smaller than the pivot. <4> If the `current` element's value is less than the pivot, then increment `pivotFinalIndex` to make room on the left side. <5> We also swap the smaller element to the left side since it's smaller than the pivot. -<6> Finally, we move the pivot to its final position. Everything to the left is smaller than the pivot and everything to the right is bigger. +<6> Finally, we move the pivot to its final position. Everything to the left is smaller than the pivot, and everything to the right is bigger. *What would happen if use Quicksort for an array in reverse order?* @@ -49,8 +49,8 @@ E.g. `[10, 7, 5, 4, 2, 1]`, if we always choose the first element as the pivot, So in the first partition we would have `[7, 5, 4, 2, 1, 10]`. Then, we take `7` would be the next pivot and we have to swap everything to the left. -This is the worst-case for this quicksort since it will perform O(n^2^) work. -If instead of partitioning by the first we do it by the middle element (or even better at random) we would have better peformance. That's why we usually shuffle the array to avoid edge cases. +Descending arrays are the worst-case for this quicksort since it will perform O(n^2^) work. +If instead of partitioning by the first element we do it by the middle (or even better at random) we would have better performance. That's why we usually shuffle the array before doing quicksort to avoid edge cases. [source, javascript] ---- @@ -67,7 +67,7 @@ And you can see the implementation of `shuffle` below: include::{codedir}/algorithms/sorting/sorting-common.js[tag=shuffle, indent=0] ---- -With the optimization, Quicksort has a _O(n log n)_ running time. 
Similar to the merge sort we divide the array into halves each time. For more details about the how to extract the runtime go to <>. +With the optimization, Quicksort has an _O(n log n)_ running time. Similar to the merge sort we divide the array into halves each time. For more details about how to extract the runtime go to <>. == Quicksort Properties diff --git a/book/chapters/selection-sort.adoc b/book/chapters/selection-sort.adoc index 77eb0170..bc763236 100644 --- a/book/chapters/selection-sort.adoc +++ b/book/chapters/selection-sort.adoc @@ -2,33 +2,33 @@ (((Sorting, Selection Sort))) (((Selection Sort))) -The selection sort is a simple sorting algorithm. As its name indicates, it chooses the lowest element from the list and move it where it should be. +The selection sort is a simple sorting algorithm. As its name indicates, it _selects_ the lowest element from the list and moves it where it should be. .Selection sort algorithm . Start with the element in position 0. -. Find minimum element in the rest of array. If a new minimun is found swap them. -. Repeat with the element in postion 1 and so until the last one. +. Find the minimum item in the rest of the array. If a new minimum is found swap them. +. Repeat step #1 and #2 with the next element until the last one. image:selection-sort.gif[] == Selection sort implementation -For implementing the selection sort we need 2 indexes. +For implementing the selection sort, we need two indexes. .Selection sort [source, javascript] ---- include::{codedir}/algorithms/sorting/selection-sort.js[tag=sort, indent=0] ---- -<1> Converts any collection to an array or clone existing array -<2> Visit all elements in the array starting from 1st element (index 0) -<3> Everything to the left side is considered sorted in it's final position. So, select `left` as the minimun value -<4> Compare the `selection` to every element to the right -<5> If it finds a value *smaller* than the selection, then update the `selection`. -<6> Put the next smallest item to it's final position +<1> Converts any collection to an array or clone existing array. +<2> Visit all elements in the array starting from the 1st element (index 0). +<3> Everything to the left side is considered sorted in its final position. So, select `left` as the initial minimum value. +<4> Compare the `selection` to every element to the right side. +<5> If it finds a value _smaller_ than the selection, then update the `selection`. +<6> Put the next smallest item to its final position -TIP: Selection sort minimize the number of swaps. It only does a swap per cycle while insertion sort and bubble sort swap inmediately. +TIP: Selection sort minimizes the number of swaps. It does one swap per iteration while insertion sort and bubble sort could swap many times with the same array. -One index is for the position in question (selection/left) and another one for finding the minimun in the rest of the array (right). +One index is for the position in question (selection/left) and another one for finding the minimum in the rest of the array (right). == Selection Sort Properties @@ -39,5 +39,16 @@ One index is for the position in question (selection/left) and another one for f - Time Complexity: [big]#⛔️# <> _O(n^2^)_ - Space Complexity: [big]#✅# <> _O(1)_ -CAUTION: Selection sort is the worst compared <> and <>. The only advantage of selection sort is that it minimizes the number of swaps. In case, that swapping is expensive then it could make sense to use this one over the others. 
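Since the implementation lives in an `include::` file, here is a compact sketch of the same select-and-swap loop, mirroring the annotations above (illustrative, not the book's exact source):

.Selection sort sketch (illustrative)
[source, javascript]
----
function selectionSort(iterable) {
  const array = Array.from(iterable);                  // clone, don't mutate the input
  for (let left = 0; left < array.length; left += 1) {
    let selection = left;                              // assume current is the minimum
    for (let right = left + 1; right < array.length; right += 1) {
      if (array[selection] > array[right]) selection = right; // found something smaller
    }
    [array[left], array[selection]] = [array[selection], array[left]]; // one swap per pass
  }
  return array;
}

selectionSort([2, 5, 2, 1]); // ↪️ [1, 2, 2, 5] (note the long-range swap on the first pass)
----

That single long-range swap per pass is also what breaks stability, as the next section explains with this very input.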
+*Why selection sort is not stable?* + +To recap, _stable_ means that items with the same value keep their initial position. +Let's see what would happen with the selection sort if we (select) sort the following array `2, 5, 2, 1`. To distinguish them let's say `2a` and `2b`, so `2a, 5, 2b, 1`. + +Initially, we select the first element `2a` and check if there's anything less than 2 in the array. We find out that position 3 has an item with a smaller value (`1`) so we swap them. + +Now, we have: `1, 5, 2b, 2a`. +There you have it, `2b` now comes before `2a`. + +// CAUTION: In practice, selection sort performance is the worst compared <> and <>. The only advantage of selection sort is that it minimizes the number of swaps. In case, that swapping is expensive, then it could make sense to use this one over the others. + indexterm:[Runtime, Quadratic] diff --git a/src/algorithms/fibanacci-dynamic-programming.js b/src/algorithms/fibanacci-dynamic-programming.js index ed5a2850..9c20e082 100644 --- a/src/algorithms/fibanacci-dynamic-programming.js +++ b/src/algorithms/fibanacci-dynamic-programming.js @@ -2,6 +2,7 @@ /** * Get Fibonacci number on the n-th position. * @param {integer} n position on the sequence + * @param {Map} memo cache of previous solutions * @returns {integer} n-th number */ function fib(n, memo = new Map()) { diff --git a/src/algorithms/knapsack-fractional.js b/src/algorithms/knapsack-fractional.js index 237dece2..daef2a41 100644 --- a/src/algorithms/knapsack-fractional.js +++ b/src/algorithms/knapsack-fractional.js @@ -1,3 +1,4 @@ +/* eslint-disable no-mixed-operators */ // tag::snippet[] /** * Solves Bounded Knapsack Problem (BKP) @@ -11,7 +12,7 @@ function solveFractionalKnapsack(input, max) { const items = []; // sort by value/weight ratio - input.sort((a, b) => a.value/a.weight - b.value/b.weight); // eslint-disable-line + input.sort((a, b) => a.value / a.weight - b.value / b.weight); while (input.length && weight < max) { const bestRatioItem = input.pop(); diff --git a/src/algorithms/sorting/selection-sort.js b/src/algorithms/sorting/selection-sort.js index ac53960e..98e1238d 100644 --- a/src/algorithms/sorting/selection-sort.js +++ b/src/algorithms/sorting/selection-sort.js @@ -16,10 +16,10 @@ function selectionSort(collection) { const array = Array.from(collection); // <1> for (let left = 0; left < array.length; left++) { // <2> - let selection = left; // minimum value <3> + let selection = left; // <3> - for (let right = left + 1; right < array.length; right++) { // <4> - if (array[selection] > array[right]) { + for (let right = left + 1; right < array.length; right++) { + if (array[selection] > array[right]) { // <4> selection = right; // <5> } } From 9eb2bfbb987e00768a1d74f11d8989384bad38ff Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Tue, 26 Mar 2019 06:43:20 -0400 Subject: [PATCH 013/326] finish part 4 --- book/chapters/algorithmic-toolbox.adoc | 31 +++++++++++++ book/chapters/algorithms-analysis.adoc | 2 +- book/chapters/backtracking.adoc | 45 +++++++++---------- book/chapters/big-o-examples.adoc | 4 +- book/chapters/chapter4.adoc | 3 ++ book/chapters/epigraph.adoc | 2 +- .../greedy-algorithms--knapsack-problem.adoc | 2 +- 7 files changed, 61 insertions(+), 28 deletions(-) create mode 100644 book/chapters/algorithmic-toolbox.adoc diff --git a/book/chapters/algorithmic-toolbox.adoc b/book/chapters/algorithmic-toolbox.adoc new file mode 100644 index 00000000..7304dad5 --- /dev/null +++ b/book/chapters/algorithmic-toolbox.adoc @@ -0,0 +1,31 @@ += Algorithmic 
Toolbox
+
+Have you ever been given a programming problem and frozen without knowing where to start?
+Well, in this section we are going to give some tips, so you don't get stuck while coding.
+
+TIP: Don't start coding right away. First, solve the problem, then write the code.
+
+.Steps to solve algorithmic problems
+. *Understand* the requirements. Reframe them in your own words.
+. Draw a *simple example* (no edge cases yet).
+. Brainstorm.
+.. How would you solve this problem *manually* (without a computer)? Is there any formula or theorem you can use?
+.. Are there any heuristics (largest, smallest, best ratio), or can you spot a pattern to solve this problem using a <>?
+.. Can you address the simple base case and generalize for other cases using a *recursive solution*?
+.. Do you have to generate multiple solutions or try different paths? Try <>.
+.. If everything else fails, solve it the dumbest way possible (brute force). We can optimize it later.
+. Optimize the solution.
+.. Re-read the requirements and see if you can take advantage of anything. E.g., is the array sorted?
+.. Do you have a bunch of overlapping subproblems? Try <>.
+.. Can you trade off space for speed? Use a <> to speed up results.
+. Test your algorithm with multiple examples.
+. *Code*, yes, now you can code.
+.. Modularize your code with functions (don't do it all in one giant function, please 🙏).
+. Test your code.
+.. Choose a typical input and test it against your code.
+.. Brainstorm about edge cases (empty input, null values, overflows).
+.. How would you scale your code?
+
+These steps should get you going even with the toughest algorithmic problems.
+
+Stay effective!
diff --git a/book/chapters/algorithms-analysis.adoc b/book/chapters/algorithms-analysis.adoc
index 59f3e440..f85bc17c 100644
--- a/book/chapters/algorithms-analysis.adoc
+++ b/book/chapters/algorithms-analysis.adoc
@@ -59,7 +59,7 @@ To give you a clearer picture of how different algorithms perform as the input s
 |Find all permutations of a string |4 sec. |> vigintillion years |> centillion years |∞ |∞
 |=============================================================================================
-indexterm:[Permutation]
+indexterm:(((Permutations)))
 
 However, if you keep the input size constant, you can notice the difference between an efficient algorithm and a slow one. An excellent sorting algorithm is `mergesort` for instance, and inefficient algorithm for large inputs is `bubble sort` . Organizing 1 million elements with merge sort takes 20 seconds while bubble sort takes 12 days, ouch! The amazing thing is that both programs are measured on the same hardware with the same data!
 
diff --git a/book/chapters/backtracking.adoc b/book/chapters/backtracking.adoc
index 347e88cd..13496bd1 100644
--- a/book/chapters/backtracking.adoc
+++ b/book/chapters/backtracking.adoc
@@ -1,11 +1,11 @@
 = Backtracking
 (((Backtracking)))
 (((Algorithmic Techniques, Backtracking)))
-Backtracking algorithms are used to find *all (or some)* solutions that satisfy a contraint.
+Backtracking algorithms are used to find *all (or some)* solutions that satisfy a constraint.
 
-Bracktracking builds a solution step by step using recursion.
+Backtracking builds a solution step by step using recursion.
 If during the process it realizes a given path is not going to lead to a solution,
-it stops and step back (backtracking) to try a different alternative.
+it stops and steps back (backtracks) to try another alternative.
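The make-a-change / recurse / test / undo loop looks like this in practice. A self-contained sketch follows; the problem (finding subsets of `nums` that add up to `target`) is made up for illustration and assumes positive numbers.

.Backtracking sketch: subsets with a given sum (illustrative)
[source, javascript]
----
function subsetsWithSum(nums, target) {
  const solutions = [];
  const walk = (start, chosen, sum) => {
    if (sum === target) solutions.push([...chosen]); // test: is this a solution?
    for (let i = start; i < nums.length; i += 1) {
      chosen.push(nums[i]);                          // make a change
      if (sum + nums[i] <= target) {                 // prune: constraint still holds?
        walk(i + 1, chosen, sum + nums[i]);          // recurse to the next element
      }
      chosen.pop();                                  // undo the change (backtrack)
    }
  };
  walk(0, [], 0);
  return solutions;
}

subsetsWithSum([2, 4, 6, 10], 16); // ↪️ [[2, 4, 10], [6, 10]]
----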
Some examples that use backtracking is a solving Sudoku/crosswords puzzle, and graph operations. @@ -14,37 +14,37 @@ image:Sudoku_solved_by_bactracking.gif[] endif::backend-pdf[] Listing all possible solutions might sound like a brute force. -However, is not the same. +However, it is not the same. Backtracking algorithms are faster than brute force one. -.Brute Force vs Backtracking Algorithms +.Brute Force vs. Backtracking Algorithms **** -*Brute force* evaluates every possiblity. -*Bracktracking* is an optimized brute force. +*Brute force* evaluates every possibility. +*Backtracking* is an optimized brute force. It stops evaluating a path as soon as some of the conditions are broken and move on to the next. -However, it an only be applied if a quick test can be run to tell if a candidate will contribute to a valid solution. +However, it can only be applied if a quick test can be run to tell if a candidate will contribute to a valid solution. **** == How to develop backtracking algorithms? -Backtracking algorithms can be tricky to get right or reason about but we are going to follow this recipe to make it easier. +Backtracking algorithms can be tricky to get right or reason about, but we are going to follow this recipe to make it easier. .Steps to create backtracking algorithms -. Iterate through all the elements in the input +. Iterate through the given input . Make a change -. Recursive function moving to the next element -. Test if the current change is a solution -. Revert back the change (backtracking) +. Recursively move to the next element +. Test if the current change is a possible solution +. Revert the change (backtracking) and try with the next item Let's do an exercise to explain better how backtracking works. // https://leetcode.com/problems/combination-sum/description/ == Permutations +(((Permutations))) +(((Words permutations))) +> Return all the permutations (without repetitions) of a word. -> Return all the permutations (without repetitions) of a given word. - -indexterm:[Permutation] For instace, if you are given the word `art` these are the possible permutations: ---- @@ -58,7 +58,6 @@ For instace, if you are given the word `art` these are the possible permutations Now, let's implement the program to generate all permutations of a word. - NOTE: We already solved this problem using an <>, now let's do it using backtracking. .Word permutations using backtracking @@ -66,15 +65,15 @@ NOTE: We already solved this problem using an < Iterate through all the elements in the input +<1> Iterate through all the elements <2> Make a change: swap letters <3> Recursive function moving to the next element <4> Test if the current change is a solution: reached the end of the string. <5> Revert back the change (backtracking): Undo swap from step 2 -As you can see, we iterate through each letter and swap with the following letters until we reach the end of the string. Then, we rollback the change and try another path. +As you can see, we iterate through each element and swap with the following letters until we reach the end of the string. Then, we roll back the change and try another path. -In the following tree, you can visualize how the backtracking algorithm is swaping the letters. +In the following tree, you can visualize how the backtracking algorithm is swapping the letters. We are taking the `art` as an example. [graphviz, Words Permutations, svg] .... @@ -116,12 +115,12 @@ digraph g { .... .Legend: -- The [red]#red# words are the iterations added to the solution array. 
+- The asterisk (`*`) indicates `start` index. - *Black* arrows indicate the `swap` operations. - *Grey* arrows indicate the _backtracking_ operation (undo swap). -- The asterisk (`*`) indicates `start` index. +- The [red]#red# words are the iterations added to the solution array. -Most of backtracking algorihtms do something similar. What changes is the test function to determine if a current iteration is a solution or not. +Most of the backtracking algorithms do something similar. What changes is the test function or base case to determine if a current iteration is a solution or not. diff --git a/book/chapters/big-o-examples.adoc b/book/chapters/big-o-examples.adoc index 9c416ada..7674a871 100644 --- a/book/chapters/big-o-examples.adoc +++ b/book/chapters/big-o-examples.adoc @@ -219,9 +219,9 @@ A factorial is the multiplication of all the numbers less than itself down to 1. [#factorial-example] === Getting all permutations of a word - +(((Permutations))) +(((Words permutations))) One classic example of an _O(n!)_ algorithm is finding all the different words that can be formed with a given set of letters. -indexterm:[Permutation] .Word's permutations // image:image15.png[image,width=528,height=377] diff --git a/book/chapters/chapter4.adoc b/book/chapters/chapter4.adoc index 70c1770a..c6fad63d 100644 --- a/book/chapters/chapter4.adoc +++ b/book/chapters/chapter4.adoc @@ -51,6 +51,9 @@ include::greedy-algorithms.adoc[] <<< include::backtracking.adoc[] +<<< +include::algorithmic-toolbox.adoc[] + // --- end algorithms --- :leveloffset: -1 diff --git a/book/chapters/epigraph.adoc b/book/chapters/epigraph.adoc index 4eecf051..a70dd6d3 100644 --- a/book/chapters/epigraph.adoc +++ b/book/chapters/epigraph.adoc @@ -1,4 +1,4 @@ [epigraph] = Epigraph -Thanks for reading this book! +Thanks for reading this book. Stay effective! diff --git a/book/chapters/greedy-algorithms--knapsack-problem.adoc b/book/chapters/greedy-algorithms--knapsack-problem.adoc index eb6f8a8f..11f38b4a 100644 --- a/book/chapters/greedy-algorithms--knapsack-problem.adoc +++ b/book/chapters/greedy-algorithms--knapsack-problem.adoc @@ -19,7 +19,7 @@ const items = [ const maxWeight = 7; ---- -So, we have four items that we can choose from. We can't take them all because the total weight is `13` and the maximum we can carry is `7`. We can't just take the first one because with value `1` because is not the best profit. +So, we have four items that we can choose from. We can't take them all because the total weight is `13` and the maximum we can carry is `7`. We can't just take the first one because with value `1` because it is not the best profit. How would you solve this problem? 
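One greedy idea is to grab the items with the best value-to-weight ratio first. The sketch below assumes the *fractional* variant of the problem, where an item can be split; greedy by ratio is only guaranteed optimal in that variant. The function name `solveFractionalKnapsack` is illustrative and not from the book's codebase.

.Greedy sketch for the fractional knapsack (illustrative)
[source, javascript]
----
// Take the best value/weight ratio first; top off with a fraction if needed.
function solveFractionalKnapsack(items, maxWeight) {
  const byRatio = [...items]
    .sort((a, b) => b.value / b.weight - a.value / a.weight);

  let weight = 0;
  let value = 0;
  for (const item of byRatio) {
    if (weight >= maxWeight) break;
    const take = Math.min(item.weight, maxWeight - weight); // maybe a fraction
    value += (item.value / item.weight) * take;
    weight += take;
  }
  return value;
}
----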
From a0f21a2fdee1fa3f61806e8b8494c07d7d77bced Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Tue, 26 Mar 2019 18:51:33 -0400 Subject: [PATCH 014/326] update indexes --- book/chapters/array.adoc | 6 ++- book/chapters/big-o-examples.adoc | 25 +++++++---- book/chapters/bubble-sort.adoc | 2 + book/chapters/chapter3.adoc | 5 +++ .../divide-and-conquer--fibonacci.adoc | 3 +- .../dynamic-programming--fibonacci.adoc | 3 +- book/chapters/graph.adoc | 3 +- book/chapters/insertion-sort.adoc | 4 +- book/chapters/linked-list.adoc | 7 ++- book/chapters/map-hashmap-vs-treemap.adoc | 5 ++- book/chapters/map-hashmap.adoc | 4 +- book/chapters/map-intro.adoc | 3 +- book/chapters/map-treemap.adoc | 3 +- book/chapters/merge-sort.adoc | 4 ++ .../non-linear-data-structures-intro.adoc | 2 +- book/chapters/queue.adoc | 3 +- book/chapters/quick-sort.adoc | 7 ++- book/chapters/selection-sort.adoc | 3 +- book/chapters/set.adoc | 5 ++- book/chapters/stack.adoc | 3 +- book/chapters/tree--avl.adoc | 23 +++++----- book/chapters/tree--binary-search-tree.adoc | 20 +++++---- .../chapters/tree--binary-tree-traversal.adoc | 23 +++++----- book/chapters/tree--search.adoc | 44 +++++++++---------- .../tree--self-balancing-rotations.adoc | 24 +++++----- book/chapters/tree.adoc | 20 ++++++--- src/data-structures/trees/avl-tree.js | 10 ++--- 27 files changed, 160 insertions(+), 104 deletions(-) diff --git a/book/chapters/array.adoc b/book/chapters/array.adoc index 68fe1cd7..4da1ac43 100644 --- a/book/chapters/array.adoc +++ b/book/chapters/array.adoc @@ -1,5 +1,6 @@ = Array - +(((Array))) +(((Data Structures, Linear, Array))) Arrays are one of the most used data structures. You probably have used it a lot but are you aware of the runtimes of `splice`, `shift` and other operations? In this chapter, we are going deeper into the most common operations and their runtimes. == Array Basics @@ -184,4 +185,5 @@ To sum up, the time complexity on an array is: ^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ | Array ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) |=== -indexterm:[Runtime, Linear] +(((Linear))) +(((Runtime, Linear))) diff --git a/book/chapters/big-o-examples.adoc b/book/chapters/big-o-examples.adoc index 7674a871..d0ec307c 100644 --- a/book/chapters/big-o-examples.adoc +++ b/book/chapters/big-o-examples.adoc @@ -23,7 +23,8 @@ image:image5.png[CPU time needed vs. Algorithm runtime as the input size increas The above chart shows how the running time of an algorithm is related to the amount of work the CPU has to perform. As you can see O(1) and O(log n) are very scalable. However, O(n^2^) and worst can make your computer run for years [big]#😵# on large datasets. We are going to give some examples so you can identify each one. == Constant - +(((Constant))) +(((Runtime, Constant))) Represented as *O(1)*, it means that regardless of the input size the number of operations executed is always the same. Let’s see an example. [#constant-example] @@ -44,7 +45,8 @@ Another more real life example is adding an element to the begining of a <> you will get the answer in seconds! [big]#🚀# == Cubic - +(((Cubic))) +(((Runtime, Cubic))) Cubic *O(n^3^)* and higher polynomial functions usually involve many nested loops. As an example of a cubic algorithm is a multi-variable equation solver (using brute force): [#cubic-example] @@ -174,7 +179,8 @@ WARNING: This just an example, there are better ways to solve multi-variable equ As you can see three nested loops usually translates to O(n^3^). 
If you have a four variable equation and four nested loops it would be O(n^4^) and so on when we have a runtime in the form of _O(n^c^)_, where _c > 1_, we can refer as a *polynomial runtime*. == Exponential - +(((Exponential))) +(((Runtime, Exponential))) Exponential runtimes, O(2^n^), means that every time the input grows by one the number of operations doubles. Exponential programs are only usable for a tiny number of elements (<100) otherwise it might not finish on your lifetime. [big]#💀# Let’s do an example. @@ -203,7 +209,8 @@ include::{codedir}/runtimes/07-sub-sets.js[tag=snippet] Every time the input grows by one the resulting array doubles. That’s why it has an *O(2^n^)*. == Factorial - +(((Factorial))) +(((Runtime, Factorial))) Factorial runtime, O(n!), is not scalable at all. Even with input sizes of ~10 elements, it will take a couple of seconds to compute. It’s that slow! [big]*🍯🐝* .Factorial diff --git a/book/chapters/bubble-sort.adoc b/book/chapters/bubble-sort.adoc index ada2cb21..0e3f5aa1 100644 --- a/book/chapters/bubble-sort.adoc +++ b/book/chapters/bubble-sort.adoc @@ -7,6 +7,8 @@ Bubble sort is a simple sorting algorithm that "bubbles up" the biggest values t It's also called _sinking sort_ because the most significant values "sink" to the right side of the array. This algorithm is adaptive, which means that if the array is already sorted, it will take only _O(n)_ to "sort". However, if the array is entirely out of order, it will require _O(n^2^)_ to sort. +(((Quadratic))) +(((Runtime, Quadratic))) == Bubble Sort Implementation diff --git a/book/chapters/chapter3.adoc b/book/chapters/chapter3.adoc index 2e63d7c6..2ab87fc3 100644 --- a/book/chapters/chapter3.adoc +++ b/book/chapters/chapter3.adoc @@ -11,20 +11,25 @@ include::tree.adoc[] // (g) +<<< include::tree--binary-search-tree.adoc[] +<<< include::tree--search.adoc[] +<<< include::tree--self-balancing-rotations.adoc[] :leveloffset: +1 +<<< include::tree--avl.adoc[] :leveloffset: -1 // (g) // include::map.adoc[] +<<< include::map-intro.adoc[] :leveloffset: +1 diff --git a/book/chapters/divide-and-conquer--fibonacci.adoc b/book/chapters/divide-and-conquer--fibonacci.adoc index fab9577e..1f67e5fb 100644 --- a/book/chapters/divide-and-conquer--fibonacci.adoc +++ b/book/chapters/divide-and-conquer--fibonacci.adoc @@ -52,7 +52,8 @@ graph G { .... In the diagram, we see the two recursive calls needed to compute each number. So if we follow the _O(branches^depth^)_ we get O(2^n^). [big]#🐢# - +(((Exponential))) +(((Runtime, Exponential))) NOTE: Fibonacci is not a perfect binary tree since some nodes only have one child instead of two. The exact runtime for recursive Fibonacci is _O(1.6^n^)_ (still exponential time complexity). Exponential time complexity is pretty bad. Can we do better? diff --git a/book/chapters/dynamic-programming--fibonacci.adoc b/book/chapters/dynamic-programming--fibonacci.adoc index 906aa18b..35ce7255 100644 --- a/book/chapters/dynamic-programming--fibonacci.adoc +++ b/book/chapters/dynamic-programming--fibonacci.adoc @@ -23,7 +23,8 @@ graph G { .... This graph looks pretty linear now. It's runtime _O(n)_! -indexterm:[Runtime, Linear] +(((Linear))) +(((Runtime, Linear))) (((Memoization))) TIP: Saving previous results for later is a technique called "memoization". This is very common to optimize recursive algorithms with overlapping subproblems. It can make exponential algorithms linear! 
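To make the memoization idea concrete, here is a minimal sketch (a simplified stand-in, not the book's included listing):

.Fibonacci with memoization (illustrative)
[source, javascript]
----
// Cache every computed value so each fib(n) is evaluated only once.
function fib(n, memo = new Map()) {
  if (n < 2) return n;
  if (!memo.has(n)) {
    memo.set(n, fib(n - 1, memo) + fib(n - 2, memo));
  }
  return memo.get(n);
}

fib(50); // ↪️ 12586269025, in linear time instead of exponential
----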
diff --git a/book/chapters/graph.adoc b/book/chapters/graph.adoc index b26e972e..cd00ad0d 100644 --- a/book/chapters/graph.adoc +++ b/book/chapters/graph.adoc @@ -1,5 +1,6 @@ = Graph - +(((Graph))) +(((Data Structures, Non-Linear, Graph))) Graphs are one of my favorite data structures. They have a lot of cool applications like optimizing routes, social network analysis to name a few. You are probably using apps that use graphs every day. First, let’s start with the basics. diff --git a/book/chapters/insertion-sort.adoc b/book/chapters/insertion-sort.adoc index be19d684..4ea544d8 100644 --- a/book/chapters/insertion-sort.adoc +++ b/book/chapters/insertion-sort.adoc @@ -31,4 +31,6 @@ include::{codedir}/algorithms/sorting/insertion-sort.js[tag=sort, indent=0] - <>: [big]#✅# Yes - Time Complexity: [big]#⛔️# <> _O(n^2^)_ - Space Complexity: [big]#✅# <> _O(1)_ -indexterm:[Runtime, Quadratic] + +(((Quadratic))) +(((Runtime, Quadratic))) diff --git a/book/chapters/linked-list.adoc b/book/chapters/linked-list.adoc index 3f71823a..c6556ed7 100644 --- a/book/chapters/linked-list.adoc +++ b/book/chapters/linked-list.adoc @@ -1,5 +1,7 @@ = Linked List - +(((Linked List))) +(((List))) +(((Data Structures, Linear, Linked List))) A list (or Linked List) is a linear data structure where each node is linked to another one. Linked Lists can be: @@ -248,8 +250,9 @@ So far, we have seen two liner data structures with different use cases. Here’ | Linked List (singly) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) | Linked List (doubly) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) |=== +(((Linear))) +(((Runtime, Linear))) -indexterm:[Runtime, Linear] If you compare the singly linked list vs. doubly linked list, you will notice that the main difference is deleting elements from the end. For a singly list is *O(n)*, while for a doubly list is *O(1)*. Comparing an array with a doubly linked list, both have different use cases: diff --git a/book/chapters/map-hashmap-vs-treemap.adoc b/book/chapters/map-hashmap-vs-treemap.adoc index 5a6ce3bc..903c5aef 100644 --- a/book/chapters/map-hashmap-vs-treemap.adoc +++ b/book/chapters/map-hashmap-vs-treemap.adoc @@ -26,4 +26,7 @@ As we discussed so far, there are trade-off between the implementations |=== {empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. -indexterm:[Runtime, Logarithmic] +(((Linear))) +(((Runtime, Linear))) +(((Logarithmic))) +(((Runtime, Logarithmic))) diff --git a/book/chapters/map-hashmap.adoc b/book/chapters/map-hashmap.adoc index a1a81c84..b5d4fef6 100644 --- a/book/chapters/map-hashmap.adoc +++ b/book/chapters/map-hashmap.adoc @@ -1,5 +1,7 @@ = HashMap - +(((HashMap))) +(((HashTable))) +(((Data Structures, Non-Linear, HashMap))) A HashMap is a Map implementation. HashMaps are composed of two things: 1) a hash function and 2) a bucket array to store values. diff --git a/book/chapters/map-intro.adoc b/book/chapters/map-intro.adoc index 628071a3..36112d03 100644 --- a/book/chapters/map-intro.adoc +++ b/book/chapters/map-intro.adoc @@ -1,5 +1,6 @@ = Map - +(((Map))) +(((Data Structures, Non-Linear, Map))) A map is a data structure to store pairs of data: *key* and *value*. In an array, you can only store values. The array’s key is always the position index. However, in a *Map* the key can be whatever you want. IMPORTANT: Map is a data structure that _maps_ *keys* to *values*. 
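As a quick usage sketch of JavaScript's built-in `Map` (separate from the book's included listings):

.Built-in Map in action (illustrative)
[source, javascript]
----
// Keys can be any value, not just array indexes.
const map = new Map();
map.set('art', 8);
map.set('cat', 5);

map.get('art'); // ↪️ 8
map.has('dog'); // ↪️ false
map.size; // ↪️ 2
----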
diff --git a/book/chapters/map-treemap.adoc b/book/chapters/map-treemap.adoc index a830d237..f2584688 100644 --- a/book/chapters/map-treemap.adoc +++ b/book/chapters/map-treemap.adoc @@ -1,5 +1,6 @@ = TreeMap - +(((TreeMap))) +(((Data Structures, Non-Linear, TreeMap))) A TreeMap is a Map implementation using Binary Search Trees. Implementing a Map with a tree, TreeMap, has a couple of advantages over a HashMap: diff --git a/book/chapters/merge-sort.adoc b/book/chapters/merge-sort.adoc index d4a33b0a..b5b09ecd 100644 --- a/book/chapters/merge-sort.adoc +++ b/book/chapters/merge-sort.adoc @@ -50,3 +50,7 @@ Merge sort has an _O(n log n)_ running time. For more details about how to extr - Recursive: Yes - Time Complexity: [big]#✅# <> _O(n log n)_ - Space Complexity: [big]#⚠️# <> _O(n)_, use auxiliary memory + +(((Linearithmic))) +(((Runtime, Linearithmic))) +(((Space complexity, Linear))) diff --git a/book/chapters/non-linear-data-structures-intro.adoc b/book/chapters/non-linear-data-structures-intro.adoc index 99fdc409..0a1455e1 100644 --- a/book/chapters/non-linear-data-structures-intro.adoc +++ b/book/chapters/non-linear-data-structures-intro.adoc @@ -1,6 +1,6 @@ [partintro] -- -Non-Linear data structures are everywhere whether you realize it or not. They are used in databases, Web (HTML DOM tree), search algorithms, finding the best route to get home and so on. We are going to learn the basic concepts and when to choose one over the other. +Non-Linear data structures are everywhere whether you realize it or not. You can find them in databases, Web (HTML DOM tree), search algorithms, finding the best route to get home and many more uses. We are going to learn the basic concepts and when to choose one over the other. .In this chapter we are going to learn: - Exciting <> data structure applications diff --git a/book/chapters/queue.adoc b/book/chapters/queue.adoc index 1e6579b3..5907afd0 100644 --- a/book/chapters/queue.adoc +++ b/book/chapters/queue.adoc @@ -1,5 +1,6 @@ = Queue - +(((Queue))) +(((Data Structures, Linear, Queue))) A queue is a linear data structure where the data flows in a *First-In-First-Out* (FIFO) manner. .Queue data structure is like a line of people: the First-in, is the First-out diff --git a/book/chapters/quick-sort.adoc b/book/chapters/quick-sort.adoc index 7d59576e..ef573770 100644 --- a/book/chapters/quick-sort.adoc +++ b/book/chapters/quick-sort.adoc @@ -77,8 +77,11 @@ With the optimization, Quicksort has an _O(n log n)_ running time. Similar to th - <>: [big]#️❌# No, the pivot element can be choose at random. - Recursive: Yes - Time Complexity: [big]#✅# <> _O(n log n)_ -- Space Complexity: [big]#⚠️# <> _O(n)_ -indexterm:[Space Complexity, Linear] +- Space Complexity: [big]#✅# <> _O(1)_ + +(((Linearithmic))) +(((Runtime, Linearithmic))) +(((Space complexity, Constant))) // Resources: // https://www.khanacademy.org/computing/computer-science/algorithms/quick-sort/a/linear-time-partitioning diff --git a/book/chapters/selection-sort.adoc b/book/chapters/selection-sort.adoc index bc763236..067be502 100644 --- a/book/chapters/selection-sort.adoc +++ b/book/chapters/selection-sort.adoc @@ -51,4 +51,5 @@ There you have it, `2b` now comes before `2a`. // CAUTION: In practice, selection sort performance is the worst compared <> and <>. The only advantage of selection sort is that it minimizes the number of swaps. In case, that swapping is expensive, then it could make sense to use this one over the others. 
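To see that instability concretely, here is a minimal selection sort sketch (illustrative; the `key` parameter is an assumption, not part of the book's listing) that compares only the numeric part of each item:

.Selection sort is not stable (illustrative)
[source, javascript]
----
// Sort by the numeric prefix only; items with equal keys may be reordered.
function selectionSort(array, key = (x) => x) {
  const result = [...array];
  for (let left = 0; left < result.length; left++) {
    let min = left;
    for (let right = left + 1; right < result.length; right++) {
      if (key(result[right]) < key(result[min])) min = right;
    }
    [result[left], result[min]] = [result[min], result[left]];
  }
  return result;
}

selectionSort(['2b', '5a', '1a', '2a'], (s) => Number(s[0]));
// ↪️ ['1a', '2b', '2a', '5a'] ('2b' ended up before '2a', so it is not stable)
----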
-indexterm:[Runtime, Quadratic] +(((Quadratic))) +(((Runtime, Quadratic))) diff --git a/book/chapters/set.adoc b/book/chapters/set.adoc index 8f2f3498..b130ebd9 100644 --- a/book/chapters/set.adoc +++ b/book/chapters/set.adoc @@ -1,5 +1,6 @@ = Set - +(((Set))) +(((Data Structures, Non-Linear, Set))) A set is a data structure where duplicated entries are not allowed. Set is like an array with unique values. NOTE: JavaScript has already a built-in Set data structure. @@ -212,6 +213,8 @@ rehash happens, it will take *O(n)* instead of *O(1)*. A `TreeSet` is always *O( {empty}* = Amortized run time. E.g. rehashing might affect run time to *O(n)*. indexterm:[Runtime, Linear] +(((Logarithmic))) +(((Runtime, Logarithmic))) To recap, HashSet and TreeSet will keep data without duplicates. The difference besides runtime is that: diff --git a/book/chapters/stack.adoc b/book/chapters/stack.adoc index f869d999..95178270 100644 --- a/book/chapters/stack.adoc +++ b/book/chapters/stack.adoc @@ -1,5 +1,6 @@ = Stack - +(((Stack))) +(((Data Structures, Linear, Stack))) The stack is a data structure that restricts the way you add and remove data. It only allows you to insert and retrieve in a *Last-In-First-Out* (LIFO) fashion. An analogy is to think the stack is a rod and the data are discs. You can only take out the last one you put in. diff --git a/book/chapters/tree--avl.adoc b/book/chapters/tree--avl.adoc index b84c17f7..d4f22205 100644 --- a/book/chapters/tree--avl.adoc +++ b/book/chapters/tree--avl.adoc @@ -1,13 +1,14 @@ = AVL Tree - +(((AVL Tree))) +(((Tree, AVL))) AVL Tree is named after their inventors (**A**delson-**V**elsky and **L**andis). -This self-balancing tree keep track of subtree sizes to know if a rebalance is needed or not. +This self-balancing tree keeps track of subtree sizes to know if a rebalance is needed or not. We can compare the size of the left and right subtrees using a balance factor. [NOTE] ==== -The *balanced factor* on each node is calculated recurviely as follows: +The *balanced factor* on each node is calculated recursively as follows: ---- Balance Factor = (left subtree height) - (right subtree height) @@ -15,8 +16,8 @@ Balance Factor = (left subtree height) - (right subtree height) ==== -The implementation will got into the BST node. -We will need two methods to calculate the left and right subtree, and with those we can get the balance factor. +The implementation will go in the BST node class. +We will need two methods to calculate the left and right subtree, and with those, we can get the balance factor. .Balance Factor methods on the BST node [source, javascript] @@ -27,7 +28,7 @@ include::{codedir}/data-structures/trees/binary-tree-node.js[tag=avl, indent=0] == Implementing AVL Tree -Implementing an AVL Tree is not too hard, since it builds upon what we did in the Binary Search Tree. +Implementing an AVL Tree is not too hard since it builds upon what we did in the Binary Search Tree. .AVL Tree class [source, javascript] @@ -35,17 +36,17 @@ Implementing an AVL Tree is not too hard, since it builds upon what we did in th include::{codedir}/data-structures/trees/avl-tree.js[tag=AvlTree] ---- -As you can see, AVL tree inherits from the BST class. -The insert and remove operations works the same as in the BST, except that at the end we call `balanceUptream`. -This function makes balance the tree after every change if is needed. Let's see how it's implemented. +As you can see, the AVL tree inherits from the BST class. 
+The insert and remove operations work the same as in the BST, except that at the end we call `balanceUpstream`. +This function checks if the tree is symmetrical after every change to the tree. If the tree went out of balance, it would execute the appropriated rotation to fix it. .Balance Upstream for AVL tree [source, javascript] ---- -include::{codedir}/data-structures/trees/avl-tree.js[tag=balanceUptream] +include::{codedir}/data-structures/trees/avl-tree.js[tag=balanceUpstream] ---- -This function recurively goes from the modified node to the root checking if each node in between is balanced. +This function recursively goes from the modified node to the root checking if each node in between is balanced. Now, let's examine how does the balancing works on AVL tree. .Balance method for AVL tree diff --git a/book/chapters/tree--binary-search-tree.adoc b/book/chapters/tree--binary-search-tree.adoc index 4241335d..d12316a9 100644 --- a/book/chapters/tree--binary-search-tree.adoc +++ b/book/chapters/tree--binary-search-tree.adoc @@ -1,10 +1,12 @@ = Binary Search Tree - -.The Binary Search Tree (BST) is a tree data structure that keeps the following constraints: +(((Binary Search Tree))) +(((BST))) +(((Data Structures, Non-Linear, Binary Search Tree))) +.To recap, the Binary Search Tree (BST) is a tree data structure that keeps the following constraints: * Each node must have at most two children. Usually referred to as "left" and "right". * All trees must a have a "root" node. -* The order of nodes values must be: left child < parent < right child. -* Nodes might need re-ordering after each insert/delete operation to keep the `left < parent < right` constraint. +* The order of nodes values must be: `left child < parent < right child`. +* Nodes might need re-ordering after each insert/delete operation to keep the `left <= parent < right` constraint. == Implementing a Binary Search Tree @@ -42,8 +44,8 @@ With the methods `add` and `remove` we have to guarantee that our tree always ha === Inserting new elements in a BST .For inserting an element, in a BST, we have two scenarios: -1. If the tree is empty (root element is null), we add the newly created node as root, and we are done! -2. If the root is not null. Start from the root, then compare the node’s value against the new element. If the node has higher than a new item, we move to the right child, otherwise to the left. We check each node recursively until we find an empty spot where we can put the new element and keep the rule `right < parent < left`. +1. If the tree is empty (root element is null), we add the newly created node as root, and that's it! +2. If the root is not null. Start from it and compare the node’s value against the new element. If the node has higher than a new item, we move to the right child, otherwise to the left. We check each node recursively until we find an empty spot where we can put the new element and keep the rule `right < parent < left`. 3. If we insert the same value multiple times, we don’t want duplicates. So, we can keep track of multiples using a duplicity counter. For instance, let’s say that we want to insert the values 19, 21, 10, 2, 8 in a BST: @@ -84,7 +86,7 @@ Deleting a node from a BST have three cases. ==== Removing a leaf (Node with 0 children) -Deleting a leaf is the easiest, we look for their parent and set the child to null. +Deleting a leaf is the easiest; we look for their parent and set the child to null. .Removing node without children from a BST. 
image:image37.png[image,width=528,height=200] @@ -110,7 +112,7 @@ Removing a parent of two children is the trickiest of all cases because we need image:image39.png[image,width=528,height=404] -In the example, we delete the root node 19. This leaves the two orphans (node 10 and node 21). There are no more parents because node 19 was the *root* element. One way to solve this problem is to *combine* the left subtree (Node 10 and descendants) into the right subtree (node 21). The final result is node 21 is the new root. +In the example, we delete the root node 19. This deletion leaves two orphans (node 10 and node 21). There are no more parents because node 19 was the *root* element. One way to solve this problem is to *combine* the left subtree (Node 10 and descendants) into the right subtree (node 21). The final result is node 21 is the new root. What would happen if node 21 had a left child (e.g., node 20)? Well, we would move node 10 and its descendants' bellow node 20. @@ -126,7 +128,7 @@ include::{codedir}/data-structures/trees/binary-search-tree.js[tag=remove, inden <1> Try to find if the value exists on the tree. <2> If the value doesn’t exist we are done! <3> Create new subtree without the value to delete -<4> Check the multiplicity (duplicates) and decrement the count in case we have multiple nodes with the same value +<4> Check the multiplicity (duplicates) and decrement the count if we have multiple nodes with the same value <5> If the `nodeToRemove` was the root, then we move the removed node’s children as the new root. <6> If it was not the root, then we go to the deleted node’s parent and put their children there. diff --git a/book/chapters/tree--binary-tree-traversal.adoc b/book/chapters/tree--binary-tree-traversal.adoc index 9b8d7ce6..ab440b61 100644 --- a/book/chapters/tree--binary-tree-traversal.adoc +++ b/book/chapters/tree--binary-tree-traversal.adoc @@ -1,10 +1,11 @@ = Binary Tree Traversal - -As mentioned before, there are different ways to visit all the nodes or search for a value in a binary tree. On this section we are going to focus on depth-first tree traversal. The implementations are recursive since it's more elegant and concise. Let's explore them. +(((Binary Tree Traversal))) +As mentioned before, there are different ways to visit all the nodes or search for a value in a binary tree. On this section, we are going to focus on depth-first tree traversal. == In Order Traversal - -If you tree happens to be a binary search tree (BST), then could use "in order" traversal to get the values sorted in ascending order. To accomplish this, you have to visit the nodes in a `left-root-right` order. +(((Tree Traversal, In Order))) +(((In Order Traversal))) +If your tree happens to be a binary search tree (BST), then you can use "in order" traversal to get the values sorted in ascending order. To accomplish this, you have to visit the nodes in a `left-root-right` order. If we have the following tree: ---- @@ -27,11 +28,12 @@ Check out the implementation: include::{codedir}/data-structures/trees/binary-search-tree.js[tag=inOrderTraversal, indent=0] ---- -This function goes recursively to the leftmost element and then yield that node, then we go to the right child (if any) and repeat the process. This will get us the values ordered. +This function goes recursively to the leftmost element and then yield that node, then we go to the right child (if any) and repeat the process. This method will get us the values ordered. 
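Here is a quick usage sketch (assuming the book's `BinarySearchTree` class, and that the generator yields nodes rather than raw values):

.Consuming the in-order generator (illustrative)
[source, javascript]
----
// In-order traversal of a BST yields the values in ascending order.
const tree = new BinarySearchTree();
[10, 5, 30, 4, 15, 40, 3].forEach((value) => tree.add(value));

Array.from(tree.inOrderTraversal(), (node) => node.value);
// ↪️ [3, 4, 5, 10, 15, 30, 40]
----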
== Pre Order Traversal - -Pre-order traveral visits nodes in this order `root-left-right` recursively. +(((Tree Traversal, Pre Order))) +(((Pre Order Traversal))) +Pre-order traversal visits nodes in this order `root-left-right` recursively. .Usage of pre-order traversal: - Create a copy of the tree. @@ -58,10 +60,11 @@ If we have the following tree: Pre-order traverval will return `10, 5, 4, 3, 30, 15, 40`. == Post-order Traversal +(((Tree Traversal, Post Order))) +(((Post Order Traversal))) +Post-order traversal goes to each node in this order `left-right-root` recursively. -Post-order traveral goes to each node in this order `left-right-root` recursively. - -.Usages of the post-order tree traveral +.Usages of the post-order tree traversal - Traversal is used to delete the tree because you visit the children before removing the parent. - Get the postfix expression of an expression tree used in the http://en.wikipedia.org/wiki/Reverse_Polish_notation[reverse polish notation]. diff --git a/book/chapters/tree--search.adoc b/book/chapters/tree--search.adoc index daedd965..77fcb505 100644 --- a/book/chapters/tree--search.adoc +++ b/book/chapters/tree--search.adoc @@ -1,7 +1,7 @@ = Tree Search & Traversal So far we covered, how to insert/delete/search values in a binary search tree (BST). -However, not all binary trees are BST, so there are other ways to look for values or visit all nodes in a certain order. +However, not all binary trees are BST, so there are other ways to look for values or visit all nodes in a particular order. If we have the following tree: ---- @@ -15,7 +15,8 @@ If we have the following tree: ---- Depending on what traversal methods we used we will have a different visiting order. - +(((Tree Traversal))) +(((Tree, Traversal))) .Tree traversal methods - Breadth-first traversal (a.k.a level order traversal): `10, 5, 30, 4, 15, 40, 3` - Depth-first traversal @@ -23,14 +24,16 @@ Depending on what traversal methods we used we will have a different visiting or ** Pre-order (root-left-right): `10, 5, 4, 3, 30, 15, 40` ** Post-order (left-right-root): `3, 4, 5, 15, 40, 30, 10` -Why do we care? Well, there are certain problems that you solve more optimally using one or another traversal method. For instance to get the size of a subtree, finding maximums/minimums, and so on. +Why do we care? Well, there are specific problems that you can solve more optimally using one or another traversal method. For instance to get the size of a subtree, finding maximums/minimums, and so on. -Let's cover Breadth-first search (BFS) and Depth-first search (DFS). +Let's cover the Breadth-first search (BFS) and Depth-first search (DFS). [Breadth First Search] == Breadth-First Search for Binary Tree - -Breadth-first search goeas wide (breadth) before going deep. Hence, the name. In other words, it goes level by level. It visits all the inmediate nodes or children and then move on to the children's children. +(((BFS))) +(((Breadth-First Search))) +(((Tree, Breadth-First Search))) +The breadth-first search goes wide (breadth) before going deep. Hence, the name. In other words, it goes level by level. It visits all the immediate nodes or children and then moves on to the children's children. Let's how can we implement it! .Breath-First Search (BFS) Implementation @@ -41,12 +44,14 @@ include::{codedir}/data-structures/trees/binary-search-tree.js[tag=bfs,indent=0] As you see, the BFS uses a <> data structure. We enqueue all the children of the current node and then dequeue them as we visit them. 
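For instance, using the tree from the figure above, the generator can be consumed lazily (a usage sketch; it assumes `bfs()` yields nodes):

.Consuming the BFS generator (illustrative)
[source, javascript]
----
// Each next() call dequeues one node in level order: 10, 5, 30, ...
// The iterator result's .value is the node; the node's .value is its data.
const bfsIterator = tree.bfs();
bfsIterator.next().value.value; // ↪️ 10 (the root)
bfsIterator.next().value.value; // ↪️ 5
bfsIterator.next().value.value; // ↪️ 30
----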
-Note the asterisk (`*`) in front of the function means that this function is a generator that yield values. +Note the asterisk (`*`) in front of the function means that this function is a generator that yields values. +(((JavaScript Notes, Generators))) +(((Generators))) .JavaScript Generators **** -JavaScript generators were added as part of ES6, they allow process possibly expensive operations one by one. You can convert any function into a generator by adding the asterisk in front and `yield`ing a value. +JavaScript generators were added as part of ES6; they allow process possibly expensive operations one by one. You can convert any function into a generator by adding the asterisk in front and `yield`ing a value. Then you can use `next()` to get the value and also `done` to know if it's the last value. Here are some examples: @@ -79,8 +84,10 @@ console.log(Array.from(dummyIdMaker())); // [0, 1, 2] == Depth-First Search for Binary Tree - -Depth-First search goes deep before going wide. It means, that starting for the root it goes as deep as it can until it found a leaf node (node without children), then it visits all the remaing nodes that were in the path. +(((DFS))) +(((Depth-First Search))) +(((Tree, Depth-First Search))) +Depth-First search goes deep (depth) before going wide. It means that starting for the root it goes as deep as it can until it found a leaf node (node without children), then it visits all the remaining nodes that were in the path. .Depth-First Search (DFS) Implementation with a Stack [source, javascript] @@ -88,18 +95,18 @@ Depth-First search goes deep before going wide. It means, that starting for the include::{codedir}/data-structures/trees/binary-search-tree.js[tag=dfs,indent=0] ---- -This is a iterative implementation of a DFS using an <>. -It's almost identical to the BFS but instead of using a <> we usa a Stack. +This is an iterative implementation of a DFS using an <>. +It's almost identical to the BFS, but instead of using a <> we use a Stack. We can also implement it as recursive functions are we are going to see in the <> section. == Depth-First Search vs. Breadth-First Search -We can see visually the difference on how the DFS and BFS search for nodes: +We can see visually the difference between how the DFS and BFS search for nodes: .Depth-First Search vs. Breadth-First Search image:depth-first-search-dfs-breadth-first-search-bfs.jpg[] -As you can see the DFS in two iterations is already at one of the farthest node from the root while BFS search nearby nodes first. +As you can see the DFS in two iterations is already at one of the farthest nodes from the root while BFS search nearby nodes first. .Use DFS when: - The node you are looking for is likely to be *far* from the root. @@ -108,14 +115,7 @@ As you can see the DFS in two iterations is already at one of the farthest node - The node you are looking for is *nearby* the root. :leveloffset: +1 - +<<< include::tree--binary-tree-traversal.adoc[] :leveloffset: -1 - - - - - - - diff --git a/book/chapters/tree--self-balancing-rotations.adoc b/book/chapters/tree--self-balancing-rotations.adoc index bde52210..0f8d842e 100644 --- a/book/chapters/tree--self-balancing-rotations.adoc +++ b/book/chapters/tree--self-balancing-rotations.adoc @@ -1,11 +1,11 @@ = Self-balancing Binary Search Trees -Binary Search Trees (BST) are a great data structure to find elements very fast _O(n log n)_. -However, when the BST branches has different branch sizes then the performance suffers. 
-In the worst case, all nodes can go to one side (e.g. right) and then the search time would be linear. +Binary Search Trees (BST) are an excellent data structure to find elements very fast _O(log n)_. +However, when the BST branches have different branch sizes, then the performance suffers. +In the worst case, all nodes can go to one side (e.g., right) and then the search time would be linear. At this point searching element won't be any better on that tree than an array or linked list. Yikes! -Self-balanced trees will automatically balanced the tree when an element is inserted to keep search performace. +Self-balanced trees will automatically rebalance the tree when an element is inserted to keep search performance. We balance a tree by making the height (distance from a node to the root) of any leaf on the tree as similar as possible. .From unbalanced BST to balanced BST @@ -27,18 +27,18 @@ To be more specific we rotated node `1` to the left to balance the tree. Let's examine all the possible rotation we can do to balance a tree. == Tree Rotations - +(((Tree Rotations))) We can do single rotations left and right and also we can do double rotations. Let's go one by one. === Single Right Rotation -Right rotation moves a node on the right as a child of other node. +Right rotation moves a node on the right as a child of another node. Take a look at the `@example` in the code below. As you can see we have an unbalanced tree `4-3-2-1`. We want to balance the tree, for that we need to do a right rotation of node 3. -So, the node 3 is moved as the right child of the previous child. +So, we move node 3 as the right child of the previous child. .Single right rotation implementation [source, javascript] @@ -54,7 +54,7 @@ include::{codedir}/data-structures/trees/tree-rotations.js[tag=rightRotation] The `swapParentChild` as it name says, swap the children. For our example, it swaps `node 4`'s left children from `node 3` to `node 2`. -Take a look at the implementation +Take a look at the implementation. .Swap Parent and Child Implementation [source, javascript] @@ -73,7 +73,7 @@ After `swapParentChild`, we have the following: Still not quite what we want. So, `newParent.setRightAndUpdateParent(node)` will make `node 3` the right child of `node 2`. -Finally, we remove left child of `node 3` to be `null`. +Finally, we remove the left child of `node 3` to be `null`. ---- 4 @@ -109,7 +109,7 @@ If you are curious about the `setRightAndUpdateParent` and `setLeftAndUpdatePare include::{codedir}/data-structures/trees/binary-tree-node.js[tag=setAndUpdateParent] ---- -You can also checkout the full +You can also check out the full https://github.com/amejiarosario/dsa.js/blob/adfd8a660bbe0a7068fd7881aff9f51bdb9f92ae/src/data-structures/trees/binary-tree-node.js#L9[binary tree node implementation]. === Left Right Rotation @@ -122,7 +122,7 @@ This time are we going to do a double rotation. include::{codedir}/data-structures/trees/tree-rotations.js[tag=leftRightRotation] ---- -As you can see we do a left and then a right rotation. This is also called `LR rotation` +As you can see we do a left and then a right rotation. This rotation is also known as `LR rotation` === Right Left Rotation @@ -134,7 +134,7 @@ Very similar to `leftRightRotation`. The difference is that we rotate right and include::{codedir}/data-structures/trees/tree-rotations.js[tag=rightLeftRotation] ---- -This rotation is also refered as `RL rotation`. +This rotation is also referred to as `RL rotation`. 
== Self-balancing trees implementations diff --git a/book/chapters/tree.adoc b/book/chapters/tree.adoc index 08db9eb0..7738aeea 100644 --- a/book/chapters/tree.adoc +++ b/book/chapters/tree.adoc @@ -1,5 +1,6 @@ = Tree - +(((Tree))) +(((Data Structures, Non-Linear, Tree))) A tree is a non-linear data structure where a node can have zero or more connections. The topmost node in a tree is called *root*. The linked nodes to the root are called *children* or *descendants*. .Tree Data Structure: root node and descendants. @@ -50,13 +51,14 @@ image:image31.jpg[image] There are different kinds of trees depending on the restrictions. E.g. The trees that have two children or less are called *binary tree*, while trees with at most three children are called *Ternary Tree*. Since binary trees are most common we are going to cover them here and others in another chapter. === Binary Tree - +(((Binary Tree))) +(((Data Structures, Non-Linear, Binary Tree))) The binary restricts the nodes to have at most two children. Trees, in general, can have 3, 4, 23 or more, but not binary trees. .Binary tree has at most 2 children while non-binary trees can have more. image:image32.png[image,width=321,height=193] -Binary trees are the one of the most common types and it's used to build other data structures and applications. +Binary trees are one of the most used kinds of tree, and they are used to build other data structures. .Binary Tree Applications - <> @@ -66,7 +68,8 @@ Binary trees are the one of the most common types and it's used to build other d === Binary Search Tree (BST) - +(((Binary Search Tree))) +(((Data Structures, Non-Linear, Binary Search Tree))) The Binary Search Tree (BST) is a specialization of the binary tree. BST has the same restriction as a binary tree; each node has at most two children. However, there’s another restriction: the values are ordered. It means the left child’s value has to be less or equal than the parent. In turn, the right child’s value has to be bigger than the parent. > BST: left ≤ parent < right @@ -76,16 +79,19 @@ image:image33.png[image,width=348,height=189] === Binary Heap - +(((Binary Heap))) +(((Heap))) +(((Max-Heap))) +(((Min-Heap))) +(((Data Structures, Non-Linear, Binary Heap))) The heap (max-heap) is a type of binary tree where the children's values are higher than the parent. Opposed to the BST, the left child doesn’t have to be smaller than the right child. .Heap vs BST image:image34.png[image,width=325,height=176] - The (max) heap has the maximum value in the root, while BST doesn’t. -There is two kind of heaps: min-heap and max-heap. +There are two kinds of heaps: min-heap and max-heap. For a *max-heap*, the root has the highest value. The heap guarantee that as you move away from the root, the values get smaller. The opposite is true for a *min-heap*. In a min-heap, the lowest value is at the root, and as you go down the lower to the descendants, they will keep increasing values. .Max-heap keeps the highest value at the top while min-heap keep the lowest at the root. 
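The heap invariant can be captured in a few lines for an array-backed binary heap (a sketch under the usual layout assumption that the children of index `i` live at `2i + 1` and `2i + 2`; `isMinHeap` is an illustrative helper, not from the book's code):

.Checking the min-heap property (illustrative)
[source, javascript]
----
// A min-heap holds when every parent is <= both of its children.
function isMinHeap(array) {
  return array.every((value, i) => {
    const left = array[2 * i + 1];
    const right = array[2 * i + 2];
    return (left === undefined || value <= left)
      && (right === undefined || value <= right);
  });
}

isMinHeap([1, 3, 2, 7, 4]); // ↪️ true
isMinHeap([3, 1, 2]); // ↪️ false (child 1 is smaller than its parent 3)
----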
diff --git a/src/data-structures/trees/avl-tree.js b/src/data-structures/trees/avl-tree.js index c3b013d2..fc6379b1 100644 --- a/src/data-structures/trees/avl-tree.js +++ b/src/data-structures/trees/avl-tree.js @@ -39,13 +39,13 @@ function balance(node) { } // end::balance[] -// tag::balanceUptream[] +// tag::balanceUpstream[] /** * Bubbles up balancing nodes a their parents * * @param {TreeNode} node */ -function balanceUptream(node) { +function balanceUpstream(node) { let current = node; let newParent; while (current) { @@ -54,7 +54,7 @@ function balanceUptream(node) { } return newParent; } -// end::balanceUptream[] +// end::balanceUpstream[] // tag::AvlTree[] /** @@ -68,7 +68,7 @@ class AvlTree extends BinarySearchTree { */ add(value) { const node = super.add(value); - this.root = balanceUptream(node); + this.root = balanceUpstream(node); return node; } @@ -80,7 +80,7 @@ class AvlTree extends BinarySearchTree { const node = super.find(value); if (node) { const found = super.remove(value); - this.root = balanceUptream(node.parent); + this.root = balanceUpstream(node.parent); return found; } From 8f519dc0b2a4ca1d1c3ce2d50ee93a75d1b121e9 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Tue, 26 Mar 2019 19:48:08 -0400 Subject: [PATCH 015/326] readproof part 3 --- book/chapters/chapter3.adoc | 4 ++- book/chapters/graph-search.adoc | 44 +++++++++++++++-------- book/chapters/graph.adoc | 14 ++++---- book/chapters/map-hashmap-vs-treemap.adoc | 4 +-- book/chapters/map-hashmap.adoc | 26 +++++++------- book/chapters/map-intro.adoc | 2 +- book/chapters/map-treemap.adoc | 2 ++ book/chapters/set.adoc | 8 +++-- 8 files changed, 64 insertions(+), 40 deletions(-) diff --git a/book/chapters/chapter3.adoc b/book/chapters/chapter3.adoc index 2ab87fc3..642f058f 100644 --- a/book/chapters/chapter3.adoc +++ b/book/chapters/chapter3.adoc @@ -35,12 +35,15 @@ include::map-intro.adoc[] :leveloffset: +1 // (g) +<<< include::map-hashmap.adoc[] // (g) +<<< include::map-treemap.adoc[] // (g) +<<< include::map-hashmap-vs-treemap.adoc[] :leveloffset: -1 @@ -51,7 +54,6 @@ include::set.adoc[] // (g) include::graph.adoc[] -// TODO: pending include::graph-search.adoc[] // Graph summary diff --git a/book/chapters/graph-search.adoc b/book/chapters/graph-search.adoc index ce126fc8..7940f260 100644 --- a/book/chapters/graph-search.adoc +++ b/book/chapters/graph-search.adoc @@ -1,14 +1,12 @@ = Graph Search -Graph search allows you to visit all the elements connected given a starting node. There are two ways to navigate the graph, one is using Depth-First Search (DFS) and the other one is Breadth-First Search (BFS). Let's see the difference. +Graph search allows you to visit search elements. -== Depth-First Search for Graphs - -With Depth-First Search (DFS) we go deep before going wide. +WARNING: Graph search is very similar to <>. So, if you read that sections some of the concepts here will be familiar to you. -// TODO: add arrows to show DFS and create another one for BFS +There are two ways to navigate the graph, one is using Depth-First Search (DFS) and the other one is Breadth-First Search (BFS). Let's see the difference using the following graph. -[graphviz, dfs-graph, png] +[graphviz, directed graph, png] .... digraph G { @@ -29,7 +27,7 @@ digraph G { 3 -> 4 3 -> 2 - label="DFS" + // label="DFS" { rank=same; 3, 1 } { rank=same; 0, 4 } @@ -37,30 +35,48 @@ digraph G { } .... +== Depth-First Search for Graphs + +With Depth-First Search (DFS) we go deep before going wide. 
+ +Let's say that we use DFS on the graph shown above, starting with node `0`. +A DFS, will probably visit 5, then visit `1` and continue going down `3` and `2`. As you can see, we need to keep track of visited nodes, since in graphs we can have cycles like `1-3-2`. +Finally, we back up to the remaining node `0` children: node `4`. + +So, DFS would visit the graph: `[0, 5, 1, 3, 2, 4]`. + +// TODO: add arrows to show DFS and create another one for BFS + == Breadth-First Search for Graphs With Breadth-First Search (BFS) we go wide before going deep. // TODO: BFS traversal +Let's say that we use BFS on the graph shown above, starting with the same node `0`. +A BFS, will visit 5 as well, then visit `1` and will not go down to it's children. +It will first finish all the children of node `0`, so it will visit node `4`. +After all the children of node `0` are visited it continue with all the children of node `5`, `1` and `4`. + +In summary, BFS would visit the graph: `[0, 5, 1, 4, 3, 2]` -== Depth-First Search vs Breadth-First Search in a Graph +== Depth-First Search vs. Breadth-First Search in a Graph -DFS and BFS can implementation can be almost identical the difference is the underlying data structured. In our implementation, we have a generic `graphSearch` where we pass the first element to start the search the data structure that we can to use: +DFS and BFS can implementation can be almost identical; the difference is the underlying data structured. In our implementation, we have a generic `graphSearch` where we pass the first element to start the search the data structure that we can to use: -.DFS and BFS implemenation +.DFS and BFS implementation [source, javascript] ---- include::{codedir}/data-structures/graphs/graph.js[tag=graphSearch,indent=0] ---- -Using an <> (LIFO) for DFS will make use keep visiting the last node children, while having a <> (FIFO) will allows to visit adjacent nodes first and "queue" their children for later visting. +Using an <> (LIFO) for DFS will make use keep visiting the last node children while having a <> (FIFO) will allow to visit adjacent nodes first and "queue" their children for later visiting. TIP: you can also implement the DFS as a recursive function, similar to what we did in the <>. -You might wonder what's the difference between search algorithms in a tree and a graph? Check out the next section. +You might wonder what the difference between search algorithms in a tree and a graph is? Check out the next section. == DFS/BFS on Tree vs Graph -The difference between searching a tree and a graph is that the tree has always an starting point (root node). However, in a graph you can start searching anywhere. There's no root in graph. +The difference between searching a tree and a graph is that the tree always has a starting point (root node). However, in a graph, you can start searching anywhere. There's no root. -NOTE: Every tree is a graph but not every graph is a tree. +NOTE: Every tree is a graph, but not every graph is a tree. diff --git a/book/chapters/graph.adoc b/book/chapters/graph.adoc index cd00ad0d..2385898d 100644 --- a/book/chapters/graph.adoc +++ b/book/chapters/graph.adoc @@ -18,7 +18,7 @@ include::{codedir}/data-structures/graphs/node.js[tag=constructor] As you can see, it’s pretty similar to the Linked List node. The only difference is that it uses an *array* of adjacent nodes instead of just one or two. -Other difference between a linked list and graph is that a linked list always has a root node, while the graph doesn’t. 
+Other difference between a linked list and graph is that a linked list always has a root node (or first element), while the graph doesn’t. You can start traversing a graph from anywhere. Let’s examine these graph properties! == Graph Properties @@ -48,13 +48,15 @@ A graph can have *cycles* or not. .Cyclic vs Acyclic Graphs. image:image44.jpg[image,width=444,height=194] - +(((Cyclic Graph))) A *cyclic graph* is the one that you can pass through a node more than once. E.g., On the cyclic illustration, if you start in the green node, then go the orange and purple, finally, you could come back to green again. Thus, it has a *cycle*. - +(((Acyclic Graph))) An acyclic graph is the one that you can’t pass through a node more than once. E.g., in the acyclic illustration, can you to find a path where you can pass through the same vertex more than one? +(((Directed Acyclic Graph))) +(((DAG))) The *Directed Acyclic Graph (DAG)* is unique. It has many applications like scheduling tasks, spreadsheets change propagation, and so forth. DAG is also called *Tree* data structure only when each node has only *one parent*. === Connected vs Disconnected vs Complete Graphs @@ -69,7 +71,7 @@ A *connected graph* is the opposite to disconnected, there’s a path between ev A *complete graph* is where every node is adjacent to all the other nodes in the graph. E.g., If there are seven nodes, every node has six edges. === Weighted Graphs - +(((Weighted Graphs))) Weighted graphs have labels in the edges (a.k.a *weight* or *cost*). The link weight can represent many things like distance, travel time, or anything else. .Weighted Graph representing USA airports distance in miles. @@ -108,7 +110,7 @@ There are two main ways to graphs one is: * Adjacency List === Adjacency Matrix - +(((Adjacency Matrix))) Representing graphs as adjacency matrix is done using a two-dimensional array. For instance, let’s say we have the following graph: .Graph and its adjacency matrix. @@ -155,7 +157,7 @@ TIP: When the graph has few connections compared to the number of nodes we say t The space complexity of the adjacency matrix is *O(|V|^2^)*, where |V| is the number of vertices/nodes. === Adjacency List - +(((Adjacency List))) Another way to represent a graph is by using an adjacency list. This time instead of using an array (matrix) we use a list. .Graph represented as an Adjacency List. diff --git a/book/chapters/map-hashmap-vs-treemap.adoc b/book/chapters/map-hashmap-vs-treemap.adoc index 903c5aef..8cb6391e 100644 --- a/book/chapters/map-hashmap-vs-treemap.adoc +++ b/book/chapters/map-hashmap-vs-treemap.adoc @@ -8,13 +8,13 @@ .When to use a TreeMap vs. HashMap? * `HashMap` is more time-efficient. A `TreeMap` is more space-efficient. * `TreeMap` search complexity is *O(log n)*, while an optimized `HashMap` is *O(1)* on average.  -* `HashMap`’s keys are in insertion order (or random in some implementations). `TreeMap`’s keys are always sorted. +* `HashMap`’s keys are in insertion order (or random depending in the implementation). `TreeMap`’s keys are always sorted. * `TreeMap` offers some statistical data for free such as: get minimum, get maximum, median, find ranges of keys. `HashMap` doesn’t. * `TreeMap` has a guarantee always an *O(log n)*, while `HashMap`s has an amortized time of *O(1)* but in the rare case of a rehash, it would take an *O(n)*. == TreeMap Time complexity vs HashMap -As we discussed so far, there are trade-off between the implementations +As we discussed so far, there is a trade-off between the implementations. 
.Time complexity for different Maps implementations |=== diff --git a/book/chapters/map-hashmap.adoc b/book/chapters/map-hashmap.adoc index b5d4fef6..280860a0 100644 --- a/book/chapters/map-hashmap.adoc +++ b/book/chapters/map-hashmap.adoc @@ -3,8 +3,8 @@ (((HashTable))) (((Data Structures, Non-Linear, HashMap))) A HashMap is a Map implementation. HashMaps are composed of two things: -1) a hash function and -2) a bucket array to store values. +1) a _hash function_ and +2) a bucket _array_ to store values. Before going into the implementation details let’s give an overview of how it works. Let’s say we want to keep a tally of things and animals: @@ -22,14 +22,14 @@ image:image41.png[image,width=528,height=299] .This is the main idea: -1. We use a *hash function* to transform the keys (e.g., dog, cat, rat, …) into an array index. This array is called *bucket*. -2. The bucket holds the values (linked list in case of collisions). +1. We use a *hash function* to transform the keys (e.g., dog, cat, rat, …) into an array index. This _array_ is called *bucket*. +2. The bucket holds the values or list of values in case of collisions. In the illustration, we have a bucket size of 10. In bucket 0, we have a collision. Both `cat` and `art` keys map to the same bucket even thought their hash codes are different. In a HashMap, a *collision* is when different keys lead to the same index. They are nasty for performance since it can reduce the search time from *O(1)* to *O(n)*. -Having a big bucket size can avoid a collision but also can waste too much memory. We are going to build an _optimized_ HashMap that re-sizes itself when it is getting full. This avoids collisions and doesn’t spend too much memory upfront. Let’s start with the hash function. +Having a big bucket size can avoid a collision but also can waste too much memory. We are going to build an _optimized_ HashMap that re-sizes itself when it is getting full. This auto-resizing avoids collisions and don't need to allocate too much memory upfront. Let’s start with the *hash function*. == Designing an optimized hash function @@ -76,7 +76,7 @@ include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=naiveHashCodeEx Notice that `rat` and `art` have the same hash code! These are collisions that we need to solve. -Collisions happened because we are adding the letter's unicode and are not taking the order into account nor the type. We can do better by offsetting the character value based on their position in the string. We can also add the object type, so number `10` produce different output than string `'10'`. +Collisions happened because we are adding the letter's Unicode and are not taking the order into account nor the type. We can do better by offsetting the character value based on their position in the string. We can also add the object type, so number `10` produce different output than string `'10'`. .Hashing function implementation that offset character value based on the position [source, javascript] @@ -88,14 +88,14 @@ Since Unicode uses 20 bits, we can offset each character by 20 bits based on the .JavaScript built-in `BigInt` **** -BigInt allows to operate beyond the maximum safe limit of integers. +BigInt allows operating beyond the maximum safe limit of integers. [source, javascript] ---- Number.MAX_SAFE_INTEGER // => 9,007,199,254,740,991 ---- -BigInt has no virtually limits (until you run out of memory). It uses the suffix `n`. +BigInt has no virtual limits (until you run out of physical memory). It uses the suffix `n`. 
[source, javascript] ---- @@ -103,7 +103,7 @@ BigInt has no virtually limits (until you run out of memory). It uses the suffix ---- **** -As you can imagine, summing 20bits per letter leads to a humongous number! That's the case even for 3 letters words. We are using `BigInt` so it doesn’t overflow. +As you can imagine, summing 20bits per letter leads to a humongous number! That's the case even for three letters words. We are using `BigInt`, so it doesn’t overflow. .Verifying there's not hashing code duplicates [source, javascript] @@ -111,9 +111,9 @@ As you can imagine, summing 20bits per letter leads to a humongous number! That' include::{codedir}/data-structures/maps/hash-maps/hashing.js[tag=hashCodeOffsetExample, indent=0] ---- -We don’t have duplicates anymore! If the keys have different content or type they have a different hash code. However, we need to represent these unbounded integers to finite buckets in an array. We do that using *compression function*. This function can be as simple as `% BUCKET_SIZE`. +We don’t have duplicates anymore! If the keys have different content or type, they have a different hash code. However, we need to represent these unbounded integers to finite buckets in an array. We do that using *compression function*. This function can be as simple as `% BUCKET_SIZE`. -However, there’s an issue with the last implementation. It doesn’t matter how big (and different) is the hash code number if we at the end use the modulus to get an array index. The part of the hash code that truly matters is the last bits. +However, there’s an issue with the last implementation. It doesn’t matter how enormous (and different) is the hash code number if we at the end use the modulus to get an array index. The part of the hash code that truly matters is the last bits. .Look at this example with a bucket size of 4. [source, javascript] @@ -149,7 +149,7 @@ Let’s design a better HashMap with what we learned. === Implementing an optimized hash function -We are going to use a battle tested non-cryptographic hash function called FNV Hash. +We are going to use a battle-tested non-cryptographic hash function called FNV Hash. .FNV (Fowler/Noll/Vo) Hash **** @@ -176,7 +176,7 @@ hashCode('cat') //↪️ 4201630708 hashCode('cats') //↪️ 3304940933 ---- -A one letter change produce a totally different output. +A one letter change produce a very different output. We are using the FVN-1a prime number (`16777619`) and the offset (`2166136261`) to reduce collisions even further. If you are curious where these numbers come from check out this http://bit.ly/fvn-1a[link]. diff --git a/book/chapters/map-intro.adoc b/book/chapters/map-intro.adoc index 36112d03..2c69f4eb 100644 --- a/book/chapters/map-intro.adoc +++ b/book/chapters/map-intro.adoc @@ -15,7 +15,7 @@ include::{codedir}/data-structures/maps/map.js[tag=snippet, indent=0] In short, you set `key`/`value` pair and then you can get the `value` using the `key`. -The attractive part of Maps is that they are very performant usually *O(1)* or *O(log n)* depending on the implementation. We can implement the maps using two different techniques: +The attractive part of Maps is that they are very performant usually *O(1)* or *O(log n)* depending on the implementation. We can implement the maps using two different underlying data structures: * *HashMap*: it’s a map implementation using an *array* and a *hash function*. The job of the hash function is to convert the `key` into an index that maps to the `value`. 
An optimized HashMap can have an average runtime of *O(1)*.
* *TreeMap*: it’s a map implementation that uses a self-balanced Binary Search Tree (like <>). The BST nodes store the key and the value, and the nodes are sorted by key, guaranteeing an *O(log n)* lookup.

diff --git a/book/chapters/map-treemap.adoc b/book/chapters/map-treemap.adoc
index f2584688..42047bf5 100644
--- a/book/chapters/map-treemap.adoc
+++ b/book/chapters/map-treemap.adoc
@@ -2,6 +2,8 @@
(((TreeMap)))
(((Data Structures, Non-Linear, TreeMap)))
A TreeMap is a Map implementation using Binary Search Trees.
+(((Binary Search Tree)))
+(((BST)))

Implementing a Map with a tree, TreeMap, has a couple of advantages over a HashMap:

diff --git a/book/chapters/set.adoc b/book/chapters/set.adoc
index b130ebd9..a46775da 100644
--- a/book/chapters/set.adoc
+++ b/book/chapters/set.adoc
@@ -46,8 +46,9 @@ We can implement a `map` using a *balanced BST* and using a *hash function*. If

Let’s implement both!

== Implementing a TreeSet
-
-We are to use a balanced BST (Red-Black Tree) to implement TreeSet.
+(((TreeSet)))
+(((Data Structures, Non-Linear, TreeSet)))
+We are going to use a self-balanced BST (Red-Black Tree) to implement TreeSet.

.TreeSet's constructor method and size attribute
[source, javascript]
----
@@ -146,7 +147,8 @@ Check out our https://github.com/amejiarosario/dsa.js/blob/f69b744a1bddd3d99243c

Let’s now implement a `HashSet`.

== Implementing a HashSet
-
+(((HashSet)))
+(((Data Structures, Non-Linear, HashSet)))
The *HashSet* is the set implementation using a HashMap as its underlying data structure. The HashSet interface will be the same as the built-in `Set` or our previously implemented `TreeSet`.

From 32cf3af985fc5833806feecde5550d5b3e263b4c Mon Sep 17 00:00:00 2001
From: Adrian Mejia 
Date: Thu, 28 Mar 2019 06:34:27 -0400
Subject: [PATCH 016/326] improve first part

---
 book/chapters/algorithmic-toolbox.adoc        | 24 ++---
 book/chapters/algorithms-analysis-intro.adoc  |  2 +-
 book/chapters/algorithms-analysis.adoc        | 27 +++---
 book/chapters/array.adoc                      | 87 +++++++++++++------
 book/chapters/chapter2.adoc                   |  4 +
 book/chapters/chapter3.adoc                   |  2 +-
 .../linear-data-structures-intro.adoc         | 14 +--
 .../linear-data-structures-outro.adoc         |  9 +-
 book/chapters/linked-list.adoc                | 37 ++++----
 book/chapters/queue.adoc                      | 47 ++--------
 book/chapters/stack.adoc                      |  6 +-
 .../linked-lists/linked-list.js               |  2 +-
 12 files changed, 133 insertions(+), 128 deletions(-)

diff --git a/book/chapters/algorithmic-toolbox.adoc b/book/chapters/algorithmic-toolbox.adoc
index 7304dad5..b9041c5a 100644
--- a/book/chapters/algorithmic-toolbox.adoc
+++ b/book/chapters/algorithmic-toolbox.adoc
@@ -3,28 +3,30 @@

Have you ever been given a programming problem and frozen without knowing where to start? Well, in this section we are going to give some tips, so you don't get stuck while coding.

-TIP: Don't start coding right away. First, solve the problem, then write the code.
+TIP: TL;DR: Don't start coding right away. First, solve the problem, then write the code. Make it work first, make it better later.

.Steps to solve algorithmic problems
. *Understand* the requirements. Reframe them in your own words.
. Draw a *simple example* (no edge cases yet)
-. Brainstorm
+. *Brainstorm* possible solutions
.. How would you solve this problem *manually*? (without a computer) Is there any formula or theorem you can use?
.. Are there any heuristics (largest, smallest, best ratio), or can you spot a pattern to solve this problem using a <>?
.. Can you address the simple base case and generalize for other cases using a *recursive solution*? Can you divide the problem into subproblems? Try <>.
.. Do you have to generate multiple solutions or try different paths? Try <>.
+.. List all the data structures that you know that might solve this problem.
.. If everything else fails, how would you solve it in the dumbest way possible (brute force)? We can optimize it later.
-. Optimize the solution.
+. *Test* your algorithm idea with multiple examples
+. *Optimize* the solution. Only optimize once you have something working; don't try to do both at the same time!
+.. Can you trade off space for speed? Use a <> to speed up results!
+.. Do you have a bunch of recursive and overlapping problems? Try <>.
.. Re-read requirements and see if you can take advantage of anything. E.g., is the array sorted?
-.. Do you have a bunch of overlapping problems? Try <>.
-.. Can you trade-off space for speed? Use a <> to speed up results
-. Test your algorithm with multiple examples
-. *Code*, yes, now you can code.
+. *Write Code*, yes, now you can code.
.. Modularize your code with functions (don't do it all in one giant function please 🙏)
-. Test your code.
+.. Note down edge cases, but don't address them until the basic cases are working.
+. *Test* your code.
.. Choose a typical input and test against your code.
-.. Brainstorm about edge cases (empty, null values, overflows,
-.. How would scale your code?
+.. Brainstorm about edge cases (empty, null values, overflows, largest supported inputs)
+.. How would you scale your code beyond the current boundaries?

These steps should get you going even with the toughest algorithmic problems.

diff --git a/book/chapters/algorithms-analysis-intro.adoc b/book/chapters/algorithms-analysis-intro.adoc
index 86c6b682..47b17cf1 100644
--- a/book/chapters/algorithms-analysis-intro.adoc
+++ b/book/chapters/algorithms-analysis-intro.adoc
@@ -1,4 +1,4 @@
[partintro]
--
-In this section we are going to cover the basics about algorithms analysis. Also, we are going to discuss eight of the most commmon runtimes of algorithms and provide a code example for each one.
+In this part, we are going to cover the basics of algorithms analysis. Also, we are going to discuss the most common runtimes of algorithms and provide a code example for each one.
--
diff --git a/book/chapters/algorithms-analysis.adoc b/book/chapters/algorithms-analysis.adoc
index f85bc17c..d9e3c051 100644
--- a/book/chapters/algorithms-analysis.adoc
+++ b/book/chapters/algorithms-analysis.adoc
@@ -13,7 +13,7 @@
But why stop with the running time? We could also compare the memory "used" by different algorithms, and we called that *space complexity*.

.In this chapter you will learn:
-- What’s the best way to measure your code performance.
+- What’s the best way to measure the performance of your code regardless of what hardware you use.
- Learn how to use Big O notation to compare algorithms.
- How to use algorithms analysis to improve your program's speed.

@@ -24,15 +24,17 @@ Before going deeper, into space and time complexity, let's cover the basics real quick.

Algorithms (as you might know) are steps describing how to do some task. When you cook, you follow a recipe (or an algorithm) to prepare a dish. Let's say you want to make a pizza.
.Example of an algorithm
-// [source, js] // undefined functions
+[source, javascript]
----
-function bakePizza(dough, toppins = ['cheese']) {
-  const heatedOven = heatOvenTo(550);
-  punchDown(dough);
-  rollOut(dough);
-  applyToppings(dough, toppings);
-  const pizza = heatedOven.bakePizza(dough)
-  return pizza;
+import { punchDown, rollOut, applyToppings, Oven } from '../pizza-utils';
+
+function makePizza(dough, toppings = ['cheese']) {
+  const oven = new Oven(450);
+  const punchedDough = punchDown(dough);
+  const rolledDough = rollOut(punchedDough);
+  const rawPizza = applyToppings(rolledDough, toppings);
+  const pizzaPromise = oven.bake(rawPizza, { minutes: 20 });
+  return pizzaPromise;
}
----

@@ -44,8 +46,6 @@ TIP: Algorithms are instructions on how to perform a task.

Not all algorithms are created equal. There are “good” and “bad” algorithms. The good ones are fast; the bad ones are slow. Slow algorithms cost more money to run. Inefficient algorithms could make some calculations impossible in our lifespan!

-Most algorithms are affected by the size of the input. Let's say you need to arrange numbers in ascending order. Sorting ten digits will naturally take much less time than sorting 2 million of them.
-
To give you a clearer picture of how different algorithms perform as the input size grows, take a look at the following table.

.Relationship between algorithm input size and time taken to complete
@@ -59,8 +59,9 @@ To give you a clearer picture of how different algorithms perform as the input s
|Find all permutations of a string |4 sec. |> vigintillion years |> centillion years |∞ |∞
|=============================================================================================

-indexterm:(((Permutations)))
-However, if you keep the input size constant, you can notice the difference between an efficient algorithm and a slow one. An excellent sorting algorithm is `mergesort` for instance, and inefficient algorithm for large inputs is `bubble sort` .
+Most algorithms are affected by the size of the input (`n`). Let's say you need to arrange numbers in ascending order. Sorting ten items will naturally take much less time than sorting 2 million. But how much longer? As the input size grows, some algorithms take proportionally more time; we classify them as <> runtime [or `O(n)`]. Others grow with the square of the input; we call them <> running time [or `O(n^2^)`].
+
+If you keep the input size the same and run different algorithm implementations, you would notice the difference between an efficient algorithm and a slow one. An excellent sorting algorithm is `mergesort`, for instance, and an inefficient algorithm for large inputs is `bubble sort`.

Organizing 1 million elements with merge sort takes 20 seconds while bubble sort takes 12 days, ouch! The amazing thing is that both programs are measured on the same hardware with the same data!

diff --git a/book/chapters/array.adoc b/book/chapters/array.adoc
index 4da1ac43..fd6f2fe3 100644
--- a/book/chapters/array.adoc
+++ b/book/chapters/array.adoc
@@ -1,11 +1,13 @@
= Array
(((Array)))
(((Data Structures, Linear, Array)))
-Arrays are one of the most used data structures. You probably have used it a lot but are you aware of the runtimes of `splice`, `shift` and other operations? In this chapter, we are going deeper into the most common operations and their runtimes.
+Arrays are one of the most used data structures. You probably have used them a lot, but are you aware of the runtimes of `splice`, `shift`, `indexOf` and other operations?
In this chapter, we are going deeper into the most common operations and their runtimes.

== Array Basics

-An array is a collection of things (strings, characters, numbers, objects, etc.). They can be many or zero. Strings are a collection of Unicode characters and most of the array concepts apply to them.
+An array is a collection of things (strings, characters, numbers, objects, etc.). They can be many or zero.
+
+TIP: Strings are a collection of Unicode characters, and most of the array concepts apply to them.

.Fixed vs. Dynamic Size Arrays
****
@@ -34,7 +36,7 @@ const array0 = [];
array0[2] = 1;
----

-Using the index, you can replace whatever value you want.
+Using the index, you can replace whatever value you want. The runtime is constant: _O(1)_.

=== Inserting at the beginning of the array

@@ -43,14 +45,18 @@ What if you want to insert a new element at the beginning of the array? You woul
.Insert to head
[source, javascript]
----
-array.unshift(0); //=> [0, 2, 5, 1, 9, 6, 7]
+const array = [2, 5, 1, 9, 6, 7];
+array.unshift(0); // ↪️ 7
+// array: [0, 2, 5, 1, 9, 6, 7]
----

As you can see, `2` was at index 0; now it was pushed to index 1, and everything else moved one place. `unshift` takes *O(n)* since it affects all the elements in the array.

.JavaScript built-in `array.unshift`
****
-The `unshift()` method adds one or more elements to the beginning of an array and returns the new length of the array. Runtime: O(n).
+The `unshift()` method adds one or more elements to the beginning of an array and returns the new length of the array.
+
+Runtime: O(n).
****

=== Inserting at the middle of the array

@@ -60,15 +66,19 @@ Inserting a new element in the middle involves moving part of the array but not

.Inserting element in the middle
[source, javascript]
----
-array.splice(1, 0, 111); // <1>
+const array = [2, 5, 1, 9, 6, 7];
+array.splice(1, 0, 111); // ↪️ [] <1>
+// array: [2, 111, 5, 1, 9, 6, 7]
----
-<1> at the position 1, delete 0 elements and insert 111. The array would be `[2, 111, 5, 1, 9, 6, 7]`
+<1> at position `1`, delete `0` elements and insert `111`.

The Big O for this operation would be *O(n)* since, in the worst case, it would move most of the elements to the right.

.JavaScript built-in `array.splice`
****
-The `splice()` method changes the contents of an array by removing existing elements and/or adding new elements. Runtime: O(n).
+The `splice()` method changes the contents of an array by removing existing elements and/or adding new elements. Returns an array containing the deleted elements.
+
+Runtime: O(n).
****

=== Inserting at the end of the array

@@ -79,15 +89,18 @@ We can push new values to the end of the array like this:

[source, javascript]
----
const array = [2, 5, 1, 9, 6, 7];
-array.push(4); // <1>
+array.push(4); // ↪️ 7 <1>
+// array: [2, 5, 1, 9, 6, 7, 4]
----

-<1> The `4` element would be pushed to the end `[2, 5, 1, 9, 6, 7, 4]`.
+<1> The `4` element would be pushed to the end of the array. Notice that `push` returns the new length of the array.

Adding to the tail of the array doesn’t change other indexes. E.g., element 2 is still at index 0. So, this is a constant time operation *O(1)*.

.JavaScript built-in `array.push`
****
-The `push()` method adds one or more elements to the end of an array and returns the new length of the array. Runtime: O(1).
+The `push()` method adds one or more elements to the end of an array and returns the new length of the array.
+
+Runtime: O(1).
****

== Searching by value and index

Searching by index is very easy using the `[]` operator:

[source, javascript]
----
const array = [2, 5, 1, 9, 6, 7];
array[4]; // ↪️ 6
----

Searching by index takes a constant time, *O(1)*, to retrieve values out of the array. If we want to get fancier, we can create a function:

.Searching by index
[source, javascript]
----
include::{codedir}/data-structures/arrays/array.js[tag=searchByIndex]
----

-Finding out if an element is in the array or not is a different story.
+Finding out if a value is in the array or not is a different story.

// image:image18.png[image,width=528,height=338]

== Deleting elements

For deleting (similar to insertion), there are three possible scenarios: removing at the beginning, middle, or end.

=== Deleting element from the beginning

-Deleting from the beginning can be done using the `splice` function and also the `shift`. Let’s use the `shift` since it’s simpler.
+Deleting from the beginning can be done using the `splice` function and also the `shift`. For simplicity, we will use the latter.

.Deleting from the beginning of the array.
[source, javascript]
----
-array.shift(); //=> [5, 1, 9, 6, 7]
+const array = [2, 111, 5, 1, 9, 6, 7];
+// Deleting from the beginning of the array.
+array.shift(); // ↪️2
+array.shift(); // ↪️111
+// array: [5, 1, 9, 6, 7]
----

-As expected, this will make every index to change, so this takes *O(n)*.
+As expected, this will change every index, so this takes *O(n)*.

.JavaScript built-in array.shift
****
-The `shift()` method removes the first element from an array and returns that removed element. This method changes the length of the array. Runtime: O(n).
-****
+The `shift()` method shifts all elements to the left. In turn, it removes the first element from an array and returns that removed element. This method changes the length of the array.

+Runtime: O(n).
+****

=== Deleting element from the middle

-We can use the splice operator for this.
+We can use the `splice` method for deleting an item from the middle of an array.

.Deleting from the middle
[source, javascript]
----
-array.splice(2, 1); // delete 1 element at position 2
-// => array: [2, 5, 9, 6, 7]
+const array = [0, 1, 2, 3, 4];
+// Deleting from the middle
+array.splice(2, 1); // ↪️[2] <1>
+// array: [0, 1, 3, 4]
----
+<1> delete 1 element at position 2

Deleting from the middle might cause most of the elements of the array to move back one position to fill in for the eliminated item. Thus, runtime: O(n).

=== Deleting element from the end

Removing the last element is very straightforward:

.Deleting last element from the array
[source, javascript]
----
-array.pop(); // => array: [2, 5, 1, 9, 6]
+const array = [2, 5, 1, 9, 111];
+array.pop(); // ↪️111
+// array: [2, 5, 1, 9]
----

No other element has been shifted, so it’s an _O(1)_ runtime.

.JavaScript built-in `array.pop`
****
-The `pop()` method removes the last element from an array and returns that element. This method changes the length of the array. Runtime: O(1).
+The `pop()` method removes the last element from an array and returns that element. This method changes the length of the array.
+
+Runtime: O(1).
****

== Array Complexity

To sum up, the time complexity of an array is:

-.Time complexity for the array operations
+.Time/Space complexity for the array operations
|===
-.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity
+.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space
^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_
| Array ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n)
|===
(((Linear)))
(((Runtime, Linear)))
+(((Constant)))
+(((Runtime, Constant)))
+
+.Array Operations
+|===
+| Operation | Time Complexity | Usage
+| push ^| O(1) | Insert element to the right side.
+| pop ^| O(1) | Remove the rightmost element.
+| unshift ^| O(n) | Insert element to the left side.
+| shift ^| O(n) | Remove leftmost element.
+| splice ^| O(n) | Insert and remove from anywhere.
+|===
diff --git a/book/chapters/chapter2.adoc b/book/chapters/chapter2.adoc
index fb9674e2..7bc821d0 100644
--- a/book/chapters/chapter2.adoc
+++ b/book/chapters/chapter2.adoc
@@ -10,15 +10,19 @@ include::linear-data-structures-intro.adoc[]
include::array.adoc[]

// (g)
+<<<
include::linked-list.adoc[]

// (g)
+<<<
include::stack.adoc[]

// (g)
+<<<
include::queue.adoc[]

// (g)
+<<<
include::linear-data-structures-outro.adoc[]

:leveloffset: -1
diff --git a/book/chapters/chapter3.adoc b/book/chapters/chapter3.adoc
index 642f058f..6106c3b1 100644
--- a/book/chapters/chapter3.adoc
+++ b/book/chapters/chapter3.adoc
@@ -59,6 +59,6 @@ include::graph-search.adoc[]

// Graph summary
= Summary

-In this section, we learned about Graphs applications, properties and how we can implement them. We mention that you can represent a graph as a matrix or as a list of adjacencies. We went for implementing the later since it's more space efficient. We cover the basic graph operations like adding and removing nodes and edges. In the algorithms section, we are going to cover searching values in the graph.
+In this section, we learned about Graph applications and properties and how we can create them. We mentioned that you can represent a graph as a matrix or as a list of adjacencies. We went for implementing the latter since it's more space-efficient. We covered the basic graph operations, like adding and removing nodes and edges. In the algorithms section, we are going to cover searching for values in the graph.

:leveloffset: -1
diff --git a/book/chapters/linear-data-structures-intro.adoc b/book/chapters/linear-data-structures-intro.adoc
index 8687088d..a9b5d3b3 100644
--- a/book/chapters/linear-data-structures-intro.adoc
+++ b/book/chapters/linear-data-structures-intro.adoc
@@ -1,14 +1,16 @@
[partintro]
--
-Data Structures comes in many flavors. There’s no one to rule them all. There are tradeoffs for each one of them. Even thought in your day-to-day, you might not need to re-implementing them, knowing how they work internally would help you choose the right tool for the job. We are going to explore the most common data structures time and space complexity.
+Data Structures come in many flavors. There’s no one to rule them all. You have to know the tradeoffs so you can choose the right one for the job.
+
+Even though in your day-to-day you might not need to re-implement them, knowing how they work internally would help you know when to use one over the other, or even tweak them to create a new one. We are going to explore the most common data structures' time and space complexity.
.In this part we are going to learn about the following linear data structures: -- Array -- Linked List -- Stack -- Queue +- <> +- <> +- <> +- <> -Later, in the next part we are going to explore non-linear data structures like Graphs and Trees. +Later, in the next part, we are going to explore non-linear data structures like <> and <>. ifdef::backend-html5[] If you want to have a general overview of each one, take a look at the following interactive diagram: diff --git a/book/chapters/linear-data-structures-outro.adoc b/book/chapters/linear-data-structures-outro.adoc index 55de385b..dbd204b3 100644 --- a/book/chapters/linear-data-structures-outro.adoc +++ b/book/chapters/linear-data-structures-outro.adoc @@ -1,9 +1,6 @@ - = Array vs. Linked List & Queue vs. Stack -In this chapter, we explored the most used linear data structures such as Arrays, Linked Lists, Stacks and Queues. We implemented them and discussed the runtime of their operations. - -To sum up, +In this part of the book, we explored the most used linear data structures such as Arrays, Linked Lists, Stacks and Queues. We implemented them and discussed the runtime of their operations. .Use Arrays when… * You need to access data in random order fast (using an index). @@ -22,9 +19,9 @@ To sum up, * You need to access your data as last-in, first-out (LIFO). * You need to implement a <> -.Time Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) +.Time/Space Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) |=== -.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space ^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ | <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) | <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) diff --git a/book/chapters/linked-list.adoc b/book/chapters/linked-list.adoc index c6556ed7..016d4960 100644 --- a/book/chapters/linked-list.adoc +++ b/book/chapters/linked-list.adoc @@ -2,36 +2,37 @@ (((Linked List))) (((List))) (((Data Structures, Linear, Linked List))) -A list (or Linked List) is a linear data structure where each node is linked to another one. +A list (or Linked List) is a linear data structure where each node is "linked" to the next. -Linked Lists can be: +.Linked Lists can be: - Singly: every item has a pointer to the next node - Doubly: every node has a reference to the next and previous object - Circular: the last element points to the first one. -We are going to explore the first two in the next sections. + == Singly Linked List -Each element or node is *linked* to the next one by a reference. When a node only has the reference to the next element, it's called *singly linked list*: +Each element or node is *connected* to the next one by a reference. When a node only has one connection it's called *singly linked list*: .Singly Linked List Representation: each node has a reference (blue arrow) to the next one. image:image19.png[image,width=498,height=97] - -Usually, a Linked List is referenced by the first element in called *head* (or *root* node). For instance, if you want to get the `cat` element from the example above, then the only way to get there is using the next field on the head node. You would get `art` first, then use the next field recursively until you eventually get the `cat` element. 
+
Usually, a Linked List is referenced by its first element, called the *head* (or *root* node). For instance, if you want to get the `cat` element from the example above, then the only way to get there is using the `next` field on the head node. You would get `art` first, then use the `next` field recursively until you eventually get the `cat` element.

== Doubly Linked List

When each node has a connection to the `next` item and also the `previous` one, then we have a *doubly linked list*.

.Doubly Linked List: each node has a reference to the next and previous element.
image:image20.png[image,width=528,height=74]

With a doubly linked list, you can not only move forward but also backward. If you keep a reference to the last element (`cat`), you can step back and reach the middle part.

If we implement the code for the `Node` elements, it would be something like this:

// image:image21.png[image,width=528,height=285]

.Linked List Node Implementation
[source, javascript]
----
include::{codedir}/data-structures/linked-lists/node.js[tag=snippet]
----

== Linked List vs. Array

Arrays allow you to access data anywhere in the collection using an index. However, a Linked List visits nodes in sequential order. In the worst-case scenario, it takes _O(n)_ to get an element from a Linked List. You might be wondering: Isn’t an array always more efficient with _O(1)_ access time? It depends.

We also have to understand the space complexity to see the trade-offs between arrays and linked lists. An array pre-allocates contiguous blocks of memory. When it is getting full, it has to create a bigger array (usually 2x) and copy all the elements. It takes _O(n)_ to copy all the items over. On the other hand, a LinkedList’s nodes only reserve precisely the amount of memory they need. They don’t have to be next to each other, nor do large chunks of memory have to be booked beforehand like arrays. A Linked List is more on a "grow as you go" basis.

Another difference is that adding/deleting at the beginning of an array takes O(n); however, in a linked list it is a constant operation O(1), as we will implement later.

A drawback of a linked list is that if you want to insert/delete an element at the end of the list, you would have to navigate the whole collection to find the last one O(n).
However, this can be solved by keeping track of the last element in the list. We are going to implement that!

@@ -62,7 +63,7 @@ include::{codedir}/data-structures/linked-lists/linked-list.js[tag=constructor]
}
----

-In our constructor, we keep a reference of the first (and last node for performance reasons).
+In our constructor, we keep a reference to the `first` and also the `last` node for performance reasons.

== Searching by value

@@ -77,6 +78,7 @@ include::{codedir}/data-structures/linked-lists/linked-list.js[tag=searchByValue

If we find the element, we will return the index; otherwise, `undefined`. The runtime for locating an item by value is _O(n)_.

For finding elements by value or position, we are using the following helper function:

+
.Find elements using a callback
[source, javascript]
----
@@ -134,9 +136,6 @@ Appending an element at the end of the list can be done very effectively if we h
.Add element to the end of the linked list
image:image24.png[image,width=498,height=208]

-In code:
-
.Linked List's add to the end of the list implementation
[source, javascript]
----
@@ -155,8 +154,6 @@ Let’s do an example, with a doubly linked list. We want to insert the `new` no
.Inserting node in the middle of a doubly linked list.
image:image25.png[image,width=528,height=358]

-Let’s work in the code to do this:
-
.Linked List's add to the middle of the list
[source, javascript]
----
@@ -186,8 +183,6 @@ image:image26.png[image,width=528,height=74]

For instance, to remove the head (“art”) node, we change the variable `first` to point to the second node, “dog”. We also remove the variable `previous` from the "dog" node, so it doesn't point to the “art” node. The garbage collector will get rid of the “art” node when it sees nothing is using it anymore.

-In code, it looks like this:
-
.Linked List's remove from the beginning of the list
[source, javascript]
----
@@ -206,8 +201,6 @@ image:image27.png[image,width=528,height=221]

For instance, if we want to remove the last node “cat”, we use the last pointer to avoid iterating through the whole list. We check `last.previous` to get the “dog” node, make it the new `last`, and remove its next reference to “cat”. Since nothing is pointing to “cat”, then it is out of the list and eventually is deleted from memory by the garbage collector.

-Let’s code this up like this:
-
.Linked List's remove from the end of the list
[source, javascript]
----
@@ -244,7 +237,7 @@ So far, we have seen two liner data structures with different use cases. Here’s

.Big O cheat sheet for Linked List and Array
|===
-.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity
+.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space
^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_
| Array ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n)
| Linked List (singly) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n)
diff --git a/book/chapters/queue.adoc b/book/chapters/queue.adoc
index 5907afd0..bea2271e 100644
--- a/book/chapters/queue.adoc
+++ b/book/chapters/queue.adoc
@@ -1,12 +1,14 @@
= Queue
(((Queue)))
(((Data Structures, Linear, Queue)))
+(((First-In First-out)))
+(((FIFO)))
A queue is a linear data structure where the data flows in a *First-In-First-Out* (FIFO) manner.
.Queue data structure is like a line of people: the First-in, is the First-out
image:image30.png[image,width=528,height=171]

-A queue is like a line of people at the bank, the person that arrived first is the first to go out as well.
+A queue is like a line of people at the bank; the person that arrived first is the first to go out as well.

Similar to the stack, we only have two operations (insert and remove). In a Queue, we add elements to the back of the list and remove them from the front.

@@ -23,7 +25,7 @@ include::{codedir}/data-structures/queues/queue.js[tag=constructor]
We initialize the Queue by creating a linked list. Now, let’s add the `enqueue` and `dequeue` methods.

== Insertion
-
+(((Enqueue)))
For inserting elements into a queue, also known as *enqueue*, we add items to the back of the list using `addLast`:

.Queue's enqueue
[source, javascript]
----
include::{codedir}/data-structures/queues/queue.js[tag=enqueue, indent=0]
----

As discussed, this operation has a constant runtime.

== Deletion
-
+(((Dequeue)))
For removing elements from a queue, also known as *dequeue*, we remove elements from the front of the list using `removeFirst`:

.Queue's dequeue
[source, javascript]
----
include::{codedir}/data-structures/queues/queue.js[tag=dequeue, indent=0]
----

You can see that the items are dequeued in the same order they were added, FIFO (first-in, first out).

As an experiment, we can see in the following table that if we had implemented the Queue using an array, its enqueue time would be _O(n)_ instead of _O(1)_. Check it out:

-.Time complexity for queue operations
+.Time/Space complexity for queue operations
|===
-.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity
+.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space
^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_
| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n)
| Queue (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n)
|===

indexterm:[Runtime, Linear]
-
-= Summary
-
-In this chapter, we explored the most used linear data structures such as Arrays, Linked Lists, Stacks and Queues. We implemented them and discussed the runtime of their operations.
-
-To sum up,
-
-.Use Arrays when…
-* You need to access data in random order fast (using an index).
-* Your data is multi-dimensional (e.g., matrix, tensor).
-
-.Use Linked Lists when:
-* You will access your data sequentially.
-* You want to save memory and only allocate memory as you need it.
-* You want constant time to remove/add from extremes of the list.
-
-.Use a Queue when:
-* You need to access your data in a first-come, first served basis (FIFO).
-* You need to implement a <>
-
-.Use a Stack when:
-* You need to access your data as last-in, first-out (LIFO).
-* You need to implement a <> - -.Time Complexity of Linear Data Structures (Array, LinkedList, Stack & Queues) -|=== -.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity -^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ -| <> ^|O(1) ^|O(n) ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(n) ^|O(1) ^|O(n) -| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(n)* ^|O(n) -| <> ^|O(n) ^|O(n) ^|O(1) ^|O(n) ^|O(1) ^|O(1) ^|O(n) ^|*O(1)* ^|O(n) -| <> ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) -| Queue (w/array) ^|- ^|- ^|- ^|- ^|*O(n)* ^|- ^|- ^|O(1) ^|O(n) -| <> (w/list) ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) -|=== diff --git a/book/chapters/stack.adoc b/book/chapters/stack.adoc index 95178270..e0051ed7 100644 --- a/book/chapters/stack.adoc +++ b/book/chapters/stack.adoc @@ -1,6 +1,8 @@ = Stack (((Stack))) (((Data Structures, Linear, Stack))) +(((Last-In First-out))) +(((LIFO))) The stack is a data structure that restricts the way you add and remove data. It only allows you to insert and retrieve in a *Last-In-First-Out* (LIFO) fashion. An analogy is to think the stack is a rod and the data are discs. You can only take out the last one you put in. @@ -66,9 +68,9 @@ As you can see if we add new items they will be the first to go out to honor LIF Implementing the stack with an array and linked list would lead to the same time complexity: -.Time complexity for the stack operations +.Time/Space complexity for the stack operations |=== -.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space Complexity +.2+.^s| Data Structure 2+^s| Searching By 3+^s| Inserting at the 3+^s| Deleting from .2+.^s| Space ^|_Index/Key_ ^|_Value_ ^|_beginning_ ^|_middle_ ^|_end_ ^|_beginning_ ^|_middle_ ^|_end_ | Stack ^|- ^|- ^|- ^|- ^|O(1) ^|- ^|- ^|O(1) ^|O(n) |=== diff --git a/src/data-structures/linked-lists/linked-list.js b/src/data-structures/linked-lists/linked-list.js index 3197b68b..8cee4abe 100644 --- a/src/data-structures/linked-lists/linked-list.js +++ b/src/data-structures/linked-lists/linked-list.js @@ -148,7 +148,7 @@ class LinkedList { // tag::find[] /** - * Iterate through the list until callback returns thruthy + * Iterate through the list until callback returns a truthy value * @example see #get and #indexOf * @param {Function} callback evaluates current node and index. * If any value other than undefined it's returned it will stop the search. From 01c91490e37fc0bbcb214d3b138b9af9514e730b Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Thu, 28 Mar 2019 07:25:43 -0400 Subject: [PATCH 017/326] better wording --- book/chapters/algorithms-analysis.adoc | 46 +++++++++++-------- .../divide-and-conquer--fibonacci.adoc | 1 + 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/book/chapters/algorithms-analysis.adoc b/book/chapters/algorithms-analysis.adoc index d9e3c051..823dcb6d 100644 --- a/book/chapters/algorithms-analysis.adoc +++ b/book/chapters/algorithms-analysis.adoc @@ -43,10 +43,10 @@ If you play a game, you are devising strategies (or algorithms) to help you win. TIP: Algorithms are instructions on how to perform a task. == Comparing Algorithms - +(((Comparing Algorithms))) Not all algorithms are created equal. There are “good” and “bad” algorithms. The good ones are fast; the bad ones are slow. Slow algorithms cost more money to run. Inefficient algorithms could make some calculations impossible in our lifespan! 
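+
+To make "good" vs. "bad" concrete, here is a quick, contrived sketch of two correct algorithms for the same problem, one linear and one constant time (the function names `sumUpToLinear` and `sumUpToConstant` are made up for illustration):
+
+[source, javascript]
+----
+// O(n): touches every number up to n.
+function sumUpToLinear(n) {
+  let total = 0;
+  for (let i = 1; i <= n; i += 1) total += i;
+  return total;
+}
+
+// O(1): Gauss' formula gives the same answer in a single step.
+function sumUpToConstant(n) {
+  return (n * (n + 1)) / 2;
+}
+----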
-To give you a clearer picture of how different algorithms perform as the input size grows, take a look at the following table.
+To give you a clearer picture of how different algorithms perform as the input size grows, take a look at the following problems and how their relative execution time changes as the input size increases.

.Relationship between algorithm input size and time taken to complete
[cols=",,,,,",options="header",]
|=============================================================================================
|Input size -> |10 |10k |100k |1M |100M
|Finding if n is odd |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec. |< 1 sec.
|Sorting elements in array with merge sort |< 1 sec. |< 1 sec. |second |seconds |minutes
|Sorting elements in array with Bubble Sort |< 1 sec. |minutes |day |months |never
|Finding all subsets of a given set |< 1 sec. |> vigintillion years |> centillion years |∞ |∞
|Find all permutations of a string |4 sec. |> vigintillion years |> centillion years |∞ |∞
|=============================================================================================

Most algorithms are affected by the size of the input (`n`). Let's say you need to arrange numbers in ascending order. Sorting ten items will naturally take much less time than sorting 2 million. But how much longer? As the input size grows, some algorithms take proportionally more time; we classify them as <> runtime [or `O(n)`]. Others grow with the square of the input; we call them <> running time [or `O(n^2^)`].

-If you keep the input size the same, and run diffferent algorithms implementations you would notice the difference between an efficient algorithm and a slow one. An excellent sorting algorithm is `mergesort` for instance, and inefficient algorithm for large inputs is `bubble sort` .
+From another perspective, if you keep the input size the same and run different algorithm implementations, you would notice the difference between an efficient algorithm and a slow one. For example, a good sorting algorithm is <>, and an inefficient algorithm for large inputs is <>.

Organizing 1 million elements with merge sort takes 20 seconds while bubble sort takes 12 days, ouch!

-The amazing thing is that both programs are measured on the same hardware with the same data!
+The amazing thing is that both programs are solving the same problem with the same data and hardware, and yet there's a big difference in time!

-After completing this book, you are going to *think differently*.
+After completing this book, you are going to _think algorithmically_.
You will be able to scale your programs while you are designing them.
-Find bottlenecks of existing software and have an "algorithmic toolbox" to switch algorithms and make them faster without having to upgrade hardware. [big]#💸#
+Find bottlenecks of existing software and have an <> to optimize algorithms and make them faster without having to pay more for cloud computing (e.g., AWS EC2 instances). [big]#💸#

+<<<
== Increasing your code performance

The first step to improving your code performance is to measure it. As somebody said:
+(((quotes)))
[quote, H. J. Harrington]
Measurement is the first step that leads to control and eventually to improvement. If you can’t measure something, you can’t understand it. If you can’t understand it, you can’t control it. If you can’t control it, you can’t improve it.
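+
+As a first rough measurement, you could time the same function with two input sizes using the built-in `console.time` (a quick sketch only; serious measurements need warm-up and many repetitions, and `sumUpTo` is a made-up example function):
+
+[source, javascript]
+----
+function sumUpTo(n) {
+  let total = 0;
+  for (let i = 1; i <= n; i += 1) total += i;
+  return total;
+}
+
+console.time('n = 1,000');
+sumUpTo(1e3);
+console.timeEnd('n = 1,000'); // prints the elapsed wall-clock time
+
+console.time('n = 1,000,000');
+sumUpTo(1e6);
+console.timeEnd('n = 1,000,000'); // noticeably longer: the work grows with n
+----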
In this section, we are going to learn the basics of measuring our current code performance and comparing it with other algorithms.

=== Calculating Time Complexity
-
+(((Time complexity)))
Time complexity, in computer science, is a function that describes the number of operations a program will execute given the size of the input `n`.

-How do get a function that gives us the number of operations that will be executed? Well, we count line by line and mind code inside loops. Let's do an example to explain this point. For instance, we have a function to find the minimum value on an array called `getMin`.
+How do we get a function that gives us the number of operations that will be executed? Well, we count line by line and mind the code inside loops. Let's do an example to explain this point. For instance, we have a function to find the minimum value in an array called `getMin`.

.Translating lines of code to an approximate number of operations
image:image4.png[Operations per line]

Assuming that each line of code is an operation, we get the following:

_3n + 3_

That means that if we give an array of 3 elements, e.g. `getMin([3, 2, 9])`, then it will execute around _3(3)+3 = 12_ operations. Of course, this is not for every case. For instance, Line 12 is only executed if the condition on line 11 is met. As you might learn in the next section, we want to get the big picture and get rid of smaller terms to compare algorithms more easily.

== Space Complexity
-
+(((Space Complexity)))
Space complexity is similar to time complexity. However, instead of the count of operations executed, it will account for the amount of memory used additionally to the input.

For calculating the *space complexity*, we keep track of the “variables” and memory used. In the `getMin` example, we just create a single variable called `min`. So, the space complexity is 1. In other algorithms, if we have to use an auxiliary array, then the space complexity would be `n`.

=== Simplifying Complexity with Asymptotic Analysis
-
+(((Asymptotic Analysis)))
When we are comparing algorithms, we don't want to have complex expressions. Would you prefer comparing two algorithms as "3n^2^ + 7n" vs. "1000 n + 2000", or as "n^2^ vs. n"? Well, that's when asymptotic analysis comes to the rescue.

-Asymptotic analysis is the of functions when their inputs approach infinity.
+TIP: Asymptotic analysis describes the behavior of functions as their inputs approach infinity.

-In the previous example, we analyzed `getMin` with an array of size 3, what happen size is 10 or 10k or a million?
+In the previous example, we analyzed `getMin` with an array of size 3; what happens if the size is 10, 10k, or a million?

.Operations performed by an algorithm with a time complexity of 3n+3
[cols=",,",options="header",]
|===========================
|n (size) |Operations |total
|10 |3(10) + 3 |33
|10k |3(10k)+3 |30,003
|1M |3(1M)+3 |3,000,003
|===========================

-As the input size `n` grows bigger and bigger then the expression _3n + 3_ could be represented as _3n_ without loosing too much or even _n_. Dropping terms might look like a stretch at first, but you will see that what matters the most is the higher order terms of the function rather than lesser terms and constants. There’s a notation called *Big O*, where O refers to the *order of the function*.
+As the input size `n` grows bigger and bigger, the expression _3n + 3_ gets closer and closer to _3n_.
Dropping terms might look like a stretch at first, but you will see that what matters the most is the higher-order terms of the function rather than lesser terms and constants.

=== What is Big O Notation?
(((Big O)))
There’s a notation called *Big O*, where `O` refers to the *order of the function*.

TIP: Big O = Big Order of a function.

If you have a program whose runtime is:

_7n^3^ + 3n^2^ + 5_

You can express it in Big O notation as _O(n^3^)_. The other terms (_3n^2^ + 5_) will become less and less significant as the input grows bigger.

Big O notation only cares about the “biggest” terms in the time/space complexity. So, it combines what we learned about time and space complexity, asymptotic analysis, and adds a worst-case scenario.

@@ -141,7 +147,7 @@ TIP: Big O only cares about the highest order of the run time function and the w

WARNING: Don't drop terms that multiply other terms. _O(n log n)_ is not equivalent to _O(n)_. However, _O(n + log n)_ is.

-There are many common notations like polynomial, _O(n^2^)_ like we saw in the `getMin` example; constant O(1) and many more that we are going to explore in the next chapter.
+There are many common notations, like polynomial _O(n^2^)_ as we saw in the `getMin` example, constant _O(1)_, and many more that we are going to explore in the next chapter.

Again, time complexity is not a direct measure of how long a program takes to execute but rather how many operations it performs given the input size. Nevertheless, there’s a relationship between time complexity and clock time, as we can see in the following table.

@@ -162,7 +168,7 @@ NOTE: These times are under the assumption of running on 1 GHz CPU and that it c

== Summary

-In this chapter, we learned how you could measure your algorithm performance using time complexity. Rather than timing how long your program take to run you can approximate the number of operations it will perform based on the input size.
+In this chapter, we learned how you could measure your algorithm's performance using time complexity. Rather than timing how long your program takes to run, you can approximate the number of operations it will perform based on the input size.

We learned about time and space complexity and how they can be translated to Big O notation. Big O refers to the *order* of the function.

diff --git a/book/chapters/divide-and-conquer--fibonacci.adoc b/book/chapters/divide-and-conquer--fibonacci.adoc
index 1f67e5fb..a215ff43 100644
--- a/book/chapters/divide-and-conquer--fibonacci.adoc
+++ b/book/chapters/divide-and-conquer--fibonacci.adoc
@@ -60,6 +60,7 @@ Exponential time complexity is pretty bad. Can we do better?

In the call tree, you can notice that every element in red and with asterisks `*` is called more than once. We are repeating calculations too many times!

+(((quotes)))
[quote, Dynamic Programming]
Those who cannot remember the past are condemned to repeat it.
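+
+One way to stop repeating those calculations is to remember past results. The following is a rough sketch using a plain `Map` as a cache, not necessarily how this chapter ends up implementing it:
+
+[source, javascript]
+----
+// Each fib(n) is computed once and cached: O(n) instead of O(2^n).
+function fib(n, memo = new Map()) {
+  if (n < 2) return n;
+  if (!memo.has(n)) {
+    memo.set(n, fib(n - 1, memo) + fib(n - 2, memo));
+  }
+  return memo.get(n);
+}
+----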
From 38e8b3d59069aaefccbff486ef6df8f84c52b448 Mon Sep 17 00:00:00 2001 From: Adrian Mejia Date: Thu, 28 Mar 2019 07:48:28 -0400 Subject: [PATCH 018/326] fix preface --- book/chapters/preface.adoc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/book/chapters/preface.adoc b/book/chapters/preface.adoc index 37daf2e9..0c8a30ee 100644 --- a/book/chapters/preface.adoc +++ b/book/chapters/preface.adoc @@ -3,28 +3,28 @@ == What is in this book? -_{book-title}_ is a book that can be read from cover to cover, where each section builds on top of the previous one. Also, it can be used as a reference manual where developers can refresh certain topics before an interview or looking for ideas to solve a problem optimally. (Cheat sheet summarizing all time complexities and implementations can be found in the <