Commit 28f9147
Updates for October 2024 class
Parent: 8a1576a
13 files changed: +250 -161 lines

.DS_Store (binary file, 0 Bytes, not shown)

StartHere.ipynb (+6 -6)

@@ -15,6 +15,9 @@
 "cell_type": "code",
 "execution_count": 5,
 "metadata": {
+"jupyter": {
+"source_hidden": true
+},
 "tags": []
 },
 "outputs": [
@@ -109,13 +112,10 @@
 "# My Upcoming O'Reilly Live Training Courses\n",
 "https://deitel.com/LearnWithDeitel\n",
 "\n",
-"* **September 10**—Python Full Throttle (with updates through 3.12)\n",
-"* **September 17**—Java Full Throttle (with updates through Java 22/23)\n",
-"* **October 1**—Python Full Throttle (with updates through 3.12)\n",
-"* **October 8**—Modern C++ Full Throttle: Intro to C++20 & the Standard Library — A Presentation-Only Intro to Fundamentals, Arrays, Vectors, Pointers, OOP, Ranges, Views, Functional Programming; Brief Intro to Concepts, Modules & Coroutines\n",
-"* **October 15**—Python Data Science Full Throttle (includes a new segment on **programming with Generative AI APIs**)\n",
 "* **November 5**—Java Full Throttle (with updates through Java 22/23)\n",
-"* **November 12**—Python Full Throttle (with updates through 3.12)"
+"* **November 12**—Python Full Throttle (with updates through 3.12)\n",
+"* **December 3**—Python Full Throttle (with updates through 3.12)\n",
+"* **December 10**—Python Data Science Full Throttle (includes a new segment on **programming with Generative AI APIs**)\n"
 ]
 },
 {

WrapUp.ipynb (+3 -6)

@@ -49,13 +49,10 @@
 "# My Upcoming O'Reilly Live Training Courses\n",
 "https://deitel.com/LearnWithDeitel\n",
 "\n",
-"* **September 10**—Python Full Throttle (with updates through 3.12)\n",
-"* **September 17**—Java Full Throttle (with updates through Java 22/23)\n",
-"* **October 1**—Python Full Throttle (with updates through 3.12)\n",
-"* **October 8**—Modern C++ Full Throttle: Intro to C++20 & the Standard Library — A Presentation-Only Intro to Fundamentals, Arrays, Vectors, Pointers, OOP, Ranges, Views, Functional Programming; Brief Intro to Concepts, Modules & Coroutines\n",
-"* **October 15**—Python Data Science Full Throttle (includes a new segment on **programming with Generative AI APIs**)\n",
 "* **November 5**—Java Full Throttle (with updates through Java 22/23)\n",
-"* **November 12**—Python Full Throttle (with updates through 3.12)"
+"* **November 12**—Python Full Throttle (with updates through 3.12)\n",
+"* **December 3**—Python Full Throttle (with updates through 3.12)\n",
+"* **December 10**—Python Data Science Full Throttle (includes a new segment on **programming with Generative AI APIs**)\n"
 ]
 },
 {

ch11/.DS_Store (binary file, 6 KB, not shown)

ch11/Ch11.ipynb (+68 -35)

@@ -252,6 +252,35 @@
 "blob.tags # list of (word, part-of-speech-tag) tuples"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {
+"collapsed": true,
+"jupyter": {
+"outputs_hidden": true
+}
+},
+"source": [
+"<!--\n",
+"import nltk\n",
+"from nltk import pos_tag\n",
+"from nltk.tokenize import word_tokenize\n",
+"\n",
+"# Ensure the NLTK resources are downloaded\n",
+"nltk.download('punkt')\n",
+"nltk.download('averaged_perceptron_tagger')\n",
+"\n",
+"# Text to analyze\n",
+"text = \"Yesterday was a beautiful day. Tomorrow looks like bad weather.\"\n",
+"\n",
+"# Tokenize and tag parts of speech\n",
+"tokens = word_tokenize(text)\n",
+"pos_tags = pos_tag(tokens)\n",
+"\n",
+"pos_tags\n",
+"-->"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -978,6 +1007,32 @@
 "# 11.3 Visualizing Word Frequencies with Bar Charts and Word Clouds (1 of 4)"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"* Could not find an old-English stop words list\n",
+"* So I asked GenAI \n",
+"> **GenAI prompt:** Create a Python list of old english stop words from Romeo and Juliet. Include the items that would appear after apostrophes so words like s and t will be removed during stop word elimination. Do not include words that are already in the NLTK English stop words list."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"stops += [\n",
+"    'thou', 'thee', 'thy', 'thine', 'art', 'hast', 'hath', 'doth', 'dost',\n",
+"    'wilt', 'shalt', 'ye', 'ere', 'oft', 'naught', 'nay', 'anon', 'tarry', \n",
+"    'wot', 'whence', 'hence', 'whither', 'prithee', 'sirrah', 'zounds', \n",
+"    'forsooth', 'verily', 'fie', 'marry', 'troth', 'wherefore', 'hark', \n",
+"    'hither', 'thither', 'yon', 'yonder', 'thence', \n",
+"    \"'tis\", \"'twas\", \"'twere\", \"'twill\", \"'twould\", \"'d\", \"'s\", \"'ll\", \"'re\", \n",
+"    \"'ve\", \"'m\", \"'t\", \"o'\", \"'n\"\n",
+"]"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -1016,7 +1071,7 @@
 },
 "outputs": [],
 "source": [
-"items = [item for item in items if item[0] not in stops and item[0] != '’']"
+"items = [item for item in items if item[0] not in stops]"
 ]
 },
 {
@@ -1190,7 +1245,8 @@
 "source": [
 "#import matplotlib.pyplot as plt\n",
 "axes = df.plot.bar(x='word', y='count')\n",
-"plt.gcf().tight_layout() # compress chart to ensure all components fit "
+"plt.gcf().tight_layout() # compress chart to ensure all components fit \n",
+"plt.show()"
 ]
 },
 {
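
Editor's note: a self-contained sketch of the bar-chart cell above; the `df` with 'word' and 'count' columns is a hypothetical stand-in for the notebook's word-frequency DataFrame:

import matplotlib.pyplot as plt
import pandas as pd

# illustrative data standing in for the notebook's top-20 word counts
df = pd.DataFrame({'word': ['romeo', 'juliet', 'love'],
                   'count': [300, 180, 150]})
axes = df.plot.bar(x='word', y='count')
plt.gcf().tight_layout()  # compress chart to ensure all components fit
plt.show()                # required outside Jupyter to display the window
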
@@ -1369,7 +1425,7 @@
 "outputs": [],
 "source": [
 "from IPython.display import Image\n",
-"Image(filename='RomeoAndJulietHeart.png', width=400)"
+"Image(filename='RomeoAndJulietHeart.png', width=600)"
 ]
 },
 {
@@ -1410,7 +1466,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
@@ -1421,7 +1477,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
@@ -1454,15 +1510,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 23,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
 "outputs": [],
 "source": [
 "document = nlp(\n",
-"    \"\"\"In 1994, Tim Berners-Lee founded the World Wide Web Consortium \n",
-"    which is devoted to developing web technologies\"\"\")"
+"    'In 1994, Tim Berners-Lee founded the World Wide Web Consortium which is devoted to developing web technologies')"
 ]
 },
 {
@@ -1486,22 +1541,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 24,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"1994: DATE\n",
-"Tim Berners-Lee: PERSON\n",
-"the World Wide Web Consortium \n",
-"    : ORG\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "for entity in document.ents:\n",
 "    print(f'{entity.text}: {entity.label_}')"
@@ -1516,7 +1560,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 27,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
@@ -1531,22 +1575,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 28,
+"execution_count": null,
 "metadata": {
 "tags": []
 },
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"2026: DATE\n",
-"Amanda Brown: PERSON\n",
-"Google: ORG\n",
-"Gemini AI: PRODUCT\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "for entity in document.ents:\n",
 "    print(f'{entity.text}: {entity.label_}')"

ch12_Mastodon/ch12_Mastodon.ipynb (+17 -2)

@@ -1078,7 +1078,7 @@
 "    # use Mastodon.py utility function fetch_next to get next page of results\n",
 "    result = mastodon.fetch_next(previous_result) \n",
 "\n",
-"    # if there are results add them to saved_toots; otherwise, temrinate loop\n",
+"    # if there are results add them to saved_toots; otherwise, terminate loop\n",
 "    if result:\n",
 "        saved_toots += result\n",
 "    else:\n",
@@ -1100,6 +1100,7 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
+"scrolled": true,
 "tags": []
 },
 "outputs": [],
@@ -1811,6 +1812,15 @@
 "bad_locations = get_geocodes(toots)"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"bad_locations"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {
@@ -1886,6 +1896,9 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
+"jupyter": {
+"source_hidden": true
+},
 "tags": []
 },
 "outputs": [],
@@ -1897,6 +1910,9 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
+"jupyter": {
+"source_hidden": true
+},
 "tags": []
 },
 "outputs": [],
@@ -2038,7 +2054,6 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"scrolled": true,
 "tags": []
 },
 "outputs": [],

ch14/Ch14.ipynb (+15 -17)

@@ -151,9 +151,7 @@
 },
 {
 "cell_type": "markdown",
-"metadata": {
-"jp-MarkdownHeadingCollapsed": true
-},
+"metadata": {},
 "source": [
 "## 14.2.1 k-Nearest Neighbors Algorithm (k-NN) \n",
 "* Predict a sample’s class by looking at the **_k_ training samples** **nearest in \"distance\"** to the **sample** \n",
@@ -441,7 +439,8 @@
 "    axes.set_xticks([])  # remove x-axis tick marks\n",
 "    axes.set_yticks([])  # remove y-axis tick marks\n",
 "    axes.set_title(target)\n",
-"plt.tight_layout()"
+"plt.tight_layout()\n",
+"plt.show()"
 ]
 },
 {
@@ -913,7 +912,8 @@
 "source": [
 "figure = plt.figure(figsize=(7, 6))\n",
 "axes = sns.heatmap(confusion_df, annot=True, \n",
-"                   cmap=matplotlib.colormaps['nipy_spectral_r']) "
+"                   cmap=matplotlib.colormaps['nipy_spectral_r']) \n",
+"plt.show()"
 ]
 },
 {
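
Editor's note: for context, a hedged sketch of how `confusion_df` is presumably built upstream of the heatmap cell; the variable names `y_test` and `predicted` are assumptions based on the usual pattern:

import pandas as pd
from sklearn.metrics import confusion_matrix

# rows = actual digit classes, columns = predicted classes (assumed)
confusion = confusion_matrix(y_true=y_test, y_pred=predicted)
confusion_df = pd.DataFrame(confusion, index=range(10), columns=range(10))
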
@@ -1414,14 +1414,7 @@
 },
 "outputs": [],
 "source": [
-"tsne = TSNE(n_components=2, learning_rate='auto', init='pca', random_state=11) "
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"**New: `learning_rate='auto'` and `init='pca'` added because they will soon be new defaults for TSNE.**"
+"tsne = TSNE(n_components=2, random_state=11) "
 ]
 },
 {
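
Editor's note: the explicit `learning_rate='auto'` and `init='pca'` arguments were dropped, presumably because those values became the TSNE defaults (as of scikit-learn 1.2). A sketch of the reduction step this estimator feeds:

from sklearn.manifold import TSNE

tsne = TSNE(n_components=2, random_state=11)
# fit_transform learns the embedding and returns one 2D point per sample;
# digits.data is assumed from the earlier load_digits() cells
reduced_data = tsne.fit_transform(digits.data)
print(reduced_data.shape)  # (1797, 2) for the full Digits dataset
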
@@ -1487,13 +1480,15 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
+"scrolled": true,
 "tags": []
 },
 "outputs": [],
 "source": [
 "import matplotlib.pyplot as plt\n",
 "figure = plt.figure(figsize=(5, 5))\n",
-"dots = plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c='black')"
+"dots = plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c='black')\n",
+"plt.show()"
 ]
 },
 {
@@ -1548,7 +1543,8 @@
 "dots = plt.scatter(reduced_data[:, 0], reduced_data[:, 1],\n",
 "    c=digits.target, cmap=matplotlib.colormaps['nipy_spectral_r'])\n",
 "    \n",
-"colorbar = plt.colorbar(dots) "
+"colorbar = plt.colorbar(dots) \n",
+"plt.show()"
 ]
 },
 {
@@ -1957,7 +1953,8 @@
 "import seaborn as sns\n",
 "sns.set_style('whitegrid')\n",
 "grid = sns.pairplot(data=iris_df, vars=iris_df.columns[0:4], hue='species')\n",
-"grid.fig.set_size_inches(9, 5.5)"
+"grid.fig.set_size_inches(9, 5.5)\n",
+"plt.show()"
 ]
 },
 {
@@ -2305,7 +2302,8 @@
 "# plot centroids as larger black dots\n",
 "import matplotlib.pyplot as plt\n",
 "\n",
-"dots = plt.scatter(iris_centers[:,0], iris_centers[:,1], s=100, c='k')"
+"dots = plt.scatter(iris_centers[:,0], iris_centers[:,1], s=100, c='k')\n",
+"plt.show()"
 ]
 },
 {
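
Editor's note: a hedged sketch of how `iris_centers` is presumably produced before the scatter above: KMeans cluster centers projected into the same 2D space as the plotted samples. The PCA projection and variable names are assumptions:

from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

# iris is assumed loaded earlier via sklearn.datasets.load_iris()
kmeans = KMeans(n_clusters=3, random_state=11).fit(iris.data)
pca = PCA(n_components=2, random_state=11).fit(iris.data)
iris_centers = pca.transform(kmeans.cluster_centers_)  # centers in 2D
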
