From ff6274adc7008f79b71d3a695736994da70edfd2 Mon Sep 17 00:00:00 2001 From: edwardsanchez001 Date: Mon, 12 Jul 2021 21:50:17 -0500 Subject: [PATCH] turning in --- .../.ipynb_checkpoints/Q1-checkpoint.ipynb | 347 ++++++++++++++++++ .../.ipynb_checkpoints/Q3-checkpoint.ipynb | 207 +++++++++++ your-code/Q1.ipynb | 247 +++++++++++-- your-code/Q3.ipynb | 10 +- your-code/doc1.txt | 1 + your-code/doc2.txt | 1 + your-code/doc3.txt | 1 + 7 files changed, 770 insertions(+), 44 deletions(-) create mode 100644 your-code/.ipynb_checkpoints/Q1-checkpoint.ipynb create mode 100644 your-code/.ipynb_checkpoints/Q3-checkpoint.ipynb create mode 100644 your-code/doc1.txt create mode 100644 your-code/doc2.txt create mode 100644 your-code/doc3.txt diff --git a/your-code/.ipynb_checkpoints/Q1-checkpoint.ipynb b/your-code/.ipynb_checkpoints/Q1-checkpoint.ipynb new file mode 100644 index 0000000..4185922 --- /dev/null +++ b/your-code/.ipynb_checkpoints/Q1-checkpoint.ipynb @@ -0,0 +1,347 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the cell below, create a Python function that wraps your previous solution for the Bag of Words lab.\n", + "\n", + "Requirements:\n", + "\n", + "1. Your function should accept the following parameters:\n", + " * `docs` [REQUIRED] - array of document paths.\n", + " * `stop_words` [OPTIONAL] - array of stop words. The default value is an empty array.\n", + "\n", + "1. Your function should return a Python object that contains the following:\n", + " * `bag_of_words` - array of strings of normalized unique words in the corpus.\n", + " * `term_freq` - array of the term-frequency vectors." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ironhack is cool.\n", + "I love Ironhack.\n", + "I am a student at Ironhack.\n", + "ironhack is cool i love ironhack i am a student at ironhack\n", + "['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']\n", + "[['ironhack', 'is', 'cool'], ['i', 'love', 'ironhack'], ['i', 'am', 'a', 'student', 'at', 'ironhack']]\n", + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n" + ] + }, + { + "data": { + "text/plain": [ + "{'bag_of_words': ['ironhack',\n", + " 'is',\n", + " 'cool',\n", + " 'i',\n", + " 'love',\n", + " 'am',\n", + " 'a',\n", + " 'student',\n", + " 'at'],\n", + " 'term_freq': [[1, 1, 1, 0, 0, 0, 0, 0, 0],\n", + " [1, 0, 0, 1, 1, 0, 0, 0, 0],\n", + " [1, 0, 0, 1, 0, 1, 1, 1, 1]]}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Import required libraries\n", + "import re\n", + "\n", + "# Define function\n", + "def get_bow_from_docs(docs, stop_words=[]):\n", + "\n", + " # In the function, first define the variables you will use such as `corpus`, `bag_of_words`, and `term_freq`.\n", + "\n", + "\n", + "\n", + "\n", + " corpus = []\n", + "\n", + "\n", + " # Write your code here\n", + " for i in range(len(docs)):\n", + " with open(docs[i], \"r\") as a_file:\n", + " for line in a_file:\n", + " stripped_line = line.strip()\n", + " corpus.append(stripped_line)\n", + " print(stripped_line)\n", + "\n", + " corpus = [x.lower() for x in corpus]\n", + " corpus = re.sub('\\.', '', ' '.join(corpus))\n", + " #corpus = [re.sub(r\"\\.\",\"\",corpus[j]) for j in range(len(corpus))]\n", + " print(corpus)\n", + "\n", + "\n", + " corpus = corpus.split()\n", + " bag_of_words = []\n", + " for i in corpus:\n", + " if i not in bag_of_words:\n", + " bag_of_words.append(i)\n", + " else:\n", + " continue\n", + "\n", 
+ " print(bag_of_words)\n", + "\n", + " corpus = ['ironhack is cool','i love ironhack','i am a student at ironhack']\n", + " # Write your code here\n", + " pieces = []\n", + " term_freq = []\n", + " for x in corpus:\n", + " pieces.append(x.split(\" \"))\n", + " print(pieces)\n", + "\n", + " for s in pieces:\n", + " temp = []\n", + " for b in bag_of_words:\n", + " y = s.count(b)\n", + " temp.append(y)\n", + " term_freq.append(temp)\n", + "\n", + " print(term_freq)\n", + "\n", + " # Now return your output as an object\n", + " return {\n", + " \"bag_of_words\": bag_of_words,\n", + " \"term_freq\": term_freq\n", + " }\n", + " \n", + "docs = ['doc1.txt', 'doc2.txt', 'doc3.txt']\n", + "get_bow_from_docs(docs)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test your function without stop words. You should see the output like below:\n", + "\n", + "```{'bag_of_words': ['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at'], 'term_freq': [[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]}```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ironhack is cool.\n", + "I love Ironhack.\n", + "I am a student at Ironhack.\n", + "ironhack is cool i love ironhack i am a student at ironhack\n", + "['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']\n", + "[['ironhack', 'is', 'cool'], ['i', 'love', 'ironhack'], ['i', 'am', 'a', 'student', 'at', 'ironhack']]\n", + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n", + "{'bag_of_words': ['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at'], 'term_freq': [[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]}\n" + ] + } + ], + "source": [ + "# Define doc paths array\n", + "docs = ['doc1.txt', 'doc2.txt', 'doc3.txt']\n", + "\n", + "# Obtain BoW from 
your function\n", + "bow = get_bow_from_docs(docs)\n", + "\n", + "# Print BoW\n", + "print(bow)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If your attempt above is successful, nice work done!\n", + "\n", + "Now test your function again with the stop words. In the previous lab we defined the stop words in a large array. In this lab, we'll import the stop words from Scikit-Learn." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting sklearn\n", + " Downloading sklearn-0.0.tar.gz (1.1 kB)\n", + "Collecting scikit-learn\n", + " Downloading scikit_learn-0.24.2-cp39-cp39-macosx_10_13_x86_64.whl (7.3 MB)\n", + "\u001b[K |████████████████████████████████| 7.3 MB 1.4 MB/s eta 0:00:01 |▎ | 71 kB 388 kB/s eta 0:00:19 |█████ | 1.2 MB 540 kB/s eta 0:00:12\n", + "\u001b[?25hCollecting threadpoolctl>=2.0.0\n", + " Downloading threadpoolctl-2.1.0-py3-none-any.whl (12 kB)\n", + "Collecting joblib>=0.11\n", + " Downloading joblib-1.0.1-py3-none-any.whl (303 kB)\n", + "\u001b[K |████████████████████████████████| 303 kB 18.5 MB/s eta 0:00:01\n", + "\u001b[?25hRequirement already satisfied: numpy>=1.13.3 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.20.3)\n", + "Collecting scipy>=0.19.1\n", + " Downloading scipy-1.7.0-cp39-cp39-macosx_10_9_x86_64.whl (32.1 MB)\n", + "\u001b[K |████████████████████████████████| 32.1 MB 34 kB/s eta 0:00:01 |████ | 3.9 MB 20.8 MB/s eta 0:00:02 |█████▋ | 5.7 MB 2.1 MB/s eta 0:00:13 |███████▍ | 7.4 MB 2.1 MB/s eta 0:00:12 |███████▊ | 7.7 MB 2.1 MB/s eta 0:00:12 | 8.7 MB 2.1 MB/s eta 0:00:12 |██████████▏ | 10.2 MB 2.7 MB/s eta 0:00:09 |███████████ | 10.9 MB 2.7 MB/s eta 0:00:08��███▌ | 11.5 MB 2.7 MB/s eta 0:00:08 |██████████████████ | 
18.2 MB 1.7 MB/s eta 0:00:09 |███████████████████ | 19.0 MB 1.7 MB/s eta 0:00:08 |███████████████████████▉ | 23.9 MB 3.2 MB/s eta 0:00:03 |████████████████████████ | 24.0 MB 3.2 MB/s eta 0:00:03\n", + "\u001b[?25hBuilding wheels for collected packages: sklearn\n", + " Building wheel for sklearn (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h Created wheel for sklearn: filename=sklearn-0.0-py2.py3-none-any.whl size=1316 sha256=b559f5573299fb814c815e87f877c47310696271371da7e797325b440c8734fa\n", + " Stored in directory: /Users/edwardsanchez/Library/Caches/pip/wheels/e4/7b/98/b6466d71b8d738a0c547008b9eb39bf8676d1ff6ca4b22af1c\n", + "Successfully built sklearn\n", + "Installing collected packages: threadpoolctl, scipy, joblib, scikit-learn, sklearn\n", + "Successfully installed joblib-1.0.1 scikit-learn-0.24.2 scipy-1.7.0 sklearn-0.0 threadpoolctl-2.1.0\n", + "\u001b[33mWARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.\n", + "You should consider upgrading via the '/Library/Frameworks/Python.framework/Versions/3.9/bin/python3.9 -m pip install --upgrade pip' command.\u001b[0m\n" + ] + }, + { + "ename": "ImportError", + "evalue": "cannot import name 'stop_words' from 'sklearn.feature_extraction' (/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/sklearn/feature_extraction/__init__.py)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msystem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'pip3 install sklearn'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m 
\u001b[0msklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeature_extraction\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mstop_words\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstop_words\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mENGLISH_STOP_WORDS\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'stop_words' from 'sklearn.feature_extraction' (/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/sklearn/feature_extraction/__init__.py)" + ] + } + ], + "source": [ + "!pip3 install sklearn\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "frozenset({'a', 'hence', 'nothing', 'anyone', 'from', 'each', 'their', 'how', 'down', 'enough', 'ours', 'anyhow', 'mill', 'about', 'all', 'sometime', 'anything', 'noone', 'toward', 'may', 'hereby', 'fill', 'see', 'part', 'less', 'or', 'whatever', 'and', 'twelve', 'amount', 'whereas', 'you', 'etc', 'therefore', 'still', 'above', 'often', 'whither', 'during', 'whoever', 'could', 'only', 'myself', 'by', 'indeed', 'thick', 'detail', 'these', 'ourselves', 'system', 'between', 'when', 'interest', 'empty', 'should', 'though', 'became', 'someone', 'but', 'thereafter', 'seeming', 'towards', 'per', 'de', 'itself', 'amoungst', 'eight', 'nowhere', 'upon', 're', 'everything', 'cannot', 'in', 'nine', 'hasnt', 'we', 'without', 'back', 'go', 'least', 'serious', 'while', 'eleven', 'our', 'be', 'which', 'beside', 'cant', 'neither', 'am', 'at', 'ever', 'among', 'show', 'inc', 'sometimes', 'whole', 'although', 'nobody', 'already', 'top', 'she', 'give', 'much', 'move', 'everywhere', 'besides', 'no', 'thru', 'before', 'below', 'con', 'somewhere', 'made', 'throughout', 'us', 'out', 'yet', 'never', 'the', 'of', 'are', 'who', 
'yourselves', 'them', 'hereupon', 'ltd', 'might', 'mostly', 'none', 'whom', 'very', 'with', 'therein', 'sixty', 'whereupon', 'un', 'moreover', 'full', 'alone', 'latterly', 'again', 'whereafter', 'describe', 'front', 'afterwards', 'bill', 'over', 'do', 'behind', 'co', 'anyway', 'also', 'always', 'fire', 'until', 'every', 'call', 'they', 'too', 'here', 'on', 'so', 'will', 'others', 'please', 'nor', 'almost', 'everyone', 'for', 'then', 'his', 'is', 'ie', 'found', 'if', 'few', 'through', 'forty', 'whence', 'couldnt', 'what', 'namely', 'otherwise', 'have', 'third', 'seemed', 'other', 'whether', 'wherein', 'being', 'own', 'fifteen', 'sincere', 'three', 'elsewhere', 'seem', 'thereupon', 'thereby', 'four', 'had', 'me', 'well', 'under', 'eg', 'wherever', 'becomes', 'side', 'it', 'take', 'because', 'seems', 'thus', 'find', 'yours', 'as', 'against', 'some', 'been', 'put', 'name', 'that', 'last', 'becoming', 'via', 'one', 'whose', 'more', 'hundred', 'somehow', 'thin', 'cry', 'herself', 'would', 'themselves', 'since', 'my', 'meanwhile', 'where', 'done', 'hereafter', 'next', 'up', 'such', 'formerly', 'beyond', 'something', 'must', 'mine', 'her', 'get', 'whereby', 'was', 'around', 'than', 'many', 'onto', 'any', 'yourself', 'has', 'this', 'can', 'however', 'even', 'its', 'both', 'same', 'whenever', 'keep', 'either', 'former', 'except', 'several', 'six', 'anywhere', 'after', 'now', 'fifty', 'off', 'there', 'across', 'why', 'hers', 'bottom', 'thence', 'five', 'to', 'further', 'most', 'another', 'an', 'once', 'two', 'those', 'twenty', 'first', 'together', 'else', 'perhaps', 'become', 'rather', 'were', 'ten', 'into', 'beforehand', 'nevertheless', 'i', 'he', 'due', 'himself', 'within', 'not', 'amongst', 'herein', 'him', 'latter', 'along', 'your'})\n" + ] + } + ], + "source": [ + "from sklearn.feature_extraction import _stop_words\n", + "print(_stop_words.ENGLISH_STOP_WORDS)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should have seen a large list of 
words that looks like:\n", + "\n", + "```frozenset({'across', 'mine', 'cannot', ...})```\n", + "\n", + "`frozenset` is a type of Python object that is immutable. In this lab you can use it just like an array without conversion." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, test your function with supplying `stop_words.ENGLISH_STOP_WORDS` as the second parameter." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "ename": "KeyError", + "evalue": "0", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mbow\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_bow_from_docs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbow\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_stop_words\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mENGLISH_STOP_WORDS\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbow\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36mget_bow_from_docs\u001b[0;34m(docs, stop_words)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;31m# Write your code here\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdocs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m 
\u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdocs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"r\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0ma_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 18\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mline\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ma_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mstripped_line\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mline\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyError\u001b[0m: 0" + ] + } + ], + "source": [ + "bow = get_bow_from_docs(bow, _stop_words.ENGLISH_STOP_WORDS)\n", + "\n", + "print(bow)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should have seen:\n", + "\n", + "```{'bag_of_words': ['ironhack', 'cool', 'love', 'student'], 'term_freq': [[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1]]}```" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['doc1.txt', 'doc2.txt', 'doc3.txt']\n" + ] + } + ], + "source": [ + "print(docs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git 
a/your-code/.ipynb_checkpoints/Q3-checkpoint.ipynb b/your-code/.ipynb_checkpoints/Q3-checkpoint.ipynb new file mode 100644 index 0000000..36c638f --- /dev/null +++ b/your-code/.ipynb_checkpoints/Q3-checkpoint.ipynb @@ -0,0 +1,207 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Lambda** is a special Python function type that is **anonymous**. By *anonymous* it means a lambda function does not have name. Lambda functions are embedded inside codes so that you don't call them like calling regular Python functions.\n", + "\n", + "**`Map`** applies a function to all the items in an input list. The function that is applied can be a standard or a lambda function.\n", + "\n", + "For instance, below is an example of multiplying number tuples in a list:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[2, 12, 30]" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "items = [(1,2), (3,4), (5,6)]\n", + "\n", + "def multiply(num_tuple):\n", + " return num_tuple[0]*num_tuple[1]\n", + "list(map(multiply, items))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "...is the same as:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[2, 12, 30]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "items = [(1,2), (3,4), (5,6)]\n", + "list(map(lambda item: item[0]*item[1], items))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Why do we sometimes use `lambda` and `map`? 
Because, as you see in the example above, they make your code really concise by combining 3 lines of code to 1 line.\n", + "\n", + "Besides `map`, there is also **`filter`** that selectively returns elements in an array according to whether you return `True`. There is also **`reduce`** that performs computation on a list of items then returns result.\n", + "\n", + "Here is a [good tutorial](http://book.pythontips.com/en/latest/map_filter.html) about `map`, `filter`, and `reduce`. Read it if you are not familiar with how they are used. Then proceed to the next cell." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the next cell, use `filter` and `lambda` to return a list that contains positive numbers only. The output should be:\n", + "\n", + "```[1, 4, 5]```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "numbers = [1, 4, -1, -100, 0, 5, -99]\n", + "\n", + "# Enter your code below" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, use `reduce` and `lambda` to return a string that only contains English terms. The English terms are separated with a whitespace ` `.\n", + "\n", + "Hints: \n", + "\n", + "* If your Jupyter Notebook cannot import `langdetect`, you need to install it with `pip install langdetect`. If Jupyter Notebook still cannot find the library, try install with `python3 -m pip install langdetect`. 
This is because you need to install `langdetect` in the same Python run environment where Jupyter Notebook is running.\n", + "\n", + "* If a word is English, `langdetect.detect(word)=='en'` will return `True`.\n", + "\n", + "Your output should read:\n", + "\n", + "```'good morning everyone'```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import langdetect\n", + "from functools import reduce\n", + "words = ['good morning', '早上好', 'доброго', 'おはようございます', 'everyone', '大家', 'каждый', 'みんな']\n", + "\n", + "# Enter your code below" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Bonus Question" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, if you still have time, convert your response in Q2 by using `lambda`, `map`, `filter`, or `reduce` where applicable. Enter your function in the cell below.\n", + "\n", + "As you write Python functions, generally you don't want to make your function too long. Long functions are difficult to read and difficult to debug. If a function is getting too long, consider breaking it into several shorter functions where the main function calls other functions. If anything goes wrong, you can output debug messages in each of the functions to check where exactly the error is." + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": {}, + "outputs": [], + "source": [ + "# Enter your code below" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test your function below with the Bag of Words lab docs (it's easier for you to debug your code). 
Your output should be:\n", + "\n", + "```{'bag_of_words': ['ironhack', 'cool', 'love', 'student'], 'term_freq': [[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1]]}```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.feature_extraction import stop_words\n", + "bow = get_bow_from_docs([\n", + " '../../lab-bag-of-words/your-code/doc1.txt', \n", + " '../../lab-bag-of-words/your-code/doc2.txt', \n", + " '../../lab-bag-of-words/your-code/doc3.txt'],\n", + " stop_words.ENGLISH_STOP_WORDS\n", + ")\n", + "\n", + "print(bow)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/your-code/Q1.ipynb b/your-code/Q1.ipynb index 8b07d3d..4185922 100644 --- a/your-code/Q1.ipynb +++ b/your-code/Q1.ipynb @@ -19,49 +19,108 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ironhack is cool.\n", + "I love Ironhack.\n", + "I am a student at Ironhack.\n", + "ironhack is cool i love ironhack i am a student at ironhack\n", + "['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']\n", + "[['ironhack', 'is', 'cool'], ['i', 'love', 'ironhack'], ['i', 'am', 'a', 'student', 'at', 'ironhack']]\n", + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n" + ] + }, + { + "data": { + "text/plain": [ + "{'bag_of_words': ['ironhack',\n", + " 'is',\n", + " 
'cool',\n", + " 'i',\n", + " 'love',\n", + " 'am',\n", + " 'a',\n", + " 'student',\n", + " 'at'],\n", + " 'term_freq': [[1, 1, 1, 0, 0, 0, 0, 0, 0],\n", + " [1, 0, 0, 1, 1, 0, 0, 0, 0],\n", + " [1, 0, 0, 1, 0, 1, 1, 1, 1]]}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Import required libraries\n", + "import re\n", "\n", "# Define function\n", "def get_bow_from_docs(docs, stop_words=[]):\n", - " \n", - " # In the function, first define the variables you will use such as `corpus`, `bag_of_words`, and `term_freq`.\n", - " \n", - " \n", - " \n", - " \"\"\"\n", - " Loop `docs` and read the content of each doc into a string in `corpus`.\n", - " Remember to convert the doc content to lowercases and remove punctuation.\n", - " \"\"\"\n", "\n", - " \n", - " \n", - " \"\"\"\n", - " Loop `corpus`. Append the terms in each doc into the `bag_of_words` array. The terms in `bag_of_words` \n", - " should be unique which means before adding each term you need to check if it's already added to the array.\n", - " In addition, check if each term is in the `stop_words` array. Only append the term to `bag_of_words`\n", - " if it is not a stop word.\n", - " \"\"\"\n", + " # In the function, first define the variables you will use such as `corpus`, `bag_of_words`, and `term_freq`.\n", "\n", - " \n", - " \n", - " \n", - " \"\"\"\n", - " Loop `corpus` again. For each doc string, count the number of occurrences of each term in `bag_of_words`. 
\n", - " Create an array for each doc's term frequency and append it to `term_freq`.\n", - " \"\"\"\n", "\n", - " \n", - " \n", - " # Now return your output as an object\n", + "\n", + "\n", + " corpus = []\n", + "\n", + "\n", + " # Write your code here\n", + " for i in range(len(docs)):\n", + " with open(docs[i], \"r\") as a_file:\n", + " for line in a_file:\n", + " stripped_line = line.strip()\n", + " corpus.append(stripped_line)\n", + " print(stripped_line)\n", + "\n", + " corpus = [x.lower() for x in corpus]\n", + " corpus = re.sub('\\.', '', ' '.join(corpus))\n", + " #corpus = [re.sub(r\"\\.\",\"\",corpus[j]) for j in range(len(corpus))]\n", + " print(corpus)\n", + "\n", + "\n", + " corpus = corpus.split()\n", + " bag_of_words = []\n", + " for i in corpus:\n", + " if i not in bag_of_words:\n", + " bag_of_words.append(i)\n", + " else:\n", + " continue\n", + "\n", + " print(bag_of_words)\n", + "\n", + " corpus = ['ironhack is cool','i love ironhack','i am a student at ironhack']\n", + " # Write your code here\n", + " pieces = []\n", + " term_freq = []\n", + " for x in corpus:\n", + " pieces.append(x.split(\" \"))\n", + " print(pieces)\n", + "\n", + " for s in pieces:\n", + " temp = []\n", + " for b in bag_of_words:\n", + " y = s.count(b)\n", + " temp.append(y)\n", + " term_freq.append(temp)\n", + "\n", + " print(term_freq)\n", + "\n", + " # Now return your output as an object\n", " return {\n", " \"bag_of_words\": bag_of_words,\n", " \"term_freq\": term_freq\n", " }\n", - " " + " \n", + "docs = ['doc1.txt', 'doc2.txt', 'doc3.txt']\n", + "get_bow_from_docs(docs)\n" ] }, { @@ -75,12 +134,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ironhack is cool.\n", + "I love Ironhack.\n", + "I am a student at Ironhack.\n", + "ironhack is cool i love ironhack i am a student at ironhack\n", + "['ironhack', 'is', 'cool', 'i', 
'love', 'am', 'a', 'student', 'at']\n", + "[['ironhack', 'is', 'cool'], ['i', 'love', 'ironhack'], ['i', 'am', 'a', 'student', 'at', 'ironhack']]\n", + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n", + "{'bag_of_words': ['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at'], 'term_freq': [[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]}\n" + ] + } + ], "source": [ "# Define doc paths array\n", - "docs = []\n", + "docs = ['doc1.txt', 'doc2.txt', 'doc3.txt']\n", "\n", "# Obtain BoW from your function\n", "bow = get_bow_from_docs(docs)\n", @@ -103,9 +177,74 @@ "execution_count": null, "metadata": {}, "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting sklearn\n", + " Downloading sklearn-0.0.tar.gz (1.1 kB)\n", + "Collecting scikit-learn\n", + " Downloading scikit_learn-0.24.2-cp39-cp39-macosx_10_13_x86_64.whl (7.3 MB)\n", + "\u001b[K |████████████████████████████████| 7.3 MB 1.4 MB/s eta 0:00:01 |▎ | 71 kB 388 kB/s eta 0:00:19 |█████ | 1.2 MB 540 kB/s eta 0:00:12\n", + "\u001b[?25hCollecting threadpoolctl>=2.0.0\n", + " Downloading threadpoolctl-2.1.0-py3-none-any.whl (12 kB)\n", + "Collecting joblib>=0.11\n", + " Downloading joblib-1.0.1-py3-none-any.whl (303 kB)\n", + "\u001b[K |████████████████████████████████| 303 kB 18.5 MB/s eta 0:00:01\n", + "\u001b[?25hRequirement already satisfied: numpy>=1.13.3 in /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.20.3)\n", + "Collecting scipy>=0.19.1\n", + " Downloading scipy-1.7.0-cp39-cp39-macosx_10_9_x86_64.whl (32.1 MB)\n", + "\u001b[K |████████████████████████████████| 32.1 MB 34 kB/s eta 0:00:01 |████ | 3.9 MB 20.8 MB/s eta 0:00:02 |█████▋ | 5.7 MB 2.1 MB/s eta 0:00:13 |███████▍ | 7.4 MB 2.1 MB/s eta 0:00:12 |███████▊ | 
7.7 MB 2.1 MB/s eta 0:00:12 | 8.7 MB 2.1 MB/s eta 0:00:12 |██████████▏ | 10.2 MB 2.7 MB/s eta 0:00:09 |███████████ | 10.9 MB 2.7 MB/s eta 0:00:08��███▌ | 11.5 MB 2.7 MB/s eta 0:00:08 |██████████████████ | 18.2 MB 1.7 MB/s eta 0:00:09 |███████████████████ | 19.0 MB 1.7 MB/s eta 0:00:08 |███████████████████████▉ | 23.9 MB 3.2 MB/s eta 0:00:03 |████████████████████████ | 24.0 MB 3.2 MB/s eta 0:00:03\n", + "\u001b[?25hBuilding wheels for collected packages: sklearn\n", + " Building wheel for sklearn (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h Created wheel for sklearn: filename=sklearn-0.0-py2.py3-none-any.whl size=1316 sha256=b559f5573299fb814c815e87f877c47310696271371da7e797325b440c8734fa\n", + " Stored in directory: /Users/edwardsanchez/Library/Caches/pip/wheels/e4/7b/98/b6466d71b8d738a0c547008b9eb39bf8676d1ff6ca4b22af1c\n", + "Successfully built sklearn\n", + "Installing collected packages: threadpoolctl, scipy, joblib, scikit-learn, sklearn\n", + "Successfully installed joblib-1.0.1 scikit-learn-0.24.2 scipy-1.7.0 sklearn-0.0 threadpoolctl-2.1.0\n", + "\u001b[33mWARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.\n", + "You should consider upgrading via the '/Library/Frameworks/Python.framework/Versions/3.9/bin/python3.9 -m pip install --upgrade pip' command.\u001b[0m\n" + ] + }, + { + "ename": "ImportError", + "evalue": "cannot import name 'stop_words' from 'sklearn.feature_extraction' (/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/sklearn/feature_extraction/__init__.py)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m 
\u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msystem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'pip3 install sklearn'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0msklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeature_extraction\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mstop_words\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstop_words\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mENGLISH_STOP_WORDS\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'stop_words' from 'sklearn.feature_extraction' (/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/sklearn/feature_extraction/__init__.py)" + ] + } + ], "source": [ - "from sklearn.feature_extraction import stop_words\n", - "print(stop_words.ENGLISH_STOP_WORDS)" + "!pip3 install sklearn\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "frozenset({'a', 'hence', 'nothing', 'anyone', 'from', 'each', 'their', 'how', 'down', 'enough', 'ours', 'anyhow', 'mill', 'about', 'all', 'sometime', 'anything', 'noone', 'toward', 'may', 'hereby', 'fill', 'see', 'part', 'less', 'or', 'whatever', 'and', 'twelve', 'amount', 'whereas', 'you', 'etc', 'therefore', 'still', 'above', 'often', 'whither', 'during', 'whoever', 'could', 'only', 'myself', 'by', 'indeed', 'thick', 'detail', 'these', 'ourselves', 'system', 'between', 'when', 'interest', 'empty', 'should', 'though', 'became', 'someone', 'but', 'thereafter', 'seeming', 'towards', 'per', 'de', 'itself', 'amoungst', 'eight', 'nowhere', 'upon', 're', 'everything', 'cannot', 'in', 'nine', 'hasnt', 'we', 
'without', 'back', 'go', 'least', 'serious', 'while', 'eleven', 'our', 'be', 'which', 'beside', 'cant', 'neither', 'am', 'at', 'ever', 'among', 'show', 'inc', 'sometimes', 'whole', 'although', 'nobody', 'already', 'top', 'she', 'give', 'much', 'move', 'everywhere', 'besides', 'no', 'thru', 'before', 'below', 'con', 'somewhere', 'made', 'throughout', 'us', 'out', 'yet', 'never', 'the', 'of', 'are', 'who', 'yourselves', 'them', 'hereupon', 'ltd', 'might', 'mostly', 'none', 'whom', 'very', 'with', 'therein', 'sixty', 'whereupon', 'un', 'moreover', 'full', 'alone', 'latterly', 'again', 'whereafter', 'describe', 'front', 'afterwards', 'bill', 'over', 'do', 'behind', 'co', 'anyway', 'also', 'always', 'fire', 'until', 'every', 'call', 'they', 'too', 'here', 'on', 'so', 'will', 'others', 'please', 'nor', 'almost', 'everyone', 'for', 'then', 'his', 'is', 'ie', 'found', 'if', 'few', 'through', 'forty', 'whence', 'couldnt', 'what', 'namely', 'otherwise', 'have', 'third', 'seemed', 'other', 'whether', 'wherein', 'being', 'own', 'fifteen', 'sincere', 'three', 'elsewhere', 'seem', 'thereupon', 'thereby', 'four', 'had', 'me', 'well', 'under', 'eg', 'wherever', 'becomes', 'side', 'it', 'take', 'because', 'seems', 'thus', 'find', 'yours', 'as', 'against', 'some', 'been', 'put', 'name', 'that', 'last', 'becoming', 'via', 'one', 'whose', 'more', 'hundred', 'somehow', 'thin', 'cry', 'herself', 'would', 'themselves', 'since', 'my', 'meanwhile', 'where', 'done', 'hereafter', 'next', 'up', 'such', 'formerly', 'beyond', 'something', 'must', 'mine', 'her', 'get', 'whereby', 'was', 'around', 'than', 'many', 'onto', 'any', 'yourself', 'has', 'this', 'can', 'however', 'even', 'its', 'both', 'same', 'whenever', 'keep', 'either', 'former', 'except', 'several', 'six', 'anywhere', 'after', 'now', 'fifty', 'off', 'there', 'across', 'why', 'hers', 'bottom', 'thence', 'five', 'to', 'further', 'most', 'another', 'an', 'once', 'two', 'those', 'twenty', 'first', 'together', 'else', 'perhaps', 'become', 
'rather', 'were', 'ten', 'into', 'beforehand', 'nevertheless', 'i', 'he', 'due', 'himself', 'within', 'not', 'amongst', 'herein', 'him', 'latter', 'along', 'your'})\n" + ] + } + ], + "source": [ + "from sklearn.feature_extraction import _stop_words\n", + "print(_stop_words.ENGLISH_STOP_WORDS)" ] }, { @@ -128,11 +267,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "KeyError", + "evalue": "0", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mbow\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_bow_from_docs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbow\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_stop_words\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mENGLISH_STOP_WORDS\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbow\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36mget_bow_from_docs\u001b[0;34m(docs, stop_words)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;31m# Write your code here\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdocs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m \u001b[0;32mwith\u001b[0m 
\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdocs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"r\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0ma_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 18\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mline\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ma_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mstripped_line\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mline\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyError\u001b[0m: 0" + ] + } + ], "source": [ - "bow = get_bow_from_docs(bow, stop_words.ENGLISH_STOP_WORDS)\n", + "bow = get_bow_from_docs(bow, _stop_words.ENGLISH_STOP_WORDS)\n", "\n", "print(bow)" ] @@ -146,6 +298,23 @@ "```{'bag_of_words': ['ironhack', 'cool', 'love', 'student'], 'term_freq': [[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1]]}```" ] }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['doc1.txt', 'doc2.txt', 'doc3.txt']\n" + ] + } + ], + "source": [ + "print(docs)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -170,7 +339,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.9.5" } }, "nbformat": 4, diff --git a/your-code/Q3.ipynb b/your-code/Q3.ipynb index 75055ac..36c638f 100644 --- a/your-code/Q3.ipynb +++ b/your-code/Q3.ipynb @@ -13,7 +13,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -22,7 +22,7 @@ "[2, 12, 30]" ] }, - "execution_count": 11, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -44,7 
+44,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -53,7 +53,7 @@ "[2, 12, 30]" ] }, - "execution_count": 10, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -199,7 +199,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.9.5" } }, "nbformat": 4, diff --git a/your-code/doc1.txt b/your-code/doc1.txt new file mode 100644 index 0000000..e66288d --- /dev/null +++ b/your-code/doc1.txt @@ -0,0 +1 @@ +Ironhack is cool. \ No newline at end of file diff --git a/your-code/doc2.txt b/your-code/doc2.txt new file mode 100644 index 0000000..b21feac --- /dev/null +++ b/your-code/doc2.txt @@ -0,0 +1 @@ +I love Ironhack. \ No newline at end of file diff --git a/your-code/doc3.txt b/your-code/doc3.txt new file mode 100644 index 0000000..653c5b7 --- /dev/null +++ b/your-code/doc3.txt @@ -0,0 +1 @@ +I am a student at Ironhack. \ No newline at end of file