# Challenge 1 -- basic string manipulation (reconstructed from the notebook diff).

from collections import Counter


def join_words(words):
    """Join *words* into a single space-separated sentence ending with a period.

    >>> join_words(['hola', 'mundo'])
    'hola mundo.'
    """
    return " ".join(words) + "."


def grocery_list(foods):
    """Build a ``'Grocery list: ...'`` string from the foods starting with 'b'.

    Matching is case-insensitive; kept items are lower-cased, joined with
    ', ', and the list is terminated by a period (as in the notebook cell).
    """
    wanted = [food.lower() for food in foods if food.lower().startswith("b")]
    return "Grocery list: " + ", ".join(wanted) + "."


def letter_frequencies(text):
    """Count every non-space character of *text*.

    Mirrors the original notebook cell: the text is lower-cased, commas and
    periods are removed, and newlines become spaces.  Uses Counter in one
    O(n) pass instead of the original dict comprehension that called
    ``text.count(ch)`` for every character (quadratic); the dead
    '''word_dict.pop(" ")''' string statement is removed.
    """
    cleaned = text.lower().replace(",", "").replace("\n", " ").replace(".", "")
    counts = Counter(cleaned)
    counts.pop(" ", None)  # spaces are separators, not letters
    return dict(counts)
# Challenge 1 (cont.) -- set difference and regex filters, plus the
# challenge-2 corpus loader (reconstructed from the notebook diff).

import re


def unique_words(poem, blacklist):
    """Return the set of words of *poem* that do not appear in *blacklist*.

    The text is lower-cased and the punctuation marks , . ; : are replaced
    by spaces before splitting.  Fixes the original cell, which deleted
    ';' and ':' outright and could therefore fuse two adjacent words; also
    fixes the 'usnig' comment typo.
    """
    normalized = " ".join(poem.lower().split())
    for mark in ",.;:":
        normalized = normalized.replace(mark, " ")
    # set difference = words in the poem but not in the blacklist
    return set(normalized.split()) - set(blacklist)


def capital_letters(text):
    """Return every ASCII uppercase letter of *text*, in order of appearance."""
    return re.findall(r"[A-Z]", text)


def with_digits(strings):
    """Keep only the strings that contain at least one digit.

    Uses ``re.search`` with a raw-string pattern: the original
    ``re.findall("\\d", ...)`` built a throwaway match list just to test
    truthiness, and the non-raw "\\d" escape is deprecated (emits a
    DeprecationWarning on the notebook's Python 3.9).
    """
    return [s for s in strings if re.search(r"\d", s)]


def with_digits_and_lowercase(strings):
    """Keep the strings containing at least one digit AND one lowercase letter."""
    return [s for s in strings
            if re.search(r"\d", s) and re.search(r"[a-z]", s)]


def read_corpus(paths):
    """Read each file in *paths* and return their contents as a list of strings."""
    corpus = []
    for path in paths:
        with open(path) as doc:  # context manager guarantees the handle is closed
            corpus.append(doc.read())
    return corpus
# Challenge 2 -- hand-rolled bag-of-words model (reconstructed from the
# notebook diff), plus an optional scikit-learn cross-check.

from collections import Counter


def normalize_corpus(corpus):
    """Lower-case every document and strip periods (the notebook's cleanup step)."""
    return [doc.lower().replace(".", "") for doc in corpus]


def build_bag_of_words(corpus, stop_words=None):
    """Return the ordered list of distinct words found in *corpus*.

    Words in *stop_words* are skipped.  The optional parameter merges the
    notebook's two nearly identical loops (with / without a stop-word list)
    into one function, and a ``seen`` set makes the membership check O(1)
    instead of the original O(len(bag)) list scan.
    """
    stop = set(stop_words) if stop_words is not None else set()
    bag, seen = [], set()
    for sentence in corpus:
        for word in sentence.split():
            if word not in seen and word not in stop:
                seen.add(word)
                bag.append(word)
    return bag


def term_freq_matrix(corpus, bag_of_words):
    """Term-frequency matrix: one row per document, one column per bag word.

    Splits each sentence once and counts with ``Counter`` instead of the
    original ``sentence.split().count(word)`` call per (document, word)
    pair; the pointless ``term_frequency = corpus`` alias is dropped.
    """
    rows = []
    for sentence in corpus:
        counts = Counter(sentence.split())
        rows.append([counts[word] for word in bag_of_words])
    return rows


def sklearn_bag_of_words(corpus):
    """Cross-check the model with scikit-learn's ``CountVectorizer``.

    Returns ``(vocabulary_dict, count_matrix_as_ndarray)``.  The third-party
    import is kept local so the rest of the module works without sklearn
    installed.
    """
    from sklearn.feature_extraction.text import CountVectorizer
    vectorizer = CountVectorizer()
    matrix = vectorizer.fit_transform(corpus)
    return vectorizer.vocabulary_, matrix.toarray()