diff --git a/your-code/.ipynb_checkpoints/Learning-checkpoint.ipynb b/your-code/.ipynb_checkpoints/Learning-checkpoint.ipynb new file mode 100644 index 0000000..0630b55 --- /dev/null +++ b/your-code/.ipynb_checkpoints/Learning-checkpoint.ipynb @@ -0,0 +1,697 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# String Operations\n", + "\n", + "\n", + "Lesson Goals\n", + "\n", + " Gain an understanding of Python string operations.\n", + " Learn how to subset and split strings.\n", + " Understand how to leverage boolean methods for string operations.\n", + " Learn how to manipulate string cases with Python.\n", + " Learn how to strip white spaces and replace strings with other strings.\n", + " Learn some basic regular expressions and how to apply them.\n", + "\n", + "Introduction\n", + "\n", + "As a data analyst, you will find yourself wrangling with text strings regularly. Categorical variables, documents, and other text-based data often come inconsistently structured. Because of this, it is helpful to know about different methods for transforming, cleaning, and extracting text. Python comes with several tools for performing string operations. In this lesson, we will learn about how to use these tools to work with strings.\n", + "Python String Operations\n", + "\n", + "Thus far in this program, you have seen a few examples here and there that involve string operations in the context of other topics we have covered. In this section, we will cover string operations more comprehensively so that you have a solid understanding of how to use them.\n", + "\n", + "Recall from your Python prework that the + operator concatenates two strings together and that the * operator repeats a string a given number of times. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HelloWorld\n" + ] + } + ], + "source": [ + "print('Hello' + 'World')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HelloHelloHelloHelloHelloHelloHelloHello\n" + ] + } + ], + "source": [ + "print('Hello' * 8)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Recall that you can also join strings in a list together using a designated separator with the join method. " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Happy Puppies'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "x = 'Happy'\n", + "y = 'Puppies'\n", + "z = [x,y]\n", + "\n", + "' '.join(z)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We also covered how to get the length of strings and how to subset them via indexing." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "10" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len('automobile')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "a\n", + "o\n", + "e\n" + ] + } + ], + "source": [ + "word = 'automobile'\n", + "\n", + "print(word[0])\n", + "print(word[5])\n", + "print(word[-1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use the split method to turn strings into lists based on a separator that we designate (spaces if left empty)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['They', 'ate', 'the', 'mystery', 'meat.', 'It', 'tasted', 'like', 'chicken.']\n", + "['They ate the mystery meat', ' It tasted like chicken', '']\n", + "['They ate the ', 'ystery ', 'eat. It tasted like chicken.']\n" + ] + } + ], + "source": [ + "a = 'They ate the mystery meat. It tasted like chicken.'\n", + "\n", + "print(a.split())\n", + "print(a.split('.'))\n", + "print(a.split('m'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also use boolean methods such as startswith, endswith, and in to check if strings start with, end with, or contain certain characters or other strings. " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "False\n" + ] + } + ], + "source": [ + "b = 'There is no business like show business.'\n", + "\n", + "print(b.startswith('T'))\n", + "print(b.startswith('There'))\n", + "print(b.startswith('there'))" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "False\n" + ] + } + ], + "source": [ + "print(b.endswith('.'))\n", + "print(b.endswith('business.'))\n", + "print(b.endswith('Business.'))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "False\n" + ] + } + ], + "source": [ + "print('like' in b)\n", + "print('business' in b)\n", + "print('Business' in b)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note from the examples above that these are case sensitive. 
Speaking of cases, Python provides us with several useful ways to change the cases of strings." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "she had a marvelous assortment of puppets.\n", + "SHE HAD A MARVELOUS ASSORTMENT OF PUPPETS.\n", + "She had a marvelous assortment of puppets.\n", + "She Had A Marvelous Assortment Of Puppets.\n" + ] + } + ], + "source": [ + "c = 'shE HaD a maRveLoUs aSsoRtmeNt of PUPPETS.'\n", + "\n", + "print(c.lower())\n", + "print(c.upper())\n", + "print(c.capitalize())\n", + "print(c.title())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also remove any white space from the beginning and end of a string using the strip method. If we want to remove white space from just the beginning, we would use lstrip. If we wanted to remove white space from just the end, we would use rstrip. " + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I have a tendency to leave trailing spaces.\n", + "I have a tendency to leave trailing spaces. \n", + " I have a tendency to leave trailing spaces.\n" + ] + } + ], + "source": [ + "d = ' I have a tendency to leave trailing spaces. '\n", + "\n", + "print(d.strip())\n", + "print(d.lstrip())\n", + "print(d.rstrip())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Another useful string operation, which we saw briefly in the data wrangling lessons, is using the replace method which replaces one string with another. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I thought the movie was horrible!\n", + "I thought the movie was just OK!\n" + ] + } + ], + "source": [ + "e = 'I thought the movie was wonderful!'\n", + "\n", + "print(e.replace('wonderful', 'horrible'))\n", + "print(e.replace('wonderful', 'just OK'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Regular Expressions\n", + "\n", + "Python's string operation methods can take us a long way, but we will inevitably encounter a situation where we need to rely on some additional tools called regular expressions. Regular expressions allow us to perform different types of pattern matching on text in order to arrive at the result we want.\n", + "\n", + "In order to use regular expressions, we will import the re library. " + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "import re" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some of the most useful methods in the re library are:\n", + "\n", + " search: Returns the first instance of an expression in a string.\n", + " findall: Finds all instances of an expression in a string and returns them as a list.\n", + " split: Splits a string based on a specified delimiter.\n", + " sub: Substitutes a string/substring with another.\n", + "\n", + "Regular expressions consist of sequences that represent certain types of characters that can appear in strings. We can use the findall method to return all characters in a string that match a series of characters as follows. " + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['neigh']\n" + ] + } + ], + "source": [ + "text = 'My neighbor, Mr. 
Rogers, has 5 dogs.'\n", + "print(re.findall('neigh', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we want to return all the characters that match within the text, we can turn the series of characters in the pattern into a set by enclosing them in square brackets([]). " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['n', 'g', 'b', 'o', 'r', 'r', 'o', 'g', 'r', 's', 's', 'o', 'g', 's']\n" + ] + } + ], + "source": [ + "print(re.findall('[mngbors]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This found any character we explicitly designated in our regular expression and returned them as a list. Note that the capital M's were not returned since regular expressions are case sensitive.\n", + "\n", + "Regular expressions also have predefined sets that we can use as shortcuts so that, for example, we don't have to type out every letter in the alphabet or every number in order to match them. Below are some of the most useful regular expression sets.\n", + "\n", + " [a-z]: Any lowercase letter between a and z.\n", + " [A-Z]: Any uppercase letter between A and Z.\n", + " [0-9]: Any numeric character between 0 and 9.\n", + "\n", + "See http://regex101.com/" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['y', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', 'r', 'o', 'g', 'e', 'r', 's', 'h', 'a', 's', 'd', 'o', 'g', 's']\n" + ] + } + ], + "source": [ + "print(re.findall('[a-z]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that this set returned all lower case letters and excluded the capital M's and R's, the number 5, and all the punctuation marks. 
We can add the ^ character inside the square brackets to return everything that doesn't match the sequence we have designated. " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', ' ', ',', ' ', 'M', '.', ' ', 'R', ',', ' ', ' ', '5', ' ', '.']\n" + ] + } + ], + "source": [ + "print(re.findall('[^a-z]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this case, it returned the capital letters, the number, and all punctuation and white spaces.\n", + "\n", + "What if we wanted to extract both upper and lower case letters from our string? We can just add A-Z inside our square brackets. " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', 'y', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', 'M', 'r', 'R', 'o', 'g', 'e', 'r', 's', 'h', 'a', 's', 'd', 'o', 'g', 's']\n" + ] + } + ], + "source": [ + "print(re.findall('[a-zA-Z]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And if we wanted to also extract spaces, we can add a space." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', 'y', ' ', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', ' ', 'M', 'r', ' ', 'R', 'o', 'g', 'e', 'r', 's', ' ', 'h', 'a', 's', ' ', ' ', 'd', 'o', 'g', 's']\n" + ] + } + ], + "source": [ + "print(re.findall('[a-zA-Z ]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once we get to a point where we are adding multiple things to our regular expression, we will want to leverage additional shortcuts called character classes (also known as special sequences). 
Below are some of the most useful ones and what they match.\n", + "\n", + " \\w: Any alphanumeric character.\n", + " \\W: Any non-alphanumeric character.\n", + " \\d: Any numeric character.\n", + " \\D: Any non-numeric character.\n", + " \\s: Any whitespace characters.\n", + " \\S: Any non-whitespace characters.\n", + " .: Any character except newline (\\n).\n", + "\n", + "Let's take a look at how some of these work." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['5']\n" + ] + } + ], + "source": [ + "print(re.findall('[\\d]', text))" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', 'y', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', 'M', 'r', 'R', 'o', 'g', 'e', 'r', 's', 'h', 'a', 's', '5', 'd', 'o', 'g', 's']\n" + ] + } + ], + "source": [ + "print(re.findall('[\\w]', text))" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', 'y', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', ',', 'M', 'r', '.', 'R', 'o', 'g', 'e', 'r', 's', ',', 'h', 'a', 's', '5', 'd', 'o', 'g', 's', '.']\n" + ] + } + ], + "source": [ + "print(re.findall('[\\S]', text))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['M', 'y', ' ', 'n', 'e', 'i', 'g', 'h', 'b', 'o', 'r', ',', ' ', 'M', 'r', '.', ' ', 'R', 'o', 'g', 'e', 'r', 's', ',', ' ', 'h', 'a', 's', ' ', '5', ' ', 'd', 'o', 'g', 's', '.']\n" + ] + } + ], + "source": [ + "print(re.findall('.', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use the split method to split a string on specific characters, such as commas or any numeric values. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['My neighbor', 'Mr. Rogers', 'has 5 dogs.']\n" + ] + } + ], + "source": [ + "print(re.split(', ', text))" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['My neighbor, Mr. Rogers, has ', ' dogs.']\n" + ] + } + ], + "source": [ + "print(re.split('[0-9]', text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's also take a look at how we can use the sub method to substitute out how many dogs our neighbor has. " + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "My neighbor, Mr. Rogers, has 100 dogs.\n" + ] + } + ], + "source": [ + "print(re.sub('[0-9]', '100', text))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/your-code/.ipynb_checkpoints/challenge-1-checkpoint.ipynb b/your-code/.ipynb_checkpoints/challenge-1-checkpoint.ipynb new file mode 100644 index 0000000..58550f0 --- /dev/null +++ b/your-code/.ipynb_checkpoints/challenge-1-checkpoint.ipynb @@ -0,0 +1,453 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# String Operations Lab\n", + "\n", + "**Before your start:**\n", + "\n", + "- Read the README.md file\n", + 
"- Comment as much as you can and use the resources in the README.md file\n", + "- Happy learning!" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import re" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Challenge 1 - Combining Strings\n", + "\n", + "Combining strings is an important skill to acquire. There are multiple ways of combining strings in Python, as well as combining strings with variables. We will explore this in the first challenge. In the cell below, combine the strings in the list and add spaces between the strings (do not add a space after the last string). Insert a period after the last string." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Durante un tiempo no estuvo segura de si su marido era su marido.'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "str_list = ['Durante', 'un', 'tiempo', 'no', 'estuvo', 'segura', 'de', 'si', 'su', 'marido', 'era', 'su', 'marido']\n", + "# Your code here:\n", + "' '.join(str_list) + '.'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the cell below, use the list of strings to create a grocery list. Start the list with the string `Grocery list: ` and include a comma and a space between each item except for the last one. Include a period at the end. Only include foods in the list that start with the letter 'b' and ensure all foods are lower case." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Grocery list: Bananas, bread, Brownie Mix, broccoli.'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "food_list = ['Bananas', 'Chocolate', 'bread', 'diapers', 'Ice Cream', 'Brownie Mix', 'broccoli']\n", + "# Your code here:\n", + "\n", + "food_with_b = list()\n", + "\n", + "for food in food_list:\n", + " if food.startswith('b') or food.startswith('B'):\n", + " food_with_b.append(food)\n", + "\n", + "'Grocery list: ' + ', '.join(food_with_b) + '.'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the cell below, write a function that computes the area of a circle using its radius. Compute the area of the circle and insert the radius and the area between the two strings. Make sure to include spaces between the variable and the strings. \n", + "\n", + "Note: You can use the techniques we have learned so far or use f-strings. F-strings allow us to embed code inside strings. You can read more about f-strings [here](https://www.python.org/dev/peps/pep-0498/)." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'The area of the circle with radius: 5 is: 78.53981633974483'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import math\n", + "\n", + "string1 = \"The area of the circle with radius:\"\n", + "string2 = \"is:\"\n", + "radius = 4.5\n", + "\n", + "def area(x, pi = math.pi):\n", + " # This function takes a radius and returns the area of a circle. 
We also pass a default value for pi.\n", + " # Input: Float (and default value for pi)\n", + " # Output: Float\n", + " \n", + " # Sample input: 5.0\n", + " # Sample Output: 78.53981633\n", + " \n", + " # Your code here:\n", + " radius = pi * (x*x)\n", + " return radius\n", + " \n", + "# Your output string here:\n", + "x = 5\n", + "string1 + ' ' + str(x) + ' ' + string2 + ' ' + str(area(x))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Challenge 2 - Splitting Strings\n", + "\n", + "We have first looked at combining strings into one long string. There are times where we need to do the opposite and split the string into smaller components for further analysis. \n", + "\n", + "In the cell below, split the string into a list of strings using the space delimiter. Count the frequency of each word in the string in a dictionary. Strip the periods, line breaks and commas from the text. Make sure to remove empty strings from your dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'Some': 2,\n", + " 'say': 3,\n", + " 'the': 1,\n", + " 'world': 1,\n", + " 'will': 1,\n", + " 'end': 1,\n", + " 'in': 2,\n", + " 'fire': 2,\n", + " 'ice': 2,\n", + " 'From': 1,\n", + " 'what': 1,\n", + " 'I’ve': 1,\n", + " 'tasted': 1,\n", + " 'of': 2,\n", + " 'desire': 1,\n", + " 'I': 3,\n", + " 'hold': 1,\n", + " 'with': 1,\n", + " 'those': 1,\n", + " 'who': 1,\n", + " 'favor': 1,\n", + " 'But': 1,\n", + " 'if': 1,\n", + " 'it': 1,\n", + " 'had': 1,\n", + " 'to': 1,\n", + " 'perish': 1,\n", + " 'twice': 1,\n", + " 'think': 1,\n", + " 'know': 1,\n", + " 'enough': 1,\n", + " 'hate': 1,\n", + " 'To': 1,\n", + " 'that': 1,\n", + " 'for': 1,\n", + " 'destruction': 1,\n", + " 'Is': 1,\n", + " 'also': 1,\n", + " 'great': 1,\n", + " 'And': 1,\n", + " 'would': 1,\n", + " 'suffice': 1}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": 
"execute_result" + } + ], + "source": [ + "poem = \"\"\"Some say the world will end in fire,\n", + "Some say in ice.\n", + "From what I’ve tasted of desire\n", + "I hold with those who favor fire.\n", + "But if it had to perish twice,\n", + "I think I know enough of hate\n", + "To say that for destruction ice\n", + "Is also great\n", + "And would suffice.\"\"\"\n", + "\n", + "# Your code here:\n", + "\n", + "poem = poem.replace(',',' ')\n", + "poem = poem.replace('\\n',' ')\n", + "poem = poem.replace('.',' ')\n", + "\n", + "lista_original = poem.split(' ')\n", + "\n", + "word_freq = dict.fromkeys(poem.split(' '), 0)\n", + "\n", + "for palabra in lista_original:\n", + " word_freq[palabra] = word_freq[palabra] + 1\n", + "del word_freq['']\n", + "(word_freq)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the cell below, find all the words that appear in the text and do not appear in the blacklist. You must parse the string but can choose any data structure you wish for the words that do not appear in the blacklist. Remove all non letter characters and convert all words to lower case." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'i was angry with my friend i told my wrath my wrath did end i was angry with my foe i told it not my wrath did grow and i waterd it in fears night morning with my tears and i sunned it with smiles and with soft deceitful wiles and it grew both day and night till it bore an apple bright and my foe beheld it shine and he knew that it was mine and into my garden stole when the night had veild the pole in the morning glad i see my foe outstretched beneath the tree'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "blacklist = ['and', 'as', 'an', 'a', 'the', 'in', 'it']\n", + "\n", + "poem = \"\"\"I was angry with my friend; \n", + "I told my wrath, my wrath did end.\n", + "I was angry with my foe: \n", + "I told it not, my wrath did grow. \n", + "\n", + "And I waterd it in fears,\n", + "Night & morning with my tears: \n", + "And I sunned it with smiles,\n", + "And with soft deceitful wiles. \n", + "\n", + "And it grew both day and night. \n", + "Till it bore an apple bright. \n", + "And my foe beheld it shine,\n", + "And he knew that it was mine. \n", + "\n", + "And into my garden stole, \n", + "When the night had veild the pole; \n", + "In the morning glad I see; \n", + "My foe outstretched beneath the tree.\"\"\"\n", + "\n", + "# Your code here:\n", + "poem = re.sub(r'[!@#$%^&*.,:;]', \"\", poem)\n", + "poem = re.sub(r'\\n', \" \", poem)\n", + "poem = poem.lower()\n", + "poem" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Challenge 3 - Regular Expressions\n", + "\n", + "Sometimes, we would like to perform more complex manipulations of our string. This is where regular expressions come in handy. In the cell below, return all characters that are upper case from the string specified below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['T', 'P']" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "poem = \"\"\"The apparition of these faces in the crowd;\n", + "Petals on a wet, black bough.\"\"\"\n", + "\n", + "# Your code here:\n", + "re.findall(r'[A-Z]', poem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the cell below, filter the list provided and return all elements of the list containing a number. To filter the list, use the `re.search` function. Check if the function does not return `None`. You can read more about the `re.search` function [here](https://docs.python.org/3/library/re.html)." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'123abc abc123 JohnSmith1 ABBY4 JANE'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data = ['123abc', 'abc123', 'JohnSmith1', 'ABBY4', 'JANE']\n", + "\n", + "# Your code here:\n", + "\n", + "datos = str(' '.join(data))\n", + "datos" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['123', 'abc123', 'JohnSmith1', 'ABBY4']" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "re.findall(r'(?:\\w+)[\\d+]', datos)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Bonus Challenge - Regular Expressions II\n", + "\n", + "In the cell below, filter the list provided to keep only strings containing at least one digit and at least one lower case letter. 
As in the previous question, use the `re.search` function and check that the result is not `None`.\n", + "\n", + "To read more about regular expressions, check out [this link](https://developers.google.com/edu/python/regular-expressions)." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'123abc abc123 JohnSmith1 ABBY4 JANE'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data = ['123abc', 'abc123', 'JohnSmith1', 'ABBY4', 'JANE']\n", + "# Your code here:\n", + "datos = str(' '.join(data))\n", + "datos" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['abc1', 'mith1']\n", + "['123a']\n" + ] + } + ], + "source": [ + "print(re.findall(r'(?:[a-z+])+[\\d+]', datos))\n", + "print(re.findall(r'[\\d+]+(?:[a-z+])', datos))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/your-code/.ipynb_checkpoints/challenge-2-checkpoint.ipynb b/your-code/.ipynb_checkpoints/challenge-2-checkpoint.ipynb new file mode 100644 index 0000000..f3b5e6b --- /dev/null +++ b/your-code/.ipynb_checkpoints/challenge-2-checkpoint.ipynb @@ -0,0 +1,455 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Bag of Words Lab\n", + "\n", + "## Introduction\n", + "\n", + "**Bag of words (BoW)** is an important technique in text mining and [information retrieval](https://en.wikipedia.org/wiki/Information_retrieval). 
BoW uses term-frequency vectors to represent the content of text documents which makes it possible to use mathematics and computer programs to analyze and compare text documents.\n", + "\n", + "BoW contains the following information:\n", + "\n", + "1. A dictionary of all the terms (words) in the text documents. The terms are normalized in terms of the letter case (e.g. `Ironhack` => `ironhack`), tense (e.g. `had` => `have`), singular form (e.g. `students` => `student`), etc.\n", + "1. The number of occurrences of each normalized term in each document.\n", + "\n", + "For example, assume we have three text documents:\n", + "\n", + "DOC 1: **Ironhack is cool.**\n", + "\n", + "DOC 2: **I love Ironhack.**\n", + "\n", + "DOC 3: **I am a student at Ironhack.**\n", + "\n", + "The BoW of the above documents looks like below:\n", + "\n", + "| TERM | DOC 1 | DOC 2 | Doc 3 |\n", + "|---|---|---|---|\n", + "| a | 0 | 0 | 1 |\n", + "| am | 0 | 0 | 1 |\n", + "| at | 0 | 0 | 1 |\n", + "| cool | 1 | 0 | 0 |\n", + "| i | 0 | 1 | 1 |\n", + "| ironhack | 1 | 1 | 1 |\n", + "| is | 1 | 0 | 0 |\n", + "| love | 0 | 1 | 0 |\n", + "| student | 0 | 0 | 1 |\n", + "\n", + "\n", + "The term-frequency array of each document in BoW can be considered a high-dimensional vector. Data scientists use these vectors to represent the content of the documents. For instance, DOC 1 is represented with `[0, 0, 0, 1, 0, 1, 1, 0, 0]`, DOC 2 is represented with `[0, 0, 0, 0, 1, 1, 0, 1, 0]`, and DOC 3 is represented with `[1, 1, 1, 0, 1, 1, 0, 0, 1]`. **Two documents are considered identical if their vector representations have close [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity).**\n", + "\n", + "In real practice there are many additional techniques to improve the text mining accuracy such as using [stop words](https://en.wikipedia.org/wiki/Stop_words) (i.e. neglecting common words such as `a`, `I`, `to` that don't contribute much meaning), synonym list (e.g. 
consider `New York City` the same as `NYC` and `Big Apple`), and HTML tag removal if the data sources are webpages. In Module 3 you will learn how to use those advanced techniques for [natural language processing](https://en.wikipedia.org/wiki/Natural_language_processing), a component of text mining.\n", + "\n", + "In real text mining projects data analysts use packages such as Scikit-Learn and NLTK, which you will learn in Module 3, to extract BoW from texts. In this exercise, however, we would like you to create BoW manually with Python. This is because by manually creating BoW you can better understand the concept and also practice the Python skills you have learned so far." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The Challenge\n", + "\n", + "We need to create a BoW from a list of documents. The documents (`doc1.txt`, `doc2.txt`, and `doc3.txt`) can be found in the `your-code` directory of this exercise. You will read the content of each document into an array of strings named `corpus`.\n", + "\n", + "*What is a corpus (plural: corpora)? Read the reference in the README file.*\n", + "\n", + "Your challenge is to use Python to generate the BoW of these documents. Your BoW should look like below:\n", + "\n", + "```python\n", + "bag_of_words = ['a', 'am', 'at', 'cool', 'i', 'ironhack', 'is', 'love', 'student']\n", + "\n", + "term_freq = [\n", + " [0, 0, 0, 1, 0, 1, 1, 0, 0],\n", + " [0, 0, 0, 0, 1, 1, 0, 1, 0],\n", + " [1, 1, 1, 0, 1, 1, 0, 0, 1],\n", + "]\n", + "```\n", + "\n", + "Now let's define the `docs` array that contains the paths of `doc1.txt`, `doc2.txt`, and `doc3.txt`." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "docs = ['doc1.txt', 'doc2.txt', 'doc3.txt']" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define an empty array `corpus` that will contain the content strings of the docs. 
Loop `docs` and read the content of each doc into the `corpus` array." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "corpus = []\n", + "\n", + "# Write your code here\n", + "for doc in docs:\n", + " with open (doc,\"r\") as f:\n", + " corpus.append(f.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print `corpus`." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Ironhack is cool.', 'I love Ironhack.', 'I am a student at Ironhack.']\n" + ] + } + ], + "source": [ + "print(corpus)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You expected to see:\n", + "\n", + "```['ironhack is cool', 'i love ironhack', 'i am a student at ironhack']```\n", + "\n", + "But you actually saw:\n", + "\n", + "```['Ironhack is cool.', 'I love Ironhack.', 'I am a student at Ironhack.']```\n", + "\n", + "This is because you haven't done two important steps:\n", + "\n", + "1. Remove punctuation from the strings\n", + "\n", + "1. Convert strings to lowercase\n", + "\n", + "Write your code below to process `corpus` (convert to lower case and remove special characters)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack is cool.', 'i love ironhack.', 'i am a student at ironhack.']\n" + ] + } + ], + "source": [ + "# Write your code here\n", + "\n", + "for i in range(len(corpus)):\n", + " corpus[i] = corpus[i].lower()\n", + "print(corpus)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack is cool', 'i love ironhack', 'i am a student at ironhack']\n" + ] + } + ], + "source": [ + "import string\n", + "\n", + "def remove_special(text):\n", + " punc = '.'\n", + " for ele in text: \n", + " if ele in punc: \n", + " text = text.replace(ele, \"\") \n", + " return text\n", + "\n", + "corpus = [remove_special(i) for i in corpus]\n", + "print(corpus)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now define `bag_of_words` as an empty array. It will be used to store the unique terms in `corpus`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "bag_of_words = []" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Loop through `corpus`. In each loop, do the following:\n", + "\n", + "1. Break the string into an array of terms. \n", + "1. Create a sub-loop to iterate the terms array. \n", + " * In each sub-loop, you'll check if the current term is already contained in `bag_of_words`. If not in `bag_of_words`, append it to the array." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "\n", + "terms = str(corpus)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'ironhack is cool i love ironhack i am a student at ironhack'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "terms = re.sub(r'[\\[\\'!@#$%^&*.,:;\\]]', \"\", terms)\n", + "terms" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "for word in terms.split():\n", + " if word not in bag_of_words:\n", + " bag_of_words.append(word)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print `bag_of_words`. You should see: \n", + "\n", + "```['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']```\n", + "\n", + "If not, fix your code in the previous cell." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']\n" + ] + } + ], + "source": [ + "print(bag_of_words)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we define an empty array called `term_freq`. Loop `corpus` for a second time. In each loop, create a sub-loop to iterate the terms in `bag_of_words`. Count how many times each term appears in each doc of `corpus`. Append the term-frequency array to `term_freq`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "term_freq = []\n", + "\n", + "# Write your code here\n", + "\n", + "for array in corpus:\n", + " freq = []\n", + " word_list = array.split(' ')\n", + " for word in bag_of_words:\n", + " if word in word_list:\n", + " freq.append(1)\n", + " else:\n", + " freq.append(0)\n", + " term_freq.append(freq)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print `term_freq`. You should see:\n", + "\n", + "```[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]```" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n" + ] + } + ], + "source": [ + "print(term_freq)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**If your output is correct, congratulations! You've solved the challenge!**\n", + "\n", + "If not, go back and check for errors in your code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Bonus Question\n", + "\n", + "Optimize your solution for the above question by removing stop words from the BoW. For your convenience, a list of stop words is defined for you in the next cell. With the stop words removed, your output should look like:\n", + "\n", + "```\n", + "bag_of_words = ['am', 'at', 'cool', 'ironhack', 'is', 'love', 'student']\n", + "\n", + "term_freq = [\n", + "\t[0, 0, 1, 1, 1, 0, 0],\n", + " \t[0, 0, 0, 1, 0, 1, 0],\n", + " \t[1, 1, 0, 1, 0, 0, 1]\n", + "]\n", + "```\n", + "\n", + "**Requirements:**\n", + "\n", + "1. Combine all your previous codes to the cell below.\n", + "1. 
Improve your solution by ignoring stop words in `bag_of_words`.\n", + "\n", + "After you're done, your `bag_of_words` should be:\n", + "\n", + "```['ironhack', 'is', 'cool', 'love', 'am', 'student', 'at']```\n", + "\n", + "And your `term_freq` should be:\n", + "\n", + "```[[1, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1, 1]]```" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "stop_words = ['all', 'six', 'less', 'being', 'indeed', 'over', 'move', 'anyway', 'fifty', 'four', 'not', 'own', 'through', 'yourselves', 'go', 'where', 'mill', 'only', 'find', 'before', 'one', 'whose', 'system', 'how', 'somewhere', 'with', 'thick', 'show', 'had', 'enough', 'should', 'to', 'must', 'whom', 'seeming', 'under', 'ours', 'has', 'might', 'thereafter', 'latterly', 'do', 'them', 'his', 'around', 'than', 'get', 'very', 'de', 'none', 'cannot', 'every', 'whether', 'they', 'front', 'during', 'thus', 'now', 'him', 'nor', 'name', 'several', 'hereafter', 'always', 'who', 'cry', 'whither', 'this', 'someone', 'either', 'each', 'become', 'thereupon', 'sometime', 'side', 'two', 'therein', 'twelve', 'because', 'often', 'ten', 'our', 'eg', 'some', 'back', 'up', 'namely', 'towards', 'are', 'further', 'beyond', 'ourselves', 'yet', 'out', 'even', 'will', 'what', 'still', 'for', 'bottom', 'mine', 'since', 'please', 'forty', 'per', 'its', 'everything', 'behind', 'un', 'above', 'between', 'it', 'neither', 'seemed', 'ever', 'across', 'she', 'somehow', 'be', 'we', 'full', 'never', 'sixty', 'however', 'here', 'otherwise', 'were', 'whereupon', 'nowhere', 'although', 'found', 'alone', 're', 'along', 'fifteen', 'by', 'both', 'about', 'last', 'would', 'anything', 'via', 'many', 'could', 'thence', 'put', 'against', 'keep', 'etc', 'amount', 'became', 'ltd', 'hence', 'onto', 'or', 'con', 'among', 'already', 'co', 'afterwards', 'formerly', 'within', 'seems', 'into', 'others', 'while', 'whatever', 'except', 'down', 'hers', 'everyone', 
'done', 'least', 'another', 'whoever', 'moreover', 'couldnt', 'throughout', 'anyhow', 'yourself', 'three', 'from', 'her', 'few', 'together', 'top', 'there', 'due', 'been', 'next', 'anyone', 'eleven', 'much', 'call', 'therefore', 'interest', 'then', 'thru', 'themselves', 'hundred', 'was', 'sincere', 'empty', 'more', 'himself', 'elsewhere', 'mostly', 'on', 'fire', 'am', 'becoming', 'hereby', 'amongst', 'else', 'part', 'everywhere', 'too', 'herself', 'former', 'those', 'he', 'me', 'myself', 'made', 'twenty', 'these', 'bill', 'cant', 'us', 'until', 'besides', 'nevertheless', 'below', 'anywhere', 'nine', 'can', 'of', 'your', 'toward', 'my', 'something', 'and', 'whereafter', 'whenever', 'give', 'almost', 'wherever', 'is', 'describe', 'beforehand', 'herein', 'an', 'as', 'itself', 'at', 'have', 'in', 'seem', 'whence', 'ie', 'any', 'fill', 'again', 'hasnt', 'inc', 'thereby', 'thin', 'no', 'perhaps', 'latter', 'meanwhile', 'when', 'detail', 'same', 'wherein', 'beside', 'also', 'that', 'other', 'take', 'which', 'becomes', 'you', 'if', 'nobody', 'see', 'though', 'may', 'after', 'upon', 'most', 'hereupon', 'eight', 'but', 'serious', 'nothing', 'such', 'why', 'a', 'off', 'whereby', 'third', 'i', 'whole', 'noone', 'sometimes', 'well', 'amoungst', 'yours', 'their', 'rather', 'without', 'so', 'five', 'the', 'first', 'whereas', 'once']\n", + "\n", + "# Write your code below\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Additional Challenge for the Nerds\n", + "\n", + "We will learn Scikit-Learn in Module 3 which has built in the BoW feature. Try to use Scikit-Learn to generate the BoW for this challenge and check whether the output is the same as yours. You will need to do some googling to find out how to use Scikit-Learn to generate BoW.\n", + "\n", + "**Notes:**\n", + "\n", + "* To install Scikit-Learn, use `pip install sklearn`. \n", + "\n", + "* Scikit-Learn removes stop words by default. 
You don't need to manually remove stop words.\n", + "\n", + "* Scikit-Learn's output has slightly different format from the output example demonstrated above. It's ok, you don't need to convert the Scikit-Learn output.\n", + "\n", + "The Scikit-Learn output will look like below:\n", + "\n", + "```python\n", + "# BoW:\n", + "{u'love': 5, u'ironhack': 3, u'student': 6, u'is': 4, u'cool': 2, u'am': 0, u'at': 1}\n", + "\n", + "# term_freq:\n", + "[[0 0 1 1 1 0 0]\n", + " [0 0 0 1 0 1 0]\n", + " [1 1 0 1 0 0 1]]\n", + " ```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/your-code/Learning.ipynb b/your-code/Learning.ipynb index 44e4bac..0630b55 100644 --- a/your-code/Learning.ipynb +++ b/your-code/Learning.ipynb @@ -675,7 +675,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -689,7 +689,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.9.7" } }, "nbformat": 4, diff --git a/your-code/challenge-1.ipynb b/your-code/challenge-1.ipynb index 4302084..58550f0 100644 --- a/your-code/challenge-1.ipynb +++ b/your-code/challenge-1.ipynb @@ -15,7 +15,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -33,12 +33,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'Durante un tiempo no estuvo segura de si su marido era su 
marido.'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "str_list = ['Durante', 'un', 'tiempo', 'no', 'estuvo', 'segura', 'de', 'si', 'su', 'marido', 'era', 'su', 'marido']\n", - "# Your code here:\n" + "# Your code here:\n", + "' '.join(str_list) + '.'" ] }, { @@ -50,12 +62,31 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'Grocery list: Bananas, bread, Brownie Mix, broccoli.'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "food_list = ['Bananas', 'Chocolate', 'bread', 'diapers', 'Ice Cream', 'Brownie Mix', 'broccoli']\n", - "# Your code here:\n" + "# Your code here:\n", + "\n", + "food_with_b = list()\n", + "\n", + "for food in food_list:\n", + " if food.startswith('b') or food.startswith('B'):\n", + " food_with_b.append(food)\n", + "\n", + "'Grocery list: ' + ', '.join(food_with_b) + '.'" ] }, { @@ -69,9 +100,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'The area of the circle with radius: 5 is: 78.53981633974483'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "import math\n", "\n", @@ -88,9 +130,12 @@ " # Sample Output: 78.53981633\n", " \n", " # Your code here:\n", + " radius = pi * (x*x)\n", + " return radius\n", " \n", - " \n", - "# Your output string here:" + "# Your output string here:\n", + "x = 5\n", + "string1 + ' ' + str(x) + ' ' + string2 + ' ' + str(area(x))" ] }, { @@ -106,9 +151,63 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 5, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'Some': 2,\n", + " 'say': 3,\n", + " 'the': 
1,\n", + " 'world': 1,\n", + " 'will': 1,\n", + " 'end': 1,\n", + " 'in': 2,\n", + " 'fire': 2,\n", + " 'ice': 2,\n", + " 'From': 1,\n", + " 'what': 1,\n", + " 'I’ve': 1,\n", + " 'tasted': 1,\n", + " 'of': 2,\n", + " 'desire': 1,\n", + " 'I': 3,\n", + " 'hold': 1,\n", + " 'with': 1,\n", + " 'those': 1,\n", + " 'who': 1,\n", + " 'favor': 1,\n", + " 'But': 1,\n", + " 'if': 1,\n", + " 'it': 1,\n", + " 'had': 1,\n", + " 'to': 1,\n", + " 'perish': 1,\n", + " 'twice': 1,\n", + " 'think': 1,\n", + " 'know': 1,\n", + " 'enough': 1,\n", + " 'hate': 1,\n", + " 'To': 1,\n", + " 'that': 1,\n", + " 'for': 1,\n", + " 'destruction': 1,\n", + " 'Is': 1,\n", + " 'also': 1,\n", + " 'great': 1,\n", + " 'And': 1,\n", + " 'would': 1,\n", + " 'suffice': 1}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "poem = \"\"\"Some say the world will end in fire,\n", "Some say in ice.\n", @@ -120,7 +219,20 @@ "Is also great\n", "And would suffice.\"\"\"\n", "\n", - "# Your code here:\n" + "# Your code here:\n", + "\n", + "poem = poem.replace(',',' ')\n", + "poem = poem.replace('\\n',' ')\n", + "poem = poem.replace('.',' ')\n", + "\n", + "lista_original = poem.split(' ')\n", + "\n", + "word_freq = dict.fromkeys(poem.split(' '), 0)\n", + "\n", + "for palabra in lista_original:\n", + " word_freq[palabra] = word_freq[palabra] + 1\n", + "del word_freq['']\n", + "(word_freq)" ] }, { @@ -132,9 +244,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'i was angry with my friend i told my wrath my wrath did end i was angry with my foe i told it not my wrath did grow and i waterd it in fears night morning with my tears and i sunned it with smiles and with soft deceitful wiles and it grew both day and night till it bore an apple bright and my foe beheld it shine and he knew that it was mine and into my garden stole when the night had 
veild the pole in the morning glad i see my foe outstretched beneath the tree'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "blacklist = ['and', 'as', 'an', 'a', 'the', 'in', 'it']\n", "\n", @@ -158,7 +281,11 @@ "In the morning glad I see; \n", "My foe outstretched beneath the tree.\"\"\"\n", "\n", - "# Your code here:\n" + "# Your code here:\n", + "poem = re.sub(r'[!@#$%^&*.,:;]', \"\", poem)\n", + "poem = re.sub(r'\\n', \" \", poem)\n", + "poem = poem.lower()\n", + "poem" ] }, { @@ -172,14 +299,26 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "['T', 'P']" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "poem = \"\"\"The apparition of these faces in the crowd;\n", "Petals on a wet, black bough.\"\"\"\n", "\n", - "# Your code here:\n" + "# Your code here:\n", + "re.findall(r'[A-Z]', poem)" ] }, { @@ -191,13 +330,49 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 8, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'123abc abc123 JohnSmith1 ABBY4 JANE'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "data = ['123abc', 'abc123', 'JohnSmith1', 'ABBY4', 'JANE']\n", "\n", - "# Your code here:\n" + "# Your code here:\n", + "\n", + "datos = str(' '.join(data))\n", + "datos" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['123', 'abc123', 'JohnSmith1', 'ABBY4']" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "re.findall(r'(?:\\w+)[\\d+]', datos)" ] }, { @@ -213,18 +388,50 @@ }, { "cell_type": "code", - "execution_count": null, + 
"execution_count": 10, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'123abc abc123 JohnSmith1 ABBY4 JANE'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "data = ['123abc', 'abc123', 'JohnSmith1', 'ABBY4', 'JANE']\n", - "# Your code here:\n" + "# Your code here:\n", + "datos = str(' '.join(data))\n", + "datos" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['abc1', 'mith1']\n", + "['123a']\n" + ] + } + ], + "source": [ + "print(re.findall(r'(?:[a-z+])+[\\d+]', datos))\n", + "print(re.findall(r'[\\d+]+(?:[a-z+])', datos))" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -238,7 +445,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.9.7" } }, "nbformat": 4, diff --git a/your-code/challenge-2.ipynb b/your-code/challenge-2.ipynb index 87c5656..f3b5e6b 100644 --- a/your-code/challenge-2.ipynb +++ b/your-code/challenge-2.ipynb @@ -72,7 +72,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -88,13 +88,16 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "corpus = []\n", "\n", - "# Write your code here\n" + "# Write your code here\n", + "for doc in docs:\n", + " with open (doc,\"r\") as f:\n", + " corpus.append(f.read())" ] }, { @@ -106,9 +109,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Ironhack is cool.', 'I love Ironhack.', 'I am a student at Ironhack.']\n" + ] + } + ], "source": [ 
"print(corpus)" ] @@ -136,11 +147,50 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack is cool.', 'i love ironhack.', 'i am a student at ironhack.']\n" + ] + } + ], "source": [ - "# Write your code here" + "# Write your code here\n", + "\n", + "for i in range(len(corpus)):\n", + " corpus[i] = corpus[i].lower()\n", + "print(corpus)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack is cool', 'i love ironhack', 'i am a student at ironhack']\n" + ] + } + ], + "source": [ + "import string\n", + "\n", + "def remove_special(text):\n", + " punc = '.'\n", + " for ele in text: \n", + " if ele in punc: \n", + " text = text.replace(ele, \"\") \n", + " return text\n", + "\n", + "corpus = [remove_special(i) for i in corpus]\n", + "print(corpus)" ] }, { @@ -152,7 +202,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -172,11 +222,45 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ - "# Write your code here" + "import re\n", + "\n", + "terms = str(corpus)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'ironhack is cool i love ironhack i am a student at ironhack'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "terms = re.sub(r'[\\[\\'!@#$%^&*.,:;\\]]', \"\", terms)\n", + "terms" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "for word in terms.split():\n", + " if word not in bag_of_words:\n", + " bag_of_words.append(word)" ] }, { @@ -192,9 +276,19 @@ }, 
{ "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 10, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['ironhack', 'is', 'cool', 'i', 'love', 'am', 'a', 'student', 'at']\n" + ] + } + ], "source": [ "print(bag_of_words)" ] @@ -208,13 +302,23 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "term_freq = []\n", "\n", - "# Write your code here" + "# Write your code here\n", + "\n", + "for array in corpus:\n", + " freq = []\n", + " word_list = array.split(' ')\n", + " for word in bag_of_words:\n", + " if word in word_list:\n", + " freq.append(1)\n", + " else:\n", + " freq.append(0)\n", + " term_freq.append(freq)" ] }, { @@ -228,9 +332,19 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 12, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1]]\n" + ] + } + ], "source": [ "print(term_freq)" ] @@ -278,7 +392,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -319,7 +433,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -333,7 +447,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.9.7" } }, "nbformat": 4,