nld.py
# -*- coding: utf-8 -*-
"""
@author: Viet Lai
"""
import io
import logging
import os

import scrapy
from bs4 import BeautifulSoup
from scrapy import signals
from scrapy.crawler import CrawlerProcess

from article import Article
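# Note: "article" is a local module of this repository and is not shown here. Based on
# how it is used in parse() below, Article is assumed to be a small container roughly
# like the following sketch (field names match the assignments in parse(); json() is
# assumed to serialize those fields to a JSON string):
#
#     class Article(object):
#         def __init__(self):
#             self.url = self.author = self.title = self.description = self.time = ''
#             self.paragraphs = []
#
#         def json(self):
#             import json
#             return json.dumps(self.__dict__, ensure_ascii=False)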


class NewsSpider(scrapy.Spider):
    """Spider that crawls the "nld" news articles from pre-collected link lists.

    Attributes:
        crawled_history (str): Path of the text file that stores already-crawled page ids.
        crawled_pages (list): Page ids loaded from and appended to the crawl history.
        name (str): Spider name; also the folder that holds the link lists and the output.
    """
    name = "nld"
    crawled_history = "history/{}.txt".format(name)
    crawled_pages = []
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Create the spider and hook spider_closed up to the matching Scrapy signal.

        Args:
            crawler (scrapy.crawler.Crawler): Crawler that runs this spider.

        Returns:
            NewsSpider: The spider instance created by the base class.
        """
        spider = super(NewsSpider, cls).from_crawler(crawler, *args, **kwargs)
        # Save the crawl history when the spider shuts down.
        crawler.signals.connect(spider.spider_closed,
                                signal=signals.spider_closed)
        return spider
    def start_requests(self):
        """Read the collected links for every category and request the unseen ones.

        Yields:
            scrapy.Request: One request per article link that is not in the crawl history.
        """
        self.load_crawled_pages()
        # Each text file in the spider's folder holds the links of one category.
        files = [x for x in os.listdir(self.name) if x.endswith('.txt')]
        for file in files:
            file_name = os.path.join(self.name, file)
            # Read all links in the file, one per line.
            with open(file_name) as f:
                links = f.readlines()
                links = [x.strip() for x in links]
            # The category (and output folder) is named after the link file.
            base = os.path.basename(file_name)
            category = os.path.splitext(base)[0]
            try:
                os.mkdir(os.path.join(self.name, category))
            except OSError:
                # The category folder already exists.
                pass
            for link in links:
                # Skip pages that are already in the crawl history.
                page = link.split("/")[-1]
                if page not in self.crawled_pages:
                    yield scrapy.Request(url=link, callback=self.parse)
    def parse(self, response):
        """Extract one article from the response and save it as a JSON file.

        Args:
            response (scrapy.http.Response): Response of a single article page.
        """
        container = response.css("div.contentleft")[0]
        # Title and description are the plain text of the h1/h2 tags.
        title = container.css("h1").extract_first().strip()
        title = BeautifulSoup(title, "lxml").text.strip()
        description = container.css("h2").extract_first().strip()
        description = BeautifulSoup(description, "lxml").text.strip()
        # Body paragraphs, with HTML stripped and empty paragraphs dropped.
        paragraphs = container.css("div.contentdetail p").extract()
        paragraphs = [BeautifulSoup(p, "lxml").text.strip() for p in paragraphs]
        paragraphs = [x for x in paragraphs if len(x) > 0]
        author = container.css("div.nguon-tin-detail").extract()[0]
        author = BeautifulSoup(author, "lxml").text.strip()
        time = container.css('p.ngayxuatban').extract_first()
        time = BeautifulSoup(time, 'lxml').text.strip()
        # The article id is the last dash-separated token of the URL's file name.
        page = response.url.split("/")[-1]
        id = page.split('.')[0].split('-')[-1]
        category = response.url.split('/')[3]
        filename = '{}/{}/{}.json'.format(self.name, category, id)
        directory_path = os.path.join(self.name, category)
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        a = Article()
        a.url = response.url
        a.author = author
        a.title = title
        a.paragraphs = paragraphs
        a.description = description
        a.time = time
        print('Save: ', filename)
        with io.open(filename, 'w', encoding='utf8') as f:
            f.write(a.json())
        self.log('Saved: {}'.format(filename), level=logging.DEBUG)
        # Record the page slug so the history check in start_requests can skip it next time.
        self.crawled_pages.append(page)
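    # For illustration only: assuming Article.json() simply dumps its fields, a saved
    # file such as "nld/<category>/<id>.json" would look roughly like
    # {"url": "...", "author": "...", "title": "...", "paragraphs": ["...", "..."],
    #  "description": "...", "time": "..."}; the exact shape depends on article.py.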
    def spider_closed(self, spider):
        """Persist the crawl history when the spider_closed signal fires.

        Args:
            spider (scrapy.Spider): The spider that was closed.
        """
        self.log('Spider closed')
        self.save_crawled_pages()
    def load_crawled_pages(self):
        """Load previously crawled page ids from the history file, if it exists."""
        if os.path.exists(self.crawled_history):
            with open(self.crawled_history) as f:
                pages = f.readlines()
            self.crawled_pages = [x.strip() for x in pages]
    def save_crawled_pages(self):
        """Write all crawled page ids to the history file, one per line."""
        with open(self.crawled_history, 'w+') as f:
            for page in self.crawled_pages:
                f.write(page + '\n')
        print('Saved history:', len(self.crawled_pages))


if __name__ == '__main__':
    process = CrawlerProcess()
    process.crawl(NewsSpider)
    process.start()
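# Expected layout, as implied by start_requests and crawled_history above: the link
# lists live at "<spider name>/<category>.txt" (text files inside a local "nld/" folder),
# a "history/" folder must already exist so the crawl history can be written to
# "history/nld.txt", and the extracted articles are saved as "nld/<category>/<id>.json".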