scrapingweb.py
import csv
import json

import requests
from bs4 import BeautifulSoup

# Output files: one CSV row and one JSON object per chapter image.
csv_columns = ['chapter', 'img']
records = []

with open('manga.csv', 'w', encoding='utf8', newline='') as filecsv:
    writer = csv.DictWriter(filecsv, fieldnames=csv_columns)
    writer.writeheader()

    # Scrape the list-view page of each chapter in the given range.
    for page in range(960, 965):
        print('---', page, '---')
        url = 'https://3asq.org/manga/one-piece/' + str(page) + '/?style=list'
        print(url)

        r = requests.get(url)
        soup = BeautifulSoup(r.content, "html.parser")

        # Each chapter image is an <img> tag with this class.
        images = soup.find_all('img', {'class': 'wp-manga-chapter-img'})
        for img in images:
            # Collapse the whitespace/newlines the site embeds in the src attribute.
            src = " ".join(img.get('src').split())
            writer.writerow({'chapter': page, 'img': src})
            records.append({'chapter': page, 'img': src})

# Dump all records at once so the JSON file is valid (no trailing comma).
with open('manga.json', 'w', encoding='utf8') as file:
    json.dump(records, file, ensure_ascii=False, indent=2)