-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: Flipkart Webscrapper.py
More file actions
133 lines (107 loc) · 3.73 KB
/
Flipkart Webscrapper.py
File metadata and controls
133 lines (107 loc) · 3.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import time

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
text = input("Enter product name: ")
# number = int(input("Enter number of pages to scan: "))
# Chrome Driver config
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--incognito')
options.add_argument("--test-type")
options.add_argument('--headless')
options.add_argument('--silent')
driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome()
titles = []
prices = []
ratings = []
discounts = []
links = []
imageLinks = []
# utility Functions
def formatPrice(price):
price = price[1:]
price = price.split(',')
price = ''.join(price)
return price
def formatDiscount(discount):
discount = discount.split('%')[0]
return discount
url = "https://www.flipkart.com/search?q=" + text
driver.get(url)
# time.sleep(2)
# # To close login model
# driver.find_element_by_css_selector("._2AkmmA._29YdH8").click()
# searchText = driver.find_element_by_class_name(
# "LM6RPg") # to locate search box
# searchText.send_keys(text)
# searchText.send_keys(Keys.ENTER)
# time.sleep(3)
page = driver.find_elements_by_css_selector("._3liAhj")
if page:
for item in page:
print("*", end=" ")
title = item.find_element_by_css_selector(
"._2cLu-l").get_attribute("title")
link = item.find_element_by_css_selector(
"._2cLu-l").get_attribute("href")
imageLink = item.find_element_by_css_selector(
"._1Nyybr._30XEf0").get_attribute("src")
price = formatPrice(
item.find_element_by_class_name("_1vC4OE").text)
try:
discount = formatDiscount(item.find_element_by_class_name(
"VGWI6T").find_element_by_tag_name("span").text)
except Exception:
discount = 0
try:
rating = item.find_element_by_class_name("hGSR34").text
except Exception:
rating = 'NA'
titles.append(title)
prices.append(price)
ratings.append(rating)
discounts.append(discount)
links.append(link)
imageLinks.append(imageLink)
link = (driver.current_url).split("&page=")[0]
page = driver.find_elements_by_css_selector("._3liAhj")
else:
page = driver.find_elements_by_css_selector("._1UoZlX")
for item in page:
print("*", end=" ")
link = item.find_element_by_css_selector(
"._31qSD5").get_attribute("href")
title = item.find_element_by_class_name("_3wU53n").text
price = formatPrice(
item.find_element_by_class_name("_1vC4OE").text)
try:
imageLink = item.find_element_by_css_selector(
"._1Nyybr._30XEf0").get_attribute("src")
except Exception:
imageLink = " "
try:
discount = formatDiscount(item.find_element_by_class_name(
"VGWI6T").find_element_by_tag_name("span").text)
except Exception:
discount = 0
try:
rating = item.find_element_by_class_name("hGSR34").text
except Exception:
rating = 'NA'
titles.append(title)
prices.append(price)
ratings.append(rating)
discounts.append(discount)
links.append(link)
imageLinks.append(imageLink)
link = (driver.current_url).split("&page=")[0]
page = driver.find_elements_by_css_selector("._1UoZlX")
print()
# To convert data into csv
df = pd.DataFrame({'Product Name': titles, 'Prices': prices,
'Rating': ratings, 'Discount %': discounts, 'Link': links, "Image Links": imageLinks})
text = text + '.csv'
df.to_csv(text, index=False, encoding='utf-8')
print("Check ", text)