import scrapy
from selenium import webdriver
class QuotesSpider(scrapy.Spider):
    """Spider for http://ozhat-turkiye.com/en/brands/a brand listings.

    NOTE(review): this is the question's broken original, repaired to be
    valid Python. The Selenium driver it creates still never loads any
    page, so the element lookup in parse() cannot work as intended — see
    the accepted answer's version for the working approach.
    """

    name = "quotes"
    start_urls = [
        'http://ozhat-turkiye.com/en/brands/a',
    ]

    def __init__(self):
        # BUG in original: attribute was created as `self.drivers` but
        # read as `self.driver` in parse(). Also, Firefox()'s first
        # positional argument is a profile, not an install directory,
        # and the original mixed '/' with an unescaped '\M' backslash.
        self.driver = webdriver.Firefox()

    def parse(self, response):
        """Yield one item per brand link, then attempt pagination."""
        for title in response.css(
                'div.tabledivinlineblock a.tablelink50::attr(href)').extract():
            yield {'title': title,
                   'response': response.url
                   }
        # i want to click this a tag — this looks up an element on a page
        # the driver never loaded; clicking it cannot affect `response`.
        next_link = self.driver.find_element_by_xpath(
            '//*[@id="maincontent_DataPager"]/a[last()]')
        # follow pagination links
        # for href in response.css('span#maincontent_DataPager a:last-child'):
        #     yield response.follow(href, self.parse)
        # BUG in original: .strip() ran on extract_first()'s result BEFORE
        # the `is not None` check, raising AttributeError on the last page.
        next_page = response.css(
            'span#maincontent_DataPager a:last-child::attr(href)').extract_first()
        if next_page is not None:
            yield response.follow(next_page.strip(), callback=self.parse)
最佳答案
以下脚本会不断点击"下一页"链接，直到无法继续点击为止，从而抓取所需的全部条目。这里不能使用 response.follow()
，因为分页是通过点击触发的，页面上没有可供跟踪的普通链接。
import time
import scrapy
from selenium import webdriver
class QuotesSpider(scrapy.Spider):
    """Spider that drives Firefox via Selenium, repeatedly clicking the
    pager's last link to walk every page, since the pager exposes no
    plain href that Scrapy's response.follow() could use.

    (Indentation was lost when this snippet was extracted; restored here.)
    """

    name = "quotes"
    start_urls = [
        'http://ozhat-turkiye.com/en/brands/a',
    ]

    def __init__(self):
        self.driver = webdriver.Firefox()

    def parse(self, response):
        """Load the page in the browser and yield items from every page."""
        self.driver.get(response.url)
        while True:
            # Hard-coded wait — as the answer itself notes, replace with
            # an explicit WebDriverWait in real code.
            time.sleep(5)
            for title in self.driver.find_elements_by_css_selector(
                    'div.tabledivinlineblock a.tablelink50'):
                yield {'title': title.text, 'response': response.url}
            try:
                # Click the "next" pager link; when it no longer exists
                # (last page), the lookup raises and we stop.
                self.driver.find_element_by_css_selector(
                    'span#maincontent_DataPager a:last-child').click()
            except Exception:
                break
我在脚本中使用了硬编码等待（hard-coded wait，即 time.sleep()），这是完全不推荐的。您应该将其替换为显式等待（explicit wait，如 WebDriverWait）
。
关于python - 我想使用 scrapy python 单击网站链接,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/52517882/