and so on. The rule should apply to every link I retrieve from my database, it must not traverse links that have already been visited, and the crawl of a site should end once there are no more static links left to visit. Can anyone guide me on how to achieve this?
What I tried:
import re
import MySQLdb
import scrapy
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import Rule


class PHscrapy(scrapy.Spider):
    name = "PHscrapy"
    crawled_links = []  # currently unused; Scrapy's dupefilter already tracks visited URLs

    # NOTE: rules only take effect on a CrawlSpider subclass; scrapy.Spider
    # ignores this attribute (canonicalize is also an extractor argument,
    # not a Rule argument).
    rules = (Rule(LxmlLinkExtractor(allow=(), unique=True, canonicalize=True),
                  callback='parse', follow=True),)

    def start_requests(self):
        # Seed the crawl with every website stored in the database.
        db = MySQLdb.connect("localhost", "****", "****", "***")
        cursor = db.cursor()
        cursor.execute("SELECT website FROM SHOPPING")
        links = cursor.fetchall()
        for url in links:
            yield scrapy.Request(url=url[0], meta={'base_url': url[0]},
                                 callback=self.parse)

    def parse(self, response):
        base_url = response.meta['base_url']
        # allow= takes regexes, so escape the base URL to match it literally
        # and stay on the same site.
        for link in LxmlLinkExtractor(allow=(re.escape(base_url),), unique=True,
                                      canonicalize=True).extract_links(response):
            print(link.url)
            yield scrapy.Request(link.url, callback=self.parse, meta=response.meta)
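From reading the Scrapy docs, I believe the scheduler already drops duplicate requests by default (RFPDupeFilter), and a spider closes on its own once no requests are left, so the "don't revisit, stop when done" part may come for free. Below is a minimal, untested sketch of the CrawlSpider route I'm considering, assuming the same SHOPPING table and credentials as above (PHcrawl and parse_item are just placeholder names I made up):

from urllib.parse import urlparse

import MySQLdb
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class PHcrawl(CrawlSpider):
    name = "PHcrawl"

    # The callback must not be named 'parse': CrawlSpider uses parse()
    # internally to apply the rules.
    rules = (Rule(LxmlLinkExtractor(unique=True, canonicalize=True),
                  callback='parse_item', follow=True),)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Load the seed URLs in __init__ so allowed_domains is set before
        # the spider is opened and OffsiteMiddleware reads it.
        db = MySQLdb.connect("localhost", "****", "****", "***")
        cursor = db.cursor()
        cursor.execute("SELECT website FROM SHOPPING")
        self.start_urls = [row[0] for row in cursor.fetchall()]
        db.close()
        # Keep the crawl on the seed sites: OffsiteMiddleware drops any
        # followed link whose host is not in allowed_domains.
        self.allowed_domains = [urlparse(u).netloc for u in self.start_urls]

    def parse_item(self, response):
        # Reached once per page; the default RFPDupeFilter has already
        # discarded URLs that were requested before, and the crawl ends
        # when the scheduler runs empty.
        self.logger.info("crawled %s", response.url)

If I need to stay on the exact seed URL (path included) rather than the whole domain, I suppose I'd keep the re.escape(base_url) filtering from my attempt instead of allowed_domains. Is this the right direction?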