example - Python Scrapy: convert relative paths into absolute paths
From the Scrapy docs:
def parse(self, response):
    # ... code omitted
    next_page = response.urljoin(next_page)
    yield scrapy.Request(next_page, self.parse)
That is, the response object has a method for doing exactly this.
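As a minimal sketch of that method applied to the image src in question (this assumes a recent Scrapy release; the 0.12 version used below predates response.urljoin, and the showImage XPath is taken from the spider further down):

def parse(self, response):
    # response.urljoin resolves a relative src against the URL of the
    # page being parsed (host and path come from response.url)
    srcs = response.xpath('//*[@id="showImage"]/@src').extract()
    if srcs:
        yield {'image_urls': [response.urljoin(srcs[0])]}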
I have amended the code based on the solutions offered below by the great people here; I get the error shown below the code.
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from dmoz2.items import DmozItem

class DmozSpider(BaseSpider):
    name = "namastecopy2"
    allowed_domains = ["namastefoods.com"]
    start_urls = [
        "http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1",
        "http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12",
    ]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('/html/body/div/div[2]/table/tr/td[2]/table/tr')
        items = []
        for site in sites:
            item = DmozItem()
            item['manufacturer'] = 'Namaste Foods'
            item['productname'] = site.select('td/h1/text()').extract()
            item['description'] = site.select('//*[@id="info-col"]/p[7]/strong/text()').extract()
            item['ingredients'] = site.select('td[1]/table/tr/td[2]/text()').extract()
            item['ninfo'] = site.select('td[2]/ul/li[3]/img/@src').extract()
            # insert code that will save the above image path for ninfo as an absolute path
            base_url = get_base_url(response)
            relative_url = site.select('//*[@id="showImage"]/@src').extract()
            item['image_urls'] = urljoin_rfc(base_url, relative_url)
            items.append(item)
        return items
My items.py looks like this:
from scrapy.item import Item, Field

class DmozItem(Item):
    # define the fields for your item here like:
    productid = Field()
    manufacturer = Field()
    productname = Field()
    description = Field()
    ingredients = Field()
    ninfo = Field()
    imagename = Field()
    image_paths = Field()
    relative_images = Field()
    image_urls = Field()
    pass
I need the relative paths the spider is getting for item['relative_images'] converted into absolute paths and saved in item['image_urls'], so that I can download the images from within this spider. For example, the relative image path the spider gets is '../../files/images/small/8270-BrowniesHiResClip.jpg'; this should be converted to 'http://namastefoods.com/files/images/small/8270-BrowniesHiResClip.jpg' and stored in item['image_urls'].
I will also need the item['ninfo'] path to be stored as an absolute path.
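For reference, that conversion can be reproduced with the standard library's urlparse module (this snippet is only an illustration, not part of the spider; the resulting host comes from the page URL):

import urlparse

# URL of the product page being parsed (taken from start_urls above)
page_url = 'http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1'
relative_src = '../../files/images/small/8270-BrowniesHiResClip.jpg'

# urljoin resolves the '../..' segments against the page URL
absolute_src = urlparse.urljoin(page_url, relative_src)
# absolute_src == 'http://www.namastefoods.com/files/images/small/8270-BrowniesHiResClip.jpg'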
Error when running the above code:
2011-06-28 17:18:11-0400 [scrapy] INFO: Scrapy 0.12.0.2541 started (bot: dmoz2)
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled extensions: TelnetConsole, SpiderContext, WebService, CoreStats, CloseSpider
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled scheduler middlewares: DuplicatesFilterMiddleware
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, DownloaderStats
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled item pipelines: MyImagesPipeline
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6023
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Web service listening on 0.0.0.0:6080
2011-06-28 17:18:11-0400 [namastecopy2] INFO: Spider opened
2011-06-28 17:18:12-0400 [namastecopy2] DEBUG: Crawled (200) <GET http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12> (referer: None)
2011-06-28 17:18:12-0400 [namastecopy2] ERROR: Spider error processing <http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12> (referer: <None>)
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 1137, in mainLoop
self.runUntilCurrent()
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 757, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 243, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 312, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 328, in _runCallbacks
self.result = callback(self.result, *args, **kw)
File "/***/***/***/***/***/***/spiders/namaste_copy2.py", line 30, in parse
item['image_urls'] = urljoin_rfc(base_url, relative_url)
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/url.py", line 37, in urljoin_rfc
unicode_to_str(ref, encoding))
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/python.py", line 96, in unicode_to_str
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
exceptions.TypeError: unicode_to_str must receive a unicode or str object, got list
2011-06-28 17:18:15-0400 [namastecopy2] DEBUG: Crawled (200) <GET http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1> (referer: None)
2011-06-28 17:18:15-0400 [namastecopy2] ERROR: Spider error processing <http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1> (referer: <None>)
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 1137, in mainLoop
self.runUntilCurrent()
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 757, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 243, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 312, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 328, in _runCallbacks
self.result = callback(self.result, *args, **kw)
File "/***/***/***/***/***/***/spiders/namaste_copy2.py", line 30, in parse
item['image_urls'] = urljoin_rfc(base_url, relative_url)
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/url.py", line 37, in urljoin_rfc
unicode_to_str(ref, encoding))
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/python.py", line 96, in unicode_to_str
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
exceptions.TypeError: unicode_to_str must receive a unicode or str object, got list
2011-06-28 17:18:15-0400 [namastecopy2] INFO: Closing spider (finished)
2011-06-28 17:18:15-0400 [namastecopy2] INFO: Spider closed (finished)
Thanks. -TM
What I do is:
import urlparse
...

def parse(self, response):
    ...
    urlparse.urljoin(response.url, extractedLink.strip())
    ...
Note the strip(), because sometimes I come across strange links like:
<a href="
/MID_BRAND_NEW!%c2%a0MID_70006_Google_Android_2.2_7%22%c2%a0Tablet_PC_Silver/a904326516.html
">MID BRAND NEW! MID 70006 Google Android 2.2 7" Tablet PC Silver</a>
A more general approach to getting an absolute URL would be:
import urlparse

def abs_url(url, response):
    """Return absolute link"""
    base = response.xpath('//head/base/@href').extract()
    if base:
        base = base[0]
    else:
        base = response.url
    return urlparse.urljoin(base, url)
This also works when a base element is present.
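For example, with a hypothetical page that declares a base tag (HtmlResponse is only used here to build a throwaway response object for illustration; the example.com URLs are made up):

from scrapy.http import HtmlResponse

body = ('<html><head><base href="http://cdn.example.com/assets/"></head>'
        '<body><img id="showImage" src="img/logo.png"></body></html>')
response = HtmlResponse(url='http://www.example.com/page', body=body, encoding='utf-8')

# the <base> href wins over the page URL
abs_url('img/logo.png', response)
# -> 'http://cdn.example.com/assets/img/logo.png'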
In your case, you would use it like this:
def parse(self, response):
    # ...
    for site in sites:
        # ...
        image_urls = site.select('//*[@id="showImage"]/@src').extract()
        if image_urls:
            item['image_urls'] = abs_url(image_urls[0], response)
A few notes:
items = []
for site in sites:
    item = DmozItem()
    item['manufacturer'] = 'Namaste Foods'
    ...
    items.append(item)
return items
I do it differently:
for site in sites:
    item = DmozItem()
    item['manufacturer'] = 'Namaste Foods'
    ...
    yield item
Then:
relative_url = site.select('//*[@id="showImage"]/@src').extract()
item['image_urls'] = urljoin_rfc(base_url, relative_url)
extract() always returns a list, because an XPath query always returns a list of selected nodes.
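For instance (the string shown is only an illustration of the shape of the result):

srcs = site.select('//*[@id="showImage"]/@src').extract()
# srcs is a list of unicode strings, e.g.
# [u'../../files/images/small/8270-BrowniesHiResClip.jpg']
# whereas urljoin_rfc expects a single string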
Do this instead:
relative_url = site.select('//*[@id="showImage"]/@src').extract()[0]
item['image_urls'] = urljoin_rfc(base_url, relative_url)
from scrapy.utils.response import get_base_url

base_url = get_base_url(response)
relative_url = site.select('//*[@id="showImage"]/@src').extract()
item['image_urls'] = [urljoin_rfc(base_url, ru) for ru in relative_url]
or you could extract just one element:
base_url = get_base_url(response)
relative_url = site.select('//*[@id="showImage"]/@src').extract()[0]
item['image_urls'] = urljoin_rfc(base_url, relative_url)
The error was because you were passing a list instead of a string to the urljoin function.
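The same pattern would also cover the item['ninfo'] path mentioned in the question (a sketch using the question's own XPath; it assumes the img/@src query returns at least one match):

base_url = get_base_url(response)
ninfo_srcs = site.select('td[2]/ul/li[3]/img/@src').extract()
item['ninfo'] = [urljoin_rfc(base_url, src) for src in ninfo_srcs]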