python - Scrapy with MySQL throws this error

Tags: python mysql scrapy

I want to use Scrapy with MySQL, so I created this class in my pipeline:

from datetime import datetime
from hashlib import md5

from twisted.enterprise import adbapi
from twisted.python import log


class MySQLStorePipeline(object):

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbargs = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)

    def process_item(self, item, spider):
        # run the db query in the thread pool
        d = self.dbpool.runInteraction(self._do_upsert, item, spider)
        d.addErrback(self._handle_error, item, spider)
        # at the end return the item in case of success or failure
        d.addBoth(lambda _: item)
        # return the deferred instead of the item; this makes the engine
        # process the next item (according to the CONCURRENT_ITEMS setting)
        # after this operation (deferred) has finished
        return d

    def _do_upsert(self, conn, item, spider):
        """Perform an insert or update."""
        guid = self._get_guid(item)
        now = datetime.utcnow().replace(microsecond=0).isoformat(' ')

        # NOTE: 'table' is a reserved word in MySQL; use the real table name here
        conn.execute("""SELECT EXISTS(
            SELECT 1 FROM table WHERE guid = %s
        )""", (guid, ))
        ret = conn.fetchone()[0]
        if ret:
            conn.execute("""
                UPDATE table
                SET topicName=%s, authorName=%s, content=%s, updated=%s
                WHERE guid=%s
            """, (item['topicName'], item['authorName'], item['content'], now, guid))
            spider.log("Item updated in db: %s %r" % (guid, item))
        else:
            conn.execute("""
                INSERT INTO table (guid, topicName, authorName, content, updated)
                VALUES (%s, %s, %s, %s, %s)
            """, (guid, item['topicName'], item['authorName'], item['content'], now))
            spider.log("Item stored in db: %s %r" % (guid, item))

    def _handle_error(self, failure, item, spider):
        """Handle errors raised during the db interaction."""
        # do nothing, just log
        log.err(failure)

    def _get_guid(self, item):
        """Generate a unique identifier for a given item."""
        # hash based solely on the content field
        return md5(item['content']).hexdigest()
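
For reference, from_settings reads its connection parameters from the project settings, and the log below warns that defining ITEM_PIPELINES as a list is deprecated. A minimal sketch of the matching settings.py entries (the MYSQL_* values are placeholders; the module path myspider.pipelines is taken from the traceback below):

# settings.py - a sketch; fill in real connection values
ITEM_PIPELINES = {
    'myspider.pipelines.MySQLStorePipeline': 300,  # dict form, not a list
}

MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'mydb'
MYSQL_USER = 'root'
MYSQL_PASSWD = 'secret'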

But when I run the spider I get this error:

2014-12-03 10:02:08+0800 [scrapy] INFO: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2014-12-03 10:02:08+0800 [scrapy] INFO: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
/Library/Python/2.7/site-packages/scrapy/contrib/pipeline/__init__.py:21: ScrapyDeprecationWarning: ITEM_PIPELINES defined as a list or a set is deprecated, switch to a dict
  category=ScrapyDeprecationWarning, stacklevel=1)
Traceback (most recent call last):
  File "/usr/local/bin/scrapy", line 11, in <module>
    sys.exit(execute())
  File "/Library/Python/2.7/site-packages/scrapy/cmdline.py", line 143, in execute
    _run_print_help(parser, _run_command, cmd, args, opts)
  File "/Library/Python/2.7/site-packages/scrapy/cmdline.py", line 89, in _run_print_help
    func(*a, **kw)
  File "/Library/Python/2.7/site-packages/scrapy/cmdline.py", line 150, in _run_command
    cmd.run(args, opts)
  File "/Library/Python/2.7/site-packages/scrapy/commands/crawl.py", line 60, in run
    self.crawler_process.start()
  File "/Library/Python/2.7/site-packages/scrapy/crawler.py", line 92, in start
    if self.start_crawling():
  File "/Library/Python/2.7/site-packages/scrapy/crawler.py", line 124, in start_crawling
    return self._start_crawler() is not None
  File "/Library/Python/2.7/site-packages/scrapy/crawler.py", line 139, in _start_crawler
    crawler.configure()
  File "/Library/Python/2.7/site-packages/scrapy/crawler.py", line 47, in configure
    self.engine = ExecutionEngine(self, self._spider_closed)
  File "/Library/Python/2.7/site-packages/scrapy/core/engine.py", line 65, in __init__
    self.scraper = Scraper(crawler)
  File "/Library/Python/2.7/site-packages/scrapy/core/scraper.py", line 66, in __init__
    self.itemproc = itemproc_cls.from_crawler(crawler)
  File "/Library/Python/2.7/site-packages/scrapy/middleware.py", line 50, in from_crawler
    return cls.from_settings(crawler.settings, crawler)
  File "/Library/Python/2.7/site-packages/scrapy/middleware.py", line 33, in from_settings
    mw = mwcls.from_settings(settings)
  File "/Users/tony/Development/@Scrapy/myspider/myspider/pipelines.py", line 42, in from_settings
    dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/enterprise/adbapi.py", line 203, in __init__
    self.dbapi = reflect.namedModule(dbapiName)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/python/_reflectpy3.py", line 137, in namedModule
    topLevel = __import__(name)
  File "/Library/Python/2.7/site-packages/MySQLdb/__init__.py", line 19, in <module>
    import _mysql
ImportError: dlopen(/Library/Python/2.7/site-packages/_mysql.so, 2): no suitable image found.  Did find:
    /Library/Python/2.7/site-packages/_mysql.so: mach-o, but wrong architecture

Is the Python MySQL binding not installed correctly? How can I fix this?
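
The ImportError ("mach-o, but wrong architecture") means the compiled _mysql.so was built for a different architecture than the Python interpreter loading it. A quick, standard-library-only way to check which architecture the interpreter runs as (a diagnostic sketch, not part of the fix):

import platform
import struct

# The pointer size shows whether this interpreter runs as 32- or 64-bit;
# _mysql.so must be compiled for the same architecture.
print(struct.calcsize("P") * 8)  # 64 on a 64-bit interpreter
print(platform.machine())        # e.g. x86_64 or i386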

Best answer

I'm on a Mac; here is how I solved the problem:

nano ~/.bash_profile

add these lines:

export PATH=/usr/local/mysql/bin:${PATH}
export DYLD_LIBRARY_PATH=/usr/local/mysql/lib/
export VERSIONER_PYTHON_PREFER_64_BIT=yes
export VERSIONER_PYTHON_PREFER_32_BIT=yes

then run source ~/.bash_profile, and rebuild and reinstall MySQL-python (from its source directory):

python setup.py build
python setup.py install
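
If the rebuild picked up the right architecture, the import that failed in the traceback should now succeed; a quick sanity check (this assumes MySQL-python was reinstalled for the same interpreter Scrapy uses):

# This import raised the dlopen "wrong architecture" error before the rebuild.
import MySQLdb
print(MySQLdb.__version__)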

Regarding this Scrapy-with-MySQL error, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/27262473/

Related articles:

python - Pandas - replacing values with other values

php - Best way to SELECT based on DATE in PHP/MYSQL

mysql - Importing a text file in phpMyAdmin

python - I can't get ? instead of %s to work for inserts with python mysqldb

python - Getting all images of a product from inside the product page

Python: inserting into an array based on a condition

python - Flask login - @login_manager.token_loader not getting called

python - Pandas splitting a column based on the category of elements

python - Error when using scrapy in Python

python - Error: raise ValueError("No element found in %s" % response) occurs when trying to log in with scrapy