使用函数修改python web抓取代码的技巧

2024-09-27 07:28:06 发布

您现在位置:Python中文网/ 问答频道 /正文

我正在尝试使用BeautifulSoup编写一个python脚本,该脚本在网页http://tbc-python.fossee.in/completed-books/中爬行并从中收集必要的数据。基本上,它必须将所有书籍章节中出现的page loading errors, SyntaxErrors, NameErrors, AttributeErrors, etc提取到一个文本文件errors.txt。大约有273本书。写的脚本运行正常,带宽速度也不错。但是代码要花很多时间来浏览所有的书籍。请帮助我优化python脚本,做必要的调整,也许使用函数等。谢谢

import urllib2, urllib
from bs4 import BeautifulSoup
website = "http://tbc-python.fossee.in/completed-books/"
soup = BeautifulSoup(urllib2.urlopen(website))
errors = open('errors.txt','w')

# Completed books webpage has data stored in table format
BookTable = soup.find('table', {'class': 'table table-bordered table-hover'})
for BookCount, BookRow in enumerate(BookTable.find_all('tr'), start = 1):
    # Grab  book names
    BookCol = BookRow.find_all('td')
    BookName = BookCol[1].a.string.strip()
    print "%d: %s" % (BookCount, BookName)  
    # Open each book
    BookSrc = BeautifulSoup(urllib2.urlopen('http://tbc-python.fossee.in%s' %(BookCol[1].a.get("href"))))
    ChapTable = BookSrc.find('table', {'class': 'table table-bordered table-hover'})

    # Check if each chapter page opens, if not store book & chapter name in error.txt
    for ChapRow in ChapTable.find_all('tr'):
        ChapCol = ChapRow.find_all('td')
        ChapName = (ChapCol[0].a.string.strip()).encode('ascii', 'ignore') # ignores error : 'ascii' codec can't encode character u'\xef'
        ChapLink = 'http://tbc-python.fossee.in%s' %(ChapCol[0].a.get("href"))

        try:
            ChapSrc = BeautifulSoup(urllib2.urlopen(ChapLink))
        except:
            print '\t%s\n\tPage error' %(ChapName)
            errors.write("Page; %s;%s;%s;%s" %(BookCount, BookName, ChapName, ChapLink))
            continue

        # Check for errors in chapters and store the errors in error.txt
        EgError = ChapSrc.find_all('div', {'class': 'output_subarea output_text output_error'})
        if EgError:
            for e, i in enumerate(EgError, start=1):
                errors.write("Example;%s;%s;%s;%s\n" %(BookCount,BookName,ChapName,ChapLink)) if 'ipython-input' or 'Error' in i.pre.get_text() else None           
            print '\t%s\n\tExample errors: %d' %(ChapName, e)       

errors.close()

Tags: int, txt, http, for, table, error, all, find
2条回答

您可能需要查看多进程/多线程相关模块(例如 `multiprocessing`)并分配工作负载。

如果你一次只使用一个连接,那么你的连接速度就不重要了。

我试图分解代码并用函数表示它。有什么建议可以进一步改进代码吗?如何将从网站获取的错误转储到一个新的html文件中,该文件采用表格格式,其中包含包含错误的书籍和章节的详细信息?

更新代码如下:

import urllib2, sys
from bs4 import BeautifulSoup

def get_details(link, index):
    """
    Fetch a listing page and collect name/link pairs from its first table.

    Arguments:
        link  -- URL of the page whose table is scraped.
        index -- which <td> column holds the anchor of interest:
                 1 for the completed-books listing (book details),
                 0 for a book page (chapter details).

    Returns a list of [name, href] pairs, one per table row.
    """
    page = BeautifulSoup(urllib2.urlopen(link))
    rows = page.find('table').find_all('tr')

    pairs = []
    for tr in rows:
        cell = tr.find_all('td')[index]
        pairs.append([cell.a.string, cell.a.get("href")])

    return pairs


def get_chapter_errors(chap_link):
    """
    This function takes in chapter link from chapter_details_list as argument and returns 
    * Number of example errors(SyntaxErrors, NameErrors, ValueErrors, etc) present in the chapter
                 OR
    * HTTPError while loading the chapter
    """
    try:
        chp_src = BeautifulSoup(urllib2.urlopen(chap_link))
        example_errors = chp_src.find_all('div', {'class': 'output_subarea output_text output_error'})
        error = len(example_errors)
        if not example_errors:
            error = None 

    except urllib2.HTTPError as e:
        print e
        error = "Page fetch error"

    return error


def main():
    log_dict = {}
    book_dict = {}

    url = sys.argv[1] # accept url as argument
    book_details_list = get_details(url, index=1)
    for book_name, book_link in book_details_list:
        chapter_details_list = get_details('http://tbc-python.fossee.in%s' % book_link, index=0)
        _id = book_link.strip('/book-details')
        book_dict = {'name': book_name,
                     'url': 'http://tbc-python.fossee.in%s' % book_link,
                     'id': _id,
                     'chapters': []
                    }

        for chap_name, chap_link in chapter_details_list:
            error = get_chapter_errors('http://tbc-python.fossee.in%s' % chap_link)
            book_dict.get('chapters').append({'name': chap_name, 
                                              'url': 'http://tbc-python.fossee.in%s' % chap_link, 
                                              'errors': error
                                             })

        log_dict.update({_id: book_dict})

        print log_dict
        print "\n\n\n\n"


if __name__ == '__main__':
    main()

相关问题 更多 >

    热门问题