Gutenberg(古腾堡)语料库:包含多种电子书
>>> import nltk
>>> nltk.corpus.gutenberg.fileids()
['austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'bible-kjv.txt', 'blake-poems.txt', 'bryant-stories.txt', 'burgess-busterbrown.txt', 'carroll-alice.txt', 'chesterton-ball.txt', 'chesterton-brown.txt', 'chesterton-thursday.txt', 'edgeworth-parents.txt', 'melville-moby_dick.txt', 'milton-paradise.txt', 'shakespeare-caesar.txt', 'shakespeare-hamlet.txt', 'shakespeare-macbeth.txt', 'whitman-leaves.txt']
>>> emma = nltk.corpus.gutenberg.words('austen-emma.txt')
>>> len(emma)
192427
#上面的引用方式(nltk.corpus.gutenberg.……)用起来会很麻烦
#所以我们改用下面的方式引用:
>>> from nltk.corpus import gutenberg
这样,我们用起来就很方便了
>>> gutenberg.fileids()
['austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'bible-kjv.txt', 'blake-poems.txt', 'bryant-stories.txt', 'burgess-busterbrown.txt', 'carroll-alice.txt', 'chesterton-ball.txt', 'chesterton-brown.txt', 'chesterton-thursday.txt', 'edgeworth-parents.txt', 'melville-moby_dick.txt', 'milton-paradise.txt', 'shakespeare-caesar.txt', 'shakespeare-hamlet.txt', 'shakespeare-macbeth.txt', 'whitman-leaves.txt']
>>> for fileid in gutenberg.fileids():
...     num_chars = len(gutenberg.raw(fileid))
...     num_words = len(gutenberg.words(fileid))
...     num_sents = len(gutenberg.sents(fileid))
...     num_vocab = len(set([w.lower() for w in gutenberg.words(fileid)]))
...     print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), fileid)  # 平均词长、平均句长、词汇多样性
...
4 24 26 austen-emma.txt
4 26 16 austen-persuasion.txt
4 28 22 austen-sense.txt
4 33 79 bible-kjv.txt
4 19 5 blake-poems.txt
4 19 14 bryant-stories.txt
4 17 12 burgess-busterbrown.txt
4 20 12 carroll-alice.txt
4 20 11 chesterton-ball.txt
4 22 11 chesterton-brown.txt
4 18 10 chesterton-thursday.txt
4 20 24 edgeworth-parents.txt
4 25 15 melville-moby_dick.txt
4 52 10 milton-paradise.txt
4 11 8 shakespeare-caesar.txt
4 12 7 shakespeare-hamlet.txt
4 12 6 shakespeare-macbeth.txt
4 36 12 whitman-leaves.txt

看看例子里面用到的函数:
raw(string):返回文章原文
>>> gutenberg.raw("austen-emma.txt")
'[Emma by Jane Austen 1816]\n\nVOLUME I\n\nCHAPTER I\n\n\nEmma Woodhouse, handsome, clever, and rich, with a comfortable home\nand happy disposition, seemed to unite some of the best blessings\nof existence……
(输出为未经任何处理的原始文本,此处为节选)
sents(string):把文本划分成句子,每个句子是一个词链表
>>> gutenberg.sents('austen-emma.txt')
[['[', 'Emma', 'by', 'Jane', 'Austen', '1816', ']'], ['VOLUME', 'I'], ...]

words(string):分词,返回词链表
>>> gutenberg.words('austen-emma.txt')
['[', 'Emma', 'by', 'Jane', 'Austen', '1816', ']', ...]

网络文学语料库
>>> from nltk.corpus import webtext
>>> webtext.fileids()
['firefox.txt', 'grail.txt', 'overheard.txt', 'pirates.txt', 'singles.txt', 'wine.txt']

布朗语料库:百万词级的英语电子语料库,可用于研究文体之间的系统性差异
>>> from nltk.corpus import brown
>>> brown.fileids()
['ca01', 'ca02', 'ca03', 'ca04', 'ca05', 'ca06', 'ca07', 'ca08', 'ca09', 'ca10', 'ca11', 'ca12', 'ca13', 'ca14', 'ca15', 'ca16', 'ca17', 'ca18', 'ca19', 'ca20', 'ca21', 'ca22', 'ca23', 'ca24'……
>>> brown.categories()
['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction']

路透社语料库:新闻文档
>>> from nltk.corpus import reuters
>>> reuters.fileids()
['test/14826', 'test/14828', 'test/14829'……

就职演说语料库
>>> from nltk.corpus import inaugural
>>> inaugural.fileids()
['1789-Washington.txt', '1793-Washington.txt', '1797-Adams.txt', '1801-Jefferson.txt', '1805-Jefferson.txt', '1809-Madison.txt'……
>>> [fileid[:4] for fileid in inaugural.fileids()]
['1789', '1793', '1797', '1801', '1805', '1809', '1813'……

载入自己的语料库
>>> from nltk.corpus import PlaintextCorpusReader
>>> corpus_root = '/usr/yourdirectory'  # 你自己语料文件所在的目录,建议使用绝对路径
>>> wordlists = PlaintextCorpusReader(corpus_root, '.*')  # 第二个参数是匹配文件名的正则表达式