# Categorized XML Corpus Reader
import nltk
from nltk.corpus.reader import CategorizedCorpusReader, XMLCorpusReader
class CategorizedXMLCorpusReader(CategorizedCorpusReader, XMLCorpusReader):
    """XML corpus reader with category support.

    Combines ``CategorizedCorpusReader`` (category -> fileid mapping) with
    ``XMLCorpusReader`` (XML parsing) so NLTK's category-based features can
    be used on an XML corpus such as the NYT Annotated Corpus.
    """

    def __init__(self, *args, **kwargs):
        # CategorizedCorpusReader pops its own options (cat_pattern,
        # cat_map, cat_file, ...) out of kwargs before XMLCorpusReader
        # sees them, so the order of these two calls matters.
        CategorizedCorpusReader.__init__(self, kwargs)
        XMLCorpusReader.__init__(self, *args, **kwargs)

    def _resolve(self, fileids, categories):
        """Map *categories* to their fileids.

        At most one of *fileids*/*categories* may be given; raises
        ValueError otherwise.  Returns None when both are None, in which
        case callers fall back to the full corpus fileid list.
        """
        if fileids is not None and categories is not None:
            raise ValueError('Specify fileids or categories, not both')
        if categories is not None:
            return self.fileids(categories)
        return fileids

    # All of the following methods resolve categories to fileids via
    # _resolve() and then delegate to the underlying reader.

    def raw(self, fileids=None, categories=None):
        """Return the raw contents (markup included) of the given files."""
        return XMLCorpusReader.raw(self, self._resolve(fileids, categories))

    def words(self, fileids=None, categories=None):
        """Return a list of words from the given files or categories.

        XMLCorpusReader.words() works on a single file at a time, so the
        per-file word lists are concatenated here.
        """
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            # Neither argument given: operate on the whole corpus
            # (the original crashed iterating None here).
            fileids = self.fileids()
        all_words = []
        for fileid in fileids:
            all_words += XMLCorpusReader.words(self, fileid)
        return all_words

    def text(self, fileids=None, categories=None):
        """Return the documents' text as one string, with all markup stripped."""
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self.fileids()
        parts = []
        for fileid in fileids:
            # Element.iter() replaces the deprecated getiterator(),
            # which was removed in Python 3.9.
            for elem in self.xml(fileid).iter():
                if elem.text:
                    parts.append(elem.text)
        # join() instead of repeated += avoids quadratic string building.
        return "".join(parts)

    def fieldtext(self, fileids=None, categories=None, field=None):
        """Return the concatenated text of every element whose tag is *field*.

        *field* defaults to None, which (matching the old unimplemented
        stub) returns None; pass an XML tag name to get its text.
        """
        if field is None:
            return
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self.fileids()
        parts = []
        for fileid in fileids:
            # iter(tag) yields only elements with the requested tag.
            for elem in self.xml(fileid).iter(field):
                if elem.text:
                    parts.append(elem.text)
        return "".join(parts)

    def sents(self, fileids=None, categories=None):
        """Split the markup-free text into sentences with Punkt.

        Fix: the original tokenized self.words() (a *list*), which Punkt's
        tokenize() cannot handle; tokenize the plain-text string instead.
        """
        text = self.text(fileids, categories)
        return nltk.tokenize.PunktSentenceTokenizer().tokenize(text)

    def paras(self, fileids=None, categories=None):
        # NOTE(review): CategorizedCorpusReader does not define paras();
        # this will raise AttributeError unless another class in the MRO
        # provides it — confirm against the installed NLTK version.
        return CategorizedCorpusReader.paras(self, self._resolve(fileids, categories))
很抱歉,但是把它作为一个新问题发布是我发现讨论这个代码的唯一方法。我还在使用words()方法尝试使用categories时发现了一个小错误。这里:https://github.com/nltk/nltk/issues/250#issuecomment-5273102
你比我先解决这个问题了吗?另外,您是否对其进行了进一步的修改,以使类别发挥作用?我的电子邮件在我的个人资料页,如果你想谈论它,所以:-)
这是一个用于NLTK的分类XML语料库阅读器,基于这篇教程(this tutorial)。它使您可以在XML语料库(例如《纽约时报》标注语料库)上使用NLTK的基于类别的特性。
把这个文件叫做CategorizedXMLCorpusReader.py并将其导入为:
然后您可以像其他NLTK阅读器一样使用它。例如
我仍在学习NLTK,所以欢迎任何更正或建议。
相关问题 更多 >
编程相关推荐