@@ -11,7 +11,7 @@ First make sure you have the Stanford CoreNLP server running. See [the instruct
 
 Then the setup just requires you to pass in the url of the server:
 ```
->>> from corenlp import StanfordCoreNLP
+>>> from pycorenlp import StanfordCoreNLP
 >>> nlp = StanfordCoreNLP('http://localhost:9000')
 ```
 
@@ -21,7 +21,7 @@ Supports annotation:
     'Pusheen and Smitha walked along the beach. '
     'Pusheen wanted to surf, but fell off the surfboard.')
 >>> output = nlp.annotate(text, properties={
-  'annotators': 'tokenize,ssplit,pos,depparse,parse',
+  'annotators': 'tokenize,ssplit,pos,depparse,parse',
   'outputFormat': 'json'
   })
 >>> print(output['sentences'][0]['parse'])
@@ -41,15 +41,15 @@ And tokensregex + semgrex
 >>> nlp.tokensregex(text, pattern='/Pusheen|Smitha/', filter=False)
 {u'sentences': [
   {
-    u'1': {u'text': u'Smitha', u'begin': 2, u'end': 3},
+    u'1': {u'text': u'Smitha', u'begin': 2, u'end': 3},
     u'0': {u'text': u'Pusheen', u'begin': 0, u'end': 1}, u'length': 2
-  },
+  },
   {u'0': {u'text': u'Pusheen', u'begin': 0, u'end': 1}, u'length': 1}]}
 >>> nlp.semgrex(text, pattern='{tag: VBD}', filter=False)
 {u'sentences': [
-  {u'0': {u'text': u'walked', u'begin': 3, u'end': 4}, u'length': 1},
+  {u'0': {u'text': u'walked', u'begin': 3, u'end': 4}, u'length': 1},
   {
-    u'1': {u'text': u'fell', u'begin': 6, u'end': 7},
+    u'1': {u'text': u'fell', u'begin': 6, u'end': 7},
     u'0': {u'text': u'wanted', u'begin': 1, u'end': 2}, u'length': 2
   }
 ]}
0 commit comments