if offsets:
    for i in offsets:
        query_word = self._tokens[i]
        # Find the context of query word.
        left_context = self._tokens[i-context:i]
рдЬрдм рдЦреЛрдЬ рд╢рдмреНрдж рдХреА рдкрд╣рд▓реА рдШрдЯрдирд╛ рдкрд╛рда рдХреА рд╢реБрд░реБрдЖрдд рдореЗрдВ рд╣реЛрддреА рд╣реИ (рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП рдСрдлрд╝рд╕реЗрдЯ 7 рдкрд░), рдФрд░ рдорд╛рди рд▓реЗрдВ рдХрд┐ рд╕рдВрджрд░реНрдн (context) рдкреИрд░рд╛рдореАрдЯрд░ 20 рдкрд░ рд╕реЗрдЯ рд╣реИ, рддреЛ `[i-context:i]` рдХрд╛ рдореВрд▓реНрдпрд╛рдВрдХрди `[-13:7]` рдХреЗ рд░реВрдк рдореЗрдВ рдХрд┐рдпрд╛ рдЬрд╛рдПрдЧрд╛ред
рдЗрд╕ рд╕реНрдерд┐рддрд┐ рдореЗрдВ, рдпрджрд┐ рдкрд╛рда рдореЗрдВ 20 рд╕реЗ рдЕрдзрд┐рдХ рд╢рдмреНрдж рд╣реИрдВ, рддреЛ рдкрд╛рда рдХреЗ рдкрд╣рд▓реЗ 7 рд╢рдмреНрджреЛрдВ рд╡рд╛рд▓реА рд╕реВрдЪреА рдХреЗ рдмрдЬрд╛рдп, рдЪрд░ left_context рдПрдХ рдЦрд╛рд▓реА рд╕реВрдЪреА рд╣реЛрдЧреАред
рдПрдХ рд╕рд╛рдзрд╛рд░рдг рдлрд┐рдХреНрд╕ рдХрд░реЗрдЧрд╛:
if offsets:
    for i in offsets:
        query_word = self._tokens[i]
        # Find the context of query word.
        if i - context < 0:
            left_context = self._tokens[:i]
        else:
            left_context = self._tokens[i-context:i]
рдХреНрдпрд╛ рдЖрдк рдПрдХ рдирдореВрдирд╛ рдЗрдирдкреБрдЯ рдФрд░ рд╡рд╛рдВрдЫрд┐рдд рдЖрдЙрдЯрдкреБрдЯ рдкреНрд░рджрд╛рди рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ рддрд╛рдХрд┐ рд╣рдо рдкреНрд░рддрд┐рдЧрдорди рдкрд░реАрдХреНрд╖рдг рдореЗрдВ рдЬреЛрдбрд╝ рд╕рдХреЗрдВ?
рдЗрдирдкреБрдЯ:
jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
text = nltk.Text(nltk.word_tokenize(jane_eyre))
text.concordance('taking')
text.concordance_list('taking')[0]
рдЖрдЙрдЯрдкреБрдЯ (рдПрдирдПрд▓рдЯреАрдХреЗ 3.3):
Displaying 1 of 1 matches:
taking a walk that day . We had been wander
ConcordanceLine(left=[],
query='taking',
right=['a', 'walk', 'that', 'day', '.', 'We', 'had', 'been', 'wandering', ',', 'indeed', ',', 'in', 'the', 'leafless', 'shrubbery', 'an', 'hour'],
offset=7,
left_print='',
right_print='a walk that day . We had been wande',
line=' taking a walk that day . We had been wande')
рд╡рд╛рдВрдЫрд┐рдд рдЖрдЙрдЯрдкреБрдЯ:
Displaying 1 of 1 matches:
Chapter 1 THERE was no possibility of taking a walk that day . We had been wander
ConcordanceLine(left=['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of'],
query='taking',
right=['a', 'walk', 'that', 'day', '.', 'We', 'had', 'been', 'wandering', ',', 'indeed', ',', 'in', 'the', 'leafless', 'shrubbery', 'an', 'hour'],
offset=7,
left_print='Chapter 1 THERE was no possibility of',
right_print='a walk that day . We had been wande',
line='Chapter 1 THERE was no possibility of taking a walk that day . We had been wande')
рдзрдиреНрдпрд╡рд╛рдж @BLKSerene рдмрдЧ рдХреА рд░рд┐рдкреЛрд░реНрдЯ рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП!
рдЖрд╣, рдпрд╣рд╛рдБ рдПрдХ рдЕрдЪреНрдЫрд╛ рдлрд┐рдХреНрд╕ рд╣реИред if-else рдХреЗ рдмрдЬрд╛рдп рд╣рдо рдиреНрдпреВрдирддрдо рдмрд╛рдЙрдВрдб рдХреЛ max() рд╕реЗ
рдХреНрд▓рд┐рдк рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ, рдЙрджрд╛рд╣рд░рдг рдХреЗ рд▓рд┐рдП:
left_context = self._tokens[max(0, i-context):i]
рдирд┐рд░рдВрддрд░ рдПрдХреАрдХрд░рдг/рдкреНрд░рддрд┐рдЧрдорди рдкрд░реАрдХреНрд╖рдг рдХреЗ рд▓рд┐рдП https://github.com/nltk/nltk/blob/develop/nltk/test/concordance.doctest рдореЗрдВ doctest рдЬреЛрдбрд╝рдирд╛ рдмрд╣реБрдд рдорджрджрдЧрд╛рд░ рд╣реЛрдЧрд╛ =)
Patching https://github.com/nltk/nltk/issues/2088
The left boundary of the left-context slice should be clipped to 0 if `i-context` < 0.
>>> from nltk import Text, word_tokenize
>>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
>>> text = Text(word_tokenize(jane_eyre))
>>> text.concordance_list('taking')[0].left
['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of']
#2103 рдореЗрдВ рдкреИрдЪ рдХрд┐рдпрд╛ рдЧрдпрд╛ред рдзрдиреНрдпрд╡рд╛рдж @BLKSerene рдФрд░ @dnc1994!