from nltk.tokenize.stanford_segmenter import StanfordSegmenter

# Base directory that holds the Stanford segmenter jar, the slf4j jar,
# and the segmentation data (models + dictionaries).
STANFORD_DIR = "E:/anaconda/StanfordNLTK"

# Chinese word segmenter backed by the Stanford CRF classifier.
# NOTE(review): nltk emits a DeprecationWarning for this interface
# (deprecated in 3.2.5 in favour of nltk.parse.corenlp); kept as-is
# because the CoreNLP replacement requires a running CoreNLP server,
# which this setup does not assume — confirm before migrating.
segmenter = StanfordSegmenter(
    path_to_jar=STANFORD_DIR + "/stanford-segmenter.jar",
    java_class="edu.stanford.nlp.ie.crf.CRFClassifier",
    path_to_slf4j=STANFORD_DIR + "/slf4j-api.jar",
    path_to_sihan_corpora_dict=STANFORD_DIR + "/data",
    path_to_model=STANFORD_DIR + "/data/pku.gz",  # PKU (Peking University) model
    path_to_dict=STANFORD_DIR + "/data/dict-chris6.ser.gz",
)
# NOTE: Running the code above prints a hint — a DeprecationWarning raised
# from PyCharm Professional 2017.2.3's helpers/pydev/pydevconsole.py:
#   "The StanfordTokenizer will be deprecated in version 3.2.5.
#    Please use nltk.parse.corenlp.CoreNLPTokenizer instead."