#!/usr/bin/python3.5 -S
# -*- coding: utf-8 -*-
#
# Model downloads:
#   python -m spacy.en.download
#   python -m spacy.de.download
# https://spacy.io/docs/#tutorials
# CSS: http://codepen.io/explosion/pen/xEpgKz
# CSS 2: https://explosion.ai/blog/displacy-ent-named-entity-visualizer
#
# pip install --upgrade 3to2
# pip install --upgrade language-check
# http://polyglot.readthedocs.io/en/latest/Installation.html

import time

start_time = time.time()

import polyglot
from polyglot.text import Text, Word
#import rake
import NP
#import language_check
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
import gensim, logging
import markovify
from langdetect import detect
import spacy
from spacy.de import German
import base64
import os.path
import json
import pprint
import codecs
import nltk
import re
from subprocess import call
import sys
from textblob_de import TextBlobDE
from unidecode import unidecode


def remove_non_ascii(text):
    # unidecode expects unicode input; a Python 3 str already is unicode,
    # so only decode when raw bytes are passed in.
    if isinstance(text, bytes):
        text = text.decode("utf-8")
    return unidecode(text)


def only_letters(string):
    # Return 1 if the string contains nothing but letters, digits,
    # whitespace and hyphens; otherwise 0.
    lol = re.findall(r'[^A-Za-z-\s\d]', string)
    if len(lol) > 0:
        return 0
    return 1


#de_nlp = spacy.load('de', tagger=True, parser=True, entity=True)
#de_nlp = spacy.de.German()

stopword = "/home/100biere/demo/stopwordlist.de.txt"
filename = "/home/100biere/demo/input.txt"
writename = "/home/100biere/demo/output.txt"

#tool = language_check.LanguageTool('de_DE')

# https://github.com/zelandiya/RAKE-tutorial/blob/master/rake_tutorial.py
#rake_object = rake.Rake(stopword, 3, 1, 1)
'''
rake_object = rake.Rake("SmartStoplist.txt", 5, 3, 4)
This gives a RAKE object that extracts keywords where:
  - each word has at least 5 characters,
  - each phrase has at most 3 words,
  - each keyword appears in the text at least 4 times.
'''

# Get the raw corpus as one string; the context manager closes the file.
with codecs.open(filename, 'r', encoding='utf8') as f:
    text = f.read()

#keywordsAll = rake_object.run(text)
#print("Keywords:", keywordsAll)
#pprint.pprint(sentences)
#sys.exit(0)
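# Minimal sanity checks for the two helpers above (a hedged sketch; the
# sample strings are illustrative and not taken from the corpus):
#
#   only_letters("Tochter")      # -> 1, only letters
#   only_letters("Bier!")        # -> 0, '!' falls outside the whitelist
#   remove_non_ascii("Käse")     # -> "Kase", unidecode transliterates umlauts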
# Build the Markov model from the corpus.
text_model = markovify.Text(text)

# Print five randomly-generated sentences:
#for i in range(5):
#    print(text_model.make_sentence())

# http://pythonhosted.org/pyenchant/faq.html
# make_sentence() returns None when it cannot assemble a sentence, so keep
# sampling until we get one longer than 30 characters.
while 1:
    markov_sample = text_model.make_sentence()
    if markov_sample is not None and len(markov_sample) > 30:
        with codecs.open(writename, 'w', encoding='utf-8') as f:
            f.write(markov_sample)
        break

# Read the sample back with the same encoding it was written in.
with codecs.open(writename, 'r', encoding='utf-8') as f:
    markov_sample = f.read()

print("Markov Sample Text:")
pprint.pprint(markov_sample)
print()

#keywordsMarkow = rake_object.run(markov_sample)
#print("RAKE Keywords Markov:", keywordsMarkow)

#blob = TextBlobDE(markov_sample)
#for tok in blob.tags:
#    print(tok)
#print("TextBlobDE Noun Phrases:", blob.noun_phrases)

nphrases = NP.tokenize(markov_sample)
#pprint.pprint(nphrases)
print("Noun Phrases:", nphrases)
print()

blob = TextBlobDE(markov_sample)
toks = blob.tags
print("TextBlob POS Tagger:", toks)
print()

'''
print("LanguageTool Corrected Text:")
# http://wiki.languagetool.org/command-line-options
os.system("java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl -a /home/100biere/demo/output.txt")
print()
print()
'''

# Named entities via polyglot; use a separate variable so the corpus in
# 'text' is not clobbered.
poly_text = Text(markov_sample.encode('utf-8'))
for entity in poly_text.entities:
    print(entity.tag, entity)
print()

ft = (time.time() - start_time)
print("Script Runtime: ---", ft, "---- Seconds")
sys.exit(0)

# ---------------------------------------------------------------------
# Everything below is unreachable experimental code, kept for reference.
# ---------------------------------------------------------------------

'''
matches = tool.check(markov_sample)
mk_s = str(markov_sample)
markov_correct = tool.correct(mk_s, matches)
print("Markov Corrected Text:")
pprint.pprint(markov_correct)

print("LanguageTool Corrected Text:")
# http://wiki.languagetool.org/command-line-options
os.system("java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl -a /home/100biere/v3test/output.txt")
#os.system("java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl /home/100biere/v3test/output.txt")
sys.exit(0)
'''

# Requires de_nlp from the header to be enabled (spacy.load('de', ...)).
#sp_str = str(markov_sample)
de_doc_spc = de_nlp(markov_sample)
noun_phrases = de_doc_spc.noun_chunks
pprint.pprint(list(noun_phrases))

blob = TextBlobDE(text)
blob_markov = TextBlobDE(markov_sample)
nounPhrases = []
nounPhrMarkov = []
keyword = "Tochter"

print("Noun chunks from spaCy:")
for np in de_doc_spc.noun_chunks:
    #print(np.text)
    pprint.pprint(np)
sys.exit(0)

#print("All noun phrases from TextBlob:")
for np in blob.noun_phrases:
    # Keep only phrases that contain an uppercase letter and pass the
    # character whitelist; work on str directly (encoding to bytes would
    # break isupper() and the regex under Python 3).
    ok_upper = 0
    ok_only = only_letters(np)
    for l in np:
        if l.isupper():
            ok_upper = 1
    if ok_upper == 1 and ok_only == 1:
        nounPhrases.append(np)
        #print(np)

#print("Noun phrases from TextBlob:")
for np in blob_markov.noun_phrases:
    ok_upper = 0
    ok_only = only_letters(np)
    for l in np:
        if l.isupper():
            ok_upper = 1
    if ok_upper == 1 and ok_only == 1:
        nounPhrMarkov.append(np)
        #print(np)

if any(x in nounPhrMarkov for x in nounPhrases):
    print("Noun Phrase hit:")

if markov_sample in text:
    print("Version 1: Markov sample text occurs in the source corpus:")
    print("\n")

list_mk = []
list_mk.append(markov_sample)
# Note: this iterates over the characters of 'text', kept as in the
# original experiment.
if any(x in list_mk for x in text):
    print("Version 2: Markov sample text occurs in the source corpus:")
    print("\n")

if keyword in text:
    print(keyword + " -> occurs in the corpus")
    print("\n")
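# Shape of the Doc2Vec training corpus built below (a sketch with
# illustrative tokens, not taken from the corpus): every sentence becomes a
# TaggedDocument whose tag is just its running index, e.g.
#
#   TaggedDocument(words=['Die', 'Tochter', 'trinkt', 'Bier', '.'], tags=[0])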
raw_sentences = []
#tokens = nltk.word_tokenize(text)
text_sent = nltk.sent_tokenize(text)
for a in text_sent:
    words = nltk.word_tokenize(a)
    #pprint.pprint(words)
    #print(type(a))
    raw_sentences.append(words)

sentences = [gensim.models.doc2vec.TaggedDocument(words, [i])
             for i, words in enumerate(raw_sentences)]

path = "/home/100biere/v3test/Doc2Vec.bin"
if os.path.isfile(path):
    # Reuse the persisted model instead of retraining.
    model = gensim.models.Doc2Vec.load(path)
else:
    # alpha/min_alpha are unusually high for Doc2Vec; kept as configured.
    model = gensim.models.Doc2Vec(iter=50, size=400, workers=4,
                                  sorted_vocab=1, alpha=0.75,
                                  min_alpha=0.45)  # min_count=1
    model.build_vocab(sentences)  # sorted_vocab=1 already sorts the vocabulary
    for epoch in range(40):
        model.train(sentences)
    model.init_sims(replace=False)  # keeps raw vectors: still trainable -> more memory
    #model.init_sims(replace=True)  # read-only vectors -> less memory
    model.save(path)

# Vocabulary known to the Doc2Vec model (newer gensim exposes this as
# model.wv.vocab).
wordVocab = [k for (k, v) in model.vocab.items()]
#pprint.pprint(wordVocab)
#sys.exit(0)

try:
    sim = model.most_similar(positive=[keyword], negative=[], topn=5)
    sim_word = sim[0]
    sim_calc = sim_word[1]
    print("Similarity:", sim)
except KeyError:
    pass  # keyword is not in the model vocabulary

# java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl -a /home/100biere/input.txt
print("Script Runtime: --- %s seconds ---" % (time.time() - start_time))
sys.exit(0)
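# A minimal sketch of reusing the persisted model in a later session
# (hedged: assumes the same gensim version that wrote Doc2Vec.bin, and
# 'Bier' is an illustrative keyword that may not be in the vocabulary):
#
#   import gensim
#   model = gensim.models.Doc2Vec.load("/home/100biere/v3test/Doc2Vec.bin")
#   if "Bier" in model.vocab:
#       print(model.most_similar(positive=["Bier"], topn=5))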