#!/usr/bin/python3.5 -S
# -*- coding: utf-8 -*-
# Model downloads: python -m spacy.en.download / python -m spacy.de.download
# https://spacy.io/docs/#tutorials
# CSS: http://codepen.io/explosion/pen/xEpgKz
# CSS 2: https://explosion.ai/blog/displacy-ent-named-entity-visualizer
#
# Dependencies:
#   pip install --upgrade 3to2
#   pip install --upgrade language-check

import time
start_time = time.time()

import language_check
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
import gensim, logging
import markovify
from langdetect import detect
import spacy
from spacy.de import German
import base64
import os.path
import json
import pprint
import codecs
import nltk
import re
from subprocess import call
import sys
from textblob_de import TextBlobDE as TextBlob


def only_letters(string):
    """Return 1 if the string contains only letters, hyphens, digits and whitespace, else 0."""
    invalid = re.findall(r'[^A-Za-z\-\s\d]', string)
    if len(invalid) > 0:
        return 0
    return 1


#de_nlp = spacy.load('de', tagger=True, parser=True, entity=True)
de_nlp = German()
filename = "/home/100biere/v3test/input.txt"
writename = "/home/100biere/v3test/output.txt"
tool = language_check.LanguageTool('de_DE')

# Read the raw corpus as one string.
with codecs.open(filename, 'r', encoding='utf8') as f:
    text = f.read()

# Build the Markov model.
text_model = markovify.Text(text)

# Sample sentences until one is long enough, then persist it.
# http://pythonhosted.org/pyenchant/faq.html
while True:
    markov_sample = text_model.make_sentence()
    if markov_sample is not None and len(markov_sample) > 30:
        # Write in UTF-8 so the round-trip matches the input encoding.
        with codecs.open(writename, 'w', encoding='utf-8') as f:
            f.write(markov_sample)
        break

with codecs.open(writename, 'r', encoding='utf-8') as f:
    markov_sample = f.read()

print("Markov sample text:")
pprint.pprint(markov_sample)

'''
matches = tool.check(markov_sample)
mk_s = str(markov_sample)
markov_correct = language_check.correct(mk_s, matches)
print("Markov corrected text:")
pprint.pprint(markov_correct)
print("LanguageTool corrected text:")
# http://wiki.languagetool.org/command-line-options
os.system("java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl -a /home/100biere/v3test/output.txt")
#os.system("java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl /home/100biere/v3test/output.txt")
sys.exit(0)
'''

# Noun phrases:
# http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/
de_doc_spc = de_nlp(markov_sample)
# noun_chunks is a generator, so materialize it before printing.
pprint.pprint(list(de_doc_spc.noun_chunks))

blob = TextBlob(text)
blob_markov = TextBlob(markov_sample)
keyword = "Tochter"

print("Noun chunks from spaCy:")
for np in de_doc_spc.noun_chunks:
    #print(np.text)
    pprint.pprint(np)
#sys.exit(0)  # debug exit removed -- it would have skipped everything below


def filter_noun_phrases(phrases):
    """Keep phrases with only plain characters and at least one uppercase letter.

    The phrases stay str: encoding them to bytes (as the original did) would
    break both the regex in only_letters() and the isupper() check on Python 3.
    """
    kept = []
    for np in phrases:
        if only_letters(np) and any(ch.isupper() for ch in np):
            kept.append(np)
    return kept


# Noun phrases from TextBlob, for the full corpus and for the Markov sample.
nounPhrases = filter_noun_phrases(blob.noun_phrases)
nounPhrMarkov = filter_noun_phrases(blob_markov.noun_phrases)

if any(x in nounPhrMarkov for x in nounPhrases):
    print("Noun phrase hit:")

if markov_sample in text:
    print("Version 1: Markov sample text occurs in the original corpus.")
    print("\n")

list_mk = [markov_sample]
# Check every candidate sentence against the corpus (the original iterated
# over the characters of `text`, which could never match a whole sentence).
if any(x in text for x in list_mk):
    print("Version 2: Markov sample text occurs in the original corpus.")
    print("\n")

if keyword in text:
    print(keyword + " -> occurs in the corpus")
    print("\n")

# Tokenize the corpus into tagged sentences for Doc2Vec.
raw_sentences = []
text_sent = nltk.sent_tokenize(text)
for a in text_sent:
    words = nltk.word_tokenize(a)
    raw_sentences.append(words)

sentences = [gensim.models.doc2vec.TaggedDocument(words, [i])
             for i, words in enumerate(raw_sentences)]

path = "/home/100biere/v3test/Doc2Vec.bin"
if os.path.isfile(path):
    # Reuse the saved model; the original loaded it and then immediately
    # overwrote it with a fresh, untrained model.
    model = gensim.models.Doc2Vec.load(path)
else:
    # Note: alpha/min_alpha are far above gensim's defaults (0.025/0.0001).
    model = gensim.models.Doc2Vec(iter=50, size=400, workers=4,
                                  sorted_vocab=1, alpha=0.75, min_alpha=0.45)  # min_count=1
    # sorted_vocab=1 already sorts the vocabulary during build_vocab().
    model.build_vocab(sentences)
    # gensim >= 2.0 instead requires:
    # model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    for epoch in range(40):
        model.train(sentences)
    model.init_sims(replace=False)   # keeps training possible -> more memory
    #model.init_sims(replace=True)   # read-only, no further training -> less memory
    model.save(path)

# Vocabulary stored in the Doc2Vec model (model.wv.vocab in newer gensim;
# .iteritems() was Python 2 only).
wordVocab = [k for (k, v) in model.vocab.items()]
#pprint.pprint(wordVocab)

try:
    sim = model.most_similar(positive=[keyword], negative=[], topn=5)
    sim_word = sim[0]       # best match as (word, cosine similarity)
    sim_calc = sim_word[1]
    print("Similarity:")
    print(sim)
except KeyError:
    # The keyword is not in the model vocabulary; nothing to report.
    pass

# java -jar /home/100biere/software/LanguageTool-3.5/languagetool-commandline.jar -adl -a /home/100biere/input.txt
print("Script runtime: --- %s seconds ---" % (time.time() - start_time))
sys.exit(0)
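
# ---------------------------------------------------------------------------
# Hedged sketch (commented out, unreachable after the sys.exit above): one way
# to score the generated Markov sentence against the corpus with the trained
# Doc2Vec model. infer_vector() and docvecs.most_similar() are standard
# pre-4.0 gensim Doc2Vec calls; the variable names below are illustrative
# assumptions, not part of the original script.
#
# tokens = nltk.word_tokenize(markov_sample)
# inferred = model.infer_vector(tokens)  # embed the sample in the Doc2Vec space
# for doc_id, score in model.docvecs.most_similar([inferred], topn=3):
#     # Tags are the integer sentence indices assigned during tokenization.
#     print("corpus sentence %d -> cosine similarity %.3f" % (doc_id, score))
#     print(" ".join(raw_sentences[doc_id]))
# ---------------------------------------------------------------------------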