#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://developers.google.com/custom-search/docs/xml_results#countryCodes
# https://www.linkedin.com/countserv/count/share?format=jsonp&url=https://www.buzzerstar.com
# pip install --upgrade spacy tensorflow gensim sumy keras markovify google-api-python-client beautifulsoup4
from sphinxapi import *
from DowseryLibrary import *
from utils import *
from colored import fg, bg, attr
import sys
import time
import os
import re
import codecs
import string
import glob
import getopt
import argparse
import MySQLdb as mdb
from unidecode import unidecode
from datetime import datetime as dTime
from pprint import PrettyPrinter
#import modules.Fileify as fileify
from sumy.parsers.plaintext import PlaintextParser
from sumy.summarizers.edmundson import EdmundsonSummarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from sumy.parsers.html import HtmlParser
from sumy.nlp.tokenizers import Tokenizer
def cprint(msg, foreground="black", background="white"):
    # Compose ANSI codes via the `colored` helpers imported above.
    style = fg(foreground) + bg(background)
    print(style + msg + attr('reset'))
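# Example (hypothetical call; any valid `colored` color names work):
#   cprint("query finished", foreground="white", background="green")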
pp = PrettyPrinter(indent=5)
# Usage: python3 <script>.py --suchbegriff "<query>" --echtzeittreffer "<n>" --maximaleergebnisse "<n>" --gesetzPercentage "<percent>" --paragraphPercentage "<percent>" --sessionID "<id>"
parser = argparse.ArgumentParser()
parser.add_argument('--suchbegriff', action="append")
parser.add_argument('--echtzeittreffer', action="append")
parser.add_argument('--maximaleergebnisse', action="append")
parser.add_argument('--gesetzPercentage', action="append")
parser.add_argument('--paragraphPercentage', action="append")
parser.add_argument('--sessionID', action="append")
args = parser.parse_args()
v=vars(args)
suchbegriff =" ".join(v["suchbegriff"])
echtzeittreffer =" ".join(v["echtzeittreffer"])
maximaleergebnisse =" ".join(v["maximaleergebnisse"])
gesetzPercentage_v1 =" ".join(v["gesetzPercentage"])
paragraphPercentage_v1 =" ".join(v["paragraphPercentage"])
session_ID =" ".join(v["sessionID"])
suchbegriff = suchbegriff.strip()
echtzeittreffer = echtzeittreffer.strip()
maximaleergebnisse = maximaleergebnisse.strip()
gesetzPercentage_v1 = gesetzPercentage_v1.strip()
paragraphPercentage_v1 = paragraphPercentage_v1.strip()
session_ID = session_ID.strip();
gesetzPercentage_v1 = float(gesetzPercentage_v1)
gesetzPercentage_v1 = int(gesetzPercentage_v1) #1500
paragraphPercentage_v1 = float(paragraphPercentage_v1)
paragraphPercentage_v1 = int(paragraphPercentage_v1) #1500
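# A tighter argparse sketch (an alternative, not wired in): the same flags can
# be declared with types and defaults, so no join/strip/cast chain is needed.
# The default values shown are assumptions, not taken from this script.
#   parser.add_argument('--echtzeittreffer', type=int, default=1500)
#   parser.add_argument('--gesetzPercentage', type=int, default=50)
#   parser.add_argument('--sessionID', type=str, required=True)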
sSplitsuchbegriff = suchbegriff.split()
if len(sSplitsuchbegriff)==1:
print("Warnung: Nur ein Suchbegriff eingegeben!")
#fileify.plainWriteAppend(logquery, "/home/www/dowery.com/results/"+session_ID+".debug.txt")
# Like the mysql client, MySQLdb won't connect to Sphinx
mode = SPH_MATCH_ALL
host = 'localhost'
port = 9312
index = 'dowery'
filtercol = 'group_id'
filtervals = []
sortby = ''
groupby = ''
groupsort = '@group desc'
#mode = SPH_MATCH_EXTENDED
limit = int(float(echtzeittreffer))
SqlQuery = "SELECT DISTINCT * FROM openjurv4 WHERE"  # db: dowery_prototype
r_ResultList = list()
def encodeToUTF8Adv(text):
    # Round-trip through UTF-8, dropping undecodable bytes.
    encResults = text.encode('utf-8', "ignore")
    return str(encResults.decode('utf-8', "ignore"))

def encodeToLatin1(text):
    # Encode as UTF-8, then decode as Latin-1, ignoring errors.
    encResults = text.encode('utf-8', "ignore")
    return str(encResults.decode('latin-1', "ignore"))
def all_same(items):
return all(x == items[0] for x in items)
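# Example: all_same(["EStG", "EStG"]) -> True, all_same(["EStG", "BGB"]) -> False;
# for an empty list it returns True: items[0] is never evaluated by the lazy
# generator, and all() over an empty iterable is vacuously true.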
def isGericht(text):
    # Collect every court (Gericht) pattern found in the text.
    gericht = set()
    sentences = mySentenceSplitter(text)
    for ele in sentences:
        ele = ele.strip()
        sentElements = ele.split()
        for s in sentElements:
            r_String = isMusterGerichtFound(s)
            if len(r_String) > 0:
                gericht.add(r_String)
    return gericht
def isGesetz(text):
    # Collect every statute (Gesetz) pattern found in the text.
    gesetz = set()
    sentences = mySentenceSplitter(text)
    for ele in sentences:
        ele = ele.strip()
        sentElements = ele.split()
        for s in sentElements:
            r_String = isMusterGesetzFound(s)
            if len(r_String) > 0:
                gesetz.add(r_String)
    return gesetz
def only_numerics(seq):
seq_type= type(seq)
return seq_type().join(filter(seq_type.isdigit, seq))
def only_letter(seq):
seq_type= type(seq)
return seq_type().join(filter(seq_type.isalpha, seq))
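# Examples: only_numerics("§ 38a") -> "38", only_letter("§ 38a EStG") -> "aEStG";
# both preserve the input's sequence type by joining with seq_type().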
def isParagraph(text):
"""
§ 3 EStG
§ 2 Abs 1 EStG
§§ 2 und 3 EStG
§ 22 Nr 5 EStG
§ 38a Abs 1 Satz 3 Einkommenssteuergesetz
§ 2 Abs 7 Satz 2
SozR 4-7837 § 1 Nr 4
§ 2 Abs 1 Satz 1 Nr 1 bis 4 EStG
Allgemein: Paragraph Nummer Wort(kurz oder lang)
"""
    paragraph = set()
    t_musterListGesetz = mustererkennung_gesetz.split("\n")
    sentences = mySentenceSplitter(text)
    for ele in sentences:
        sentElements = ele.strip()
        # Match citations such as "§ 3 EStG": paragraph sign, number, word.
        matches = re.findall(r"§ \d{1,6} \w{2,}", sentElements)
if matches:
#print("Cur Sent:", sentElements)
for match in matches:
match=match.strip()
#print('Found = {}'.format(match))
if "Abs" not in match:
t_ssplit=match.split()
r_last=t_ssplit[-1]
r_last=only_letter(r_last)
for tmV1 in t_musterListGesetz:
if r_last in tmV1 and len(r_last)>1 and len(tmV1)>1 and len(r_last) == len(tmV1):
#print("Adding:",match )
paragraph.add(match)
if "Abs" in match or "Nr" in match:
r_Index=sentElements.index(match)
r_Content=sentElements[r_Index:len(sentElements)]
r_split=r_Content.split()
#print("Position:", r_Index)
#print("Position Content:", r_Content)
for tm in t_musterListGesetz:
tm = tm.strip()
#print("Mustererkennung:", tm)
for rw in r_split:
rw = rw.strip()
#rw = only_numerics(rw)
rw = only_letter(rw)
#print("texterkennung:",tm,"->",rw)
if rw in tm and len(rw)>1 and len(tm)>1 and len(rw) == len(tm):
                                r_IndexV2 = 0
                                try:
                                    r_IndexV2 = sentElements.index(rw)
                                except ValueError:
                                    # rw does not occur verbatim in this sentence
                                    pass
if r_IndexV2 > r_Index:
r_ContentV2=sentElements[r_Index:r_IndexV2]
r_Fullmatch=r_ContentV2+" "+tm
#print("AdvMatch: ", rw, " at Position:", r_IndexV2)
#print("Position Content:", r_Content)
#print("FullMatch:", r_Fullmatch)
#paragraph.add("DEBUG: "+r_Fullmatch)
match=match.replace("Abs", "")
match=match.replace("Nr", "")
paragraph.add(match+tm)
return paragraph
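# Usage sketch: isParagraph returns a set of citation strings, e.g.
#   isParagraph("Nach § 3 EStG ist ...")  ->  {"§ 3 EStG"}
# assuming "EStG" appears as an entry in mustererkennung_gesetz, which is
# defined in the imported helper modules.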
# do query
cl = SphinxClient()
cl.SetServer ( host, port )
cl.SetMatchMode ( mode )
if filtervals:
cl.SetFilter ( filtercol, filtervals )
if groupby:
cl.SetGroupBy ( groupby, SPH_GROUPBY_ATTR, groupsort )
if sortby:
cl.SetSortMode ( SPH_SORT_EXTENDED, sortby )
if limit:
cl.SetLimits ( 0, limit, max(limit,1000) )
res = cl.Query ( suchbegriff, index )
if not res:
print('query failed: %s' % cl.GetLastError())
sys.exit(1)
if cl.GetLastWarning():
print('WARNING: %s\n' % cl.GetLastWarning())
print('Query \'%s\' retrieved %d of %d matches in %s sec' % (suchbegriff, res['total'], res['total_found'], res['time']))
print('Query stats:')
if 'words' in res:
for info in res['words']:
print('\t\'%s\' found %d times in %d documents' % (info['word'], info['hits'], info['docs']))
if 'matches' in res:
n = 1
print('\nMatches:')
for match in res['matches']:
attrsdump = ''
for attr in res['attrs']:
attrname = attr[0]
attrtype = attr[1]
value = match['attrs'][attrname]
if attrtype==SPH_ATTR_TIMESTAMP:
value = time.strftime ( '%Y-%m-%d %H:%M:%S', time.localtime(value) )
attrsdump = '%s, %s=%s' % ( attrsdump, attrname, value )
id_Str=str(match['id'])
print('%d. doc_id=%s, weight=%d%s' % (n, match['id'], match['weight'], attrsdump))
if n==1:
SqlQuery += " `p_uid` IN (\""+id_Str+"\","
else:
SqlQuery += '"'+id_Str+'",'
n += 1
SqlQuery = SqlQuery[:-1]  # drop the trailing comma
SqlQuery += ") AND p_hasleitsatz=1 LIMIT " + str(limit) + ";"
print()
print(SqlQuery)
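# The IN(...) list above is built by string concatenation from Sphinx doc ids.
# A parameterized sketch (an alternative, not wired in), assuming `ids` holds
# the collected id strings:
#   placeholders = ",".join(["%s"] * len(ids))
#   sql = ("SELECT DISTINCT * FROM openjurv4 WHERE p_uid IN (" + placeholders +
#          ") AND p_hasleitsatz=1 LIMIT %s")
#   cursor.execute(sql, ids + [limit])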
print()
# open a database connection
# be sure to change the host IP address, username, password and database name to match your own
connection = mdb.connect (unix_socket = '/var/run/mysqld/mysqld.sock', host = "localhost", user = "root", passwd = "###########99", db = "dowery_prototype")
# prepare a cursor object using cursor() method
cursor = connection.cursor (mdb.cursors.DictCursor)
# execute the SQL query using execute() method.
cursor.execute (SqlQuery)
# fetch all of the rows from the query
# print the rows
result_set = cursor.fetchall()
c_Count=1
###
###### Fetch the results from the real-time search
###
for row in result_set:
p_uid=str(row["p_uid"])
gericht=isGericht(row["p_gruendeHtml"])
gesetz=isGesetz(row["p_gruendeHtml"])
paragraphV1=isParagraph(row["p_gruendePlain"])
"""
print("####################################")
print("Neues Ergebnis fuer Suche:", suchbegriff)
#print(row["p_gruendeHtml"])
print("Gericht: ")
pp.pprint(gericht)
print("Gesetz: ")
pp.pprint(gesetz)
print("Paragraph: ")
pp.pprint(paragraphV1)
print()
print()
"""
tel = {'count':c_Count,'p_uid':p_uid, 'gericht':gericht,'gesetz':gesetz,'paragraph':paragraphV1}
r_ResultList.append(tel)
c_Count=c_Count+1
# close the cursor object
cursor.close()
# close the connection
connection.close()
###
###### Process all results: compute shared statutes (Gesetze) and shared paragraphs
###
f_ResultList=list()
t_tmp=set()
for t in r_ResultList:
"""
print(t)
print(type(t))
print(t1)
print(type(t1))
count1 = t1[0]
gericht1 = t1[1]
gesetz1 = t1[2]
paragraph1 = t1[3]
"""
t1 = t
count1 = t1['count']
gericht1 = t1['gericht']
gesetz1 = t1['gesetz']
paragraph1 = t1['paragraph']
p_uid1 = t1['p_uid']
c_Paragraph1= len(paragraph1)
l_ParaHit = list()
s_puids = set()
for y in r_ResultList:
t2 = y
count2 = t2['count']
gericht2 = t2['gericht']
gesetz2 = t2['gesetz']
paragraph2 = t2['paragraph']
p_uid2 = t2['p_uid']
c_Paragraph2= len(paragraph2)
if count1 == count2:
continue
"""
smallest = 0
if c_Paragraph1 > c_Paragraph2:
smallest = 2
elif c_Paragraph2 > c_Paragraph1:
smallest = 1
elif c_Paragraph2 == c_Paragraph1:
smallest = 1
"""
#https://stackoverflow.com/questions/15455737/python-use-set-to-find-the-different-items-in-list
        # differences would be: s_Paragraph = paragraph1 ^ paragraph2
#print("Gesetz 1:", gesetz1)
#print("Paragraph 1:", paragraph1)
#print("Gesetz 2:", gesetz2)
#print("Paragraph 2:", paragraph2)
#time.sleep(0.2)
        s_Gesetz = gesetz1 & gesetz2  # shared elements
        s_Paragraphv1 = paragraph1 & paragraph2  # shared elements
        s_Paragraph = paragraph1 & paragraph2  # same intersection, used by the match test below
#https://stackoverflow.com/questions/29929074/percentage-overlap-of-two-lists
        # Overlap in percent; an empty union would divide by zero.
        gesetzPercentage = 0
        paragraphPercentage = 0
        try:
            gesetzPercentage = len(gesetz1 & gesetz2) / float(len(gesetz1 | gesetz2)) * 100
        except ZeroDivisionError:
            pass
        try:
            paragraphPercentage = len(paragraph1 & paragraph2) / float(len(paragraph1 | paragraph2)) * 100
        except ZeroDivisionError:
            pass
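        # This is the Jaccard index scaled to percent: |A & B| / |A | B| * 100.
        # Worked example: {"EStG","BGB"} vs {"EStG","SGB"} share 1 of 3 distinct
        # elements -> 33.3; two empty sets raise ZeroDivisionError and stay at 0.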
#https://stackoverflow.com/questions/6709693/calculating-the-similarity-of-two-lists
#https://stackoverflow.com/questions/2864842/common-elements-comparison-between-2-lists
gesetzSame=list(set(gesetz1).intersection(gesetz2))
paragraphSame=list(set(paragraph1).intersection(paragraph2))
gesetzLongest=len(set(gesetz1))
paragraphLongest=len(set(paragraph1))
is_GesetzSame = set(gesetz1) == set(gesetz2)
is_ParagraphSame = set(paragraph1) == set(paragraph2)
p_id=p_uid1+";"+p_uid2
p_id2=p_uid2+";"+p_uid1
print("p_uid1:",p_uid1)
print("p_uid2:",p_uid2)
print("gesetzPercentage Same:", gesetzPercentage)
print("gesetzMinmumPercentage:", gesetzPercentage_v1)
print("gesetzSame (v1):", gesetzSame)
print("gesetzSame (v2):", s_Gesetz)
#print("gesetzLongest:", gesetzLongest)
print("gesetz1:", gesetz1)
print("gesetz2:", gesetz2)
print("is_GesetzSame:", is_GesetzSame)
print("--------------------------")
print("paragraphPercentage Same:", paragraphPercentage)
print("paragraphMinmumPercentage:", paragraphPercentage_v1)
print("paragraphSame (v1):", paragraphSame)
print("paragraphSame (v2):", s_Paragraph)
#print("paragraphLongest:", paragraphLongest)
print("paragraph1:", paragraph1)
print("paragraph2:", paragraph2)
print("is_ParagraphSame:",is_ParagraphSame)
print("####################################")
        logText = f'''
#####BAD#########
p_uid1: {p_uid1}
p_uid2: {p_uid2}
gesetzPercentage Same: {gesetzPercentage}
gesetzSame (v1): {gesetzSame}
gesetzSame (v2): {s_Gesetz}
gesetzLongest: {gesetzLongest}
gesetz1: {gesetz1}
gesetz2: {gesetz2}
is_GesetzSame: {is_GesetzSame}
--------------------------
paragraphPercentage Same: {paragraphPercentage}
paragraphSame (v1): {paragraphSame}
paragraphSame (v2): {s_Paragraph}
paragraphLongest: {paragraphLongest}
paragraph1: {paragraph1}
paragraph2: {paragraph2}
is_ParagraphSame: {is_ParagraphSame}
####################################
'''
####fileify.plainWriteAppend(logText, "/home/www/dowery.com/results/"+session_ID+".debug.txt")
###
###### Decide exactly what counts as a match
###
        # Either overlap score may clear its threshold, but shared elements must
        # exist and the pair must not have been counted yet.
        if (gesetzPercentage >= gesetzPercentage_v1 or paragraphPercentage >= paragraphPercentage_v1) and len(s_Gesetz) > 0 and len(s_Paragraph) > 0 and p_id not in t_tmp:
            logText2 = f'''
#####GOOD#########
p_uid1: {p_uid1}
p_uid2: {p_uid2}
gesetzPercentage Same: {gesetzPercentage}
gesetzSame (v1): {gesetzSame}
gesetzSame (v2): {s_Gesetz}
gesetzLongest: {gesetzLongest}
gesetz1: {gesetz1}
gesetz2: {gesetz2}
is_GesetzSame: {is_GesetzSame}
--------------------------
paragraphPercentage Same: {paragraphPercentage}
paragraphSame (v1): {paragraphSame}
paragraphSame (v2): {s_Paragraph}
paragraphLongest: {paragraphLongest}
paragraph1: {paragraph1}
paragraph2: {paragraph2}
is_ParagraphSame: {is_ParagraphSame}
####################################
'''
####fileify.plainWriteAppend(logText2, "/home/www/dowery.com/results/"+session_ID+".debug.txt")
            s_Paragraph = list(paragraph1 & paragraph2)  # shared elements
l_ParaHit.append(s_Paragraphv1)
s_puids.add(p_uid2)
t_tmp.add(p_id)
t_tmp.add(p_id2)
#p_val = {'suchbegriff':suchbegriff, 'origin_id':p_uid1, 'origin_paragraph':paragraph1, 'winning_ids':s_puids, 'same_paragraphs_of(origin_id,winning_ids)':l_ParaHit}
p_val = {'suchbegriff':suchbegriff, 'origin_id':p_uid1, 'origin_paragraph':paragraph1, 'winning_ids':s_puids, 'same_paragraphs':l_ParaHit}
if len(s_puids) > 1:
"""
print("############################ START ###############################")
print("Data:")
pp.pprint(p_val)
print()
print("Winner:"+a['winning_ids'])
print("############################ END #################################")
print()
time.sleep(0.7)
"""
f_ResultList.append(p_val)
"""
#t_parList.append(list(paragraph1))
#p_val = {'suchbegriff':suchbegriff, 'database_ids':t_parList,'same_paragraphs':t_parSet}
#u=set(t_parList[-1]).intersection(*map(set, t_parList))
#u=set().union(*t_parList)
#u=[i for i, x in enumerate(t_parList) if t_parList.count(x) > 1]
print("####################################")
print("Same Paragraphs:")
pp.pprint(u)
print()
print("Database IDS:")
pp.pprint(t_resSet)
print()
"""
print("####################################")
print("####################################")
print("####################################")
SqlQueryV2 = "SELECT DISTINCT * FROM openjurv4 WHERE"; # db:dowery_prototype
SqlQueryV3 = "SELECT DISTINCT * FROM openjurv4 WHERE"; # db:dowery_prototype
#for a in f_ResultList:
# break hier nötig, da nur 1 Gewinner Ergebnis Eintrag genommen werden darf
# das erste und das zweite Ergebnis haben unterschiedliche Gesetze und Paragraphen und können nicht zusammen genommen werden
t1=""
try:
t1 = f_ResultList[0]
except Exception as e:
print("Keine Ergebnisse zum Zusammenfassen gefunden")
###fileify.plainWriteAppend("Keine Ergebnisse zum Zusammenfassen gefunden", "/home/www/dowery.com/results/"+session_ID+".debug.txt")
fo = open("/home/www/dowery.com/results/"+session_ID+".debug.txt", "a+")
fo.write(str("Keine Ergebnisse zum Zusammenfassen gefunden"));
fo.close()
print("####################################")
print("###### BIN FERTIG ##################")
print("####################################")
exit(1)
w_ids = t1['winning_ids']
o_ids = t1['origin_id']
p_ids = t1['same_paragraphs']
op_ids=t1['origin_paragraph']
print("Original:"+str(o_ids))
print("Original Paragraph:"+str(op_ids))
print("Gewinner:"+str(w_ids))
print("Paragraph Gewinner Ergebnisse:"+str(p_ids))
exit
SqlQueryV2 += " `p_uid` IN (\""+o_ids+"\","
SqlQueryV3 += " `p_uid` IN (\""+o_ids+"\","
for s in w_ids:
#print("Winner:"+str(s))
SqlQueryV2 += '"'+str(s)+'",'
SqlQueryV2 = SqlQueryV2[:-1]  # drop the trailing comma
SqlQueryV2 += ") AND p_hasleitsatz=1 LIMIT " + str(maximaleergebnisse) + ";"
SqlQueryV3 = SqlQueryV3[:-1]  # drop the trailing comma
SqlQueryV3 += ") AND p_hasleitsatz=1 LIMIT " + str(maximaleergebnisse) + ";"
####fileify.plainWriteAppend("Count f_ResultList:"+str(len(f_ResultList)), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
print("Count f_ResultList:", len(f_ResultList))
print("SqlQueryV2:", SqlQueryV2)
# Flow:
#   original: 'origin_id': '73459',
#   winners:  'winning_ids': {'116594', '75569'}
#   fetch both via SQL
#   always take the Leitsatz unchanged
#   then run the Edmundson summary over the reasons (p_gruendeHtml) of the original and the winning entries
###
###### Fetch the Leitsatz of the winning entry
###
# open a database connection
# be sure to change the host IP address, username, password and database name to match your own
connection = mdb.connect (unix_socket = '/var/run/mysqld/mysqld.sock', host = "localhost", user = "root", passwd = "###########99", db = "dowery_prototype")
# prepare a cursor object using cursor() method
cursor = connection.cursor (mdb.cursors.DictCursor)
# execute the SQL query using execute() method.
cursor.execute (SqlQueryV3)
# fetch all of the rows from the query
# print the rows
result_set = cursor.fetchall()
for row in result_set:
    p_uid = str(row["p_uid"])
    leitsatz = row["p_leitsatz"]
    ####fileify.plainWriteAppend("Gewinner Leitsatz:"+str(leitsatz), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
    ####fileify.plainWriteAppend("Gewinner Leitsatz Lenght:"+str(len(leitsatz)), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
    print("Winner Leitsatz:", leitsatz)
    print("Winner Leitsatz length:", len(leitsatz))
    print()
    print()
cursor.close()
# close the connection
connection.close()
###
###### Build the summaries of the winning results
###
# open a database connection
# be sure to change the host IP address, username, password and database name to match your own
connection = mdb.connect (unix_socket = '/var/run/mysqld/mysqld.sock', host = "localhost", user = "root", passwd = "###########99", db = "dowery_prototype")
# prepare a cursor object using cursor() method
cursor = connection.cursor (mdb.cursors.DictCursor)
# execute the SQL query using execute() method.
cursor.execute (SqlQueryV2)
# fetch all of the rows from the query
# print the rows
result_set = cursor.fetchall()
c_Count=1
for row in result_set:
p_uid=str(row["p_uid"])
gericht=isGericht(row["p_gruendeHtml"])
gesetz=isGesetz(row["p_gruendeHtml"])
gruende=row["p_gruendePlain"]
leitsatz=row["p_leitsatz"]
p_verweisendegesetze=str(row["p_verweisendegesetze"])
#print("Gründe:", p_uid)
lang="german"
#parser = PlaintextParser.from_string(gruende, Tokenizer(lang))
#stemmer = Stemmer(lang)
mySents = mySentenceSplitterStringReturn(gruende)
document = build_document(mySents)
###
###### Summarize the reasons (Gruende) of each result
###
summarizer = EdmundsonSummarizer(cue_weight=1, key_weight=1, title_weight=0, location_weight=0)
summarizer.stop_words = get_stop_words(lang)
    wordsv1 = mustererkennung_gesetz.split("\n")
    wordsv2 = mustererkennung_gericht.split("\n")
    wordsv3 = mustererkennung_abkuerzung.split("\n")
    # sumy expects flat iterables of plain word strings here, so merge all
    # winner words into a single list.
    words = ["§"] + wordsv1 + wordsv2 + wordsv3 + [suchbegriff] + sSplitsuchbegriff
    summarizer.bonus_words = words
    summarizer.stigma_words = ["null"]
    summarizer.null_words = ["null"]
    s_OverallCount = len(mySents)
    #SentCount = round(s_OverallCount/5)  # summarize to 1/5 of the reasoning (Gruende) text
    SentCount = 3
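    # Edmundson scoring combines cue, key, title and location weights; with
    # title_weight=0 and location_weight=0 above, only cue/key terms matter, so
    # the bonus word list effectively steers sentence selection. A length-
    # proportional count (an assumption, not wired in) would follow the
    # commented line above:
    #   SentCount = max(3, round(s_OverallCount / 5))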
s_Temp=""
####fileify.plainWriteAppend("# Maximal "+str(SentCount)+" Saetze als Zusammenfassung #", "/home/www/dowery.com/results/"+session_ID+".debug.txt")
####fileify.plainWriteAppend("# Bin Bei Ergebniseintrag #:"+str(p_uid), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
print("# Maximal "+str(SentCount)+" Saetze als Zusammenfassung #")
print("# Bin Bei Ergebniseintrag #:",p_uid)
for sentence in summarizer(document, SentCount):
s_tt=str(sentence)
s_tt=encodeToLatin1(s_tt)
print("Zusammenfassung (Satz):", s_tt)
s_Temp+=s_tt
    print()
    print("Gesetze:", p_verweisendegesetze)
    s_Temp += "\n\n\n\n"
    s_Temp += "Gesetze:" + ''.join(str(s) for s in p_verweisendegesetze) + "\n\n\n\n"
    paragraphV11 = isParagraph(row["p_gruendePlain"])
    print("Paragraphen:", paragraphV11)
    s_Temp += "Paragraphen:" + ''.join(str(s) for s in paragraphV11) + "\n\n\n\n"
print("Gruende Lenght:", len(gruende))
print("Zusammenfassung Lenght:", len(s_Temp))
print("#################")
####fileify.plainWriteAppend("Paragraphen:"+str(paragraphV11), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
####fileify.plainWriteAppend("Gruende Lenght:"+str(len(gruende)), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
####fileify.plainWriteAppend("Zusammenfassung Lenght:"+str(len(s_Temp)), "/home/www/dowery.com/results/"+session_ID+".debug.txt")
fo = open("/home/www/dowery.com/results/"+session_ID+".debug.txt", "a+")
s_Temp+"\n\n\n\n
"
fo.write(str(s_Temp));
fo.close()
print("Writing to /home/www/dowery.com/results/"+session_ID+".debug.txt")
# close the cursor object
cursor.close()
# close the connection
connection.close()
print("####################################")
print("###### BIN FERTIG ##################")
print("####################################")
sys.exit()