#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# *** IMPORTS ***
#
# Import sys so this archiving script does not have to live in the same
# directory as the pywikipedia framework
import sys
sys.path.append("/pfad/zum/pywikipediaframework")
import re # Used for regular expressions
import wikipedia # Wikipedia-pybot-framework
import pagegenerators
from time import strftime, localtime, mktime # time helpers used throughout
#
# Local exceptions
#
class Error(Exception):
"""Archvierungsfehler"""
class NoOptions(Error):
"""Keine Optionen gefunden"""
def __init__(self):
wikipedia.output(u"\t *** Keine Autoarchivvorlage gefunden oder falsches Format ***\n")
class WrongOptions(Error):
"""Optionen fehlerhaft"""
def __init__(self):
wikipedia.output(u"\t *** Falsches Format der Optionen ***\n")
#
# *** DECLARATIONS ***
#
class Discussion:
def __init__(self, pagename):
"""
Initiate Object
"""
self.titleOffsetStart = 0 # offset, where title of disc starts
self.contentOffsetStart = 0 # offset, where text starts
self.contentOffsetEnd = 0 # offset, where text ends
self.content = u"" # content of disc
self.title = u"" # Title of disc
self.titleClear = u""
        self.age = 0.0 # Age in days of the most recent contribution
        self.cleared = 0 # Age in days of the 'Erledigt' (cleared) flag, 0 if none
self.numContributions = 0 # How many contributions were made to this discussion
self.firstContribution = None # Date of the first contribution
self.headlineLevel = 0 # depth of headline-level
self.pageOrigin = pagename # Name of the Page the discussion is from
    def __repr__(self):
        return "<Discussion %s>" % self.titleClear.encode("utf-8")
def setTitle(self, title, titleClear = ""):
"""
        Sets the title text of a discussion.
"""
self.title = title
self.titleClear = titleClear.strip()
def getTitle(self, clear = False):
if clear:
return self.titleClear
else:
return self.title
def getTitleLength(self):
return len(self.title)
def setTitleOffsetStart(self, titleOffsetStart):
"""
Sets the offset where a title starts.
"""
self.titleOffsetStart = titleOffsetStart
def getTitleOffsetStart(self):
"""
Returns the offset where a title starts.
"""
return self.titleOffsetStart
def setContentOffsetStart(self, contentOffsetStart):
self.contentOffsetStart = contentOffsetStart
def setContentOffsetEnd(self, contentOffsetEnd):
self.contentOffsetEnd = contentOffsetEnd
def getContentOffsetEnd(self):
return self.contentOffsetEnd
def retrieveContent(self, text):
self.content = text[self.titleOffsetStart:self.contentOffsetEnd]
    def retrieveContent2(self, text):
        # Variant of retrieveContent(); requires setStart() and setEnd() to have been called first
        self.content = text[self.start:self.end]
def setContent(self, content = ""):
self.content = content
def setStart(self, start):
self.start = start
def setEnd(self, end):
self.end = end
def setHeadlineLevel(self, headlineLevel):
self.headlineLevel = headlineLevel
def getHeadlineLevel(self):
return self.headlineLevel
def examine(self):
# Extract dates from content
# 1 date = 1 contribution
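        # An assumed example (not from the original source) of a signature
        # timestamp this pattern matches: u"14:05, 3. Mär. 2007 (CET)" would be
        # captured as hh=14, mm=05, dd=3, MM=Mär, yyyy=2007.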
p1 = re.compile(u"(?P<hh>[0-9]{2}):(?P<mm>[0-9]{2}), (?P<dd>[0-9]{1,2})\. (?P<MM>[a-zA-Zä]{3})\.? (?P<yyyy>[0-9]{4}) \(CE[S]?T\)", re.I)
matches1 = p1.finditer(self.content)
agesList1 = []
sortAgesList = []
for match1 in matches1:
hh1 = match1.group('hh')
mm1 = match1.group('mm')
dd1 = match1.group('dd')
MM1 = match1.group('MM')
yy1 = match1.group('yyyy')
dateToCheck1 = mktime((int(yy1), int(self.replaceToDate(MM1)), int(dd1), int(hh1), int(mm1), 0, 0, 0, 0))
today1 = mktime(localtime())
actualAge1 = (today1 - dateToCheck1)/60/60/24
agesList1.append(actualAge1)
sortAgesList.append(dateToCheck1)
if len(agesList1) > 0:
self.firstContribution = min(sortAgesList)
self.age = min(agesList1)
self.numContributions = len(agesList1)
# Now examine if discussion has a "cleared"-Flag and
# check how old it is
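        # An assumed example (not from the original source) of a flag this
        # pattern matches, on a single line:
        # {{Erledigt|--[[Benutzer:Foo|Foo]] 14:05, 3. Mär. 2007 (CET)}}
        # Its timestamp determines how long ago the section was marked as done.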
p = re.compile(u"\{\{Erledigt[\s]{0,5}\|.*(?P<hh>[0-9]{2}):(?P<mm>[0-9]{2}), (?P<dd>[0-9]{1,2})\. (?P<MM>[a-zA-Zä]{3})\.? (?P<yyyy>[0-9]{4}) \(CE[S]?T\)\}\}", re.I)
matches = p.finditer(self.content)
agesList = []
for match in matches:
hh = match.group('hh')
mm = match.group('mm')
dd = match.group('dd')
MM = match.group('MM')
yy = match.group('yyyy')
dateToCheck = mktime((int(yy), int(self.replaceToDate(MM)), int(dd), int(hh), int(mm), 0, 0, 0, 0))
today = mktime(localtime())
actualAge = (today - dateToCheck)/60/60/24
agesList.append(actualAge)
if agesList != []:
self.cleared = min(agesList)
wikipedia.output(u"Age: %3.2f\tCleared: %3.2f\tst: %5d\ten: %5d\tTitle: %s" % (self.age, self.cleared, self.titleOffsetStart, self.contentOffsetEnd, self.titleClear))
def checkCleared(self, age):
if self.cleared > age:
return True
else:
return False
def getClearedAge(self):
return self.cleared
    def replaceToDate(self, mm):
        # Map German month abbreviations to month numbers
        months = {u"Jan": 1, u"Feb": 2, u"Mär": 3, u"Mrz": 3, u"Apr": 4,
                  u"Mai": 5, u"Jun": 6, u"Jul": 7, u"Aug": 8, u"Sep": 9,
                  u"Okt": 10, u"Nov": 11, u"Dez": 12}
        return months.get(mm)
def getAge(self):
return self.age
def getContributions(self):
return self.numContributions
def getArchivingTarget(self, target):
return self.parseArchivingTarget(target)
def parseArchivingTarget(self, parseString):
"""
* ((Tag)): Tag, z.B. 1, 24
* ((Tag:##)): zweistelliger Tag, z.B. 01, 24
* ((Tag:kurz)): abgekürzter Tagesname (kleingeschrieben), z.B. mo, fr
* ((Tag:Kurz)): abgekürzter Tagesname, z.B. Mo, Fr
* ((Tag:KURZ)): abgekürzter Tagesname (großgeschrieben), z.B. MO, FR
* ((Tag:lang)): Tagesname (kleingeschrieben), z.B. montag, freitag
* ((Tag:Lang)): Tagesname, z.B. Montag, Freitag
* ((Tag:LANG)): Tagesname (großgeschrieben), z.B. MONTAG, FREITAG
* ((Monat)): Monat, z.B. 1, 10
* ((Monat:##)): zweistelliger Monat, z.B. 01, 10
* ((Monat:kurz)): abgekürzter Monatsname (kleingeschrieben), z.B. jan, okt
* ((Monat:Kurz)): abgekürzter Monatsname, z.B. Jan, Okt
* ((Monat:KURZ)): abgekürzter Monatsname (großgeschrieben), z.B. JAN, OKT
* ((Monat:lang)): Monatsname (kleingeschrieben), z.B. januar, oktober
* ((Monat:Lang)): Monatsname, z.B. Januar, Oktober
* ((Monat:LANG)): Monatsname (großgeschrieben), z.B. JANUAR, OKTOBER
* ((Quartal)): Quartal, z.B. 1, 3
* ((Quartal:##)): zweistelliges Quartal, z.B. 01, 03
* ((Quartal:i)): Quartal (kleine römische Ziffern), z.B. i, iv
* ((Quartal:I)): Quartal (große römische Ziffern), z.B. I, IV
* ((Halbjahr)): Halbjahr, z.B. 1, 2
* ((Halbjahr:##)): zweistelliges Halbjahr, z.B. 01, 02
* ((Halbjahr:i)): Halbjahr (kleine römische Ziffern), z.B. i, ii
* ((Halbjahr:I)): Halbjahr (große römische Ziffern), z.B. I, II
* ((Woche)): Woche, z.B. 1, 43
* ((Woche:##)): zweistelliges Woche, z.B. 01, 43
* ((Jahr)): Jahr, z.B. 2006, 2007
"""
        def Halbjahr(stamp, roman = False, fill = False):
            # First half-year: January to June, second half-year: July to December
            month = int(strftime(u"%m", stamp))
            hj1 = u"1"
            hj2 = u"2"
            if fill:
                hj1 = u"01"
                hj2 = u"02"
            if roman:
                hj1 = u"i"
                hj2 = u"ii"
            if 1 <= month <= 6:
                return hj1
            else:
                return hj2
def Quartal(stamp, roman = False, fill = False):
month = strftime(u"%m", stamp)
re1 = u"1"
re2 = u"2"
re3 = u"3"
re4 = u"4"
if fill:
re1 = u"01"
re2 = u"02"
re3 = u"03"
re4 = u"04"
if roman:
re1 = u"i"
re2 = u"ii"
re3 = u"iii"
re4 = u"iv"
if month in ("01","02","03"):
return re1
if month in ("04","05","06"):
return re2
if month in ("07","08","09"):
return re3
if month in ("10","11","12"):
return re4
def ClearTitle(title):
p = re.compile(u"\[\[(?P<cl>.*)\]\]", re.I)
m = p.match(title)
if m:
return m.group("cl")
else:
return title
def ClearTitleNSD(title):
p = re.compile(u"\[\[((?P<ns>.*):){0,1}(?P<cl>.*)\]\]", re.I)
m = p.match(title)
if m:
return m.group("cl")
else:
return title
stamp = localtime(self.firstContribution)
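        # Note that only a subset of the placeholders documented above is
        # actually substituted here.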
replStrings = [( u"((Jahr))" , strftime(u"%Y", stamp) ),
( u"((Monat:Lang))" , strftime(u"%B", stamp) ),
( u"((Woche:##))" , u"%02d" % int(strftime(u"%W", stamp))),
( u"((Woche))" , strftime(u"%W", stamp)),
( u"((Tag:##))" , u"%02d" % int(strftime(u"%d", stamp))),
( u"((fullpagename))" , self.pageOrigin),
( u"((Fullpagename))" , self.pageOrigin),
( u"((FULLPAGENAME))" , self.pageOrigin),
( u"((Überschrift))" , ClearTitle(self.titleClear.strip() ) ),
( u"((Überschrift-NSD))" , ClearTitleNSD(self.titleClear.strip() ) ),
( u"((Quartal))" , Quartal(stamp, False, False) ),
( u"((Quartal:##))" , Quartal(stamp, False, True) ),
( u"((Quartal:i))" , Quartal(stamp, True, False) ),
( u"((Quartal:I))" , Quartal(stamp, True, False).upper() ),
( u"((Halbjahr))" , Halbjahr(stamp, False, False) ),
( u"((Halbjahr:##))" , Halbjahr(stamp, False, True) ),
( u"((Halbjahr:i))" , Halbjahr(stamp, True, False) ),
( u"((Halbjahr:I))" , Halbjahr(stamp, True, False).upper() )]
        for old, new in replStrings:
            try:
                # strftime() returns byte strings under Python 2, so decode them first
                parseString = parseString.replace(old, new.decode("utf-8"))
            except UnicodeDecodeError:
                parseString = parseString.replace(old, new.decode("iso-8859-1"))
            except UnicodeEncodeError:
                # new is already a unicode object; use it as is
                parseString = parseString.replace(old, new)
        return parseString
class WikiDocument:
def __init__(self, page, archivingTarget = u""):
self.numberDiscussions = 0 # Number of Discussions in Page
self.originalText = page.get() # The original Text of the wikipage
self.modifiedText = self.originalText # The text that should be saved back
self.sliceOffset = 0 # When a slice gets extracted the offset changes
self.listDiscussions = [] # A list containing the discussions
        self.longestTitle = 0 # Length of the longest title
self.reportText = u"" # Text to save into report file
        self.archivingAge = 7.0 # Number of days after which a discussion marked as cleared will be archived
self.archivingTarget = archivingTarget # Target String a discussion will be archived to
# Should be equal to [[Vorlage:Autoarchiv]] on de
self.headlineLevel = 2 # level of headline (no. of equal-signs)
self.numArchived = 0 # Number of discussions that will be archived
self.archiveContainer = {} # Dict in which archive text will be stored in
self.archiveContCounter = {} # Number of archived discs per container
self.name = page.title() # Own name of page
        self.headTemplate = u"{{Archiv}}" # Header template to insert into newly created archive pages
        self.minorEdit = False # Whether edits should be marked as minor
def divideIntoSlices(self):
# Divide the Original Text by Headlines ...
#regex = u"(?P<title>^=.*)[\s]+"
regex = u"(?P<title>^(?P<hls>[=]{1,%d})(?P<title_clear>[^=]{1}.*?)(?P=hls))([\s]{0,3})$" % self.headlineLevel
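        # With headlineLevel = 2 this matches headlines with one or two equal
        # signs, e.g. u"== Ein Thema =="; only sections whose level equals
        # self.headlineLevel are kept further below.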
wikipedia.output(u"\nRegex: %s" % regex)
p = re.compile(regex, re.I|re.M)
# ... and iterate through it
headlineIterator = p.finditer(self.originalText)
counter = 0
possibleDiscs = []
for singleHeadline in headlineIterator:
possibleDiscs.append(Discussion(self.name))
possibleDiscs[counter].setTitleOffsetStart(singleHeadline.span()[0])
possibleDiscs[counter].setContentOffsetStart(singleHeadline.span()[1])
possibleDiscs[counter].setTitle(singleHeadline.group('title'), singleHeadline.group('title_clear'))
headlineLevel = singleHeadline.group('hls').count("=")
possibleDiscs[counter].setHeadlineLevel(headlineLevel)
if counter > 0:
possibleDiscs[counter - 1].setContentOffsetEnd(possibleDiscs[counter].getTitleOffsetStart())
possibleDiscs[counter - 1].retrieveContent(self.originalText)
titleLength = possibleDiscs[counter].getTitleLength()
if titleLength > self.longestTitle:
self.longestTitle = titleLength
counter = counter + 1
if (len(possibleDiscs) != 0):
possibleDiscs[counter - 1].setContentOffsetEnd(len(self.originalText))
possibleDiscs[counter - 1].retrieveContent(self.originalText)
for i in possibleDiscs:
if i.getHeadlineLevel() == self.headlineLevel:
self.listDiscussions.append(i)
self.numberDiscussions = self.numberDiscussions + 1
def examineDiscussions(self):
for singleDiscussion in self.listDiscussions:
singleDiscussion.examine()
def generateErrorReport(self):
logText = strftime(u"== Botlauf am %Y-%m-%d um %H:%M Uhr - ", localtime())
logText += u"[[%s]] ==\n" % self.name
logText += u"Fehlende oder fehlerhafte Optionen!\n"
wikipedia.output(logText)
self.reportText = logText
def generateReport(self):
"""
Should report the following:
* Überschrift
* Alter des letzen Beitrages
* Anzahl Beiträge
* Alter der Erledigt-Kennzeichnung
* Ziel der Archivierung
* Archivierung Ja/Nein
"""
logText = strftime(u"== Botlauf am %Y-%m-%d um %H:%M Uhr - ", localtime())
logText = logText + u"[[%s]] ==\n" % self.name
logText = logText + u"* Archivierung ab einem Alter von <tt>'''%03.1f Tagen'''</tt>\n" % self.archivingAge
logText = logText + u"* Zielmuster: <tt>'''%s'''</tt>\n" % self.archivingTarget
logText = logText + u"* Archivierung von Überschriftsebene <tt>'''%d'''</tt>\n" % self.headlineLevel
logText = logText + u"* Einsetzen von Kopfvorlage <tt>'''<nowiki>%s</nowiki>'''</tt>\n" % self.headTemplate
logText = logText + u"* Gesamtzahl Abschnitte: <tt>'''%d'''</tt>\n" % self.numberDiscussions
logText = logText + u"* Anzahl zu archivierender Abschnitte: <tt>'''%d'''</tt>\n" % self.numArchived
logText = logText + u"{| class=\"prettytable\"\n|- class=\"hintergrundfarbe8\"\n! lfd. Nr. !! Überschrift !! Alter des letzen Beitrages !! Anzahl Beiträge !! Alter der Erledigt-Kennzeichnung !! Ziel"
        counter = 0 # running number (lfd. Nr. column)
        headline = u"" # headline
        agefirst = 0.0 # Age of the most recent contribution
numcontri = 0 # number of contributions
agecleared = u"-" # age of cleared flag
targettoarchiveto = u"" # where it would be archived to
for discussion in self.listDiscussions:
counter = counter + 1
headline = discussion.getTitle(True)
agefirst = discussion.getAge()
numcontri = discussion.getContributions()
targettoarchiveto = discussion.getArchivingTarget(self.archivingTarget)
if discussion.getClearedAge() == 0.0:
agecleared = u"-"
headlineColor = u""
else:
agecleared = u"%03.2f" % discussion.getClearedAge()
if discussion.getClearedAge() < self.archivingAge:
headlineColor = u" style=\"background-color:#ffcbcb;\" "
else:
headlineColor = u" style=\"background-color:#b9ffc5;\" "
logText = logText + u"\n|-%s\n| %d || %s || %03.2f || %d || %s || [[%s|→]]" % (headlineColor, counter, headline, agefirst, numcontri, agecleared, targettoarchiveto)
headline = u"" # Überschrift
agefirst = 0.0 # Age of first contribution
numcontri = 0 # number of contributions
agecleared = u"-" # age of cleared flag
targettoarchiveto = u"" # where it would be archived to
logText = logText + u"\n|}\n"
wikipedia.output(logText)
self.reportText = logText
def saveReport(self, toDisk = False):
if toDisk:
wikipedia.output(u"Speichere Log ... ")
fd = open(logFile, 'a')
writeMe = self.reportText + u"\n"
writeMe = writeMe.encode('utf-8')
fd.write(writeMe)
fd.close()
wikipedia.output(u"Done.\n")
else:
logPage = strftime(u"Benutzer:RhodoBot/Log/%Y-%m-%d",localtime())
wikipedia.setAction(u"Bot: Schreibe Logtext")
if saveRun:
try:
page = wikipedia.Page(wikipedia.getSite(), logPage)
page_origin = page.get()
page.put(page_origin + u"\n" + self.reportText)
except wikipedia.NoPage:
page.put(self.reportText)
pass
def prepareArchiving(self):
for discussion in self.listDiscussions:
if discussion.checkCleared(self.archivingAge):
self.archiveSlice(discussion)
def executeArchiving(self):
if len(self.archiveContainer) >= 1:
# self.showDiffs()
# Try to archive the slices
toText = "" # Text for what Bot did
counter = 0
for target, content in self.archiveContainer.items():
if self.archiveContCounter[target] == 1:
wikipedia.setAction(u"Bot: Archiviere %d Abschnitt von [[%s]]" % (self.archiveContCounter[target], self.name))
else:
wikipedia.setAction(u"Bot: Archiviere %d Abschnitte von [[%s]]" % (self.archiveContCounter[target], self.name))
counter = counter + self.archiveContCounter[target]
if toText == "":
toText = toText + u"%d nach [[%s]]" % (self.archiveContCounter[target], target)
else:
toText = toText + u", " + u"%d nach [[%s]]" % (self.archiveContCounter[target], target)
if saveRun:
try:
pageTo = wikipedia.Page(wikipedia.getSite(), target)
pageTo_origin = pageTo.get()
pageTo.put(pageTo_origin + u"\n\n" + content, None, None, self.minorEdit)
doNotSave = False
except wikipedia.NoPage:
pageTo.put(self.headTemplate + u"\n\n" + content, None, None, self.minorEdit)
doNotSave = False
except wikipedia.EditConflict:
wikipedia.output(u"Bearbeitungskonflikt, Seite wird nicht gespeichert")
doNotSave = True
except wikipedia.LockedPage:
wikipedia.output(u"Seite evenutell blockiert!")
doNotSave = True
# Try to save original page
if counter == 1:
partsText = u"1 Abschnitt"
else:
partsText = u"%d Abschnitte" % counter
wikipedia.setAction(u"Bot: Archiviere %s: %s" % (partsText, toText))
if saveRun and not doNotSave:
try:
pageTo = wikipedia.Page(wikipedia.getSite(), self.name)
pageTo_origin = pageTo.get()
pageTo.put(self.modifiedText, None, None, self.minorEdit)
except wikipedia.NoPage:
pageTo.put(self.modifiedText, None, None, self.minorEdit)
pass
except wikipedia.EditConflict:
wikipedia.output(u'Skipping %s because of edit conflict' % (self.name))
def archiveSlice(self, disc):
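        # The offsets stored in the Discussion objects refer to originalText;
        # sliceOffset accumulates the length of text already cut out of
        # modifiedText, so the slice positions are shifted back accordingly.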
SliceStart = disc.getTitleOffsetStart() - self.sliceOffset
SliceStop = disc.getContentOffsetEnd() - self.sliceOffset
SliceText = self.modifiedText[SliceStart:SliceStop]
self.modifiedText = self.modifiedText[:SliceStart] + self.modifiedText[SliceStop:]
self.sliceOffset = self.sliceOffset + len(SliceText)
self.numArchived = self.numArchived + 1
target = disc.getArchivingTarget(self.archivingTarget)
if self.archiveContainer.has_key(target):
self.archiveContainer[target] = self.archiveContainer[target] + SliceText
self.archiveContCounter[target] = self.archiveContCounter[target] + 1
else:
self.archiveContainer[target] = SliceText
self.archiveContCounter[target] = 1
def showDiffs(self):
wikipedia.output(u"#"*80)
wikipedia.output(u"Unterschied zwischen den Seiten")
wikipedia.output(u"#"*80)
wikipedia.showDiff(self.originalText, self.modifiedText)
wikipedia.output(u"#"*80)
wikipedia.output(u"#"*80)
def findOptions(self):
"""
Looks for the template {{Autoarchiv-Erledigt}}
"""
p = re.compile(u"\{\{Autoarchiv-Erledigt(?P<options>.*?)\}\}", re.S)
match = p.search(self.originalText)
if match:
allOptions = match.group('options')
optionsDict = {}
for x in allOptions.split("|"):
y = x.split("=")
if len(y) == 2:
optionsDict[y[0].strip().upper()] = y[1].strip()
for nam, num in optionsDict.items():
wikipedia.output(u"%s = %s" % (nam, num))
try:
self.archivingAge = float(optionsDict["ALTER"])
self.archivingTarget = optionsDict["ZIEL"].replace("'", "")
if optionsDict.has_key("EBENE"):
self.headlineLevel = int(optionsDict["EBENE"])
else:
self.headlineLevel = 2
if optionsDict.has_key("KOPFVORLAGE"):
self.headTemplate = u"{{" + optionsDict["KOPFVORLAGE"].replace("'", "") + u"}}"
if optionsDict.has_key("KLEIN"):
if optionsDict["KLEIN"].strip().replace("'", "").upper() == "JA":
self.minorEdit = True
if optionsDict["KLEIN"].strip().replace("'", "").upper() == "NEIN":
self.minorEdit = False
except KeyError:
raise NoOptions()
else:
raise NoOptions()
def workToDo(self):
"""
Whether there is something to archive or not
"""
if self.numArchived > 0:
return True
else:
return False
# ***************************************
# Set some options here
pageName = u"Vorlage:Autoarchiv-Erledigt"
#saveRun = False # Whether to save pages to wikipedia or not...
saveRun = True
excludeList = (
#u"Benutzer:Rhododendronbusch/Spielwiese-1",
#u"Benutzer:Rhododendronbusch/Spielwiese-2",
#u"Benutzer Diskussion:Cjesch",
#u"Benutzer Diskussion:Rhododendronbusch",
#u"Vorlage:Erledigt",
#u"Benutzer:RhodoBot",
#u"Wikipedia:Redundanz/August 2006"
#u"Benutzer:º the Bench º/test"
)
logFilePath = "/pfad/wo/die/logs/angelegt/werden/sollen/"
logFile = logFilePath + strftime("archiv-%Y-%m-%d.log",localtime())
startPage = wikipedia.Page(wikipedia.getSite(), pageName)
generator = startPage.getReferences(False, True, True)
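# Presumably (based on the old pywikipedia signature follow_redirects,
# withTemplateInclusion, onlyTemplateInclusion) this yields only the pages that
# transclude the template, without following redirects.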
#generator = [wikipedia.Page(wikipedia.getSite(), u"Benutzer:Rhododendronbusch/Spielwiese-3")]
# Begin archiving
for page in generator:
if not page.title() in excludeList:
wdoc = WikiDocument(page)
try:
wdoc.findOptions()
wdoc.divideIntoSlices()
wdoc.examineDiscussions()
wdoc.prepareArchiving()
wdoc.generateReport()
wdoc.showDiffs()
wdoc.saveReport(True)
if wdoc.workToDo():
wdoc.executeArchiving()
except NoOptions:
wdoc.generateErrorReport()
wdoc.saveReport(True)
continue
# Write logfile to wikipedia
wikipedia.setAction(u"Bot: Schreibe Logtext")
logPage = strftime(u"Benutzer:RhodoBot/Log/%Y-%m-%d",localtime())
fd = open(logFile)
content = fd.read()
content = content.decode('utf-8')
fd.close()
if saveRun:
try:
page = wikipedia.Page(wikipedia.getSite(), logPage)
page.put(content)
except wikipedia.NoPage:
        page.put(content)
pass
# Delete old logs
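# range(8, 16): request speedy deletion for the log pages dated 8 to 15 days ago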
wikipedia.output(u"Lösche alte Logs")
heute = mktime(localtime())
for i in range (8, 16):
tage = localtime(heute - 86400 * i)
wikipedia.setAction(u"Bot: Setze Schnelllöschantrag für alte Logs")
logPage = strftime(u"Benutzer:RhodoBot/Log/%Y-%m-%d",tage)
page = wikipedia.Page(wikipedia.getSite(), logPage)
if saveRun:
try:
old = page.get()
new = u"{{löschen|Es handelt sich bei dieser Seite um ein veraltetes Log und kann gelöscht werden. Liebe Grüße an den löschenden Administrator, --~~~~}}"
page.put(new)
except wikipedia.NoPage:
wikipedia.output(u"Log existiert nicht")
# Everything is done, so stop it
wikipedia.stopme()