diff --git a/.github/workflows/mas_check.yml b/.github/workflows/mas_check.yml
index 5287d88663..7ca9fb39f8 100644
--- a/.github/workflows/mas_check.yml
+++ b/.github/workflows/mas_check.yml
@@ -30,57 +30,40 @@ jobs:
- uses: actions/checkout@v2
- name: Setup Python
- uses: actions/setup-python@v2.2.1
+ uses: actions/setup-python@v2
with:
# Version range or exact version of a Python version to use, using SemVer's version range syntax.
- python-version: 2.7.18 # optional, default is 3.x
-
- # get/dl renpy src
- - name: cache rpy source
- id: cache-rpy
- uses: actions/cache@v2
- with:
- path: renpy/
- key: ${{ runner.os }}-rpy
+ python-version: 3.10.4 # optional, default is 3.x
+ # download the current renpy 8 nightly sdk
- name: Download rpy source
- if: steps.cache-rpy.outputs.cache-hit != 'true'
run: |
- wget https://www.renpy.org/dl/6.99.12.4/renpy-6.99.12.4-sdk.tar.bz2
- tar xf renpy-6.99.12.4-sdk.tar.bz2
- rm renpy-6.99.12.4-sdk.tar.bz2
- mv renpy-6.99.12.4-sdk renpy
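+ # grab the first *.tar.bz2 href on the nightly index page (assumed to be the sdk tarball name)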
+ renpysdk=$(wget -qO- https://nightly.renpy.org/current-8/index.html | grep -P -m 1 -o '(?<=href=").*\.tar\.bz2(?=".*)')
+ wget https://nightly.renpy.org/current-8/$renpysdk
+ tar xf $renpysdk
+ rm $renpysdk
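+ # ${renpysdk/.tar.bz2/} strips the .tar.bz2 suffix, giving the directory name the tarball extracted to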
+ mv ${renpysdk/.tar.bz2/} renpy
# get/download base mas
- - name: cache base MAS
- id: cache-mas
- uses: actions/cache@v2
- with:
- path: mas0105/
- key: ${{ runner.os }}-mas
+ # - name: cache base MAS
+ # id: cache-mas
+ # uses: actions/cache@v2
+ # with:
+ # path: mas0105/
+ # key: ${{ runner.os }}-mas
- name: Download base MAS
- if: steps.cache-mas.outputs.cache-hit != 'true'
+ # if: steps.cache-mas.outputs.cache-hit != 'true'
run: |
wget https://s3-us-west-2.amazonaws.com/monika-after-story/ddlc/mas.zip
mkdir mas0105
unzip mas.zip -d mas0105
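+ # drop the stock compiled script archive so it can't conflict with the repo's .rpy sources copied in below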
+ rm mas0105/game/scripts.rpa
- # TEMP
- # - name: what are these
- # run: |
- # file mas0105
- # file renpy
-
- # copy files over
+ # copy repo source files over the base MAS install
- name: copy source over
run: cp -Rf Monika\ After\ Story/* mas0105/
- # touch file for unstable so it doesn't raise exceptions for some things
- - name: exception skip for unstable
- if: github.ref == 'refs/heads/unstable'
- run: touch mas0105/trb
-
# run sprite checkers
- name: check sprites
run: python tools/ghactions.py
@@ -95,4 +78,4 @@ jobs:
- name: rpy distribute
run: |
cd renpy
- ./renpy.sh launcher distribute "../mas0105/" --package Mod
+ ./renpy.sh launcher distribute "../mas0105/" --package market
diff --git a/.gitignore b/.gitignore
index 4ce0ee0c67..f74625426d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,37 +1,37 @@
-*~
-*.rpyc
-*.rpyb
-*.bak
-*.pyc
-*.swp
-log.txt
-traceback.txt
-errors.txt
-firstrun
-*.chr
-*.rpa
-*.rpa
-Monika After Story/game/audio.rpa
-*.save
-Monika After Story/game/saves/persistent
-Monika After Story/IPGuidelines.md
-*.rpa
+#Catch all rpa, rpyc, rpyb, and rpyms
*.rpa
-Monika After Story/game/screens.rpyc
-Monika After Story/old_persistent.txt
-Monika After Story/merged_persistent.txt
-Monika After Story/game/screens.rpyc
-Monika After Story/project.json
+*.rpy[cbm]
+
+#Catch any persist info
+persistent
+*.bak
+
+#No compiled pycode or pycache
+__pycache__
+*.pyc
+
+#Ide specific things
+*.swp
+**/*.vscode
+
+#Various other clutter-y things
+log.txt
+traceback.txt
+errors.txt
+firstrun
+log
+*.log
+
+#DDLC specific
*.chr
-*.rpyc
-Monika After Story/project.json
-Monika After Story/characters/monika.chr
-*.rpyc
-Monika After Story/project.json
+
+#OS Specific files
.DS_Store
-Monika After Story/.DS_Store
-Monika After Story/characters/monika.chr
-Monika After Story/update/updates.json
-zzzz*
-.vscode
-cacert.pem
+
+#Toolsmenu things
+zzzz*
+
+# Build
+cacert.pem
+navigation.json
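+# Ren'Py distribute output folders (e.g. Monika_After_Story-0.13.0-dists)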
+Monika_After_Story-[0-9]*.[0-9]*.[0-9]*-dists
diff --git a/Monika After Story/CustomIconMac.icns b/Monika After Story/CustomIconMac.icns
deleted file mode 100644
index de2a10b2df..0000000000
Binary files a/Monika After Story/CustomIconMac.icns and /dev/null differ
diff --git a/Monika After Story/README.html b/Monika After Story/README.html
index 05e452a21d..d6520314be 100644
--- a/Monika After Story/README.html
+++ b/Monika After Story/README.html
@@ -64,6 +64,7 @@
Disclaimer
Monika After Story is a Doki Doki Literature Club fan game that is not affiliated with Team Salvato. It is designed to be played only after the official game has been completed. You can download Doki Doki Literature Club at: http://ddlc.moe
Installation
+ Installation of an r7 build is NOT REVERSIBLE
The files in Monika After Story alter Doki Doki Literature Club. You must have an installation of DDLC, which can be downloaded for free from http://ddlc.moe.
@@ -76,9 +77,6 @@ Installation
Mac users can access the game directory by right-clicking on the DDLC application and selecting Show Package Contents. Then, the game directory can be found in Contents/Resources/autorun.
-
- To uninstall this mod, simply delete all added files in the game directory.
-
Basic Help
To advance through the game, left-click or press the space or enter keys. When at a menu,
diff --git a/Monika After Story/game/0config.rpy b/Monika After Story/game/0config.rpy
index a29debb2a2..a96a0aa83e 100644
--- a/Monika After Story/game/0config.rpy
+++ b/Monika After Story/game/0config.rpy
@@ -14,7 +14,8 @@ python early:
renpy.config.name = "Monika After Story"
## The version of the game.
- renpy.config.version = "0.12.15"
+ renpy.config.version = "0.13.0"
+
#Triple space suffix to avoid potential issues with same names in window title
config.window_title = "Monika After Story "
@@ -35,6 +36,31 @@ python early:
renpy.config.save_directory = "Monika After Story"
+ ### R7+ Config Var adjustments
+ ## 7.4.11
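+ #Let clicks that focus the game window also register in-game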
+ renpy.config.mouse_focus_clickthrough = True
+ ##7.3.3
+ #Only devs need this
+ renpy.config.report_extraneous_attributes = False
+
+ ##7.3.0
+ renpy.config.keyword_after_python = True
+
+ ##7.1.1
+ #Fix menu textbox issues
+ renpy.config.menu_showed_window = True
+ #Fix textbox sometimes disappearing
+ renpy.config.window_auto_show = ["say"]
+ #Fix textbox flickering
+ renpy.config.window_auto_hide = ["scene", "call screen"]
+
+ ##7.0
+ #Prevents spaceroom masks from restarting every interaction
+ renpy.config.replay_movie_sprites = False
+
+ ##6.99.13
+ renpy.config.atl_one_frame = False
+
init -1200 python:
## Sounds and music ############################################################
@@ -97,7 +123,7 @@ init -1200 python:
## The icon displayed on the taskbar or dock.
- renpy.config.window_icon = "gui/window_icon.png"
+ renpy.config.window_icon = "mod_assets/mas_icon.ico"
## Custom configs ##############################################################
@@ -120,7 +146,6 @@ init -1200 python:
renpy.loadsave.location.locations.pop()
################START: INIT TIME CONFIGS
-
## Uncomment the following line to set an audio file that will be played while
## the player is at the main menu. This file will continue playing into the
## game, until it is stopped or another file is played.
@@ -130,3 +155,6 @@ define config.main_menu_music = audio.t1
define config.window_show_transition = dissolve_textbox
define config.window_hide_transition = dissolve_textbox
+
+init python:
+ config.per_frame_screens.append("_trace_screen")
diff --git a/Monika After Story/game/0statements.rpy b/Monika After Story/game/0statements.rpy
index 9978c16027..60f70dde95 100644
--- a/Monika After Story/game/0statements.rpy
+++ b/Monika After Story/game/0statements.rpy
@@ -1,7 +1,8 @@
python early in mas_statements:
from collections import namedtuple
- __JumpWithArgsParseData = namedtuple("__JumpWithArgsParseData", ("label", "is_expression", "arg_info"))
+ # Consider this a fully private member of this namespace
+ _JumpWithArgsParseData = namedtuple("_JumpWithArgsParseData", ("label", "is_expression", "arg_info"))
def __jump_with_args(label, args, kwargs):
@@ -37,7 +38,7 @@ python early in mas_statements:
NOTE: may raise exceptions
IN:
- parsed_data - __JumpWithArgsParseData for this statement
+ parsed_data - _JumpWithArgsParseData for this statement
OUT:
str
@@ -55,7 +56,7 @@ python early in mas_statements:
lex - the Lexer object
OUT:
- __JumpWithArgsParseData
+ _JumpWithArgsParseData
"""
lex.expect_noblock("jarg")
@@ -73,14 +74,14 @@ python early in mas_statements:
lex.expect_eol()
lex.advance()
- return __JumpWithArgsParseData(label_, is_expression, arg_info)
+ return _JumpWithArgsParseData(label_, is_expression, arg_info)
def __execute_jump_with_args(parsed_data):
"""
Executes the jump_with_args statement
IN:
- parsed_data - __JumpWithArgsParseData for this statement
+ parsed_data - _JumpWithArgsParseData for this statement
"""
label_ = __get_label(parsed_data)
@@ -97,7 +98,7 @@ python early in mas_statements:
Predicts the jump_with_args statement
IN:
- parsed_data - __JumpWithArgsParseData for this statement
+ parsed_data - _JumpWithArgsParseData for this statement
"""
try:
label_ = __get_label(parsed_data)
@@ -114,7 +115,7 @@ python early in mas_statements:
A lint function for the jump_with_args statement
IN:
- parsed_data - __JumpWithArgsParseData for this statement
+ parsed_data - _JumpWithArgsParseData for this statement
"""
try:
label_ = __get_label(parsed_data)
@@ -127,7 +128,7 @@ python early in mas_statements:
# Define the new statement
renpy.register_statement(
- "jarg",
+ "jarg",
parse=__parse_jump_with_args,
execute=__execute_jump_with_args,
predict=__predict_jump_with_args,
diff --git a/Monika After Story/game/0utils.rpy b/Monika After Story/game/0utils.rpy
index 83e1ba00d5..b9439c74c7 100644
--- a/Monika After Story/game/0utils.rpy
+++ b/Monika After Story/game/0utils.rpy
@@ -37,7 +37,7 @@ python early in mas_logging:
import re
#Thanks python...
- from logging import handlers as loghandlers
+ import logging.handlers as loghandlers
# log tags
LT_INFO = "info"
@@ -75,16 +75,17 @@ python early in mas_logging:
if datefmt is None:
datefmt = DEF_DATEFMT
- super(MASLogFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
+ super().__init__(fmt=fmt, datefmt=datefmt)
def format(self, record):
"""
Override of format - mainly replaces the levelname prop
"""
self.update_levelname(record)
- return self.replace_lf(
- super(MASLogFormatter, self).format(record)
- )
+ # return self.replace_lf(
+ # super().format(record)
+ # )
+ return super().format(record)
def update_levelname(self, record):
"""
@@ -93,12 +94,12 @@ python early in mas_logging:
"""
record.levelname = LT_MAP.get(record.levelno, record.levelname)
- @classmethod
- def replace_lf(cls, msg):
- """
- Replaces all line feeds with carriage returns and a line feed
- """
- return re.sub(cls.NEWLINE_MATCHER, cls.LINE_TERMINATOR, msg)
+ # @classmethod
+ # def replace_lf(cls, msg):
+ # """
+ # Replaces all line feeds with carriage returns and a line feed
+ # """
+ # return re.sub(cls.NEWLINE_MATCHER, cls.LINE_TERMINATOR, msg)
class MASNewlineLogFormatter(MASLogFormatter):
"""
@@ -127,11 +128,9 @@ python early in mas_logging:
"""
Applies a prefix newline if appropriate.
"""
- return self.replace_lf(
- self.apply_newline_prefix(
- record,
- super(MASNewlineLogFormatter, self).format(record)
- )
+ return self.apply_newline_prefix(
+ record,
+ super().format(record)
)
@@ -230,7 +229,7 @@ python early in mas_logging:
#Add the header to each log, including OS info + MAS version number
#NOTE: python logging does not auto handle CRLF, so we need to explicitly manage that for the header
- LOG_HEADER = "\r\n\r\n{_date}\r\n{system_info}\r\n{renpy_ver}\r\n\r\nVERSION: {game_ver}\r\n{separator}"
+ LOG_HEADER = "\n\n{_date}\n{system_info}\n{renpy_ver}\n\nVERSION: {game_ver}\n{separator}"
#Unformatted logs use these consts (spj/pnm)
MSG_INFO = "[" + LT_INFO + "]: {0}"
@@ -268,7 +267,6 @@ python early in mas_logging:
(Default: True)
formatter - custom logging.Formatter to be used.
If None is provided, the default MASLogFormatter is used.
- NOTE: IF YOU ARE USING YOUR OWN FORMATTER, YOU SHOULD CALL THE `replace_lf` METHOD TO ENSURE YOUR LOGS ARE USING CRLF
(Default: None)
adapter_ctor - Constructor reference to the adapter we want to use. If None, no adapter is used
(Default: None)
@@ -509,132 +507,6 @@ python early in mas_utils:
deprecated.__all_warnings__ = _deprecation_warnings
- # mac logging
- class MASMacLog(renpy.renpy.log.LogFile):
- def __init__(self, name, append=False, developer=False, flush=True):
- """
- `name`
- The name of the logfile, without the .txt extension.
- `append`
- If true, we will append to the logfile. If false, we will truncate
- it to an empty file the first time we write to it.
- `developer`
- If true, nothing happens if config.developer is not set to True.
- `flush`
- Determines if the file is flushed after each write.
- """
- super(MASMacLog, self).__init__(name, append=append, developer=developer, flush=flush)
-
-
- def open(self): # @ReservedAssignment
- if self.file:
- return True
-
- if self.file is False:
- return False
-
- if self.developer and not renpy.config.developer:
- return False
-
- if not renpy.config.log_enable:
- return False
-
- try:
-
- home = os.path.expanduser("~")
- base = os.path.join(home,".MonikaAfterStory/" )
-
- if base is None:
- return False
-
- fn = os.path.join(base, self.name + ".txt")
-
- path, filename = os.path.split(fn)
- if not os.path.exists(path):
- os.makedirs(path)
-
- if self.append:
- mode = "a"
- else:
- mode = "w"
-
- if renpy.config.log_to_stdout:
- self.file = real_stdout
-
- else:
-
- try:
- self.file = codecs.open(fn, mode, "utf-8")
- except:
- pass
-
- if self.append:
- self.write('')
- self.write('=' * 78)
- self.write('')
-
- self.write("%s", time.ctime())
- try:
- self.write("%s", platform.platform())
- except:
- self.write("Unknown platform.")
- self.write("%s", renpy.version())
- self.write("%s %s", renpy.config.name, renpy.config.version)
- self.write("")
-
- return True
-
- except:
- self.file = False
- return False
-
- # A map from the log name to a log object.
- mas_mac_log_cache = { }
-
- @deprecated(use_instead="mas_logging.init_log")
- def macLogOpen(name, append=False, developer=False, flush=False): # @ReservedAssignment
- rv = mas_mac_log_cache.get(name, None)
-
- if rv is None:
- rv = MASMacLog(name, append=append, developer=developer, flush=flush)
- mas_mac_log_cache[name] = rv
-
- return rv
-
- @deprecated(use_instead="mas_logging.init_log")
- def getMASLog(name, append=False, developer=False, flush=False):
- if renpy.macapp or renpy.macintosh:
- return macLogOpen(name, append=append, developer=developer, flush=flush)
- return renpy.renpy.log.open(name, append=append, developer=developer, flush=flush)
-
- @deprecated(use_instead="mas_logging.init_log")
- def logcreate(filepath, append=False, flush=False, addversion=False):
- """
- Creates a log at the given filepath.
- This also opens the log and sets raw_write to True.
- This also adds per version number if desired
-
- IN:
- filepath - filepath of the log to create (extension is added)
- append - True will append to the log. False will overwrite
- (Default: False)
- flush - True will flush every operation, False will not
- (Default: False)
- addversion - True will add the version, False will not
- You dont need this if you create the log in runtime,
- (Default: False)
-
- RETURNS: created log object.
- """
- new_log = getMASLog(filepath, append=append, flush=flush)
- new_log.open()
- new_log.raw_write = True
- if addversion:
- new_log.write("VERSION: {0}\n".format(
- store.persistent.version_number
- ))
- return new_log
-
@deprecated(use_instead="mas_utils.mas_log.info")
def writelog(msg):
"""
@@ -665,69 +537,8 @@ python early in mas_utils:
"""
mas_log.debug("".join(traceback.format_stack()))
- #"No longer necessary as all logs have builtin rotation"
- @deprecated()
- def logrotate(logpath, filename):
- """
- Does a log rotation. Log rotations contstantly increase. We defualt
- to about 2 decimal places, but let the limit go past that
-
- NOTE: exceptions are logged
-
- IN:
- logpath - path to the folder containing logs
- NOTE: this is assumed to have the trailing slash
- filename - filename of the log to rotate
- """
- try:
- filelist = os.listdir(logpath)
- except Exception as e:
- mas_log.error(str(e))
- return
-
- # log rotation constants
- __numformat = "{:02d}"
- __numdelim = "."
-
- # parse filelist for valid filenames,
- # also sort them so the largest number is last
- filelist = sorted([
- x
- for x in filelist
- if x.startswith(filename)
- ])
-
- # now extract only the largest number in this list.
- # NOTE: this is only possible if we have more than one file in the list
- if len(filelist) > 1:
- fname, dot, largest_num = filelist.pop().rpartition(__numdelim)
- largest_num = tryparseint(largest_num, -1)
-
- else:
- # otherwise
- largest_num = -1
-
- # now increaese largest num to get the next number we should write out
- largest_num += 1
-
- # delete whatever file that is if it exists
- new_path = os.path.normcase("".join([
- logpath,
- filename,
- __numdelim,
- __numformat.format(largest_num)
- ]))
- trydel(new_path)
-
- # and copy our main file over
- old_path = os.path.normcase(logpath + filename)
- copyfile(old_path, new_path)
-
- # and delete the current file
- trydel(old_path)
-
- class IsolatedFlexProp(object):
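+ # NOTE: python_object is the plain builtin object, so instances don't take part in rollback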
+ class IsolatedFlexProp(python_object):
"""
class that supports flexible attributes.
all attributes that are set are stored in a
diff --git a/Monika After Story/game/chess.rpy b/Monika After Story/game/chess.rpy
index 1006ef991a..ccd0a97acd 100644
--- a/Monika After Story/game/chess.rpy
+++ b/Monika After Story/game/chess.rpy
@@ -545,7 +545,7 @@ init python in mas_chess:
Chess960 rules are basically:
1. One rook must stay on the left side of king, and another one stay on the right side.
- Due to this, the king can never be placed on a-file or h-file.
+ Due to this, the king can never be placed on a-file or h-file.
2. Bishops must stay on different color square.
3. Pawns must stay like the normal chess game.
4. The position of player A's pieces must be the 'reversed version' of player B's.
@@ -1948,7 +1948,7 @@ init python:
import random
import pygame
import threading
- import StringIO
+ from io import StringIO
import os
#Only add the chess_games folder if we can even do chess
@@ -2024,7 +2024,7 @@ init python:
BUTTON_INDICATOR_X = int(BOARD_X_POS + BOARD_WIDTH + BUTTON_INDICATOR_X_SPACING)
#Indicator Y position
- INDICATOR_Y = int(BOARD_Y_POS + ((BOARD_HEIGHT - INDICATOR_HEIGHT)/ 2))
+ INDICATOR_Y = int(BOARD_Y_POS + ((BOARD_HEIGHT - INDICATOR_HEIGHT)// 2))
#Absolute indicator position
INDICATOR_POS = (BUTTON_INDICATOR_X, INDICATOR_Y)
@@ -2332,7 +2332,7 @@ init python:
self.piece_map = dict()
#And refill it
- for position, Piece in self.board.piece_map().iteritems():
+ for position, Piece in self.board.piece_map().items():
MASPiece.fromPiece(
Piece,
MASChessDisplayableBase.square_to_board_coords(position),
@@ -2511,7 +2511,7 @@ init python:
highlight_magenta = renpy.render(MASChessDisplayableBase.PIECE_HIGHLIGHT_MAGENTA_IMAGE, 1280, 720, st, at)
#Get our mouse pos
- mx, my = mas_getMousePos()
+ mx, my = renpy.get_mouse_pos()
#Since different buttons show during the game vs post game, we'll sort out what's shown here
visible_buttons = list()
@@ -2591,7 +2591,7 @@ init python:
renderer.blit(highlight_yellow, MASChessDisplayableBase.board_coords_to_screen_coords(hl))
#Draw the pieces on the Board renderer.
- for piece_location, Piece in self.piece_map.iteritems():
+ for piece_location, Piece in self.piece_map.items():
#Unpack the location
ix, iy = piece_location
@@ -2659,9 +2659,9 @@ init python:
#Draw the selected piece.
piece = self.get_piece_at(self.selected_piece[0], self.selected_piece[1])
- px, py = mas_getMousePos()
- px -= MASChessDisplayableBase.PIECE_WIDTH / 2
- py -= MASChessDisplayableBase.PIECE_HEIGHT / 2
+ px, py = renpy.get_mouse_pos()
+ px -= MASChessDisplayableBase.PIECE_WIDTH // 2
+ py -= MASChessDisplayableBase.PIECE_HEIGHT // 2
piece.render(width, height, st, at, px, py, renderer)
#Ask that we be re-rendered ASAP, so we can show the next frame.
@@ -2737,11 +2737,11 @@ init python:
OUT:
Tuple of coordinates (x, y) marking where the piece is
"""
- mx, my = mas_getMousePos()
+ mx, my = renpy.get_mouse_pos()
mx -= MASChessDisplayableBase.BASE_PIECE_X
my -= MASChessDisplayableBase.BASE_PIECE_Y
- px = mx / MASChessDisplayableBase.PIECE_WIDTH
- py = my / MASChessDisplayableBase.PIECE_HEIGHT
+ px = mx // MASChessDisplayableBase.PIECE_WIDTH
+ py = my // MASChessDisplayableBase.PIECE_HEIGHT
#White
if self.is_player_white:
@@ -2812,7 +2812,7 @@ init python:
OUT:
tuple - (x, y) coords representing board coordinates for the square provided
"""
- return (sq_num % 8, sq_num / 8)
+ return (sq_num % 8, sq_num // 8)
@staticmethod
def board_coords_to_screen_coords(pos_tuple, inversion_tuple=(False,False)):
@@ -2877,13 +2877,21 @@ init python:
IMG_MAP = {
color + (symbol.upper() if color == "w" else symbol): Image("mod_assets/games/chess/pieces/{0}{1}.png".format(color, (symbol.upper() if color == "w" else symbol)))
- for color in FP_COLOR_LOOKUP.itervalues()
+ for color in FP_COLOR_LOOKUP.values()
for symbol in mas_chess.PIECE_POOL
}
+ NAMES_MAP = {
+ "b": "Bishop",
+ "k": "King",
+ "n": "Knight",
+ "p": "Pawn",
+ "r": "Rook",
+ "q": "Qeeb"
+ }
+
def __init__(
self,
- is_white,
symbol,
posX,
posY,
@@ -2893,20 +2901,18 @@ init python:
MASPiece constructor
IN:
- is_white - Whether or not the piece is white
symbol - letter symbol representing the piece. If capital, the piece is white
posX - x position of the piece
posY - y position of the piece
piece_map - Map to store this piece in
"""
- self.is_white = is_white
self.symbol = symbol
#Store an internal reference to the piece map so we can execute moves from the piece
self.piece_map = piece_map
#Store the internal reference to this piece's image fp for use in rendering
- self.__piece_image = MASPiece.IMG_MAP[MASPiece.FP_COLOR_LOOKUP[is_white] + symbol]
+ self.__piece_image = MASPiece.IMG_MAP[MASPiece.FP_COLOR_LOOKUP[self.is_white] + symbol]
#Internal reference to the position
self.x_pos = posX
@@ -2927,7 +2933,18 @@ init python:
"""
Handles a representation of this piece
"""
- return "MASPiece which: {0} and symbol: {1}".format("is white" if self.is_white else "is black", self.symbol)
+ return "MASPiece<{0} {1}>".format(
+ "White" if self.is_white else "Black",
+ self.name
+ )
+
+ @property
+ def name(self) -> str:
+ return self.NAMES_MAP[self.symbol.lower()]
+
+ @property
+ def is_white(self) -> bool:
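+ # uppercase piece symbols denote white (standard FEN/SAN convention)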
+ return self.symbol.isupper()
@staticmethod
def fromPiece(piece, pos_tuple, piece_map):
@@ -2944,7 +2961,6 @@ init python:
MASPiece
"""
return MASPiece(
- piece.color,
piece.symbol(),
pos_tuple[0],
pos_tuple[1],
@@ -3350,6 +3366,14 @@ init python:
self._button_done
]
+ def _send_uci_command(self, cmd: str):
+ """
+ Sends a command to stockfish using its input
+ """
+ self.stockfish.stdin.write(
+ cmd.encode("utf-8")
+ )
+
def __del__(self):
self.stockfish.stdin.close()
self.stockfish.wait()
@@ -3361,10 +3385,10 @@ init python:
OUT:
move - representing the best move stockfish found
"""
+ res = None
with self.lock:
- res = None
while self.queue:
- line = self.queue.pop()
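+ # stockfish output is queued as bytes; decode so the regex below sees a str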
+ line = self.queue.pop().decode("utf-8")
match = re.match(r"^bestmove (\w+)", line)
if match:
res = match.group(1)
@@ -3375,9 +3399,9 @@ init python:
"""
Starts Monika's analysis of the board
"""
- self.stockfish.stdin.write("position fen {0}\n".format(self.board.fen()))
- self.stockfish.stdin.write("go depth {0}\n".format(persistent._mas_chess_difficulty[1]))
- self.stockfish.stdin.write("go movetime {0}\n".format(self.MONIKA_WAITTIME))
+ self._send_uci_command("position fen {0}\n".format(self.board.fen()))
+ self._send_uci_command("go depth {0}\n".format(persistent._mas_chess_difficulty[1]))
+ self._send_uci_command("go movetime {0}\n".format(self.MONIKA_WAITTIME))
def additional_setup(self):
"""
@@ -3392,14 +3416,21 @@ init python:
path - filepath to the stockfish application
startupinfo - startup flags
"""
- try:
+ # NOTE: startupinfo is left unannotated since subprocess.STARTUPINFO only exists on Windows
+ def start_stockfish_proc(path: str, startupinfo) -> subprocess.Popen:
+ """
+ Tries to launch a stockfish subprocess, can raise exceptions
+ """
return subprocess.Popen(
os.path.join(renpy.config.gamedir, path).replace('\\', '/'),
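+ # unbuffered pipes so each UCI command reaches stockfish immediately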
+ bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
startupinfo=startupinfo
)
+ try:
+ return start_stockfish_proc(path, startupinfo)
+
#Catch the permission error
except OSError as os_err:
if not renpy.windows:
@@ -3430,12 +3461,7 @@ init python:
renpy.hide_screen("mas_py_console_teaching")
#Try again
try:
- stockfish_proc = subprocess.Popen(
- os.path.join(renpy.config.gamedir, path).replace('\\', '/'),
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- startupinfo=startupinfo
- )
+ stockfish_proc = start_stockfish_proc(path, startupinfo)
renpy.show("monika 3hua", at_list=[t11])
renpy.say(m, "Yay! We should be able to play now~")
@@ -3473,13 +3499,13 @@ init python:
elif is_64_bit:
fp = "mod_assets/games/chess/stockfish_8_{0}_x64".format("linux" if renpy.linux else "macosx")
- os.chmod(config.basedir + "/game/".format(fp), 0755)
+ os.chmod(config.basedir + "/game/{0}".format(fp), 0o755)
self.stockfish = open_stockfish(fp)
#Set Monika's parameters
- self.stockfish.stdin.write("setoption name Skill Level value {0}\n".format(persistent._mas_chess_difficulty[0]))
- self.stockfish.stdin.write("setoption name Contempt value {0}\n".format(self.MONIKA_OPTIMISM))
- self.stockfish.stdin.write("setoption name Ponder value False\n")
+ self._send_uci_command("setoption name Skill Level value {0}\n".format(persistent._mas_chess_difficulty[0]))
+ self._send_uci_command("setoption name Contempt value {0}\n".format(self.MONIKA_OPTIMISM))
+ self._send_uci_command("setoption name Ponder value False\n")
#And set up facilities for asynchronous communication
self.queue = collections.deque()
@@ -3620,21 +3646,23 @@ init python:
# Poll Monika for moves if it's her turn
if not self.is_game_over:
#Queue a Moni move if this is implemented
- monika_move = self.poll_monika_move()
+ monika_move = None
+ while monika_move is None:
+ # We have to wait for stockfish to send us a move
+ monika_move = self.poll_monika_move()
- if monika_move is not None:
- #Now verify legality
- monika_move_check = chess.Move.from_uci(monika_move)
+ #Now verify legality
+ monika_move_check = chess.Move.from_uci(monika_move)
- if self.board.is_legal(monika_move_check):
- #Monika is thonking
- renpy.pause(1.5)
+ if self.board.is_legal(monika_move_check):
+ #Monika is thonking
+ renpy.pause(1.5)
- #Push her move
- self.__push_move(monika_move)
+ #Push her move
+ self.__push_move(monika_move)
- #Set the buttons
- self.set_button_states()
+ #Set the buttons
+ self.set_button_states()
def set_button_states(self):
"""
diff --git a/Monika After Story/game/definitions.rpy b/Monika After Story/game/definitions.rpy
index 8967218289..8a431a471b 100644
--- a/Monika After Story/game/definitions.rpy
+++ b/Monika After Story/game/definitions.rpy
@@ -3,6 +3,7 @@ define persistent.demo = False
define config.developer = False
# define persistent.steam = "steamapps" in config.basedir.lower()
+
python early:
# We want these to be available globally, please don't remove
# Add more as needed
@@ -12,6 +13,7 @@ python early:
import random
import traceback
from collections import defaultdict # this will be available anywhere now
+ import string
# define the zorders
MAS_MONIKA_Z = 10
@@ -21,11 +23,11 @@ python early:
### Overrides of core renpy things
- def dummy(*args, **kwargs):
+ def dummy(*args, **kwargs) -> None:
"""
Dummy function that does nothing
"""
- return
+ return None
class MASDummyClass(object):
"""
@@ -76,6 +78,32 @@ python early:
Our string formatter that uses more
advanced formatting rules compared to the RenPy one
"""
+ @staticmethod
+ def _getStoreNameForObject(object_name, *scopes):
+ """
+ Returns the name of the store where the given object
+ was defined or imported to
+
+ IN:
+ object_name - the name of the object to look for (string)
+ scopes - the scopes where we look for the object (storemodule.__dict__)
+
+ OUT:
+ name of the store module where the object was defined
+ or empty string if we couldn't find it
+ """
+ for scope in scopes:
+ if object_name in scope:
+ stores_names_list = [
+ store_module_name
+ for store_module_name, store_module in sys.modules.items()
+ if store_module and store_module.__dict__ is scope
+ ]
+ if stores_names_list:
+ return stores_names_list[0]
+
+ return ""
+
def get_field(self, field_name, args, kwargs):
"""
Originally this method returns objects by references
@@ -90,31 +118,6 @@ python early:
OUT:
tuple of the object and its key
"""
- def _getStoreNameForObject(object_name, *scopes):
- """
- Returns the name of the store where the given object
- was defined or imported to
-
- IN:
- object_name - the name of the object to look for (string)
- scopes - the scopes where we look for the object (storemodule.__dict__)
-
- OUT:
- name of the store module where the object was defined
- or empty string if we couldn't find it
- """
- for scope in scopes:
- if object_name in scope:
- stores_names_list = [
- store_module_name
- for store_module_name, store_module in sys.modules.iteritems()
- if store_module and store_module.__dict__ is scope
- ]
- if stores_names_list:
- return stores_names_list[0]
-
- return ""
-
# if it's a function call, we eval it
if "(" in field_name:
# split the string into its components
@@ -129,10 +132,10 @@ python early:
# now we find the store's name to use in eval
if isinstance(kwargs, renpy.substitutions.MultipleDict):
- scope_store_name = _getStoreNameForObject(first, *kwargs.dicts)
+ scope_store_name = self._getStoreNameForObject(first, *kwargs.dicts)
else:
- scope_store_name = _getStoreNameForObject(first, kwargs)
+ scope_store_name = self._getStoreNameForObject(first, kwargs)
# apply formatting if appropriate
if scope_store_name:
@@ -149,79 +152,16 @@ python early:
args
)
)
+ # RenPy expects the first item of the returned tuple to be (object, kwargs)
+ return ((obj, kwargs), first)
- # otherwise just get the reference
- else:
- first, rest = field_name._formatter_field_name_split()
-
- obj = self.get_value(first, args, kwargs)
-
- for is_attr, i in rest:
- if is_attr:
- obj = getattr(obj, i)
-
- else:
- # convert the accessor only if obj isn't a dict
- # so the accessor is always a long for other iterables
- if not isinstance(obj, dict):
- i = long(i)
-
- obj = obj[i]
-
- return obj, first
+ # Otherwise fallback to what renpy does: just get the reference
+ return super().get_field(field_name, args, kwargs)
# allows us to use a more advanced string formatting
renpy.substitutions.formatter = MASFormatter()
- def mas_with_statement(trans, always=False, paired=None, clear=True):
- """
- Causes a transition to occur. This is the Python equivalent of the
- with statement
-
- IN:
- trans - the transition to use
- always - if True, the transition will always occur, even if the user has disabled transitions
- paired - Tom knows
- clear - if True cleans out transient stuff at the end of an interaction
-
- OUT:
- True if the user chose to interrupt the transition,
- and False otherwise
- """
- if renpy.game.context().init_phase:
- raise Exception("With statements may not run while in init phase.")
-
- if renpy.config.skipping:
- trans = None
-
- if not (renpy.game.preferences.transitions or always):
- trans = None
-
- renpy.exports.mode('with')
-
- if isinstance(paired, dict):
- paired = paired.get(None, None)
-
- if (trans is None) and (paired is None):
- return
-
- if isinstance(trans, dict):
-
- for k, v in trans.items():
- if k is None:
- continue
-
- renpy.exports.transition(v, layer=k)
-
- if None not in trans:
- return
-
- trans = trans[None]
-
- return renpy.game.interface.do_with(trans, paired, clear=clear)
-
- renpy.exports.with_statement = mas_with_statement
-
def mas_find_target(self):
"""
This method tries to find an image by its reference. It can be a displayable or tuple.
@@ -244,6 +184,8 @@ python early:
if renpy.config.debug:
raise Exception(msg)
+ target = None # typing
+
args = [ ]
while name:
@@ -274,7 +216,7 @@ python early:
target = renpy.display.image.images[name]
#If we somehow failed, show the exception and return False
- except:
+ except Exception:
error("Image '%s' not found." % ' '.join(self.name))
return False
@@ -282,17 +224,24 @@ python early:
error("Image '%s' not found." % ' '.join(self.name))
return False
+ if self._args.name == name:
+ error("Image '{}' refers to itself.".format(' '.join(name)))
+ return False
+
+ args += self._args.args
+
try:
a = self._args.copy(name=name, args=args)
self.target = target._duplicate(a)
except Exception as e:
- if renpy.config.debug:
+ if renpy.config.raise_image_exceptions and (renpy.config.debug or renpy.config.developer):
raise
error(str(e))
+ return False
- #Copy the old transform over.
+ # Copy the old transform over.
new_transform = self.target._target()
if isinstance(new_transform, renpy.display.transform.Transform):
@@ -308,207 +257,11 @@ python early:
renpy.display.image.ImageReference.find_target = mas_find_target
- class MASImageData(renpy.display.im.ImageBase):
- """
- NOTE: This DOES NOT support saving in persistent (pickling),
- and it might be unsafe to do so.
-
- This image manipulator loads an image from binary data.
- """
- def __init__(self, data, filename, **properties):
- """
- Constructor
-
- IN:
- data - string of bytes, giving the compressed image data in a standard
- file format.
- filename - "filename" associated with the image. This is used to provide a
- hint to Ren'Py about the format of `data`. (It's not actually
- loaded from disk.)
- properties - additional props
- """
- super(MASImageData, self).__init__(data, filename, **properties)
- self.data = data
- self.filename = filename
-
- def __unicode__(self):
- return u"MASImageData({})".format(self.filename)
-
- def __repr__(self):
- return str(self.__unicode__())
-
- def __reduce__(self):
- return (str, (self.filename,))
-
- def load(self):
- f = io.BytesIO(self.data)
- return renpy.display.pgrender.load_image(f, self.filename)
-
- class MASAudioData(unicode):
- """
- NOTE: This is temporal plaster-fix to renpy, use on your own risk,
- this class and all support for it will be completely gone with r8
- NOTE: This DOES NOT support saving in persistent (pickling),
- and it might be unsafe to do so.
-
- This loads audio from binary data
- """
-
- def __new__(cls, data, filename):
- rv = unicode.__new__(cls, filename)
- rv.data = data
- return rv
-
- def __init__(self, data, filename):
- self.filename = filename
-
- def __reduce__(self):
- # Pickle as a str is safer
- return (str, (self.filename, ))
+ # Deprecated, use im.Data directly
+ MASImageData = im.Data
- def __mas_periodic_override(self):
- """
- This is the periodic call that causes this channel to load new stuff
- into its queues, if necessary.
- """
-
- # Update the channel volume.
- vol = self.chan_volume * renpy.game.preferences.volumes[self.mixer]
-
- if vol != self.actual_volume:
- renpy.audio.renpysound.set_volume(self.number, vol)
- self.actual_volume = vol
-
- # This should be set from something that checks to see if our
- # mixer is muted.
- force_stop = self.context.force_stop or (renpy.game.preferences.mute[self.mixer] and self.stop_on_mute)
-
- if self.playing and force_stop:
- renpy.audio.renpysound.stop(self.number)
- self.playing = False
- self.wait_stop = False
-
- if force_stop:
- if self.loop:
- self.queue = self.queue[-len(self.loop):]
- else:
- self.queue = [ ]
- return
-
- # Should we do the callback?
- do_callback = False
-
- topq = None
-
- # This has been modified so we only queue a single sound file
- # per call, to prevent memory leaks with really short sound
- # files. So this loop will only execute once, in practice.
- while True:
-
- depth = renpy.audio.renpysound.queue_depth(self.number)
-
- if depth == 0:
- self.wait_stop = False
- self.playing = False
-
- # Need to check this, so we don't do pointless work.
- if not self.queue:
- break
-
- # If the pcm_queue is full, then we can't queue
- # anything, regardless of if it is midi or pcm.
- if depth >= 2:
- break
-
- # If we can't buffer things, and we're playing something
- # give up here.
- if not self.buffer_queue and depth >= 1:
- break
-
- # We can't queue anything if the depth is > 0 and we're
- # waiting for a synchro_start.
- if self.synchro_start and depth:
- break
-
- # If the queue is full, return.
- if renpy.audio.renpysound.queue_depth(self.number) >= 2:
- break
-
- # Otherwise, we might be able to enqueue something.
- topq = self.queue.pop(0)
-
- # Blacklist of old file formats we used to support, but we now
- # ignore.
- lfn = topq.filename.lower() + self.file_suffix.lower()
- for i in (".mod", ".xm", ".mid", ".midi"):
- if lfn.endswith(i):
- topq = None
-
- if not topq:
- continue
-
- try:
- filename, start, end = self.split_filename(topq.filename, topq.loop)
-
- if (end >= 0) and ((end - start) <= 0) and self.queue:
- continue
-
- if isinstance(topq.filename, MASAudioData):
- topf = io.BytesIO(topq.filename.data)
- else:
- topf = renpy.audio.audio.load(self.file_prefix + filename + self.file_suffix)
-
- renpy.audio.renpysound.set_video(self.number, self.movie)
-
- if depth == 0:
- renpy.audio.renpysound.play(self.number, topf, topq.filename, paused=self.synchro_start, fadein=topq.fadein, tight=topq.tight, start=start, end=end)
- else:
- renpy.audio.renpysound.queue(self.number, topf, topq.filename, fadein=topq.fadein, tight=topq.tight, start=start, end=end)
-
- self.playing = True
-
- except:
-
- # If playing failed, remove topq.filename from self.loop
- # so we don't keep trying.
- while topq.filename in self.loop:
- self.loop.remove(topq.filename)
-
- if renpy.config.debug_sound and not renpy.game.after_rollback:
- raise
- else:
- return
-
- break
-
- if self.loop and not self.queue:
- for i in self.loop:
- if topq is not None:
- newq = renpy.audio.audio.QueueEntry(i, 0, topq.tight, True)
- else:
- newq = renpy.audio.audio.QueueEntry(i, 0, False, True)
-
- self.queue.append(newq)
- else:
- do_callback = True
-
- # Queue empty callback.
- if do_callback and self.callback:
- self.callback() # E1102
-
- # global global_pause
- want_pause = self.context.pause or renpy.audio.audio.global_pause
-
- if self.paused != want_pause:
-
- if want_pause:
- self.pause()
- else:
- self.unpause()
-
- self.paused = want_pause
-
- renpy.audio.audio.Channel.periodic = __mas_periodic_override
+ # Deprecated, use AudioData
+ MASAudioData = AudioData
# uncomment this if you want syntax highlighting support on vim
# init -1 python:
@@ -770,7 +523,7 @@ python early:
start_date=None,
end_date=None,
unlock_date=None,
-# diary_entry=None,
+ # diary_entry=None,
rules=dict(),
last_seen=None,
years=None,
@@ -787,12 +540,12 @@ python early:
raise EventException("'per_eventdb' cannot be None")
if action is not None and action not in EV_ACTIONS:
raise EventException("'" + action + "' is not a valid action")
-# if diary_entry is not None and len(diary_entry) > self.DIARY_LIMIT:
-# raise Exception(
-# (
-# "diary entry for {0} is longer than {1} characters"
-# ).format(eventlabel, self.DIARY_LIMIT)
-# )
+ # if diary_entry is not None and len(diary_entry) > self.DIARY_LIMIT:
+ # raise Exception(
+ # (
+ # "diary entry for {0} is longer than {1} characters"
+ # ).format(eventlabel, self.DIARY_LIMIT)
+ # )
if rules is None:
raise Exception(
"'{0}' - rules property cannot be None".format(eventlabel)
@@ -898,7 +651,7 @@ python early:
stored_data_list = list(stored_data_row)
# first, check for lock existence
- lock_entry = Event.INIT_LOCKDB.get(eventlabel, None)
+ lock_entry = self.INIT_LOCKDB.get(eventlabel, None)
if lock_entry:
@@ -912,8 +665,7 @@ python early:
# if the lock exists, then iterate through the names
# and only update items that are unlocked
- for name,index in Event.T_EVENT_NAMES.iteritems():
-
+ for name,index in self.T_EVENT_NAMES.items():
if not lock_entry[index]:
stored_data_list[index] = data_row[index]
@@ -931,8 +683,8 @@ python early:
# actaully this should be always
self.prompt = prompt
self.category = category
-# self.diary_entry = diary_entry
-# self.rules = rules
+ # self.diary_entry = diary_entry
+ # self.rules = rules
self.years = years
#self.sensitive = sensitive
self.aff_range = aff_range
@@ -944,11 +696,11 @@ python early:
self.per_eventdb[self.eventlabel] = data_row
# setup lock entry
- Event.INIT_LOCKDB.setdefault(eventlabel, mas_init_lockdb_template)
+ self.INIT_LOCKDB.setdefault(eventlabel, mas_init_lockdb_template)
# Cache conditional
- if self.conditional is not None and self.conditional not in Event._conditional_cache:
- Event._conditional_cache[self.conditional] = renpy.python.py_compile(self.conditional, "eval")
+ if self.conditional is not None and self.conditional not in self._conditional_cache:
+ self._conditional_cache[self.conditional] = renpy.python.py_compile(self.conditional, "eval")
def __eq__(self, other):
"""
@@ -985,8 +737,8 @@ python early:
value - the new value
"""
if name in self.N_EVENT_NAMES:
- super(Event, self).__setattr__(name, value)
-# self.__dict__[name] = value
+ super().__setattr__(name, value)
+ # self.__dict__[name] = value
# otherwise, figure out the location of an attribute, then repack
# a tup
@@ -1020,9 +772,9 @@ python early:
elif (
name == "conditional"
and value is not None
- and value not in Event._conditional_cache
+ and value not in self._conditional_cache
):
- Event._conditional_cache[value] = renpy.python.py_compile(value, "eval")
+ self._conditional_cache[value] = renpy.python.py_compile(value, "eval")
# otherwise, repack the tuples
data_row = list(data_row)
@@ -1055,7 +807,7 @@ python early:
return data_row[attr_loc]
else:
- return super(Event, self).__getattribute__(name)
+ return super().__getattribute__(name)
#repr override
def __repr__(self):
@@ -1129,7 +881,11 @@ python early:
if self.conditional is None:
return True
- return renpy.python.py_eval_bytecode(Event._conditional_cache[self.conditional], globals=globals, locals=locals)
+ return renpy.python.py_eval_bytecode(
+ self._conditional_cache[self.conditional],
+ globals=globals,
+ locals=locals
+ )
def canRepeat(self):
"""
@@ -1168,7 +924,7 @@ python early:
if not self.canRepeat():
return False
- new_start, new_end, was_changed = Event._yearAdjustEV(self, force)
+ new_start, new_end, was_changed = self._yearAdjustEV(self, force)
if was_changed:
if self.isWithinRange():
@@ -1265,17 +1021,20 @@ python early:
"""
A method to validate conditionals
+ RAISES:
+ EventException
+
ASSUMES:
mas_all_ev_db
"""
- for ev in mas_all_ev_db.itervalues():
+ for ev in mas_all_ev_db.values():
if ev.conditional is not None:
try:
renpy.python.py_eval_bytecode(cls._conditional_cache[ev.conditional])
except Exception as e:
raise EventException(
- "Failed to evaluate the '{0}' conditional for the event with the '{1}' label:\n{2}.".format(
+ "Failed to evaluate '{0}' conditional for the event '{1}':\n{2}.".format(
ev.conditional,
ev.eventlabel,
traceback.format_exc()
@@ -1298,8 +1057,8 @@ python early:
"""
return ev.shown_count
- @staticmethod
- def lockInit(name, ev=None, ev_label=None):
+ @classmethod
+ def lockInit(cls, name, ev=None, ev_label=None):
"""
Locks the property for a given event object or eventlabel.
This will prevent the property from being overwritten on object
@@ -1312,11 +1071,11 @@ python early:
ev_label - event label of Event to property lock
(Default: None)
"""
- Event._modifyInitLock(name, True, ev=ev, ev_label=ev_label)
+ cls._modifyInitLock(name, True, ev=ev, ev_label=ev_label)
- @staticmethod
- def unlockInit(name, ev=None, ev_label=None):
+ @classmethod
+ def unlockInit(cls, name, ev=None, ev_label=None):
"""
Unlocks the property for a given event object or event label.
This will allow the property to be overwritten on object creation.
@@ -1328,11 +1087,11 @@ python early:
ev_label - event label of Event to property lock
(Default: None)
"""
- Event._modifyInitLock(name, False, ev=ev, ev_label=ev_label)
+ cls._modifyInitLock(name, False, ev=ev, ev_label=ev_label)
- @staticmethod
- def _modifyInitLock(name, value, ev=None, ev_label=None):
+ @classmethod
+ def _modifyInitLock(cls, name, value, ev=None, ev_label=None):
"""
Modifies the init lock for a given event/eventlabel
@@ -1349,7 +1108,7 @@ python early:
return
# check if we have a valid property
- property_dex = Event.T_EVENT_NAMES.get(name, None)
+ property_dex = cls.T_EVENT_NAMES.get(name, None)
if property_dex is None:
return
@@ -1358,13 +1117,13 @@ python early:
ev_label = ev.eventlabel
# now lock the property
- lock_entry = list(Event.INIT_LOCKDB[ev_label])
+ lock_entry = list(cls.INIT_LOCKDB[ev_label])
lock_entry[property_dex] = value
- Event.INIT_LOCKDB[ev_label] = tuple(lock_entry)
+ cls.INIT_LOCKDB[ev_label] = tuple(lock_entry)
- @staticmethod
- def _verifyAndSetDatesEV(ev):
+ @classmethod
+ def _verifyAndSetDatesEV(cls, ev):
"""
Runs _verifyDatesEV and sets the event properties if change
happens
@@ -1374,7 +1133,7 @@ python early:
RETURNS: was_changed
"""
- new_start, new_end, was_changed = Event._verifyDatesEV(ev)
+ new_start, new_end, was_changed = cls._verifyDatesEV(ev)
if was_changed:
ev.start_date = new_start
ev.end_date = new_end
@@ -1382,8 +1141,8 @@ python early:
return was_changed
- @staticmethod
- def _verifyDatesEV(ev):
+ @classmethod
+ def _verifyDatesEV(cls, ev):
"""
_verifyDates, but for an Event object.
@@ -1392,11 +1151,11 @@ python early:
RETURNS: See _verifyDates
"""
- return Event._verifyDates(ev.start_date, ev.end_date, ev.years)
+ return cls._verifyDates(ev.start_date, ev.end_date, ev.years)
- @staticmethod
- def _yearAdjustEV(ev, force=False):
+ @classmethod
+ def _yearAdjustEV(cls, ev, force=False):
"""
_yearAdjust, but for an Event object
@@ -1407,7 +1166,7 @@ python early:
RETURNS: See _verifyDates
"""
- return Event._yearAdjust(
+ return cls._yearAdjust(
ev.start_date,
ev.end_date,
ev.years,
@@ -1415,8 +1174,8 @@ python early:
)
- @staticmethod
- def _verifyDates(_start, _end, _years):
+ @classmethod
+ def _verifyDates(cls, _start, _end, _years):
"""
Given start/end/_years, figure out the appropriate start and end
dates. We use current datetime to figure this out.
@@ -1441,7 +1200,7 @@ python early:
return (_start, _end, False)
# otherwise, we need to repeat
- return Event._yearAdjust(_start, _end, _years)
+ return cls._yearAdjust(_start, _end, _years)
@staticmethod
@@ -1589,12 +1348,7 @@ python early:
RETURNS:
True if this event passes the filter, False if not
"""
-
- # collections allow us to match all
- from collections import Counter
-
# NOTE: this is done in an order to minimize branching.
-
# now lets filter
if unlocked is not None and event.unlocked != unlocked:
return False
@@ -1650,8 +1404,8 @@ python early:
# we've passed all the filtering rules somehow
return True
- @staticmethod
- def filterEvents(events, **flt_args):
+ @classmethod
+ def filterEvents(cls, events, **flt_args):
"""
Filters the given events dict according to the given filters.
HOW TO USE: Use ** to pass in a dict of filters. they must match
@@ -1708,14 +1462,10 @@ python early:
):
return events
- # copy check
-# if full_copy:
-# from copy import deepcopy
-
# setup keys
- cat_key = Event.FLT[0]
- act_key = Event.FLT[4]
- #sns_key = Event.FLT[8]
+ cat_key = cls.FLT[0]
+ act_key = cls.FLT[4]
+ #sns_key = cls.FLT[8]
# validate filter rules
category = flt_args.get(cat_key)
@@ -1737,33 +1487,34 @@ python early:
filt_ev_dict = dict()
# python 2
- for k,v in events.iteritems():
+ for k,v in events.items():
# time to apply filtering rules
- if Event._filterEvent(v, **flt_args):
+ if cls._filterEvent(v, **flt_args):
filt_ev_dict[k] = v
return filt_ev_dict
@staticmethod
def getSortedKeys(events, include_none=False):
- #
- # Returns a list of eventlables (keys) of the given dict of events
- # sorted by the field unlock_date. The list is sorted in
- # chronological order (newest first). Events with an unlock_date
- # of None are not included unless include_none is True, in which
- # case, Nones are put after everything else
- #
- # IN:
- # events - dict of events of the following format:
- # eventlabel: event object
- # include_none - True means we include events that have None for
- # unlock_date int he sorted key list, False means we dont
- # (Default: False)
- #
- # RETURNS:
- # list of eventlabels (keys), sorted in chronological order.
- # OR: [] if the given events is empty or all unlock_date fields
- # were None and include_none is False
+ """
+ Returns a list of eventlabels (keys) of the given dict of events
+ sorted by the field unlock_date. The list is sorted in
+ chronological order (newest first). Events with an unlock_date
+ of None are not included unless include_none is True, in which
+ case, Nones are put after everything else
+
+ IN:
+ events - dict of events of the following format:
+ eventlabel: event object
+ include_none - True means we include events that have None for
+ unlock_date in the sorted key list, False means we don't
+ (Default: False)
+
+ RETURNS:
+ list of eventlabels (keys), sorted in chronological order.
+ OR: [] if the given events is empty or all unlock_date fields
+ were None and include_none is False
+ """
# sanity check
if not events or len(events) == 0:
@@ -1802,150 +1553,8 @@ python early:
return eventlabels
- @store.mas_utils.deprecated(should_raise=True)
- @staticmethod
- def checkConditionals(events, rebuild_ev=False):
- # NOTE: DEPRECATED
- #
- # This checks the conditionals for all of the events in the event list
- # if any evaluate to true, run the desired action then clear the
- # conditional.
- #
- # IN:
- # rebulid_ev - pass in True to notify idle to rebuild events
- # if a random action occured.
- import datetime
-
- # sanity check
- if not events or len(events) == 0:
- return None
-
- _now = datetime.datetime.now()
-
- for ev_label,ev in events.iteritems():
- # TODO: honestly, we should index events with conditionals
- # so we only check what needs to be checked. Its a bit of an
- # annoyance to check all of these properties once per minute.
-
- # NOTE: we only check events with:
- # - a conditional property
- # - current affection is within aff_range
- # - has None for date properties
-
- if (
- # has conditional property
- ev.conditional is not None
-
- # within aff range
- and ev.checkAffection(mas_curr_affection)
-
- # no date props
- and ev.start_date is None
- and ev.end_date is None
-
- # check if the action is valid
- and ev.action in Event.ACTION_MAP
-
- # finally check if the conditional is true
- and eval(ev.conditional)
- ):
-
- # perform action
- Event._performAction(
- ev,
- unlock_time=_now,
- rebuild_ev=rebuild_ev
- )
-
- #Clear the conditional
- ev.conditional = None
-
-
- return events
-
- @store.mas_utils.deprecated(should_raise=True)
- @staticmethod
- def checkCalendar(events):
- # NOTE: DEPRECATED
- #
- # This checks the date for all events to see if they are active.
- # If they are active, then it checks for a conditional, and evaluates
- # if an action should be run.
- import datetime
-
- # sanity check
- if not events or len(events) == 0:
- return None
-
- # dict check
- ev_list = events.keys() # python 2
-
- current_time = datetime.datetime.now()
- # insertion sort
- for ev in ev_list:
-
- e = events[ev]
-
- #If the event has no time-dependence, don't check it
- if (e.start_date is None) and (e.end_date is None):
- continue
-
- #Calendar must be based on a date
- if e.start_date is not None:
- if e.start_date > current_time:
- continue
-
- if e.end_date is not None:
- if e.end_date <= current_time:
- continue
-
- if e.conditional is not None:
- if not eval(e.conditional):
- continue
-
-
- if e.action in Event.ACTION_MAP:
- # perform action
- Event._performAction(e, unlock_time=current_time)
-
- # Check if we have a years property
- if e.years is not None:
-
- # if it's an empty list
- if len(e.years) == 0:
-
- # get event ready for next year
- e.start_date = store.mas_utils.add_years(e.start_date, 1)
- e.end_date = store.mas_utils.add_years(e.end_date, 1)
- continue
-
- # if it's not empty, get all the years that are in the future
- new_years = [year for year in e.years if year > e.start_date.year]
-
- # if we have possible new years
- if len(new_years) > 0:
- # sort them to ensure we get the nearest one
- new_years.sort()
-
- # pick it
- new_year = new_years[0]
-
- # get the difference
- diff = new_year - e.start_date.year
-
- # update event for the year it should repeat
- e.start_date = store.mas_utils.add_years(e.start_date, diff)
- e.end_date = store.mas_utils.add_years(e.end_date, diff)
- continue
-
- # Clear the conditional since the event shouldn't repeat
- events[ev].conditional = "False"
-
- return events
-
-
- @staticmethod
- def _checkEvent(ev, curr_time):
+ @classmethod
+ def _checkEvent(cls, ev, curr_time):
"""
Singular filter function for checkEvents
@@ -1971,15 +1580,15 @@ python early:
return False
# check if valid action
- if ev.action not in Event.ACTION_MAP:
+ if ev.action not in cls.ACTION_MAP:
return False
# success
return True
- @staticmethod
- def checkEvents(ev_dict, rebuild_ev=True):
+ @classmethod
+ def checkEvents(cls, ev_dict, rebuild_ev=True):
"""
This acts as a combination of both checkConditionals and
checkCalendar
@@ -1991,13 +1600,20 @@ python early:
_now = datetime.datetime.now()
- for ev_label,ev in ev_dict.iteritems():
- # TODO: same TODO as in checkConditionals.
- # indexing would be smarter.
+ for ev_label,ev in ev_dict.items():
+ # TODO: honestly, we should index events with conditionals
+ # so we only check what needs to be checked. It's a bit of an
+ # annoyance to check all of these properties once per minute.
+
+ # NOTE: we only check events with:
+ # - a conditional property
+ # - current affection is within aff_range
+ # - has None for date properties
+ # indexing would be smarter.
- if Event._checkEvent(ev, _now):
+ if cls._checkEvent(ev, _now):
# perform action
- Event._performAction(
+ cls._performAction(
ev,
unlock_time=_now,
rebuild_ev=rebuild_ev
@@ -2011,87 +1627,6 @@ python early:
return
- @store.mas_utils.deprecated(should_raise=True)
- @staticmethod
- def _checkRepeatRule(ev, check_time, defval=True):
- """DEPRECATED
-
- (remove when farewells is updated)
-
- Checks a single event against its repeat rules, which are evaled
- to a time.
- NOTE: no sanity checks
- TODO: include checkConditional
-
- IN:
- ev - single event to check
- check_time - datetime used to check time rules
- defval - defval to pass into the rules
- (Default: True)
-
- RETURNS:
- True if this event passes its repeat rule, False otherwise
- """
- # check if the event contains a MASSelectiveRepeatRule and
- # evaluate it
- if MASSelectiveRepeatRule.evaluate_rule(
- check_time, ev, defval=defval
- ):
- return True
-
- # check if the event contains a MASNumericalRepeatRule and
- # evaluate it
- if MASNumericalRepeatRule.evaluate_rule(
- check_time, ev, defval=defval
- ):
- return True
-
- return False
-
- @store.mas_utils.deprecated(should_raise=True)
- @staticmethod
- def checkRepeatRules(events, check_time=None):
- """DEPRECATED
-
- (remove when farewells is updated)
-
- checks the event dict against repeat rules, which are evaluated
- to a time.
-
- IN:
- events - dict of events of the following format:
- eventlabel: event object
- check_time - the datetime object that will be used to check the
- timed rules, if none is passed we check against the current time
-
- RETURNS:
- A filtered dict containing the events that passed their own rules
- for the given check_time
- """
- # sanity check
- if not events or len(events) == 0:
- return None
-
- # if check_time is none we check against current time
- if check_time is None:
- check_time = datetime.datetime.now()
-
- # prepare empty dict to store events that pass their own rules
- available_events = dict()
-
- # iterate over each event in the given events dict
- for label, event in events.iteritems():
- if Event._checkRepeatRule(event, check_time, defval=False):
-
- if event.monikaWantsThisFirst():
- return {event.eventlabel: event}
-
- available_events[event.eventlabel] = event
-
- # return the available events dict
- return available_events
-
-
@staticmethod
def _checkFarewellRule(ev):
"""
@@ -2106,8 +1641,8 @@ python early:
return MASFarewellRule.evaluate_rule(ev)
- @staticmethod
- def checkFarewellRules(events):
+ @classmethod
+ def checkFarewellRules(cls, events):
"""
Checks the event dict (farewells) against their own farewell specific
rules, filters out those Events whose rule check return true. As for
@@ -2129,65 +1664,10 @@ python early:
available_events = dict()
# iterate over each event in the given events dict
- for label, event in events.iteritems():
+ for label, event in events.items():
# check if the event contains a MASFarewellRule and evaluate it
- if Event._checkFarewellRule(event):
-
- if event.monikaWantsThisFirst():
- return {event.eventlabel: event}
-
- # add the event to our available events dict
- available_events[label] = event
-
- # return the available events dict
- return available_events
-
- #TODO: Depricate this
- @staticmethod
- def _checkAffectionRule(ev,keepNoRule=False):
- """
- Checks the given event against its own affection specific rule.
-
- IN:
- ev - event to check
-
- RETURNS:
- True if this event passes its repeat rule, False otherwise
- """
- return MASAffectionRule.evaluate_rule(ev,noRuleReturn=keepNoRule)
-
- #TODO: Depricate this
- @staticmethod
- def checkAffectionRules(events,keepNoRule=False):
- """
- Checks the event dict against their own affection specific rules,
- filters out those Events whose rule check return true. This rule
- checks if current affection is inside the specified range contained
- on the rule
-
- IN:
- events - dict of events of the following format:
- eventlabel: event object
- keepNoRule - Boolean indicating wheter if it should keep
- events that don't have an affection rule defined
-
- RETURNS:
- A filtered dict containing the events that passed their own rules
-
- """
- # sanity check
- if not events or len(events) == 0:
- return None
-
- # prepare empty dict to store events that pass their own rules
- available_events = dict()
-
- # iterate over each event in the given events dict
- for label, event in events.iteritems():
-
- # check if the event contains a MASAffectionRule and evaluate it
- if Event._checkAffectionRule(event,keepNoRule=keepNoRule):
+ if cls._checkFarewellRule(event):
if event.monikaWantsThisFirst():
return {event.eventlabel: event}
@@ -2198,9 +1678,8 @@ python early:
# return the available events dict
return available_events
-
- @staticmethod
- def _performAction(ev, **kwargs):
+ @classmethod
+ def _performAction(cls, ev, **kwargs):
"""
Efficient / no checking action performing
@@ -2210,19 +1689,19 @@ python early:
ev - event we are performing action on
**kwargs - keyword args to pass to action
"""
- Event.ACTION_MAP[ev.action](ev, **kwargs)
+ cls.ACTION_MAP[ev.action](ev, **kwargs)
- @staticmethod
- def performAction(ev, **kwargs):
+ @classmethod
+ def performAction(cls, ev, **kwargs):
"""
Performs the action of the given event
IN:
ev - event we are perfrming action on
"""
- if ev.action in Event.ACTION_MAP:
- Event._performAction(ev, **kwargs)
+ if ev.action in cls.ACTION_MAP:
+ cls._performAction(ev, **kwargs)
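
The @staticmethod to @classmethod conversions that recur throughout this diff swap hard-coded Event. / MASEventList. lookups for cls, so each helper resolves against whatever class it is actually called on, and a subclass would pick the behavior up without re-implementing every method. A minimal standalone sketch of the pattern (plain Python, not the MAS classes):

    class Base(object):
        ACTION_MAP = {"noop": lambda ev: ev}

        @classmethod
        def perform(cls, ev, action):
            # cls is the calling class, not a hard-coded Base
            return cls.ACTION_MAP[action](ev)

    class Child(Base):
        ACTION_MAP = {"noop": lambda ev: None}

    assert Base.perform("x", "noop") == "x"
    assert Child.perform("x", "noop") is None
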
@staticmethod
def _undoEVAction(ev):
@@ -2339,12 +1818,12 @@ python early:
"""
super(renpy.Displayable, self).__init__()
# setup
-# self.idle_text = idle_text
-# self.hover_text = hover_text
-# self.disable_text = disable_text
-# self.idle_back = idle_back
-# self.hover_back = hover_back
-# self.disable_back = disable_back
+ # self.idle_text = idle_text
+ # self.hover_text = hover_text
+ # self.disable_text = disable_text
+ # self.idle_back = idle_back
+ # self.hover_back = hover_back
+ # self.disable_back = disable_back
self.xpos = xpos
self.ypos = ypos
self.width = width
@@ -3207,17 +2686,6 @@ python early:
self.corners[-1]
))
-# init -1 python:
- @store.mas_utils.deprecated(should_raise=True)
- class MASInteractable(renpy.Displayable):
- """DEPRECATED
-
- Do not use this.
- """
-
- def __init__(self, *args, **kwargs):
- pass
-
# init -1 python:
# new class to manage a list of quips
@@ -3328,7 +2796,7 @@ python early:
if self.allow_glitch:
# create the glitchtext quip
- quip = glitchtext(length)
+ quip = mas_glitchText(length)
# check for cps speed adding
if cps_speed > 0 and cps_speed != 1:
@@ -3765,7 +3233,7 @@ python early:
if key.startswith(self.EX_PFX):
return self.ex_props.get(key[self._EX_LEN:], None)
- return super(MASExtraPropable, self).__getattr__(key)
+ return super().__getattr__(key)
def __setattr__(self, key, value):
if key.startswith(self.EX_PFX):
@@ -3774,7 +3242,7 @@ python early:
if len(stripped_key) > 0:
self.ex_props[stripped_key] = value
- super(MASExtraPropable, self).__setattr__(key, value)
+ super().__setattr__(key, value)
def ex_has(self, key):
"""
@@ -3793,7 +3261,7 @@ python early:
RETURNS: iter of ex prop names and values
"""
- return (item for item in self.ex_props.iteritems())
+ return (item for item in self.ex_props.items())
def ex_pop(self, key, default=None):
"""
@@ -3826,7 +3294,7 @@ python early:
props = [
"{0}: {1}".format(key, value)
- for key, value in ex_props.iteritems()
+ for key, value in ex_props.items()
]
return "".format(", ".join(props))
@@ -4188,21 +3656,6 @@ init -1 python in _mas_root:
import datetime
import collections
- # redefine this because I can't get access to global functions, also
- # i dont care to find out how
- nonunicode = (
- "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝ" +
- "Þßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘę" +
- "ĚěĜĝĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖ" +
- "ŗŘřŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽž"
- )
-
- def glitchtext(length):
- import random
- output = ""
- for x in range(length):
- output += random.choice(nonunicode)
- return output
def mangleFile(filepath, mangle_length=1000):
"""
@@ -4215,7 +3668,7 @@ init -1 python in _mas_root:
(Default: 1000)
"""
import struct
- bad_text = glitchtext(mangle_length)
+ bad_text = store.mas_glitchText(mangle_length)
bad_text = [ord(x) for x in bad_text]
bad_text = struct.pack("{0}i".format(mangle_length), *bad_text)
with open(filepath, "wb") as m_file:
@@ -4416,7 +3869,7 @@ init -995 python in mas_utils:
"""
# check dicts
if data is not None:
- for value in data.itervalues():
+ for value in data.values():
if value is not None:
return False
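
The many iteritems/itervalues/iterkeys to items/values/keys substitutions in this diff follow the Python 3 dict API: the iter* methods were removed, and the plain methods now return lightweight view objects rather than lists. A short sketch of the difference, unrelated to the MAS data structures:

    data = {"a": 1, "b": None}

    for value in data.values():      # was data.itervalues() in Python 2
        if value is not None:
            print(value)

    # views are live; snapshot with list()/tuple() if the dict may change
    keys_view = data.keys()
    data["c"] = 3
    assert "c" in keys_view
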
@@ -4795,7 +4248,7 @@ init -100 python in mas_utils:
import random
import os
import math
- from cStringIO import StringIO as fastIO
+ from io import StringIO
from collections import defaultdict
import functools
@@ -5334,9 +4787,9 @@ init -100 python in mas_utils:
(Default: None)
RETURNS:
- a cStringIO buffer of the random blob
+ a StringIO buffer of the random blob
"""
- data = fastIO()
+ data = StringIO()
_byte_count = 0
curr_state = None
@@ -5366,9 +4819,9 @@ init -100 python in mas_utils:
size - size in bytes of the blob to make
RETURNS:
- a cStringIO buffer of the random blob
+ a StringIO buffer of the random blob
"""
- data = fastIO()
+ data = StringIO()
_byte_limit = 4 * (1024**2) # 4MB
while size > 0:
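
One caveat on this swap: Python 2's cStringIO.StringIO accepted byte strings, while io.StringIO is text-only in Python 3. If these blob helpers ever need to hold packed bytes (the way mangleFile uses struct.pack above), io.BytesIO would be the matching buffer; this is just an illustration of the split, not a claim about the blob code here:

    import io
    import struct

    text_buf = io.StringIO()
    text_buf.write("random text data")          # str only

    byte_buf = io.BytesIO()
    byte_buf.write(struct.pack("2i", 1, 2))     # bytes only
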
@@ -6857,8 +6310,13 @@ init 21 python:
return rv
+ @store.mas_utils.deprecated(use_instead="renpy.get_mouse_pos", should_raise=True)
def mas_getMousePos():
"""
+        DEPRECATED
+
+ Use renpy.get_mouse_pos instead
+
Gets the mouse position in terms of physical screen size
OUT:
@@ -6901,6 +6359,26 @@ init 21 python:
renpy.show("monika " + exp_code)
return ""
+ __NONUNICODE = (
+ "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝ"
+ "Þßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘę"
+ "ĚěĜĝĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖ"
+ "ŗŘřŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽž"
+ )
+
+ def mas_glitchText(length: int) -> str:
+ """
+        Backported and improved glitchtext from DDLC
+
+ IN:
+ length - len of the text to generate
+
+ OUT:
+ str of the generated glitch text
+ """
+ output = [random.choice(__NONUNICODE) for i in range(length)]
+ return "".join(output)
+
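
A quick usage sketch for the new mas_glitchText store function, which the earlier hunks point the old glitchtext call sites at (the quip generator and mangleFile); the assertion is illustrative only:

    # 20 glyphs drawn from the extended-Latin pool above
    sample = mas_glitchText(20)
    assert len(sample) == 20 and not sample.isascii()
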
# Music
init -1:
define audio.t1 = "bgm/1.ogg" #Main theme (title)
@@ -8317,12 +7795,13 @@ init -1 python in mas_randchat:
slider_value - slider value given from the slider
Should be between 0 - 6
"""
+ global rand_low
+ global rand_high
+
slider_setting = SLIDER_MAP.get(slider_value, 4)
# otherwise set up the times
# globalize
- global rand_low
- global rand_high
rand_low = slider_setting
rand_high = slider_setting * SPAN_MULTIPLIER
@@ -8397,12 +7876,6 @@ init 4 python:
import store.mas_randchat as mas_randchat
-# Deprecated, call mas_set_pronouns directly
-label mas_set_gender:
- $ mas_set_pronouns()
- return
-
-
style jpn_text:
font "mod_assets/font/mplus-2p-regular.ttf"
diff --git a/Monika After Story/game/dev/dev_calendar.rpy b/Monika After Story/game/dev/dev_calendar.rpy
index 3c6896e8ab..dd75257366 100644
--- a/Monika After Story/game/dev/dev_calendar.rpy
+++ b/Monika After Story/game/dev/dev_calendar.rpy
@@ -32,7 +32,3 @@ label dev_calendar_testing:
m "You selected [sel_date_formal]."
return
-
-
-
-
diff --git a/Monika After Story/game/dev/dev_check_scrollable_menu_test.rpy b/Monika After Story/game/dev/dev_check_scrollable_menu_test.rpy
index 1fa00694a3..51bbd4ec11 100644
--- a/Monika After Story/game/dev/dev_check_scrollable_menu_test.rpy
+++ b/Monika After Story/game/dev/dev_check_scrollable_menu_test.rpy
@@ -1,4 +1,3 @@
-
init 5 python:
addEvent(
Event(
@@ -141,4 +140,3 @@ label dev_check_scrollable_menu_sample:
m "sample complete"
return
-
diff --git a/Monika After Story/game/dev/dev_db.rpy b/Monika After Story/game/dev/dev_db.rpy
index 2d5c1f2c9e..9a214872fe 100644
--- a/Monika After Story/game/dev/dev_db.rpy
+++ b/Monika After Story/game/dev/dev_db.rpy
@@ -6,7 +6,7 @@ init python:
if val is not None and not isinstance(val, bool):
report.extend([delim, "bad ", name, " {0}".format(val)])
-
+
def _mas_check_ev_type_dict(val, name, report, delim=" | ", str_rep=True):
if val is not None and not isinstance(val, dict):
report.extend([delim, "bad ", name, " {0}".format(val)])
@@ -29,7 +29,7 @@ init python:
def _mas_check_ev_type_str(val, name, report, delim=" | ", str_rep=True):
if (
- val is not None
+ val is not None
and not (isinstance(val, str) or isinstance(val, unicode))
):
report.extend([delim, "bad ", name, " {0}".format(val)])
@@ -37,7 +37,7 @@ init python:
def _mas_check_ev_type_tuli(val, name, report, delim=" | ", str_rep=True):
if (
- val is not None
+ val is not None
and not (
isinstance(val, list)
or isinstance(val, tuple)
@@ -132,7 +132,7 @@ init python:
def mas_check_event_types(per_db, str_buffer=None, str_rep=True):
"""
- Goes through given persistent database for events and double checks
+ Goes through given persistent database for events and double checks
types. Returns a string report.
IN:
@@ -147,8 +147,8 @@ init python:
# NOTE: we assume lots of things about the given per_db.
if str_buffer is None:
return
-
- for ev_label, ev_line in per_db.iteritems():
+
+ for ev_label, ev_line in per_db.items():
str_buffer.write("".join(_mas_check_ev_type_per(ev_line)))
def mas_largest_persistent_item():
@@ -187,7 +187,7 @@ init python:
mas_per_dump_list(item_key)
# NOTE: ignore others for now
-
+
def mas_per_dump_dict(dkey):
"""
Dumps an output of a persistent dict
@@ -215,7 +215,8 @@ init python:
init python in dev_mas_shared:
- import cPickle
+ import renpy.compat.pickle as pickle
+ import codecs
import store
import store.mas_ev_data_ver as ver
@@ -242,8 +243,8 @@ init python in dev_mas_shared:
def __init__(self, in_char):
"""
IN:
- in_char - pass True if the persisten file is int
- eh user's charactesr dir. Otherwise we use the
+            in_char - pass True if the persistent file is in
+                the user's characters dir. Otherwise we use the
loaded persistent
"""
self.in_char = in_char
@@ -257,8 +258,9 @@ init python in dev_mas_shared:
# select persistent to load
if self.in_char:
pkg = store.mas_docking_station.getPackage("persistent")
- pdata = cPickle.loads(pkg.read().decode("zlib"))
+ pdata = pickle.loads(codecs.decode(pkg.read(),"zlib"))
pkg.close()
+
else:
pdata = store.persistent
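
pkg.read().decode("zlib") relied on Python 2 exposing bytes-to-bytes codecs on str.decode; Python 3 dropped that, so the rewrite goes through codecs.decode (plain zlib.decompress would behave the same). A self-contained round trip, independent of the docking-station package code:

    import codecs
    import pickle

    payload = pickle.dumps({"ok": True})
    blob = codecs.encode(payload, "zlib")              # same as zlib.compress(payload)
    data = pickle.loads(codecs.decode(blob, "zlib"))   # same as zlib.decompress(blob)
    assert data == {"ok": True}
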
@@ -273,7 +275,7 @@ init python in dev_mas_shared:
for prop_name in store.Event.T_EVENT_NAMES:
prop_value = getattr(ev_data, prop_name)
- # listables need to be split into parts
+ # listables need to be split into parts
# except affection
if ver._verify_tuli(prop_value, allow_none=False) and prop_name != "aff_range":
for item in prop_value:
@@ -329,7 +331,7 @@ init python in dev_mas_shared:
only_incl=None
):
"""
- Does get_all_for_prop but saves the results directly to file
+ Does get_all_for_prop but saves the results directly to file
See get_all_for_prop for param doc
"""
@@ -349,7 +351,7 @@ init python in dev_mas_shared:
prop_value = getattr(curr_data, prop)
- # this is just so we dont have to print out of loop
+ # this is just so we dont have to print out of loop
if prop_value is None:
last_value = ""
else:
@@ -377,7 +379,7 @@ init python in dev_mas_shared:
False to sort descending
None to not sort
(Default: None)
- only_incl - pass this in as a dictionary of eventlabels to
+ only_incl - pass this in as a dictionary of eventlabels to
only include these in the prop
RETURNS: tuple:
@@ -415,7 +417,7 @@ init python in dev_mas_shared:
IN:
prop - the property to get
value - the value to get
- only_incl - if passed in, only include entries with
+ only_incl - if passed in, only include entries with
eventlabel in this dict
RETURNS: dictionary containing all items that match the value for
diff --git a/Monika After Story/game/dev/dev_deco.rpy b/Monika After Story/game/dev/dev_deco.rpy
index 71a5a94ce7..39c5e76e6f 100644
--- a/Monika After Story/game/dev/dev_deco.rpy
+++ b/Monika After Story/game/dev/dev_deco.rpy
@@ -202,7 +202,7 @@ init 5 python:
)
label dev_deco_tag_test_api_same:
-
+
m 1eub "TIME TO TEST `register_img_same`"
$ mas_showDecoTag("dev_monika_deco_thr", show_now=True)
diff --git a/Monika After Story/game/dev/dev_exp_previewer.rpy b/Monika After Story/game/dev/dev_exp_previewer.rpy
index c7aeb6c461..efea03eecc 100644
--- a/Monika After Story/game/dev/dev_exp_previewer.rpy
+++ b/Monika After Story/game/dev/dev_exp_previewer.rpy
@@ -154,14 +154,14 @@ init 999 python:
REVERSE_IMG_NAMES_MAP = {
key: {
v: k
- for k, v in sub_map.iteritems()
+ for k, v in sub_map.items()
}
- for key, sub_map in IMG_NAMES_MAP.iteritems()
+ for key, sub_map in IMG_NAMES_MAP.items()
}
# And also handle the keys from the mod map
# (different keys that actually correspond to the same sprite letters)
- for key, sub_map in MOD_MAP.iteritems():
- for k, v in sub_map.iteritems():
+ for key, sub_map in MOD_MAP.items():
+ for k, v in sub_map.items():
spr_code_letter = REVERSE_IMG_NAMES_MAP[key][k]
for mod_str in v:
REVERSE_IMG_NAMES_MAP[key][k + mod_str] = spr_code_letter
@@ -437,7 +437,7 @@ init 999 python:
# update torsos with spritepacked sprites
torso_map = self.SEL_TX_MAP["torso"]
torso_list_add = []
- for sel in store.mas_selspr.CLOTH_SEL_MAP.itervalues():
+ for sel in store.mas_selspr.CLOTH_SEL_MAP.values():
spr = sel.get_sprobj()
if spr.is_custom and spr.name not in torso_map:
torso_map[spr.name] = sel.display_name
@@ -1071,7 +1071,7 @@ init 999 python:
# Add bits that might not be present in the given code
exp_kwargs.setdefault(key, None)
- for key, value in exp_kwargs.iteritems():
+ for key, value in exp_kwargs.items():
# Skip the keys we don't need
if (
key in self.SPRITE_CODE_MAP
diff --git a/Monika After Story/game/dev/dev_greetings.rpy b/Monika After Story/game/dev/dev_greetings.rpy
index 1ed592ac81..370cb33356 100644
--- a/Monika After Story/game/dev/dev_greetings.rpy
+++ b/Monika After Story/game/dev/dev_greetings.rpy
@@ -255,7 +255,7 @@ label dev_gre_sampler:
# done with sampling, output results
with open(renpy.config.basedir + "/gre_sample", "w") as outdata:
- for ev_label, count in results.iteritems():
+ for ev_label, count in results.items():
outdata.write("{0},{1}\n".format(ev_label, count))
# relock locked gres
diff --git a/Monika After Story/game/dev/dev_idle_test.rpy b/Monika After Story/game/dev/dev_idle_test.rpy
index b61153aa4b..7df2b15005 100644
--- a/Monika After Story/game/dev/dev_idle_test.rpy
+++ b/Monika After Story/game/dev/dev_idle_test.rpy
@@ -30,6 +30,3 @@ label dev_idle_test:
label dev_idle_test_cb:
m 1hua "done with idle!"
return
-
-
-
diff --git a/Monika After Story/game/dev/dev_islands.rpy b/Monika After Story/game/dev/dev_islands.rpy
index 0474b4aed5..061a078c9c 100644
--- a/Monika After Story/game/dev/dev_islands.rpy
+++ b/Monika After Story/game/dev/dev_islands.rpy
@@ -69,7 +69,7 @@ label dev_test_islands_progress:
k,
", ".join(map(str, v))
)
- for k, v in data.iteritems()
+ for k, v in data.items()
]
)
diff --git a/Monika After Story/game/dev/dev_mouse_tracker.rpy b/Monika After Story/game/dev/dev_mouse_tracker.rpy
index 3bd3979b18..a9cab7367f 100644
--- a/Monika After Story/game/dev/dev_mouse_tracker.rpy
+++ b/Monika After Story/game/dev/dev_mouse_tracker.rpy
@@ -242,17 +242,17 @@ init python:
self.cz_man.set_disabled(self.mib.ZONE_CHEST_1_L, True)
self.quick_add(self.mib.ZONE_HEAD)
-
+
self.quick_add(self.mib.ZONE_NOSE)
def build_zone_actions(self):
return {
self.mib.ZONE_CHEST: MASZoomableInteractable.ZONE_ACTION_NONE,
- self.mib.ZONE_CHEST_1_R:
+ self.mib.ZONE_CHEST_1_R:
MASZoomableInteractable.ZONE_ACTION_NONE,
- self.mib.ZONE_CHEST_1_M:
+ self.mib.ZONE_CHEST_1_M:
MASZoomableInteractable.ZONE_ACTION_NONE,
- self.mib.ZONE_CHEST_1_L:
+ self.mib.ZONE_CHEST_1_L:
MASZoomableInteractable.ZONE_ACTION_NONE,
self.mib.ZONE_HEAD: MASZoomableInteractable.ZONE_ACTION_NONE,
self.mib.ZONE_NOSE: MASZoomableInteractable.ZONE_ACTION_NONE,
diff --git a/Monika After Story/game/dev/dev_pg_topics.rpy b/Monika After Story/game/dev/dev_pg_topics.rpy
index 3977e6b48f..9ef69e4283 100644
--- a/Monika After Story/game/dev/dev_pg_topics.rpy
+++ b/Monika After Story/game/dev/dev_pg_topics.rpy
@@ -18,7 +18,7 @@ label zz_mas_poemgame_actone:
m "Hi [player]!"
m "These are your point totals:"
python:
- for k,v in testvalues.iteritems():
+ for k,v in testvalues.items():
m(k + " received "+ str(v) + " pt(s) from your choices.")
m "I hope that was fun!"
@@ -43,7 +43,7 @@ label zz_mas_poemgame_acttwo:
m "Hi [player]!"
m "These are your point totals:"
python:
- for k,v in testvalues.iteritems():
+ for k,v in testvalues.items():
m(k + " received "+ str(v) + " pt(s) from your choices.")
m "I hope that was fun!"
@@ -119,7 +119,7 @@ label zz_mas_poemgame_actonept:
m "Hi [player]!"
m "These are your point totals:"
python:
- for k,v in testvalues.iteritems():
+ for k,v in testvalues.items():
m(k + " received "+ str(v) + " pt(s) from your choices.")
m "And you selected these words:"
@@ -279,7 +279,7 @@ label zz_mas_poemgame_oneg:
m "Hi [player]!"
m "These are your point totals:"
python:
- for k,v in testvalues.iteritems():
+ for k,v in testvalues.items():
m(k + " received "+ str(v) + " pt(s) from your choices.")
@@ -325,7 +325,7 @@ label zz_mas_poemgame_oc:
m "Hi [player]!"
m "These are your point totals:"
python:
- for k,v in testvalues.iteritems():
+ for k,v in testvalues.items():
m(k + " received "+ str(v) + " pt(s) from your choices.")
m "you selected these words:"
python:
diff --git a/Monika After Story/game/dev/dev_selector.rpy b/Monika After Story/game/dev/dev_selector.rpy
index c8c229a4be..859ec36a22 100644
--- a/Monika After Story/game/dev/dev_selector.rpy
+++ b/Monika After Story/game/dev/dev_selector.rpy
@@ -1,7 +1,7 @@
# selector testing
init -100 python:
-
+
def dev_mas_unlock_all_sprites():
for sel_obj in store.mas_selspr.ACS_SEL_SL:
sel_obj.unlocked = True
@@ -113,7 +113,7 @@ label dev_selector_test:
return
# TODO: this needs to be called, so we need to redo the jump logic.,
-# jump mas_selector_sidebar_select(start_test_items, "dev_selector_test_confirm", "dev_selector_test_cancel",
+# jump mas_selector_sidebar_select(start_test_items, "dev_selector_test_confirm", "dev_selector_test_cancel",
label dev_selector_test_confirm:
hide screen mas_selector_sidebar
@@ -161,7 +161,7 @@ label dev_selector_hair_test:
call mas_selector_sidebar_select_hair(sorted_hair, mailbox=mailbox, select_map=sel_map)
- # undo the unlocks
+ # undo the unlocks
python:
for item in sorted_hair:
item.unlocked = unlock_map[item.name]
@@ -201,7 +201,7 @@ label dev_selector_clothes_test:
call mas_selector_sidebar_select_clothes(sorted_clothes, mailbox=mailbox, select_map=sel_map)
- # undo the unlocks
+ # undo the unlocks
python:
for item in sorted_clothes:
item.unlocked = unlock_map[item.name]
@@ -248,7 +248,7 @@ label dev_selector_acs_ribbons_test:
call mas_selector_sidebar_select_acs(use_acs, mailbox=mailbox, select_map=sel_map)
- # undo the unlocks
+ # undo the unlocks
python:
for item in use_acs:
item.unlocked = unlock_map[item.name]
diff --git a/Monika After Story/game/dev/dev_stories.rpy b/Monika After Story/game/dev/dev_stories.rpy
index d50a2f5931..cf5993460a 100644
--- a/Monika After Story/game/dev/dev_stories.rpy
+++ b/Monika After Story/game/dev/dev_stories.rpy
@@ -3,5 +3,5 @@ init 20 python in mas_stories:
"""
Dev function, unlocks all stories
"""
- for story in story_database.itervalues():
+ for story in story_database.values():
story.unlocked=True
diff --git a/Monika After Story/game/dev/dev_tools.rpy b/Monika After Story/game/dev/dev_tools.rpy
index 83d321e751..c65780227b 100644
--- a/Monika After Story/game/dev/dev_tools.rpy
+++ b/Monika After Story/game/dev/dev_tools.rpy
@@ -1,7 +1,7 @@
# basic dev tool stuff
init 800 python:
-
+
def mas_remove_event(*labels):
"""
Removes an event from the persistent database and lock DB
@@ -23,7 +23,7 @@ init 800 python:
if label in Event.INIT_LOCKDB:
Event.INIT_LOCKDB.pop(label)
-
+
persistent.closed_self = True
persistent._mas_game_crashed = False
renpy.save_persistent()
diff --git a/Monika After Story/game/dev/dev_weather.rpy b/Monika After Story/game/dev/dev_weather.rpy
index 7de555dd41..724b52163f 100644
--- a/Monika After Story/game/dev/dev_weather.rpy
+++ b/Monika After Story/game/dev/dev_weather.rpy
@@ -27,7 +27,7 @@ label dev_change_weather:
# build other weather list
other_weathers = [
(mw_obj.prompt, mw_obj, False, False)
- for mw_id, mw_obj in mas_weather.WEATHER_MAP.iteritems()
+ for mw_id, mw_obj in mas_weather.WEATHER_MAP.items()
if mw_id != "def"
]
@@ -99,7 +99,7 @@ label dev_weather_sampler:
for count in range(sample_size):
got_weather = mas_shouldRain()
totals += 1
-
+
if got_weather is None:
results["default"] += 1
@@ -111,7 +111,7 @@ label dev_weather_sampler:
# done with sampling, output results
with open(renpy.config.basedir + "/weather_sample", "w") as outdata:
- for weather_name, count in results.iteritems():
+ for weather_name, count in results.items():
outdata.write("{0},{1} -> {2}\n".format(
weather_name,
count,
@@ -120,4 +120,3 @@ label dev_weather_sampler:
m "check files for 'weather_sample' for more info."
return
-
diff --git a/Monika After Story/game/dev/dev_xp.rpy b/Monika After Story/game/dev/dev_xp.rpy
index aec25fe781..8cf11aca59 100644
--- a/Monika After Story/game/dev/dev_xp.rpy
+++ b/Monika After Story/game/dev/dev_xp.rpy
@@ -1,4 +1,3 @@
-
init 5 python:
addEvent(
Event(
diff --git a/Monika After Story/game/dev/zz_dump.rpy b/Monika After Story/game/dev/zz_dump.rpy
index a349179ce6..07678d02c3 100644
--- a/Monika After Story/game/dev/zz_dump.rpy
+++ b/Monika After Story/game/dev/zz_dump.rpy
@@ -15,4 +15,3 @@ init 999 python:
# )
# )
# del outtext
-
diff --git a/Monika After Story/game/event-handler.rpy b/Monika After Story/game/event-handler.rpy
index bccd05f6d2..642f9ad2fe 100644
--- a/Monika After Story/game/event-handler.rpy
+++ b/Monika After Story/game/event-handler.rpy
@@ -33,7 +33,7 @@ init -999 python in mas_ev_data_ver:
# must be before -900 so we can use in persistent backup/cleanup
# need to use real lists and dicts here
- import __builtin__
+ import builtins
# special store dedicated to verification of Event-based data
import datetime
@@ -90,7 +90,6 @@ init -999 python in mas_ev_data_ver:
bool,
int,
float,
- long,
complex,
datetime.timedelta,
datetime.date,
@@ -103,10 +102,10 @@ init -999 python in mas_ev_data_ver:
# list types
if val_type in (
- __builtin__.list,
+ builtins.list,
renpy.python.RevertableList,
- __builtin__.set,
- __builtin__.frozenset,
+ builtins.set,
+ builtins.frozenset,
renpy.python.RevertableSet,
tuple,
):
@@ -116,7 +115,7 @@ init -999 python in mas_ev_data_ver:
return True
# dict types
- if val_type in (__builtin__.dict, renpy.python.RevertableDict):
+ if val_type in (builtins.dict, renpy.python.RevertableDict):
for sub_key in val:
if (
not __strict_can_pickle(sub_key)
@@ -136,11 +135,11 @@ init -999 python in mas_ev_data_ver:
def _verify_dict(val, allow_none=True):
- return _verify_item(val, __builtin__.dict, allow_none)
+ return _verify_item(val, builtins.dict, allow_none)
def _verify_list(val, allow_none=True):
- return _verify_item(val, __builtin__.list, allow_none)
+ return _verify_item(val, builtins.list, allow_none)
def _verify_dt(val, allow_none=True):
@@ -192,7 +191,7 @@ init -999 python in mas_ev_data_ver:
if val is None:
return allow_none
- return isinstance(val, __builtin__.list) or isinstance(val, tuple)
+ return isinstance(val, builtins.list) or isinstance(val, tuple)
def _verify_tuli_nn(val):
@@ -353,7 +352,7 @@ init -950 python in mas_ev_data_ver:
if per_db is None:
return
- for ev_label in per_db.keys():
+ for ev_label in tuple(per_db.keys()):
# pull out the data
ev_line = per_db[ev_label]
@@ -480,7 +479,7 @@ init 6 python:
# mainly to create centralized database for calendar lookup
# (and possible general db lookups)
mas_all_ev_db = {}
- for code,ev_db in mas_all_ev_db_map.iteritems():
+ for code,ev_db in mas_all_ev_db_map.items():
mas_all_ev_db.update(ev_db)
del code, ev_db
@@ -668,7 +667,7 @@ init 6 python:
if ev is None:
return False
- for attr, new_value in kwargs.iteritems():
+ for attr, new_value in kwargs.items():
setattr(ev, attr, new_value)
return True
@@ -1496,26 +1495,26 @@ init -1 python in evhand:
_NT_CAT_PANE = namedtuple("_NT_CAT_PANE", "menu cats")
# RIGHT PANE
-# PREV_X = 30
+ # PREV_X = 30
RIGHT_X = 1020
-# PREV_Y = 10
+ # PREV_Y = 10
RIGHT_Y = 15 + 55
-# PREV_W = 300
+ # PREV_W = 300
RIGHT_W = 250
RIGHT_H = 572
-# PREV_XALIGN = -0.08
+ # PREV_XALIGN = -0.08
RIGHT_XALIGN = -0.10
RIGHT_AREA = (RIGHT_X, RIGHT_Y, RIGHT_W, RIGHT_H)
# LEFT PANE
-# MAIN_X = 360
+ # MAIN_X = 360
LEFT_X = 740
-# MAIN_Y = 10
+ # MAIN_Y = 10
LEFT_Y = RIGHT_Y
-# MAIN_W = 300
+ # MAIN_W = 300
LEFT_W = RIGHT_W
LEFT_H = RIGHT_H
-# MAIN_XALIGN = -0.08
+ # MAIN_XALIGN = -0.08
LEFT_XALIGN = -0.10
LEFT_AREA = (LEFT_X, LEFT_Y, LEFT_W, LEFT_H)
LEFT_EXTRA_SPACE = 68
@@ -1589,8 +1588,8 @@ init -1 python in evhand:
self._eli
)
- @staticmethod
- def build(evl, *args):
+ @classmethod
+ def build(cls, evl, *args):
"""
Builds an ELI.
@@ -1600,10 +1599,10 @@ init -1 python in evhand:
RETURNS: EventListItem object
"""
- return EventListItem(EventListItem._build_raw(evl, *args))
+ return cls(cls._build_raw(evl, *args))
- @staticmethod
- def _build_raw(evl, *args):
+ @classmethod
+ def _build_raw(cls, evl, *args):
"""
Builds raw data for an ELI.
@@ -1612,13 +1611,13 @@ init -1 python in evhand:
RETURNS: raw data
"""
data = list(
- (evl, ) + args + EventListItem.DEFAULT_VALUES[len(args):]
+ (evl, ) + args + cls.DEFAULT_VALUES[len(args):]
)
# adjust context to be persistntable
- ctx = data[EventListItem.IDX_CONTEXT]
+ ctx = data[cls.IDX_CONTEXT]
if isinstance(ctx, store.MASEventContext):
- data[EventListItem.IDX_CONTEXT] = ctx._to_dict()
+ data[cls.IDX_CONTEXT] = ctx._to_dict()
return tuple(data)
@@ -1891,7 +1890,7 @@ init -1 python in evhand:
Goes through the year setblacklist and removes expired entries
"""
now_dt = datetime.datetime.now()
- for evl in store.persistent._mas_ev_yearset_blacklist.keys():
+ for evl in tuple(store.persistent._mas_ev_yearset_blacklist.keys()):
if store.persistent._mas_ev_yearset_blacklist[evl] <= now_dt:
store.persistent._mas_ev_yearset_blacklist.pop(evl)
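
Wrapping the keys in tuple(...) before loops like this matters in Python 3: dict.keys() is now a live view, and popping entries while iterating it raises "RuntimeError: dictionary changed size during iteration", whereas Python 2's keys() returned an independent list. A standalone sketch of the failure mode the change avoids:

    blacklist = {"ev_a": 1, "ev_b": 2}

    # for evl in blacklist.keys():       # RuntimeError in Python 3 once we pop
    #     blacklist.pop(evl)

    for evl in tuple(blacklist.keys()):  # snapshot first, then pop safely
        blacklist.pop(evl)

    assert not blacklist
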
@@ -1952,7 +1951,7 @@ init python:
ctx_data - context data directly from event list. Optional.
(Default: None)
"""
- super(MASEventContext, self).__init__()
+ super().__init__()
if ctx_data is not None:
self._from_dict(ctx_data)
@@ -1960,8 +1959,8 @@ init python:
"""
We don't allow types that cannot be saved to persistent
"""
- if MASEventContext.is_allowed_data(value):
- super(MASEventContext, self).__setattr__(name, value)
+ if self.is_allowed_data(value):
+ super().__setattr__(name, value)
@classmethod
def is_allowed_data(cls, thing):
@@ -2034,12 +2033,12 @@ init python:
# current event functions
- @staticmethod
- def clear_current():
+ @classmethod
+ def clear_current(cls):
"""
Clears the current event aka persistent eli data.
"""
- MASEventList._set_current(None)
+ cls._set_current(None)
@staticmethod
def load_current():
@@ -2077,39 +2076,39 @@ init python:
persistent._mas_curr_eli_data = new_eli_data
persistent.current_monikatopic = new_curr_moni_topic
- @staticmethod
- def sync_current():
+ @classmethod
+ def sync_current(cls):
"""
Syncs the current event persistent vars, aka:
- current_monikatopic
- _mas_curr_eli_data
"""
- curr_eli = MASEventList.load_current()
+ curr_eli = cls.load_current()
if curr_eli is None:
if renpy.has_label(str(persistent.current_monikatopic)):
# to handle unexpected uses, we'll build an eli for this
# if this var is set but no eli data was found.
- MASEventList._set_current(evhand.EventListItem.build(
+ cls._set_current(evhand.EventListItem.build(
str(persistent.current_monikatopic)
))
else:
- MASEventList.clear_current()
+ cls.clear_current()
else:
- MASEventList._set_current(curr_eli)
+ cls._set_current(curr_eli)
# event list functions
- @staticmethod
- def clean():
+ @classmethod
+ def clean(cls):
"""
Cleans the event list and makes sure all events are of the
appropriate length and have a valid label.
"""
- for index in MASEventList.rev_idx_iter():
+ for index in cls.rev_idx_iter():
item_raw = persistent.event_list[index]
# type check
@@ -2160,8 +2159,8 @@ init python:
mas_globals.event_unpause_dt = None
return False
- @staticmethod
- def _next():
+ @classmethod
+ def _next(cls):
"""
Gets the next event's data and its location in the event_list.
This takes event restrictions into account, aka pausing and idle.
@@ -2173,9 +2172,9 @@ init python:
if len(persistent.event_list) < 1:
return None, -1
- is_paused = MASEventList.is_paused()
+ is_paused = cls.is_paused()
- for index, item in MASEventList.rev_enum_iter():
+ for index, item in cls.rev_enum_iter():
ev = mas_getEV(item.evl)
if (
@@ -2199,8 +2198,8 @@ init python:
# no valid event available
return None, -1
- @staticmethod
- def peek():
+ @classmethod
+ def peek(cls):
"""
Gets the EventListItem for the next event on the event list, but
does NOT remove it.
@@ -2213,10 +2212,10 @@ init python:
RETURNS: EventListItem object for the next event, or None if no
next event.
"""
- return MASEventList._next()[0]
+ return cls._next()[0]
- @staticmethod
- def pop():
+ @classmethod
+ def pop(cls):
"""
Gets the EventListItem for the next event on the event list and
removes the event from the event list.
@@ -2229,7 +2228,7 @@ init python:
RETURNS: EventListItem object for the next event
"""
- item, loc = MASEventList._next()
+ item, loc = cls._next()
if item is None:
return None
@@ -2237,12 +2236,12 @@ init python:
if 0 <= loc < len(persistent.event_list): # just in case
persistent.event_list.pop(loc)
- MASEventList._set_current(item)
+ cls._set_current(item)
return item
- @staticmethod
- def push(event_label, skipeval=False, notify=False, context=None):
+ @classmethod
+ def push(cls, event_label, skipeval=False, notify=False, context=None):
"""
Pushes an event to the list - this will make the event trigger
next unless something else is pushed.
@@ -2260,7 +2259,7 @@ init python:
(accessible via MASEventContext.get())
(Default: None)
"""
- MASEventList._push_eli(evhand.EventListItem.build(
+ cls._push_eli(evhand.EventListItem.build(
event_label,
notify,
context
@@ -2279,8 +2278,8 @@ init python:
"""
persistent.event_list.append(eli._raw())
- @staticmethod
- def queue(event_label, notify=False, context=None):
+ @classmethod
+ def queue(cls, event_label, notify=False, context=None):
"""
Queues an event to the list - this will make the event trigger,
but not right away unless the list is empty.
@@ -2295,7 +2294,7 @@ init python:
(accessible via MASEventContext.get())
(Default: None)
"""
- MASEventList._queue_eli(evhand.EventListItem.build(
+ cls._queue_eli(evhand.EventListItem.build(
event_label,
notify,
context
@@ -2406,33 +2405,6 @@ init python:
# now this event has passsed checks, we can add it to the db
eventdb.setdefault(event.eventlabel, event)
- @store.mas_utils.deprecated(use_instead="mas_hideEVL", should_raise=True)
- def hideEventLabel(
- eventlabel,
- lock=False,
- derandom=False,
- depool=False,
- decond=False,
- eventdb=evhand.event_database
- ):
- #
- # NOTE: DEPRECATED
- # hide an event in the given eventdb by Falsing its unlocked,
- # random, and pool properties.
- #
- # IN:
- # eventlabel - label of the event to hide
- # lock - True if we want to lock this event, False otherwise
- # (Default: False)
- # derandom - True if we want to unrandom this event, False otherwise
- # (Default: False)
- # depool - True if we want to unpool this event, False otherwise
- # (Default: False)
- # decond - True if we want to remove the conditional, False otherwise
- # (Default: False)
- # eventdb - the event database (dict) we want to reference
- # (DEfault: evhand.event_database)
- mas_hideEventLabel(eventlabel, lock, derandom, depool, decond, eventdb)
@store.mas_utils.deprecated(use_instead="mas_hideEvent")
def hideEvent(
@@ -2582,29 +2554,6 @@ init python:
"""
mas_showEvent(eventdb.get(ev_label, None), unlock, _random, _pool)
- @store.mas_utils.deprecated(use_instead="mas_lockEvent", should_raise=True)
- def lockEvent(ev):
- """
- NOTE: DEPRECATED
- Locks the given event object
-
- IN:
- ev - the event object to lock
- """
- mas_lockEvent(ev)
-
- @store.mas_utils.deprecated(use_instead="mas_lockEventLabel", should_raise=True)
- def lockEventLabel(evlabel, eventdb=evhand.event_database):
- """
- NOTE: DEPRECATED
- Locks the given event label
-
- IN:
- evlabel - event label of the event to lock
- eventdb - Event database to find this label
- """
- mas_lockEventLabel(evlabel, eventdb)
-
def mas_lockEvent(ev):
"""
@@ -2630,6 +2579,7 @@ init python:
@store.mas_utils.deprecated(use_instead="MASEventList.push")
def pushEvent(event_label, skipeval=False, notify=False):
"""
+        NOTE: Prefer MASEventList.push instead
This pushes high priority or time sensitive events onto the top of
the event list
@@ -2645,12 +2595,13 @@ init python:
ASSUMES:
persistent.event_list
"""
- MASEventList.push(event_label, skipeval, notify)
+        MASEventList.push(event_label, skipeval, notify)
@store.mas_utils.deprecated(use_instead="MASEventList.queue")
def queueEvent(event_label, notify=False):
"""
+        NOTE: Prefer MASEventList.queue instead
This adds low priority or order-sensitive events onto the bottom of
the event list. This is slow, but rarely called and list should be
small.
@@ -2664,20 +2615,9 @@ init python:
ASSUMES:
persistent.event_list
"""
- MASEventList.queue(event_label, notify)
+        MASEventList.queue(event_label, notify)
- @store.mas_utils.deprecated(use_instead="mas_unlockEvent", should_raise=True)
- def unlockEvent(ev):
- """
- NOTE: DEPRECATED
- Unlocks the given evnet object
-
- IN:
- ev - the event object to unlock
- """
- mas_unlockEvent(ev)
-
@store.mas_utils.deprecated(use_instead="mas_unlockEventLabel")
def unlockEventLabel(evlabel, eventdb=evhand.event_database):
"""
@@ -2761,16 +2701,6 @@ init python:
return evhand._isPresent(ev)
- @store.mas_utils.deprecated(use_instead="MASEventList.pop", should_raise=True)
- def popEvent(remove=True):
- """
- DO NOT USE.
-
- Use MASEventList.pop instead (not exactly the same)
- """
- pass
-
-
def seen_event(event_label):
"""
Please use mas_seenEvent, this function hasn't been deprecated
@@ -2974,7 +2904,7 @@ init python:
# get locked pool topics that are not banned from unlocking
pool_evs = [
ev
- for ev in evhand.event_database.itervalues()
+ for ev in evhand.event_database.values()
if (
Event._filterEvent(ev, unlocked=False, pool=True)
and "no_unlock" not in ev.rules
@@ -3104,9 +3034,9 @@ label call_next_event:
):
#Create a new notif
if renpy.windows:
- $ mas_display_notif(m_name, mas_win_notif_quips, "Topic Alerts")
+ $ mas_display_notif(m_name, mas_win_notif_quips, "Topic Alerts", flash_window=True)
else:
- $ mas_display_notif(m_name, mas_other_notif_quips, "Topic Alerts")
+ $ mas_display_notif(m_name, mas_other_notif_quips, "Topic Alerts", flash_window=True)
#Also check here and reset the forced idle exp if necessary
if ev is not None and "keep_idle_exp" not in ev.rules:
@@ -3500,33 +3430,32 @@ label prompts_categories(pool=True):
# setup items
main_items = no_cat_list
- """ KEEP this for legacy purposes
-# sorted_event_keys = Event.getSortedKeys(unlocked_events,include_none=True)
+ # KEEP this for legacy purposes
+ # sorted_event_keys = Event.getSortedKeys(unlocked_events,include_none=True)
- prompt_category_menu = []
- #Make a list of categories
+ # prompt_category_menu = []
+ # #Make a list of categories
- #Make a list of all categories
- subcategories=set([])
- for event in sorted_event_keys:
- if unlocked_events[event].category is not None:
- new_categories=set(unlocked_events[event].category).difference(set(current_category))
- subcategories=subcategories.union(new_categories)
+ # #Make a list of all categories
+ # subcategories=set([])
+ # for event in sorted_event_keys:
+ # if unlocked_events[event].category is not None:
+ # new_categories=set(unlocked_events[event].category).difference(set(current_category))
+ # subcategories=subcategories.union(new_categories)
- subcategories = list(subcategories)
- for category in sorted(subcategories, key=lambda s: s.lower()):
- #Don't list additional subcategories if adding them wouldn't change the same you are looking at
- test_unlock = Event.filterEvents(evhand.event_database,full_copy=True,category=[False,current_category+[category]],unlocked=True)
+ # subcategories = list(subcategories)
+ # for category in sorted(subcategories, key=lambda s: s.lower()):
+ # #Don't list additional subcategories if adding them wouldn't change the same you are looking at
+ # test_unlock = Event.filterEvents(evhand.event_database,full_copy=True,category=[False,current_category+[category]],unlocked=True)
- if len(test_unlock) != len(sorted_event_keys):
- prompt_category_menu.append([category.capitalize() + "...",category])
+ # if len(test_unlock) != len(sorted_event_keys):
+ # prompt_category_menu.append([category.capitalize() + "...",category])
- #If we do have a category picked, make a list of the keys
- if sorted_event_keys is not None:
- for event in sorted_event_keys:
- prompt_category_menu.append([unlocked_events[event].prompt,event])
- """
+ # #If we do have a category picked, make a list of the keys
+ # if sorted_event_keys is not None:
+ # for event in sorted_event_keys:
+ # prompt_category_menu.append([unlocked_events[event].prompt,event])
call screen twopane_scrollable_menu(prev_items, main_items, evhand.LEFT_AREA, evhand.LEFT_XALIGN, evhand.RIGHT_AREA, evhand.RIGHT_XALIGN, len(current_category)) nopredict
@@ -3539,15 +3468,15 @@ label prompts_categories(pool=True):
current_category.pop()
current_category.append(_return)
-# TODO: if we have subcategories, this needs to be setup properly
-# elif _return in main_cats:
- # we selected a category in the main pane
-# $ current_category.append(_return)
-# $ cat_lists.append(main_pane)
-# $ is_root = False
+ # TODO: if we have subcategories, this needs to be setup properly
+ # elif _return in main_cats:
+ # we selected a category in the main pane
+ # $ current_category.append(_return)
+ # $ cat_lists.append(main_pane)
+ # $ is_root = False
-# elif _return == -2: # Thats enough for now
-# $picked_event = True
+ # elif _return == -2: # Thats enough for now
+ # $picked_event = True
elif _return == -1: # go back
if len(current_category) > 0:
@@ -3702,13 +3631,13 @@ label mas_bookmarks_unbookmark(bookmarks_items):
# sanity check that the user selected something
if bookmarks_to_remove:
python:
- for ev_label in bookmarks_to_remove.iterkeys():
+ for ev_label in bookmarks_to_remove.keys():
# remove the bookmark from persist (if in it)
if ev_label in persistent._mas_player_bookmarked:
persistent._mas_player_bookmarked.remove(ev_label)
# filter the removed items to show the menu again
- bookmarks_items = filter(lambda item: item[1] not in bookmarks_to_remove, bookmarks_items)
+ bookmarks_items = list(filter(lambda item: item[1] not in bookmarks_to_remove, bookmarks_items))
show monika at t11
m 1dsa "Okay, [player].{w=0.2}.{w=0.2}.{w=0.2}{nw}"
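
The list(...) wrapper is needed because Python 3's filter returns a one-shot iterator rather than a list, and bookmarks_items is reused afterwards when the menu is shown again. A minimal illustration with stand-in data:

    items = [("Prompt A", "ev_a"), ("Prompt B", "ev_b")]
    removed = {"ev_b": True}

    lazy = filter(lambda item: item[1] not in removed, items)
    # 'lazy' has no len() and is exhausted after one pass, so materialize it:
    kept = list(filter(lambda item: item[1] not in removed, items))
    assert kept == [("Prompt A", "ev_a")]
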
diff --git a/Monika After Story/game/event-rules.rpy b/Monika After Story/game/event-rules.rpy
index 23d12ed69b..0da430687e 100644
--- a/Monika After Story/game/event-rules.rpy
+++ b/Monika After Story/game/event-rules.rpy
@@ -636,83 +636,6 @@ init -1 python:
# Evaluate randint with a chance of 1 in random_chance
return renpy.random.randint(1,random_chance) == 1
- @store.mas_utils.deprecated(use_instead="the aff_range property for Events", should_raise=True)
- class MASAffectionRule(object):
- """
- NOTE: DEPRECATED
- Use the aff_range property for Events instead
-
- Static Class used to create affection specific rules in tuple form.
- That tuple is then stored in a dict containing this rule name constant.
- Each rule is defined by a min and a max determining a range of affection
- to check against.
- """
-
- @store.mas_utils.deprecated(use_instead="the aff_range property for Events", should_raise=True)
- @staticmethod
- def create_rule(min, max, ev=None):
- """
- IN:
- min - An int representing the minimal(inclusive) affection required
- for the event to be available, if None is passed is assumed
- that there's no minimal affection
- max - An int representing the maximum(inclusive) affection required
- for the event to be available, if None is passed is assumed
- that there's no maximum affection
- ev - Event to create rule for, if passed in
- (Default: None)
-
- RETURNS:
- a dict containing the specified rules
- """
-
- # both min and max can't be None at the same time, since that means
- # that this is not affection dependent
- if not min and not max:
- raise Exception("at least min or max must not be None")
-
- # return the rule inside a dict
- rule = {EV_RULE_AFF_RANGE : (min, max)}
-
- if ev:
- ev.rules.update(rule)
-
- return rule
-
- @store.mas_utils.deprecated(use_instead="the aff_range property for Events", should_raise=True)
- @staticmethod
- def evaluate_rule(event=None, rule=None, affection=None, noRuleReturn=False):
- """
- IN:
- event - the event to evaluate
- rule - the MASAffectionRule to check against
- affection - the affection to check the rule against
-
- RETURNS:
- True if the current affection is inside the rule range
- """
-
- # check if we have an event that contains the rule we need
- # event rule takes priority so it's checked here
-
- if event and EV_RULE_AFF_RANGE in event.rules:
- rule = event.rules[EV_RULE_AFF_RANGE]
-
- # sanity check if we don't have a rule return False
- if rule is None:
- return noRuleReturn
-
- # store affection for easy checking
- if not affection:
- affection = _mas_getAffection()
-
- # unpack the rule for easy access
- min, max = rule
-
- # Evaluate if affection is inside the rule range, in case both are None
- # will return true (however that case should be catched on create_rule)
- return (affection >= min and not max) or (min <= affection <= max)
-
class MASPriorityRule(object):
"""
@@ -1065,7 +988,7 @@ init python:
NOTE: uses mas_getEV
"""
- for ev_label in persistent._mas_undo_action_rules.keys():
+ for ev_label in tuple(persistent._mas_undo_action_rules.keys()):
ev = mas_getEV(ev_label)
#Since we can have differing returns, we store this to use later
should_undo = MASUndoActionRule.evaluate_rule(ev)
diff --git a/Monika After Story/game/gui.rpy b/Monika After Story/game/gui.rpy
new file mode 100644
index 0000000000..791e772c18
--- /dev/null
+++ b/Monika After Story/game/gui.rpy
@@ -0,0 +1,463 @@
+
+
+
+
+
+
+
+
+
+
+init -2 python:
+ gui.init(1280, 720)
+
+
+
+
+
+
+
+define -2 gui.hover_sound = "gui/sfx/hover.ogg"
+define -2 gui.activate_sound = "gui/sfx/select.ogg"
+define -2 gui.activate_sound_glitch = "gui/sfx/select_glitch.ogg"
+
+
+
+
+
+
+define -2 gui.accent_color = '#ffffff'
+
+
+define -2 gui.idle_color = '#aaaaaa'
+
+
+
+define -2 gui.idle_small_color = '#333'
+
+
+define -2 gui.hover_color = '#cc6699'
+
+
+
+define -2 gui.selected_color = '#bb5588'
+
+
+define -2 gui.insensitive_color = '#aaaaaa7f'
+
+
+
+define -2 gui.muted_color = '#6666a3'
+define -2 gui.hover_muted_color = '#9999c1'
+
+
+define -2 gui.text_color = '#ffffff'
+define -2 gui.interface_text_color = '#ffffff'
+
+
+
+
+
+define -2 gui.default_font = "gui/font/Aller_Rg.ttf"
+
+
+define -2 gui.name_font = "gui/font/RifficFree-Bold.ttf"
+
+
+define -2 gui.interface_font = "gui/font/Aller_Rg.ttf"
+
+
+define -2 gui.text_size = 24
+
+
+define -2 gui.name_text_size = 24
+
+
+define -2 gui.interface_text_size = 24
+
+
+define -2 gui.label_text_size = 28
+
+
+define -2 gui.notify_text_size = 16
+
+
+define -2 gui.title_text_size = 38
+
+
+
+
+
+define -2 gui.main_menu_background = "menu_bg"
+define -2 gui.game_menu_background = "game_menu_bg"
+
+
+define -2 gui.show_name = False
+
+
+
+
+
+
+
+
+define -2 gui.textbox_height = 182
+
+
+
+define -2 gui.textbox_yalign = 0.99
+
+
+
+
+define -2 gui.name_xpos = 350
+define -2 gui.name_ypos = -3
+
+
+
+define -2 gui.name_xalign = 0.5
+
+
+
+define -2 gui.namebox_width = 168
+define -2 gui.namebox_height = 39
+
+
+
+define -2 gui.namebox_borders = Borders(5, 5, 5, 2)
+
+
+
+define -2 gui.namebox_tile = False
+
+
+
+
+
+define -2 gui.text_xpos = 268
+define -2 gui.text_ypos = 62
+
+
+define -2 gui.text_width = 744
+
+
+
+define -2 gui.text_xalign = 0.0
+
+
+
+
+
+
+
+
+define -2 gui.button_width = None
+define -2 gui.button_height = 36
+
+
+define -2 gui.button_borders = Borders(4, 4, 4, 4)
+
+
+
+define -2 gui.button_tile = False
+
+
+define -2 gui.button_text_font = gui.interface_font
+
+
+define -2 gui.button_text_size = gui.interface_text_size
+
+
+define -2 gui.button_text_idle_color = gui.idle_color
+define -2 gui.button_text_hover_color = gui.hover_color
+define -2 gui.button_text_selected_color = gui.selected_color
+define -2 gui.button_text_insensitive_color = gui.insensitive_color
+
+
+
+define -2 gui.button_text_xalign = 0.0
+
+
+
+
+
+
+
+
+define -2 gui.radio_button_borders = Borders(28, 4, 4, 4)
+
+define -2 gui.check_button_borders = Borders(28, 4, 4, 4)
+
+define -2 gui.confirm_button_text_xalign = 0.5
+
+define -2 gui.page_button_borders = Borders(10, 4, 10, 4)
+
+
+define -2 gui.quick_button_text_size = 14
+define -2 gui.quick_button_text_idle_color = "#522"
+define -2 gui.quick_button_text_hover_color = "#fcc"
+define -2 gui.quick_button_text_selected_color = gui.accent_color
+define -2 gui.quick_button_text_insensitive_color = "#a66"
+
+
+
+
+
+
+
+
+
+
+
+
+define -2 gui.choice_button_width = 420
+define -2 gui.choice_button_height = None
+define -2 gui.choice_button_tile = False
+define -2 gui.choice_button_borders = Borders(100, 5, 100, 5)
+define -2 gui.choice_button_text_font = gui.default_font
+define -2 gui.choice_button_text_size = gui.text_size
+define -2 gui.choice_button_text_xalign = 0.5
+define -2 gui.choice_button_text_idle_color = "#000"
+define -2 gui.choice_button_text_hover_color = "#fa9"
+
+
+
+
+
+
+
+
+
+define -2 gui.slot_button_width = 276
+define -2 gui.slot_button_height = 206
+define -2 gui.slot_button_borders = Borders(10, 10, 10, 10)
+define -2 gui.slot_button_text_size = 14
+define -2 gui.slot_button_text_xalign = 0.5
+define -2 gui.slot_button_text_idle_color = gui.idle_small_color
+define -2 gui.slot_button_text_hover_color = gui.hover_color
+
+
+define -2 config.thumbnail_width = 256
+define -2 config.thumbnail_height = 144
+
+
+define -2 gui.file_slot_cols = 3
+define -2 gui.file_slot_rows = 2
+
+
+
+
+
+
+
+
+
+define -2 gui.navigation_xpos = 80
+
+
+define -2 gui.skip_ypos = 10
+
+
+define -2 gui.notify_ypos = 45
+
+
+define -2 gui.choice_spacing = 22
+
+
+define -2 gui.navigation_spacing = 6
+
+
+define -2 gui.pref_spacing = 10
+
+
+define -2 gui.pref_button_spacing = 0
+
+
+define -2 gui.page_spacing = 0
+
+
+define -2 gui.slot_spacing = 10
+
+
+
+
+
+
+
+
+define -2 gui.frame_borders = Borders(4, 4, 4, 4)
+
+
+define -2 gui.confirm_frame_borders = Borders(40, 40, 40, 40)
+
+
+define -2 gui.skip_frame_borders = Borders(16, 5, 50, 5)
+
+
+define -2 gui.notify_frame_borders = Borders(16, 5, 40, 5)
+
+
+define -2 gui.frame_tile = False
+
+
+
+
+
+
+
+
+
+
+
+define -2 gui.bar_size = 36
+define -2 gui.scrollbar_size = 12
+define -2 gui.slider_size = 30
+
+
+define -2 gui.bar_tile = False
+define -2 gui.scrollbar_tile = False
+define -2 gui.slider_tile = False
+
+
+define -2 gui.bar_borders = Borders(4, 4, 4, 4)
+define -2 gui.scrollbar_borders = Borders(4, 4, 4, 4)
+define -2 gui.slider_borders = Borders(4, 4, 4, 4)
+
+
+define -2 gui.vbar_borders = Borders(4, 4, 4, 4)
+define -2 gui.vscrollbar_borders = Borders(4, 4, 4, 4)
+define -2 gui.vslider_borders = Borders(4, 4, 4, 4)
+
+
+
+define -2 gui.unscrollable = "hide"
+
+
+
+
+
+
+
+define -2 config.history_length = 50
+
+
+
+define -2 gui.history_height = None
+
+
+
+define -2 gui.history_name_xpos = 150
+define -2 gui.history_name_ypos = 0
+define -2 gui.history_name_width = 150
+define -2 gui.history_name_xalign = 1.0
+
+
+define -2 gui.history_text_xpos = 170
+define -2 gui.history_text_ypos = 5
+define -2 gui.history_text_width = 740
+define -2 gui.history_text_xalign = 0.0
+
+
+
+
+
+
+
+define -2 gui.nvl_borders = Borders(0, 10, 0, 20)
+
+
+
+define -2 gui.nvl_height = 115
+
+
+
+define -2 gui.nvl_spacing = 10
+
+
+
+define -2 gui.nvl_name_xpos = 430
+define -2 gui.nvl_name_ypos = 0
+define -2 gui.nvl_name_width = 150
+define -2 gui.nvl_name_xalign = 1.0
+
+
+define -2 gui.nvl_text_xpos = 450
+define -2 gui.nvl_text_ypos = 8
+define -2 gui.nvl_text_width = 590
+define -2 gui.nvl_text_xalign = 0.0
+
+
+
+define -2 gui.nvl_thought_xpos = 240
+define -2 gui.nvl_thought_ypos = 0
+define -2 gui.nvl_thought_width = 780
+define -2 gui.nvl_thought_xalign = 0.0
+
+
+define -2 gui.nvl_button_xpos = 450
+define -2 gui.nvl_button_xalign = 0.0
+
+
+
+
+
+
+
+init -2 python:
+
+
+
+ if renpy.variant("touch"):
+
+ gui.quick_button_borders = Borders(60, 14, 60, 0)
+
+
+
+ if renpy.variant("small"):
+
+
+ gui.text_size = 30
+ gui.name_text_size = 36
+ gui.notify_text_size = 25
+ gui.interface_text_size = 36
+ gui.button_text_size = 34
+ gui.label_text_size = 36
+
+
+ gui.textbox_height = 240
+ gui.name_xpos = 80
+ gui.text_xpos = 90
+ gui.text_width = 1100
+
+
+ gui.choice_button_width = 1240
+
+ gui.navigation_spacing = 20
+ gui.pref_button_spacing = 10
+
+ gui.history_height = 190
+ gui.history_text_width = 690
+
+
+ gui.file_slot_cols = 2
+ gui.file_slot_rows = 2
+
+
+ gui.nvl_height = 170
+
+ gui.nvl_name_width = 305
+ gui.nvl_name_xpos = 325
+
+ gui.nvl_text_width = 915
+ gui.nvl_text_xpos = 345
+ gui.nvl_text_ypos = 5
+
+ gui.nvl_thought_width = 1240
+ gui.nvl_thought_xpos = 20
+
+ gui.nvl_button_width = 1240
+ gui.nvl_button_xpos = 20
+
+
+ gui.quick_button_text_size = 20
+# Decompiled by unrpyc: https://github.com/CensoredUsername/unrpyc
diff --git a/Monika After Story/game/import_ddlc.rpy b/Monika After Story/game/import_ddlc.rpy
index f345195a4e..8855b75c8d 100644
--- a/Monika After Story/game/import_ddlc.rpy
+++ b/Monika After Story/game/import_ddlc.rpy
@@ -14,7 +14,7 @@ init python:
fo = open(dumppath, "w")
- for key in sorted(dumped_persistent.iterkeys()):
+ for key in sorted(dumped_persistent.keys()):
fo.write(str(key) + ' - ' + str(type(dumped_persistent[key])) + ' >>> '+ str(dumped_persistent[key]) + '\n\n')
fo.close()
@@ -103,8 +103,7 @@ label import_ddlc_persistent:
#Open the persistent save file at ddlc_save_path
ddlc_persistent = None
try:
- with open(ddlc_save_path, "rb") as ddlc_pfile:
- ddlc_persistent = mas_dockstat.cPickle.loads(ddlc_pfile.read().decode("zlib"))
+ ddlc_persistent = store.mas_per_check._load_per_data(ddlc_save_path)
except Exception as e:
store.mas_utils.mas_log.error("Failed to read/decode DDLC persistent: {0}".format(e))
diff --git a/Monika After Story/game/mod_assets/games/hangman/poemwords.txt b/Monika After Story/game/mod_assets/games/hangman/poemwords.txt
new file mode 100644
index 0000000000..bdf438386b
--- /dev/null
+++ b/Monika After Story/game/mod_assets/games/hangman/poemwords.txt
@@ -0,0 +1,235 @@
+#File format: word,sPoint,nPoint,yPoint
+
+#Sayori's winning words
+happiness,3,2,1
+sadness,3,2,1
+death,3,1,2
+tragedy,3,1,2
+alone,3,1,2
+love,3,2,1
+adventure,3,2,1
+sweet,3,2,1
+excitement,3,2,1
+fireworks,3,2,1
+romance,3,2,1
+tears,3,1,2
+depression,3,1,2
+heart,3,2,1
+marriage,3,2,1
+passion,3,2,1
+childhood,3,2,1
+fun,3,2,1
+color,3,2,1
+hope,3,1,2
+friends,3,2,1
+family,3,2,1
+party,3,2,1
+vacation,3,2,1
+lazy,3,2,1
+daydream,3,1,2
+pain,3,1,2
+holiday,3,2,1
+bed,3,2,1
+feather,3,2,1
+shame,3,1,2
+fear,3,1,2
+warm,3,2,1
+flower,3,2,1
+comfort,3,2,1
+dance,3,2,1
+sing,3,2,1
+cry,3,1,2
+laugh,3,2,1
+dark,3,1,2
+sunny,3,2,1
+raincloud,3,2,1
+calm,3,1,2
+silly,3,2,1
+flying,3,2,1
+wonderful,3,2,1
+unrequited,3,1,2
+rose,3,1,2
+together,3,2,1
+promise,3,2,1
+charm,3,2,1
+beauty,3,2,1
+cheer,3,2,1
+smile,3,2,1
+broken,3,1,2
+precious,3,2,1
+prayer,3,1,2
+clumsy,3,2,1
+forgive,3,1,2
+nature,3,2,1
+ocean,3,2,1
+dazzle,3,2,1
+special,3,2,1
+music,3,2,1
+lucky,3,2,1
+misfortune,3,1,2
+loud,3,2,1
+peaceful,3,1,2
+joy,3,1,2
+sunset,3,2,1
+fireflies,3,2,1
+rainbow,3,2,1
+hurt,3,1,2
+play,3,2,1
+sparkle,3,2,1
+scars,3,1,2
+empty,3,1,2
+amazing,3,2,1
+grief,3,1,2
+embrace,3,1,2
+extraordinary,3,2,1
+awesome,3,2,1
+defeat,3,1,2
+hopeless,3,1,2
+misery,3,1,2
+treasure,3,2,1
+bliss,3,2,1
+memories,3,2,1
+
+#Natsuki's words
+cute,2,3,1
+fluffy,2,3,1
+pure,1,3,2
+candy,2,3,1
+shopping,2,3,1
+puppy,2,3,1
+kitty,2,3,1
+clouds,2,3,1
+lipstick,1,3,2
+parfait,2,3,1
+strawberry,2,3,1
+pink,2,3,1
+chocolate,2,3,1
+heartbeat,1,3,2
+kiss,1,3,2
+melody,2,3,1
+ribbon,2,3,1
+jumpy,2,3,1
+doki-doki,2,3,1
+kawaii,2,3,1
+skirt,2,3,1
+cheeks,2,3,1
+email,2,3,1
+sticky,2,3,1
+bouncy,2,3,1
+shiny,2,3,1
+nibble,2,3,1
+fantasy,1,3,2
+sugar,2,3,1
+giggle,2,3,1
+marshmallow,2,3,1
+hop,2,3,1
+skipping,2,3,1
+peace,2,3,1
+spinning,2,3,1
+twirl,2,3,1
+lollipop,2,3,1
+poof,2,3,1
+bubbles,2,3,1
+whisper,2,3,1
+summer,2,3,1
+waterfall,1,3,2
+swimsuit,2,3,1
+vanilla,2,3,1
+headphones,2,3,1
+games,2,3,1
+socks,2,3,1
+hair,2,3,1
+playground,2,3,1
+nightgown,1,3,2
+blanket,1,3,2
+milk,2,3,1
+pout,2,3,1
+anger,2,3,1
+papa,2,3,1
+valentine,2,3,1
+mouse,1,3,2
+whistle,2,3,1
+boop,2,3,1
+bunny,2,3,1
+anime,2,3,1
+jump,2,3,1
+
+#Yuri's words
+determination,1,1,3
+suicide,2,1,3
+imagination,2,1,3
+secretive,2,1,3
+vitality,1,1,3
+existence,2,1,3
+effulgent,1,1,3
+crimson,1,1,3
+whirlwind,1,1,3
+afterimage,1,1,3
+vertigo,1,1,3
+disoriented,1,1,3
+essence,2,1,3
+ambient,2,1,3
+starscape,2,1,3
+disarray,1,1,3
+contamination,1,1,3
+intellectual,1,1,3
+analysis,1,1,3
+entropy,1,1,3
+vivacious,1,1,3
+uncanny,2,1,3
+incongruent,1,1,3
+wrath,2,1,3
+heavensent,2,1,3
+massacre,2,1,3
+philosophy,1,1,3
+fickle,1,1,3
+tenacious,1,1,3
+aura,2,1,3
+unstable,1,1,3
+inferno,2,1,3
+incapable,2,1,3
+destiny,2,1,3
+infallible,1,1,3
+agonizing,2,1,3
+variance,1,1,3
+uncontrollable,2,1,3
+extreme,1,1,3
+flee,2,1,3
+dream,2,2,3
+disaster,2,1,3
+vivid,2,1,3
+vibrant,1,2,3
+question,1,2,3
+fester,2,1,3
+judgment,1,1,3
+cage,1,2,3
+explode,1,2,3
+pleasure,1,2,3
+lust,1,2,3
+sensation,1,2,3
+climax,1,2,3
+electricity,1,2,3
+disown,1,1,3
+despise,2,1,3
+infinite,2,1,3
+eternity,2,1,3
+time,2,1,3
+universe,2,1,3
+unending,2,1,3
+raindrops,2,1,3
+covet,1,1,3
+unrestrained,1,1,3
+landscape,2,1,3
+portrait,2,1,3
+journey,2,1,3
+meager,1,1,3
+anxiety,2,1,3
+frightening,2,1,3
+horror,2,1,3
+melancholy,2,1,3
+insight,2,1,3
+atone,2,1,3
+breathe,1,2,3
+captive,2,1,3
+desire,1,2,3
+graveyard,2,1,3
\ No newline at end of file
diff --git a/Monika After Story/CustomIconWindows.ico b/Monika After Story/game/mod_assets/mas_icon.ico
similarity index 100%
rename from Monika After Story/CustomIconWindows.ico
rename to Monika After Story/game/mod_assets/mas_icon.ico
diff --git a/Monika After Story/game/options.rpy b/Monika After Story/game/options.rpy
index e975da49ca..d442d43b7e 100644
--- a/Monika After Story/game/options.rpy
+++ b/Monika After Story/game/options.rpy
@@ -22,6 +22,9 @@ define gui.about = _("")
define build.name = "Monika_After_Story"
+## Name of the executables; we must keep it DDLC to comply with the guidelines
+define build.executable_name = "DDLC"
+
## Preference defaults #########################################################
## Controls the default text speed. The default, 0, is infinite, while any other
@@ -121,39 +124,33 @@ init python:
##This tells Renpy to build an updater file
build.include_update = True
- ## This is the archive of data for your mod
- #build.archive(build.name, "all")
-
- #Add the pictures necessary for the scrollable menu
- build.classify("game/gui/**",build.name)
-
- ## These files get put into your data file
- build.classify("game/mod_assets/**",build.name)
- #build.classify("game/**.rpy",build.name) #Optional line to include plaintext scripts
- build.classify("game/*.rpyc",build.name) #Serialized scripts must be included
- build.classify("game/dev/*.*",None) #But not the dev folder
- build.classify("README.html",build.name) #Included help file for mod installation
- build.classify("game/python-packages/**",build.name)#Additional python pacakges
- build.classify("CustomIcon**.**",build.name)
-
-
- build.package(build.directory_name + "Mod",'zip',build.name,description='DDLC Compatible Mod')
-
- build.classify('**~', None)
- build.classify('**.bak', None)
- build.classify('**/.**', None)
- build.classify('**/#**', None)
- build.classify('**/thumbs.db', None)
- build.classify('**.rpy', None)
- build.classify('**.psd', None)
- build.classify('**.sublime-project', None)
- build.classify('**.sublime-workspace', None)
- build.classify('/music/*.*', None)
- build.classify('script-regex.txt', None)
- build.classify('/game/10', None)
- build.classify('/game/cache/*.*', None)
- build.classify('**.rpa',None)
- build.classify("game/mod_assets/api_keys.json", None)
+ ## Define the archives to use
+ build.archive("scripts", "all")
+
+ ## These files will be included in the package
+ # Add mod assets
+ build.classify("game/mod_assets/**", "all")
+ build.classify("game/gui/**", "all")
+ # Add scripts in the game folder
+ # build.classify("game/*.rpy", "scripts")# Optional, includes source
+ build.classify("game/*.rpyc", "scripts")
+ # Add python packages
+ build.classify("game/python-packages/**", "all")
+ # Add README
+ build.classify("README.html", "all")
+
+ # build.package(build.directory_name + "Mod", "zip", "all", description="DDLC Compatible Mod")
+
+ ## These files will be excluded
+ # Remove everything else from the game folder
+ build.classify("game/**", None)
+ # Remove cache
+ # build.classify("game/cache/**", None)
+ # build.classify("game/saves/**", None)
+ # Remove logs
+ build.classify("log/**", None)
+ build.classify("*.log", None)
+
build.classify("**.pem", None)
## Files matching documentation patterns are duplicated in a mac app build,
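Note on the build rules above: build.classify patterns are checked in the order they are registered, and the first match decides which archive a file lands in (None drops the file), which is why the catch-all "game/**" exclusion comes last. The sketch below is a rough, hypothetical preview of that first-match-wins behaviour in plain Python; it is not part of this PR, and fnmatch's wildcard semantics only approximate Ren'Py's "**"/"*" rules.

    import fnmatch

    # Mirrors the rules above: (pattern, archive-or-None), checked in order.
    CLASSIFY_RULES = [
        ("game/mod_assets/**", "all"),
        ("game/gui/**", "all"),
        ("game/*.rpyc", "scripts"),
        ("game/python-packages/**", "all"),
        ("README.html", "all"),
        ("game/**", None),  # everything else under game/ is dropped
    ]

    def classify(path):
        """Return the archive the first matching rule assigns, or None."""
        for pattern, archive in CLASSIFY_RULES:
            # crude approximation: collapse '**' to '*' for fnmatch
            if fnmatch.fnmatch(path, pattern.replace("**", "*")):
                return archive
        return None

    print(classify("game/mod_assets/mas_icon.ico"))  # all
    print(classify("game/script-ch30.rpyc"))         # scripts
    print(classify("game/audio.rpa"))                # None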
diff --git a/Monika After Story/game/overrides.rpy b/Monika After Story/game/overrides.rpy
index f5b9278831..4283d4c6e7 100644
--- a/Monika After Story/game/overrides.rpy
+++ b/Monika After Story/game/overrides.rpy
@@ -19,5 +19,50 @@ init -10 python:
## Super early overrides
## You'll need a block like this for creator defined screen language
## Don't use this unless you know you need it
-python early:
- pass
+python early in mas_overrides:
+ import threading
+
+ import renpy
+ import renpy.savelocation as savelocation
+
+
+ def verify_data_override(data, signatures, check_verifying=True):
+ """
+ Verify the data in a save token.
+
+ Originally this function checks the data against a signature to decide whether the persistent should be loaded.
+ Because we want anyone to be able to migrate and transfer their data, we simply return True.
+ """
+ return True
+
+ renpy.savetoken.verify_data = verify_data_override
+
+
+ def savelocation_init_override():
+ """
+ Run **some** of what savelocation.init runs.
+
+ Basically, we're trying to keep saves in the AppData (or equivalent) folder
+ to make backups/restores easier.
+
+ The only difference here is that this skips the game savedirs and
+ 'extra' save dirs (omissions only).
+ """
+ savelocation.quit()
+ savelocation.quit_scan_thread = False
+
+ location = savelocation.MultiLocation()
+
+ location.add(savelocation.FileLocation(renpy.config.savedir))
+
+ location.scan()
+
+ renpy.loadsave.location = location
+
+ if not renpy.emscripten:
+ savelocation.scan_thread = threading.Thread(target=savelocation.run_scan_thread)
+ savelocation.scan_thread.start()
+
+ savelocation.init = savelocation_init_override
+
+
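Note on the overrides above: they work by plain attribute rebinding. As long as the engine resolves verify_data and init through their modules at call time (renpy.savetoken.verify_data(...), savelocation.init()), assigning a new function to the module attribute swaps the behaviour everywhere without touching engine source. A minimal, self-contained sketch of that monkey-patch pattern, using a hypothetical stand-in module rather than real Ren'Py code:

    import types

    # Hypothetical stand-in for an engine module such as renpy.savetoken.
    engine_mod = types.ModuleType("engine_mod")
    engine_mod.verify = lambda data: data == "expected"

    def verify_always(data):
        return True  # accept everything, like verify_data_override above

    engine_mod.verify = verify_always      # rebind the module attribute
    print(engine_mod.verify("anything"))   # True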
diff --git a/Monika After Story/game/pong.rpy b/Monika After Story/game/pong.rpy
index 8d563d0f77..035d608477 100644
--- a/Monika After Story/game/pong.rpy
+++ b/Monika After Story/game/pong.rpy
@@ -7,53 +7,54 @@ default persistent._mas_pm_ever_let_monika_win_on_purpose = False
# the day at which the difficulty change was initiated
default persistent._mas_pong_difficulty_change_next_game_date = datetime.date.today()
-define PONG_DIFFICULTY_CHANGE_ON_WIN = +1
-define PONG_DIFFICULTY_CHANGE_ON_LOSS = -1
-define PONG_DIFFICULTY_POWERUP = +5
-define PONG_DIFFICULTY_POWERDOWN = -5
-define PONG_PONG_DIFFICULTY_POWERDOWNBIG = -10
+init -5 python:
+ PONG_DIFFICULTY_CHANGE_ON_WIN = +1
+ PONG_DIFFICULTY_CHANGE_ON_LOSS = -1
+ PONG_DIFFICULTY_POWERUP = +5
+ PONG_DIFFICULTY_POWERDOWN = -5
+ PONG_PONG_DIFFICULTY_POWERDOWNBIG = -10
#Triggering the same response twice in a row leads to a different response, not all responses reset this (on purpose)
-define PONG_MONIKA_RESPONSE_NONE = 0
-define PONG_MONIKA_RESPONSE_WIN_AFTER_PLAYER_WON_MIN_THREE_TIMES = 1
-define PONG_MONIKA_RESPONSE_SECOND_WIN_AFTER_PLAYER_WON_MIN_THREE_TIMES = 2
-define PONG_MONIKA_RESPONSE_WIN_LONG_GAME = 3
-define PONG_MONIKA_RESPONSE_WIN_SHORT_GAME = 4
-define PONG_MONIKA_RESPONSE_WIN_TRICKSHOT = 5
-define PONG_MONIKA_RESPONSE_WIN_EASY_GAME = 6
-define PONG_MONIKA_RESPONSE_WIN_MEDIUM_GAME = 7
-define PONG_MONIKA_RESPONSE_WIN_HARD_GAME = 8
-define PONG_MONIKA_RESPONSE_WIN_EXPERT_GAME = 9
-define PONG_MONIKA_RESPONSE_WIN_EXTREME_GAME = 10
-define PONG_MONIKA_RESPONSE_LOSE_WITHOUT_HITTING_BALL = 11
-define PONG_MONIKA_RESPONSE_LOSE_TRICKSHOT = 12
-define PONG_MONIKA_RESPONSE_LOSE_LONG_GAME = 13
-define PONG_MONIKA_RESPONSE_LOSE_SHORT_GAME = 14
-define PONG_MONIKA_RESPONSE_LOSE_EASY_GAME = 15
-define PONG_MONIKA_RESPONSE_LOSE_MEDIUM_GAME = 16
-define PONG_MONIKA_RESPONSE_LOSE_HARD_GAME = 17
-define PONG_MONIKA_RESPONSE_LOSE_EXPERT_GAME = 18
-define PONG_MONIKA_RESPONSE_LOSE_EXTREME_GAME = 19
-
-define pong_monika_last_response_id = PONG_MONIKA_RESPONSE_NONE
-
-define played_pong_this_session = False
-define mas_pong_taking_break = False
-define player_lets_monika_win_on_purpose = False
-define instant_loss_streak_counter = 0
-define loss_streak_counter = 0
-define win_streak_counter = 0
-define lose_on_purpose = False
-define monika_asks_to_go_easy = False
-
-# Need to be set before every game and be accessible outside the class
-define ball_paddle_bounces = 0
-define powerup_value_this_game = 0
-define instant_loss_streak_counter_before = 0
-define loss_streak_counter_before = 0
-define win_streak_counter_before = 0
-define pong_difficulty_before = 0
-define pong_angle_last_shot = 0.0
+ PONG_MONIKA_RESPONSE_NONE = 0
+ PONG_MONIKA_RESPONSE_WIN_AFTER_PLAYER_WON_MIN_THREE_TIMES = 1
+ PONG_MONIKA_RESPONSE_SECOND_WIN_AFTER_PLAYER_WON_MIN_THREE_TIMES = 2
+ PONG_MONIKA_RESPONSE_WIN_LONG_GAME = 3
+ PONG_MONIKA_RESPONSE_WIN_SHORT_GAME = 4
+ PONG_MONIKA_RESPONSE_WIN_TRICKSHOT = 5
+ PONG_MONIKA_RESPONSE_WIN_EASY_GAME = 6
+ PONG_MONIKA_RESPONSE_WIN_MEDIUM_GAME = 7
+ PONG_MONIKA_RESPONSE_WIN_HARD_GAME = 8
+ PONG_MONIKA_RESPONSE_WIN_EXPERT_GAME = 9
+ PONG_MONIKA_RESPONSE_WIN_EXTREME_GAME = 10
+ PONG_MONIKA_RESPONSE_LOSE_WITHOUT_HITTING_BALL = 11
+ PONG_MONIKA_RESPONSE_LOSE_TRICKSHOT = 12
+ PONG_MONIKA_RESPONSE_LOSE_LONG_GAME = 13
+ PONG_MONIKA_RESPONSE_LOSE_SHORT_GAME = 14
+ PONG_MONIKA_RESPONSE_LOSE_EASY_GAME = 15
+ PONG_MONIKA_RESPONSE_LOSE_MEDIUM_GAME = 16
+ PONG_MONIKA_RESPONSE_LOSE_HARD_GAME = 17
+ PONG_MONIKA_RESPONSE_LOSE_EXPERT_GAME = 18
+ PONG_MONIKA_RESPONSE_LOSE_EXTREME_GAME = 19
+
+ pong_monika_last_response_id = PONG_MONIKA_RESPONSE_NONE
+
+ played_pong_this_session = False
+ mas_pong_taking_break = False
+ player_lets_monika_win_on_purpose = False
+ instant_loss_streak_counter = 0
+ loss_streak_counter = 0
+ win_streak_counter = 0
+ lose_on_purpose = False
+ monika_asks_to_go_easy = False
+
+ # Need to be set before every game and be accessible outside the class
+ ball_paddle_bounces = 0
+ powerup_value_this_game = 0
+ instant_loss_streak_counter_before = 0
+ loss_streak_counter_before = 0
+ win_streak_counter_before = 0
+ pong_difficulty_before = 0
+ pong_angle_last_shot = 0.0
init:
@@ -249,6 +250,9 @@ init:
# Recomputes the position of the ball, handles bounces, and
# draws the screen.
def render(self, width, height, st, at):
+ global lose_on_purpose, win_streak_counter
+ global loss_streak_counter, instant_loss_streak_counter
+ global pong_angle_last_shot, ball_paddle_bounces
# The Render object we'll be drawing into.
r = renpy.Render(width, height)
@@ -278,7 +282,7 @@ init:
# Bounces the ball up to one time, either up or down
if not self.check_bounce_off_top():
- self.check_bounce_off_bottom()
+ self.check_bounce_off_bottom()
# Handles Monika's targeting and speed.
@@ -294,7 +298,6 @@ init:
# Moves Monika's paddle. It wants to go to self.by, but
# may be limited by it's speed limit.
- global lose_on_purpose
if lose_on_purpose and self.bx >= self.COURT_WIDTH * 0.75:
if self.bx <= self.PADDLE_X_MONIKA:
if self.ctargety > self.computery:
@@ -320,6 +323,7 @@ init:
# This draws a paddle, and checks for bounces.
def paddle(px, py, hotside, is_computer):
+ global ball_paddle_bounces
# Render the paddle image. We give it an 1280x720 area
# to render into, knowing that images will render smaller.
@@ -358,13 +362,11 @@ init:
elif angle < -self.MAX_ANGLE:
angle = -self.MAX_ANGLE;
- global pong_angle_last_shot
pong_angle_last_shot = angle;
self.bdy = .5 * math.sin(angle)
self.bdx = math.copysign(.5 * math.cos(angle), -self.bdx)
- global ball_paddle_bounces
ball_paddle_bounces += 1
# Changes where the computer aims after a hit.
@@ -384,8 +386,13 @@ init:
# Draw the ball.
ball = renpy.render(self.ball, self.COURT_WIDTH, self.COURT_HEIGHT, st, at)
- r.blit(ball, (int(self.bx - self.BALL_WIDTH / 2),
- int(self.by - self.BALL_HEIGHT / 2)))
+ r.blit(
+ ball,
+ (
+ int(self.bx - self.BALL_WIDTH / 2),
+ int(self.by - self.BALL_HEIGHT / 2)
+ )
+ )
# Show the player names.
player = renpy.render(self.player, self.COURT_WIDTH, self.COURT_HEIGHT, st, at)
@@ -405,21 +412,17 @@ init:
# Check for a winner.
if self.bx < -200:
-
if self.winner == None:
- global loss_streak_counter
loss_streak_counter += 1
+
if ball_paddle_bounces <= 1:
- global instant_loss_streak_counter
instant_loss_streak_counter += 1
else:
- global instant_loss_streak_counter
instant_loss_streak_counter = 0
- global win_streak_counter
- win_streak_counter = 0;
+ win_streak_counter = 0
self.winner = "monika"
# Needed to ensure that event is called, noticing
@@ -428,20 +431,17 @@ init:
elif self.bx > self.COURT_WIDTH + 200:
+
if self.winner == None:
- global win_streak_counter
win_streak_counter += 1;
- global loss_streak_counter
loss_streak_counter = 0
#won't reset if Monika misses the first hit
if ball_paddle_bounces > 1:
- global instant_loss_streak_counter
instant_loss_streak_counter = 0
self.winner = "player"
-
renpy.timeout(0)
# Ask that we be re-rendered ASAP, so we can show the next
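Note on the pong.rpy hunk above: the old code declared each global right next to its assignment, while the new code hoists the declarations to the top of render() (and paddle()). The small standalone sketch below, plain Python rather than game code, shows why the declarations are needed at all: assigning to a module-level name inside a function otherwise makes it a local.

    counter = 0

    def wrong_bump():
        # Raises UnboundLocalError if called: the assignment turns
        # 'counter' into a local name that is read before it is bound.
        counter += 1

    def right_bump():
        global counter  # same pattern as the hoisted globals above
        counter += 1

    right_bump()
    print(counter)  # 1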
diff --git a/Monika After Story/game/progression.rpy b/Monika After Story/game/progression.rpy
index 00084e53e6..047d4c5495 100644
--- a/Monika After Story/game/progression.rpy
+++ b/Monika After Story/game/progression.rpy
@@ -316,20 +316,3 @@ init python in mas_xp:
)
if xp_rate < 1:
xp_rate = 1.0
-
-
-init python:
- @store.mas_utils.deprecated(should_raise=True)
- def grant_xp(experience):
- """DEPRECATED
- This does not do anything anymore. Around for compatibility
- purposes
- """
- pass
-
- @store.mas_utils.deprecated(should_raise=True)
- def get_level():
- """DEPRECATED
- This does not do anything anymore. Around for compatibility purposes
- """
- return 0
diff --git a/Monika After Story/game/python-packages/balloontip.py b/Monika After Story/game/python-packages/balloontip.py
deleted file mode 100644
index c60b230def..0000000000
--- a/Monika After Story/game/python-packages/balloontip.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -- coding: utf-8 --
-
-from win32api import *
-from win32gui import *
-import win32con
-import sys, os
-import struct
-import time
-
-class WindowsBalloonTip:
- def __init__(self):
- message_map = {
- win32con.WM_DESTROY: self.OnDestroy,
- }
- # Register the Window class.
- wc = WNDCLASS()
- self.hinst = wc.hInstance = GetModuleHandle(None)
- wc.lpszClassName = "PythonTaskbar"
- wc.lpfnWndProc = message_map # could also specify a wndproc.
- self.classAtom = RegisterClass(wc)
-
- def showWindow(self,title, msg):
- # Create the Window.
- style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
- self.hwnd = CreateWindow( self.classAtom, "Taskbar", style, \
- 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
- 0, 0, self.hinst, None)
- UpdateWindow(self.hwnd)
-
- #Get/Set Notification Icon
- iconPathName = os.path.abspath(os.path.join( sys.path[0], "CustomIconWindows.ico" ))
- icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
- try:
- hicon = LoadImage(self.hinst, iconPathName, \
- win32con.IMAGE_ICON, 0, 0, icon_flags)
- except:
- hicon = LoadIcon(0, win32con.IDI_APPLICATION)
-
- #Initialize the notif itself
- flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
- nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "Monika After Story")
-
- try:
- Shell_NotifyIcon(NIM_ADD, nid)
- Shell_NotifyIcon(NIM_MODIFY, \
- (self.hwnd, 0, NIF_INFO, win32con.WM_USER+20,\
- hicon, "Balloon tooltip",msg,200,title))
-
- #If we got here, that means we had no issue making the notif
- return True
-
- except:
- #Something went wrong, need to flag this to not make a sound
- return False
-
- def OnDestroy(self, hwnd, msg, wparam, lparam):
- nid = (self.hwnd, 0)
- Shell_NotifyIcon(NIM_DELETE, nid)
- PostQuitMessage(0) # Terminate the app.
-
-def balloon_tip(title, msg):
- w=WindowsBalloonTip(msg, title)
diff --git a/Monika After Story/game/python-packages/battery/__init__.py b/Monika After Story/game/python-packages/battery/__init__.py
deleted file mode 100644
index 20f890486c..0000000000
--- a/Monika After Story/game/python-packages/battery/__init__.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-This module provides functions to check information for the battery and
-the AC Line for supported systems.
-"""
-
-# known issues/help needed:
-# Running through wine makes it impossible to detect the battery
-# The Darwin linux distro conflicts with the OSX battery detection
-# The battery doesn't get properly detected on some linux distros
-# Testing on linux distros, like seriously, a lot of testing
-
-import platform
-
-from . import windows, linux, misc
-
-"""
-List of functions to check the battery level.
-"""
-BATTERY_LEVEL_FUNCTIONS = {
- 'Windows': windows.get_level,
- 'Linux': linux.get_level,
- '*': misc.get_level,
-}
-
-"""
-List of functions to check if the battery is present.
-"""
-BATTERY_CHECK_FUNCTIONS = {
- 'Windows': windows.is_battery_present,
- 'Linux': linux.is_battery_present,
- '*': misc.is_battery_present,
-}
-
-"""
-List of functions to check if the system is charging.
-"""
-AC_LINE_CHECK_FUNCTIONS = {
- 'Windows': windows.is_charging,
- 'Linux': linux.is_charging,
- '*': misc.is_charging,
-}
-
-_system = platform.system()
-
-
-def _run_function_by_system(funcdict):
- """
- Executes a function based on the system running.
-
- The '*' will be used for others, usually misc module's functions.
-
- Raises NotImplemetedError if the system is unsupported.
-
- :param funcdict: Dictionary of functions by system
- """
- if _system in BATTERY_LEVEL_FUNCTIONS:
- func = funcdict[_system]
- elif misc.can_check():
- func = funcdict['*']
- else:
- return None
-
- return func()
-
-
-def get_level():
- """
- Return the system battery level, otherwise None if the system
- doesn't have any batteries.
- """
- try:
- return _run_function_by_system(BATTERY_LEVEL_FUNCTIONS)
- except:
- return None
-
-
-def is_battery_present():
- """
- Check if the system has a battery present.
- """
- try:
- return _run_function_by_system(BATTERY_CHECK_FUNCTIONS)
- except:
- return False
-
-
-def is_charging():
- """
- Check if the system is charging.
- """
- try:
- return _run_function_by_system(AC_LINE_CHECK_FUNCTIONS)
- except:
- return False
-
-
-def get_supported_systems():
- """
- Returns a list of supported systems.
- """
- systems = [x for x in BATTERY_LEVEL_FUNCTIONS if x != '*']
- systems += misc.get_supported_systems()
-
- return systems
-
-
-def is_supported():
- """
- Check if this system is supported.
- """
- return False
-# return _system in get_supported_systems()
diff --git a/Monika After Story/game/python-packages/battery/linux.py b/Monika After Story/game/python-packages/battery/linux.py
deleted file mode 100644
index 259b74f352..0000000000
--- a/Monika After Story/game/python-packages/battery/linux.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-This module provides functions to check information for the battery and
-the AC Line on Linux systems.
-"""
-
-import os
-
-"""
-Path to the Linux power supply class directory
-"""
-LINUX_POWER_SUPPLY_CLASS = '/sys/class/power_supply/'
-
-
-def _get_battery():
- """
- Return the path to the systems' battery class, None otherwise.
- """
- for file in os.listdir(LINUX_POWER_SUPPLY_CLASS):
- if file.startswith('BAT'):
- return os.path.join(LINUX_POWER_SUPPLY_CLASS, file)
-
- return None
-
-
-def _read_battery(what):
- """
- Return content from a file in the battery class.
- """
- bat = _get_battery()
-
- if bat == None:
- raise RuntimeError('Battery not found')
-
- path = os.path.join(bat, what)
-
- with open(path, 'r') as f:
- content = f.read()
-
- return content
-
-
-def get_level():
- """
- Return the system battery level from the battery class in percentage,
- otherwise None if the system doesn't have any batteries.
- """
- level = int(_read_battery('capacity'))
-
- return level
-
-
-def is_charging():
- """
- Check if the system is charging based on the battery class
-
- :return: True if it's charging, false otherwise
- """
- status = _read_battery('status')
-
- return status == 'Charging\n'
-
-
-def is_battery_present():
- """
- Check if the system have a battery present
-
- :return: True if there's a battery, false otherwise
- """
- return _get_battery() != None
diff --git a/Monika After Story/game/python-packages/battery/misc.py b/Monika After Story/game/python-packages/battery/misc.py
deleted file mode 100644
index cfea00f6a4..0000000000
--- a/Monika After Story/game/python-packages/battery/misc.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-This module provides functions to check information for the battery and
-the AC Line for various systems by running commands onto a shell.
-"""
-
-import platform
-import re
-import subprocess
-
-"""
-List of commands that can be used to check for the battery level.
-"""
-BATTERY_LEVEL_COMMANDS = {
- 'FreeBSD': 'sysctl hw.acpi.battery.life',
- 'Darwin': 'pmset -g batt',
-}
-
-"""
-List of regex patterns to be used with BATTERY_LEVEL_COMMANDS to extract
-the battery level
-"""
-BATTERY_LEVEL_REGEX = {
- 'FreeBSD': r'hw\.acpi\.battery\.life: (\d+)',
- 'Darwin': r'(\d+%)',
-}
-
-"""
-List of commands that can be used to check for the AC Line status.
-"""
-AC_LINE_CHECK_COMMANDS = {
- 'FreeBSD': 'sysctl hw.acpi.acline',
- 'Darwin': 'pmset -g batt',
-}
-
-"""
-List of regex patterns to be used with AC_LINE_CHECK_COMMANDS to extract
-the AC Line status.
-"""
-AC_LINE_CHECK_REGEX = {
- 'FreeBSD': r'hw\.acpi\.acline: (\d)',
- 'Darwin': r'\d+%; (\w+|AC attached);'
-}
-
-_system = platform.system()
-
-
-class RegexDidNotMatchError(RuntimeError):
- pass
-
-
-def _run_command_based_by_system(cmddict, regexdict):
- """
- Runs a command based on the system running.
-
- Raises RegexDidNotMatchError if the regex provided did not match
- the output of the command.
-
- :param cmddict: Dictionary of commands to run by system
- :param regexdict: Dictionary of regex patterns to filter the output
- by system
-
- :return: The output from the command filtered by the regex pattern
- if provided
- """
- cmd = cmddict[_system]
-
- output = subprocess.check_output(cmd, shell=True)
-
- if _system in regexdict:
- regex = regexdict[_system]
- res = re.search(regex, output)
- if res:
- output = res.group(1)
- else:
- raise RegexDidNotMatchError("Regex failure for '%s' on %s" %
- (output, _system))
-
- return output
-
-
-def _run_function_based_by_system(funcdict):
- """
- Executes a function based on the system running
-
- :param funcdict: Dictionary of functions by system
- """
- func = funcdict[_system]
-
- return func()
-
-
-def get_level():
- """
- Return the system battery level based on the command output in percentage,
- otherwise None if the system doesn't have any batteries.
- """
- try:
- output = _run_command_based_by_system(BATTERY_LEVEL_COMMANDS,
- BATTERY_LEVEL_REGEX)
- value = int(output.rstrip().replace('%',''))
- except RegexDidNotMatchError:
- return None
-
- return int(output.rstrip().replace('%',''))
-
-
-def get_supported_systems():
- """
- Returns a list of supported systems.
- """
- return BATTERY_LEVEL_COMMANDS.keys()
-
-
-def can_check():
- """
- Check if this module can check the battery.
- """
- return _system in get_supported_systems()
-
-
-def _freebsd_is_battery_present():
- """
- Check if there's a battery present for FreeBSD systems.
- """
- try:
- level = get_level()
- except RegexDidNotMatchError:
- return False
- return level != -1
-
-
-def _darwin_is_battery_present():
- """
- Check if there's a battery present for macOS systems.
- """
- try:
- get_level()
- except RegexDidNotMatchError:
- return False
- return True
-
-
-"""
-List of functions that can be used to check if the battery is present.
-"""
-BATTERY_CHECK_FUNCTIONS = {
- 'FreeBSD': _freebsd_is_battery_present,
- 'Darwin': _darwin_is_battery_present,
-}
-
-
-def is_battery_present():
- """
- Check if the system has a battery present.
- """
- return _run_function_based_by_system(BATTERY_CHECK_FUNCTIONS)
-
-
-def _get_ac_line_status():
- """
- Return the output of the AC Line command
- """
- output = _run_command_based_by_system(AC_LINE_CHECK_COMMANDS,
- AC_LINE_CHECK_REGEX)
- return output
-
-
-def _freebsd_is_charging():
- """
- Check if it's charging for FreeBSD systems.
- """
- return bool(int(_get_ac_line_status().rstrip()))
-
-
-def _darwin_is_charging():
- """
- Check if it's charging for macOS systems.
- """
- charging_statuses = ['charging', 'charged']
-
- return _get_ac_line_status() in charging_statuses
-
-
-"""
-List of functions that can be used to check if the system is charging.
-"""
-AC_LINE_CHECK_FUNCTIONS = {
- 'FreeBSD': _freebsd_is_charging,
- 'Darwin': _darwin_is_charging,
-}
-
-
-def is_charging():
- """
- Check if the system is charging.
- """
- return _run_function_based_by_system(AC_LINE_CHECK_FUNCTIONS)
diff --git a/Monika After Story/game/python-packages/battery/windows.py b/Monika After Story/game/python-packages/battery/windows.py
deleted file mode 100644
index ff53d28955..0000000000
--- a/Monika After Story/game/python-packages/battery/windows.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-This module provides functions to check information for the battery and
-the AC Line on Windows systems.
-
-It may only work on Windows Server 2003 / Windows XP and above.
-
-For more information see the following:
-https://msdn.microsoft.com/en-us/library/windows/desktop/aa373232(v=vs.85).aspx
-"""
-
-import ctypes
-
-try:
- from ctypes import wintypes
-except: # pragma: no cover
- wintypes = None
-
-
-def _system_power_status():
- """
- Return 'SYSTEM_POWER_STATUS' C structure with the values set by
- GetSystemPowerStatus.
- """
-
- class SYSTEM_POWER_STATUS(ctypes.Structure):
- """
- This class is a representation of the SYSTEM_POWER_STATUS C structure
- used by GetSystemPowerStatus.
- """
- _fields_ = [
- ('ACLineStatus', wintypes.BYTE),
- ('BatteryFlag', wintypes.BYTE),
- ('BatteryLifePercent', wintypes.BYTE),
- ('Reserved1', wintypes.BYTE),
- ('BatteryLifeTime', wintypes.DWORD),
- ('BatteryFullLifeTime', wintypes.DWORD),
- ]
-
- pointer = ctypes.POINTER(SYSTEM_POWER_STATUS)
-
- GetSystemPowerStatus = ctypes.windll.kernel32.GetSystemPowerStatus
- GetSystemPowerStatus.argtypes = [pointer]
- GetSystemPowerStatus.restype = wintypes.BOOL
-
- status = SYSTEM_POWER_STATUS()
-
- if not GetSystemPowerStatus(ctypes.pointer(status)):
- return None
-
- return status
-
-
-def _get_ac_status():
- """
- Return the ACLineStatus from SYSTEM_POWER_STATUS
- """
- return _system_power_status().ACLineStatus
-
-
-def _get_battery_flag():
- """
- Return the BatteryFlag from SYSTEM_POWER_STATUS
- """
- return _system_power_status().BatteryFlag
-
-
-def get_level():
- """
- Return the system battery level in percentage, otherwise None
- if the value is unknown
- """
- percentage = _system_power_status().BatteryLifePercent
-
- if percentage == 255:
- return None
-
- return percentage
-
-
-def is_charging():
- """
- Check if the system is charging based on the ACLineStatus.
-
- :return: True if it's charging, false otherwise
- """
- return _get_ac_status() == 1 or _get_ac_status() == 255
-
-
-def is_battery_present():
- """
- Check if the system have a battery present
-
- :return: True if there's a battery, false otherwise
- """
- return (_get_battery_flag() != 128 or _get_battery_flag() != 255) and _get_battery_flag() != -1
diff --git a/Monika After Story/game/python-packages/eliza.py b/Monika After Story/game/python-packages/eliza.py
deleted file mode 100644
index 4bd321e18a..0000000000
--- a/Monika After Story/game/python-packages/eliza.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#----------------------------------------------------------------------
-# eliza.py
-#
-# a cheezy little Eliza knock-off by Joe Strout
-# with some updates by Jeff Epler
-# hacked into a module and updated by Jez Higgins
-#----------------------------------------------------------------------
-
-import string
-import re
-import random
-
-class eliza:
- def __init__(self):
- self.keys = list(map(lambda x:re.compile(x[0], re.IGNORECASE),gPats))
- self.values = list(map(lambda x:x[1],gPats))
-
- #----------------------------------------------------------------------
- # translate: take a string, replace any words found in dict.keys()
- # with the corresponding dict.values()
- #----------------------------------------------------------------------
- def translate(self,str,dict):
- words = str.lower().split()
- keys = dict.keys();
- for i in range(0,len(words)):
- if words[i] in keys:
- words[i] = dict[words[i]]
- return ' '.join(words)
-
- #----------------------------------------------------------------------
- # respond: take a string, a set of regexps, and a corresponding
- # set of response lists; find a match, and return a randomly
- # chosen response from the corresponding list.
- #----------------------------------------------------------------------
- def respond(self,str):
- # find a match among keys
- for i in range(0, len(self.keys)):
- match = self.keys[i].match(str)
- if match:
- # found a match ... stuff with corresponding value
- # chosen randomly from among the available options
- resp = random.choice(self.values[i])
- # we've got a response... stuff in reflected text where indicated
- pos = resp.find('%')
- while pos > -1:
- num = int(resp[pos+1:pos+2])
- resp = resp[:pos] + \
- self.translate(match.group(num),gReflections) + \
- resp[pos+2:]
- pos = resp.find('%')
- # fix munged punctuation at the end
- if resp[-2:] == '?.': resp = resp[:-2] + '.'
- if resp[-2:] == '??': resp = resp[:-2] + '?'
- return resp
-
-#----------------------------------------------------------------------
-# gReflections, a translation table used to convert things you say
-# into things the computer says back, e.g. "I am" --> "you are"
-#----------------------------------------------------------------------
-gReflections = {
- "am" : "are",
- "was" : "were",
- "i" : "you",
- "i'd" : "you would",
- "i've" : "you have",
- "i'll" : "you will",
- "my" : "your",
- "are" : "am",
- "you've": "I have",
- "you'll": "I will",
- "your" : "my",
- "yours" : "mine",
- "you" : "me",
- "me" : "you"
-}
-
-#----------------------------------------------------------------------
-# gPats, the main response table. Each element of the list is a
-# two-element list; the first is a regexp, and the second is a
-# list of possible responses, with group-macros labelled as
-# %1, %2, etc.
-#----------------------------------------------------------------------
-gPats = [
- [r'I need (.*)',
- [ "Why do you need %1?",
- "Would it really help you to get %1?",
- "Are you sure you need %1?"]],
-
- [r'Why don\'?t you ([^\?]*)\??',
- [ "Do you really think I don't %1?",
- "Perhaps eventually I will %1.",
- "Do you really want me to %1?"]],
-
- [r'Why can\'?t I ([^\?]*)\??',
- [ "Do you think you should be able to %1?",
- "If you could %1, what would you do?",
- "I don't know -- why can't you %1?",
- "Have you really tried?"]],
-
- [r'I can\'?t (.*)',
- [ "How do you know you can't %1?",
- "Perhaps you could %1 if you tried.",
- "What would it take for you to %1?"]],
-
- [r'I am (.*)',
- [ "Did you come to me because you are %1?",
- "How long have you been %1?",
- "How do you feel about being %1?"]],
-
- [r'I\'?m (.*)',
- [ "How does being %1 make you feel?",
- "Do you enjoy being %1?",
- "Why do you tell me you're %1?",
- "Why do you think you're %1?"]],
-
- [r'Are you ([^\?]*)\??',
- [ "Why does it matter whether I am %1?",
- "Would you prefer it if I were not %1?",
- "Perhaps you believe I am %1.",
- "I may be %1 -- what do you think?"]],
-
- [r'What (.*)',
- [ "Why do you ask?",
- "How would an answer to that help you?",
- "What do you think?"]],
-
- [r'How (.*)',
- [ "How do you suppose?",
- "Perhaps you can answer your own question.",
- "What is it you're really asking?"]],
-
- [r'Because (.*)',
- [ "Is that the real reason?",
- "What other reasons come to mind?",
- "Does that reason apply to anything else?",
- "If %1, what else must be true?"]],
-
- [r'(.*) sorry (.*)',
- [ "There are many times when no apology is needed.",
- "What feelings do you have when you apologize?"]],
-
- [r'Hello(.*)',
- [ "Hello... I'm glad you could drop by today.",
- "Hi there... how are you today?",
- "Hello, how are you feeling today?"]],
-
- [r'I think (.*)',
- [ "Do you doubt %1?",
- "Do you really think so?",
- "But you're not sure %1?"]],
-
- [r'(.*) friend (.*)',
- [ "Tell me more about your friends.",
- "When you think of a friend, what comes to mind?",
- "Why don't you tell me about a childhood friend?"]],
-
- [r'Yes',
- [ "You seem quite sure.",
- "OK, but can you elaborate a bit?"]],
-
- [r'(.*) computer(.*)',
- [ "Are you really talking about me?",
- "Does it seem strange to talk to a computer?",
- "How do computers make you feel?",
- "Do you feel threatened by computers?"]],
-
- [r'Is it (.*)',
- [ "Do you think it is %1?",
- "Perhaps it's %1 -- what do you think?",
- "If it were %1, what would you do?",
- "It could well be that %1."]],
-
- [r'It is (.*)',
- [ "You seem very certain.",
- "If I told you that it probably isn't %1, what would you feel?"]],
-
- [r'Can you ([^\?]*)\??',
- [ "What makes you think I can't %1?",
- "If I could %1, then what?",
- "Why do you ask if I can %1?"]],
-
- [r'Can I ([^\?]*)\??',
- [ "Perhaps you don't want to %1.",
- "Do you want to be able to %1?",
- "If you could %1, would you?"]],
-
- [r'You are (.*)',
- [ "Why do you think I am %1?",
- "Does it please you to think that I'm %1?",
- "Perhaps you would like me to be %1.",
- "Perhaps you're really talking about yourself?"]],
-
- [r'You\'?re (.*)',
- [ "Why do you say I am %1?",
- "Why do you think I am %1?",
- "Are we talking about you, or me?"]],
-
- [r'I don\'?t (.*)',
- [ "Don't you really %1?",
- "Why don't you %1?",
- "Do you want to %1?"]],
-
- [r'I feel (.*)',
- [ "Good, tell me more about these feelings.",
- "Do you often feel %1?",
- "When do you usually feel %1?",
- "When you feel %1, what do you do?"]],
-
- [r'I have (.*)',
- [ "Why do you tell me that you've %1?",
- "Have you really %1?",
- "Now that you have %1, what will you do next?"]],
-
- [r'I would (.*)',
- [ "Could you explain why you would %1?",
- "Why would you %1?",
- "Who else knows that you would %1?"]],
-
- [r'Is there (.*)',
- [ "Do you think there is %1?",
- "It's likely that there is %1.",
- "Would you like there to be %1?"]],
-
- [r'My (.*)',
- [ "I see, your %1.",
- "Why do you say that your %1?",
- "When your %1, how do you feel?"]],
-
- [r'You (.*)',
- [ "We should be discussing you, not me.",
- "Why do you say that about me?",
- "Why do you care whether I %1?"]],
-
- [r'Why (.*)',
- [ "Why don't you tell me the reason why %1?",
- "Why do you think %1?" ]],
-
- [r'I want (.*)',
- [ "What would it mean to you if you got %1?",
- "Why do you want %1?",
- "What would you do if you got %1?",
- "If you got %1, then what would you do?"]],
-
- [r'(.*) mother(.*)',
- [ "Tell me more about your mother.",
- "What was your relationship with your mother like?",
- "How do you feel about your mother?",
- "How does this relate to your feelings today?",
- "Good family relations are important."]],
-
- [r'(.*) father(.*)',
- [ "Tell me more about your father.",
- "How did your father make you feel?",
- "How do you feel about your father?",
- "Does your relationship with your father relate to your feelings today?",
- "Do you have trouble showing affection with your family?"]],
-
- [r'(.*) child(.*)',
- [ "Did you have close friends as a child?",
- "What is your favorite childhood memory?",
- "Do you remember any dreams or nightmares from childhood?",
- "Did the other children sometimes tease you?",
- "How do you think your childhood experiences relate to your feelings today?"]],
-
- [r'(.*)\?',
- [ "Why do you ask that?",
- "Please consider whether you can answer your own question.",
- "Perhaps the answer lies within yourself?",
- "Why don't you tell me?"]],
-
- [r'quit',
- [ "Thank you for talking with me.",
- "Good-bye.",
- "Thank you, that will be $150. Have a good day!"]],
-
- [r'(.*)',
- [ "Please tell me more.",
- "Let's change focus a bit... Tell me about your family.",
- "Can you elaborate on that?",
- "Why do you say that %1?",
- "I see.",
- "Very interesting.",
- "%1.",
- "I see. And what does that tell you?",
- "How does that make you feel?",
- "How do you feel when you say that?"]]
- ]
-
-#----------------------------------------------------------------------
-# command_interface
-#----------------------------------------------------------------------
-def command_interface():
- print('Therapist\n---------')
- print('Talk to the program by typing in plain English, using normal upper-')
- print('and lower-case letters and punctuation. Enter "quit" when done.')
- print('='*72)
- print('Hello. How are you feeling today?')
-
- s = ''
- therapist = eliza();
- while s != 'quit':
- try:
- s = input('> ')
- except EOFError:
- s = 'quit'
- print(s)
- while s[-1] in '!.':
- s = s[:-1]
- print(therapist.respond(s))
-
-
-if __name__ == "__main__":
- command_interface()
diff --git a/Monika After Story/game/python-packages/logging/__init__.py b/Monika After Story/game/python-packages/logging/__init__.py
deleted file mode 100644
index 88fb84cd71..0000000000
--- a/Monika After Story/game/python-packages/logging/__init__.py
+++ /dev/null
@@ -1,1751 +0,0 @@
-# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Logging package for Python. Based on PEP 282 and comments thereto in
-comp.lang.python.
-
-Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, os, time, cStringIO, traceback, warnings, weakref, collections
-
-__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
- 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
- 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
- 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
- 'captureWarnings', 'critical', 'debug', 'disable', 'error',
- 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
- 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
-
-try:
- import codecs
-except ImportError:
- codecs = None
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-__author__ = "Vinay Sajip "
-__status__ = "production"
-# Note: the attributes below are no longer maintained.
-__version__ = "0.5.1.2"
-__date__ = "07 February 2010"
-
-#---------------------------------------------------------------------------
-# Miscellaneous module data
-#---------------------------------------------------------------------------
-try:
- unicode
- _unicode = True
-except NameError:
- _unicode = False
-
-# next bit filched from 1.5.2's inspect.py
-def currentframe():
- """Return the frame object for the caller's stack frame."""
- try:
- raise Exception
- except:
- return sys.exc_info()[2].tb_frame.f_back
-
-if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
-# done filching
-
-#
-# _srcfile is used when walking the stack to check when we've got the first
-# caller stack frame.
-#
-_srcfile = os.path.normcase(currentframe.__code__.co_filename)
-
-# _srcfile is only used in conjunction with sys._getframe().
-# To provide compatibility with older versions of Python, set _srcfile
-# to None if _getframe() is not available; this value will prevent
-# findCaller() from being called.
-#if not hasattr(sys, "_getframe"):
-# _srcfile = None
-
-#
-#_startTime is used as the base when calculating the relative time of events
-#
-_startTime = time.time()
-
-#
-#raiseExceptions is used to see if exceptions during handling should be
-#propagated
-#
-raiseExceptions = 1
-
-#
-# If you don't want threading information in the log, set this to zero
-#
-logThreads = 1
-
-#
-# If you don't want multiprocessing information in the log, set this to zero
-#
-logMultiprocessing = 1
-
-#
-# If you don't want process information in the log, set this to zero
-#
-logProcesses = 1
-
-#---------------------------------------------------------------------------
-# Level related stuff
-#---------------------------------------------------------------------------
-#
-# Default levels and level names, these can be replaced with any positive set
-# of values having corresponding names. There is a pseudo-level, NOTSET, which
-# is only really there as a lower limit for user-defined levels. Handlers and
-# loggers are initialized with NOTSET so that they will log all messages, even
-# at user-defined levels.
-#
-
-CRITICAL = 50
-FATAL = CRITICAL
-ERROR = 40
-WARNING = 30
-WARN = WARNING
-INFO = 20
-DEBUG = 10
-NOTSET = 0
-
-_levelNames = {
- CRITICAL : 'CRITICAL',
- ERROR : 'ERROR',
- WARNING : 'WARNING',
- INFO : 'INFO',
- DEBUG : 'DEBUG',
- NOTSET : 'NOTSET',
- 'CRITICAL' : CRITICAL,
- 'ERROR' : ERROR,
- 'WARN' : WARNING,
- 'WARNING' : WARNING,
- 'INFO' : INFO,
- 'DEBUG' : DEBUG,
- 'NOTSET' : NOTSET,
-}
-
-def getLevelName(level):
- """
- Return the textual representation of logging level 'level'.
-
- If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
- INFO, DEBUG) then you get the corresponding string. If you have
- associated levels with names using addLevelName then the name you have
- associated with 'level' is returned.
-
- If a numeric value corresponding to one of the defined levels is passed
- in, the corresponding string representation is returned.
-
- Otherwise, the string "Level %s" % level is returned.
- """
- return _levelNames.get(level, ("Level %s" % level))
-
-def addLevelName(level, levelName):
- """
- Associate 'levelName' with 'level'.
-
- This is used when converting levels to text during message formatting.
- """
- _acquireLock()
- try: #unlikely to cause an exception, but you never know...
- _levelNames[level] = levelName
- _levelNames[levelName] = level
- finally:
- _releaseLock()
-
-def _checkLevel(level):
- if isinstance(level, (int, long)):
- rv = level
- elif str(level) == level:
- if level not in _levelNames:
- raise ValueError("Unknown level: %r" % level)
- rv = _levelNames[level]
- else:
- raise TypeError("Level not an integer or a valid string: %r" % level)
- return rv
-
-#---------------------------------------------------------------------------
-# Thread-related stuff
-#---------------------------------------------------------------------------
-
-#
-#_lock is used to serialize access to shared data structures in this module.
-#This needs to be an RLock because fileConfig() creates and configures
-#Handlers, and so might arbitrary user threads. Since Handler code updates the
-#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
-#the lock would already have been acquired - so we need an RLock.
-#The same argument applies to Loggers and Manager.loggerDict.
-#
-if thread:
- _lock = threading.RLock()
-else:
- _lock = None
-
-def _acquireLock():
- """
- Acquire the module-level lock for serializing access to shared data.
-
- This should be released with _releaseLock().
- """
- if _lock:
- _lock.acquire()
-
-def _releaseLock():
- """
- Release the module-level lock acquired by calling _acquireLock().
- """
- if _lock:
- _lock.release()
-
-#---------------------------------------------------------------------------
-# The logging record
-#---------------------------------------------------------------------------
-
-class LogRecord(object):
- """
- A LogRecord instance represents an event being logged.
-
- LogRecord instances are created every time something is logged. They
- contain all the information pertinent to the event being logged. The
- main information passed in is in msg and args, which are combined
- using str(msg) % args to create the message field of the record. The
- record also includes information such as when the record was created,
- the source line where the logging call was made, and any exception
- information to be logged.
- """
- def __init__(self, name, level, pathname, lineno,
- msg, args, exc_info, func=None):
- """
- Initialize a logging record with interesting information.
- """
- ct = time.time()
- self.name = name
- self.msg = msg
- #
- # The following statement allows passing of a dictionary as a sole
- # argument, so that you can do something like
- # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
- # Suggested by Stefan Behnel.
- # Note that without the test for args[0], we get a problem because
- # during formatting, we test to see if the arg is present using
- # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
- # and if the passed arg fails 'if self.args:' then no formatting
- # is done. For example, logger.warn('Value is %d', 0) would log
- # 'Value is %d' instead of 'Value is 0'.
- # For the use case of passing a dictionary, this should not be a
- # problem.
- # Issue #21172: a request was made to relax the isinstance check
- # to hasattr(args[0], '__getitem__'). However, the docs on string
- # formatting still seem to suggest a mapping object is required.
- # Thus, while not removing the isinstance check, it does now look
- # for collections.Mapping rather than, as before, dict.
- if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
- and args[0]):
- args = args[0]
- self.args = args
- self.levelname = getLevelName(level)
- self.levelno = level
- self.pathname = pathname
- try:
- self.filename = os.path.basename(pathname)
- self.module = os.path.splitext(self.filename)[0]
- except (TypeError, ValueError, AttributeError):
- self.filename = pathname
- self.module = "Unknown module"
- self.exc_info = exc_info
- self.exc_text = None # used to cache the traceback text
- self.lineno = lineno
- self.funcName = func
- self.created = ct
- self.msecs = (ct - long(ct)) * 1000
- self.relativeCreated = (self.created - _startTime) * 1000
- if logThreads and thread:
- self.thread = thread.get_ident()
- self.threadName = threading.current_thread().name
- else:
- self.thread = None
- self.threadName = None
- if not logMultiprocessing:
- self.processName = None
- else:
- self.processName = 'MainProcess'
- mp = sys.modules.get('multiprocessing')
- if mp is not None:
- # Errors may occur if multiprocessing has not finished loading
- # yet - e.g. if a custom import hook causes third-party code
- # to run when multiprocessing calls import. See issue 8200
- # for an example
- try:
- self.processName = mp.current_process().name
- except StandardError:
- pass
- if logProcesses and hasattr(os, 'getpid'):
- self.process = os.getpid()
- else:
- self.process = None
-
- def __str__(self):
- return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
- self.pathname, self.lineno, self.msg)
-
- def getMessage(self):
- """
- Return the message for this LogRecord.
-
- Return the message for this LogRecord after merging any user-supplied
- arguments with the message.
- """
- if not _unicode: #if no unicode support...
- msg = str(self.msg)
- else:
- msg = self.msg
- if not isinstance(msg, basestring):
- try:
- msg = str(self.msg)
- except UnicodeError:
- msg = self.msg #Defer encoding till later
- if self.args:
- msg = msg % self.args
- return msg
-
-def makeLogRecord(dict):
- """
- Make a LogRecord whose attributes are defined by the specified dictionary,
- This function is useful for converting a logging event received over
- a socket connection (which is sent as a dictionary) into a LogRecord
- instance.
- """
- rv = LogRecord(None, None, "", 0, "", (), None, None)
- rv.__dict__.update(dict)
- return rv
-
-#---------------------------------------------------------------------------
-# Formatter classes and functions
-#---------------------------------------------------------------------------
-
-class Formatter(object):
- """
- Formatter instances are used to convert a LogRecord to text.
-
- Formatters need to know how a LogRecord is constructed. They are
- responsible for converting a LogRecord to (usually) a string which can
- be interpreted by either a human or an external system. The base Formatter
- allows a formatting string to be specified. If none is supplied, the
- default value of "%s(message)\\n" is used.
-
- The Formatter can be initialized with a format string which makes use of
- knowledge of the LogRecord attributes - e.g. the default value mentioned
- above makes use of the fact that the user's message and arguments are pre-
- formatted into a LogRecord's message attribute. Currently, the useful
- attributes in a LogRecord are described by:
-
- %(name)s Name of the logger (logging channel)
- %(levelno)s Numeric logging level for the message (DEBUG, INFO,
- WARNING, ERROR, CRITICAL)
- %(levelname)s Text logging level for the message ("DEBUG", "INFO",
- "WARNING", "ERROR", "CRITICAL")
- %(pathname)s Full pathname of the source file where the logging
- call was issued (if available)
- %(filename)s Filename portion of pathname
- %(module)s Module (name portion of filename)
- %(lineno)d Source line number where the logging call was issued
- (if available)
- %(funcName)s Function name
- %(created)f Time when the LogRecord was created (time.time()
- return value)
- %(asctime)s Textual time when the LogRecord was created
- %(msecs)d Millisecond portion of the creation time
- %(relativeCreated)d Time in milliseconds when the LogRecord was created,
- relative to the time the logging module was loaded
- (typically at application startup time)
- %(thread)d Thread ID (if available)
- %(threadName)s Thread name (if available)
- %(process)d Process ID (if available)
- %(message)s The result of record.getMessage(), computed just as
- the record is emitted
- """
-
- converter = time.localtime
-
- def __init__(self, fmt=None, datefmt=None):
- """
- Initialize the formatter with specified format strings.
-
- Initialize the formatter either with the specified format string, or a
- default as described above. Allow for specialized date formatting with
- the optional datefmt argument (if omitted, you get the ISO8601 format).
- """
- if fmt:
- self._fmt = fmt
- else:
- self._fmt = "%(message)s"
- self.datefmt = datefmt
-
- def formatTime(self, record, datefmt=None):
- """
- Return the creation time of the specified LogRecord as formatted text.
-
- This method should be called from format() by a formatter which
- wants to make use of a formatted time. This method can be overridden
- in formatters to provide for any specific requirement, but the
- basic behaviour is as follows: if datefmt (a string) is specified,
- it is used with time.strftime() to format the creation time of the
- record. Otherwise, the ISO8601 format is used. The resulting
- string is returned. This function uses a user-configurable function
- to convert the creation time to a tuple. By default, time.localtime()
- is used; to change this for a particular formatter instance, set the
- 'converter' attribute to a function with the same signature as
- time.localtime() or time.gmtime(). To change it for all formatters,
- for example if you want all logging times to be shown in GMT,
- set the 'converter' attribute in the Formatter class.
- """
- ct = self.converter(record.created)
- if datefmt:
- s = time.strftime(datefmt, ct)
- else:
- t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
- s = "%s,%03d" % (t, record.msecs)
- return s
-
- def formatException(self, ei):
- """
- Format and return the specified exception information as a string.
-
- This default implementation just uses
- traceback.print_exception()
- """
- sio = cStringIO.StringIO()
- traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
- s = sio.getvalue()
- sio.close()
- if s[-1:] == "\n":
- s = s[:-1]
- return s
-
- def usesTime(self):
- """
- Check if the format uses the creation time of the record.
- """
- return self._fmt.find("%(asctime)") >= 0
-
- def format(self, record):
- """
- Format the specified record as text.
-
- The record's attribute dictionary is used as the operand to a
- string formatting operation which yields the returned string.
- Before formatting the dictionary, a couple of preparatory steps
- are carried out. The message attribute of the record is computed
- using LogRecord.getMessage(). If the formatting string uses the
- time (as determined by a call to usesTime(), formatTime() is
- called to format the event time. If there is exception information,
- it is formatted using formatException() and appended to the message.
- """
- record.message = record.getMessage()
- if self.usesTime():
- record.asctime = self.formatTime(record, self.datefmt)
- try:
- s = self._fmt % record.__dict__
- except UnicodeDecodeError as e:
- # Issue 25664. The logger name may be Unicode. Try again ...
- try:
- record.name = record.name.decode('utf-8')
- s = self._fmt % record.__dict__
- except UnicodeDecodeError:
- raise e
- if record.exc_info:
- # Cache the traceback text to avoid converting it multiple times
- # (it's constant anyway)
- if not record.exc_text:
- record.exc_text = self.formatException(record.exc_info)
- if record.exc_text:
- if s[-1:] != "\n":
- s = s + "\n"
- try:
- s = s + record.exc_text
- except UnicodeError:
- # Sometimes filenames have non-ASCII chars, which can lead
- # to errors when s is Unicode and record.exc_text is str
- # See issue 8924.
- # We also use replace for when there are multiple
- # encodings, e.g. UTF-8 for the filesystem and latin-1
- # for a script. See issue 13232.
- s = s + record.exc_text.decode(sys.getfilesystemencoding(),
- 'replace')
- return s
-
-#
-# The default formatter to use when no other is specified
-#
-_defaultFormatter = Formatter()
-
-class BufferingFormatter(object):
- """
- A formatter suitable for formatting a number of records.
- """
- def __init__(self, linefmt=None):
- """
- Optionally specify a formatter which will be used to format each
- individual record.
- """
- if linefmt:
- self.linefmt = linefmt
- else:
- self.linefmt = _defaultFormatter
-
- def formatHeader(self, records):
- """
- Return the header string for the specified records.
- """
- return ""
-
- def formatFooter(self, records):
- """
- Return the footer string for the specified records.
- """
- return ""
-
- def format(self, records):
- """
- Format the specified records and return the result as a string.
- """
- rv = ""
- if len(records) > 0:
- rv = rv + self.formatHeader(records)
- for record in records:
- rv = rv + self.linefmt.format(record)
- rv = rv + self.formatFooter(records)
- return rv
-
-#---------------------------------------------------------------------------
-# Filter classes and functions
-#---------------------------------------------------------------------------
-
-class Filter(object):
- """
- Filter instances are used to perform arbitrary filtering of LogRecords.
-
- Loggers and Handlers can optionally use Filter instances to filter
- records as desired. The base filter class only allows events which are
- below a certain point in the logger hierarchy. For example, a filter
- initialized with "A.B" will allow events logged by loggers "A.B",
- "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
- initialized with the empty string, all events are passed.
- """
- def __init__(self, name=''):
- """
- Initialize a filter.
-
- Initialize with the name of the logger which, together with its
- children, will have its events allowed through the filter. If no
- name is specified, allow every event.
- """
- self.name = name
- self.nlen = len(name)
-
- def filter(self, record):
- """
- Determine if the specified record is to be logged.
-
- Is the specified record to be logged? Returns 0 for no, nonzero for
- yes. If deemed appropriate, the record may be modified in-place.
- """
- if self.nlen == 0:
- return 1
- elif self.name == record.name:
- return 1
- elif record.name.find(self.name, 0, self.nlen) != 0:
- return 0
- return (record.name[self.nlen] == ".")
-
-class Filterer(object):
- """
- A base class for loggers and handlers which allows them to share
- common code.
- """
- def __init__(self):
- """
- Initialize the list of filters to be an empty list.
- """
- self.filters = []
-
- def addFilter(self, filter):
- """
- Add the specified filter to this handler.
- """
- if not (filter in self.filters):
- self.filters.append(filter)
-
- def removeFilter(self, filter):
- """
- Remove the specified filter from this handler.
- """
- if filter in self.filters:
- self.filters.remove(filter)
-
- def filter(self, record):
- """
- Determine if a record is loggable by consulting all the filters.
-
- The default is to allow the record to be logged; any filter can veto
- this and the record is then dropped. Returns a zero value if a record
- is to be dropped, else non-zero.
- """
- rv = 1
- for f in self.filters:
- if not f.filter(record):
- rv = 0
- break
- return rv
-
-#---------------------------------------------------------------------------
-# Handler classes and functions
-#---------------------------------------------------------------------------
-
-_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
-_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
-
-def _removeHandlerRef(wr):
- """
- Remove a handler reference from the internal cleanup list.
- """
- # This function can be called during module teardown, when globals are
- # set to None. It can also be called from another thread. So we need to
- # pre-emptively grab the necessary globals and check if they're None,
- # to prevent race conditions and failures during interpreter shutdown.
- acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
- if acquire and release and handlers:
- try:
- acquire()
- try:
- if wr in handlers:
- handlers.remove(wr)
- finally:
- release()
- except TypeError:
- # https://bugs.python.org/issue21149 - If the RLock object behind
- # acquire() and release() has been partially finalized you may see
- # an error about NoneType not being callable. Absolutely nothing
- # we can do in this GC during process shutdown situation. Eat it.
- pass
-
-def _addHandlerRef(handler):
- """
- Add a handler to the internal cleanup list using a weak reference.
- """
- _acquireLock()
- try:
- _handlerList.append(weakref.ref(handler, _removeHandlerRef))
- finally:
- _releaseLock()
-
-class Handler(Filterer):
- """
- Handler instances dispatch logging events to specific destinations.
-
- The base handler class. Acts as a placeholder which defines the Handler
- interface. Handlers can optionally use Formatter instances to format
- records as desired. By default, no formatter is specified; in this case,
- the 'raw' message as determined by record.message is logged.
- """
- def __init__(self, level=NOTSET):
- """
- Initializes the instance - basically setting the formatter to None
- and the filter list to empty.
- """
- Filterer.__init__(self)
- self._name = None
- self.level = _checkLevel(level)
- self.formatter = None
- # Add the handler to the global _handlerList (for cleanup on shutdown)
- _addHandlerRef(self)
- self.createLock()
-
- def get_name(self):
- return self._name
-
- def set_name(self, name):
- _acquireLock()
- try:
- if self._name in _handlers:
- del _handlers[self._name]
- self._name = name
- if name:
- _handlers[name] = self
- finally:
- _releaseLock()
-
- name = property(get_name, set_name)
-
- def createLock(self):
- """
- Acquire a thread lock for serializing access to the underlying I/O.
- """
- if thread:
- self.lock = threading.RLock()
- else:
- self.lock = None
-
- def acquire(self):
- """
- Acquire the I/O thread lock.
- """
- if self.lock:
- self.lock.acquire()
-
- def release(self):
- """
- Release the I/O thread lock.
- """
- if self.lock:
- self.lock.release()
-
- def setLevel(self, level):
- """
- Set the logging level of this handler.
- """
- self.level = _checkLevel(level)
-
- def format(self, record):
- """
- Format the specified record.
-
- If a formatter is set, use it. Otherwise, use the default formatter
- for the module.
- """
- if self.formatter:
- fmt = self.formatter
- else:
- fmt = _defaultFormatter
- return fmt.format(record)
-
- def emit(self, record):
- """
- Do whatever it takes to actually log the specified logging record.
-
- This version is intended to be implemented by subclasses and so
- raises a NotImplementedError.
- """
- raise NotImplementedError('emit must be implemented '
- 'by Handler subclasses')
-
- def handle(self, record):
- """
- Conditionally emit the specified logging record.
-
- Emission depends on filters which may have been added to the handler.
- Wrap the actual emission of the record with acquisition/release of
- the I/O thread lock. Returns whether the filter passed the record for
- emission.
- """
- rv = self.filter(record)
- if rv:
- self.acquire()
- try:
- self.emit(record)
- finally:
- self.release()
- return rv
-
- def setFormatter(self, fmt):
- """
- Set the formatter for this handler.
- """
- self.formatter = fmt
-
- def flush(self):
- """
- Ensure all logging output has been flushed.
-
- This version does nothing and is intended to be implemented by
- subclasses.
- """
- pass
-
- def close(self):
- """
- Tidy up any resources used by the handler.
-
- This version removes the handler from an internal map of handlers,
- _handlers, which is used for handler lookup by name. Subclasses
- should ensure that this gets called from overridden close()
- methods.
- """
- #get the module data lock, as we're updating a shared structure.
- _acquireLock()
- try: #unlikely to raise an exception, but you never know...
- if self._name and self._name in _handlers:
- del _handlers[self._name]
- finally:
- _releaseLock()
-
- def handleError(self, record):
- """
- Handle errors which occur during an emit() call.
-
- This method should be called from handlers when an exception is
- encountered during an emit() call. If raiseExceptions is false,
- exceptions get silently ignored. This is what is mostly wanted
- for a logging system - most users will not care about errors in
- the logging system, they are more interested in application errors.
- You could, however, replace this with a custom handler if you wish.
- The record which was being processed is passed in to this method.
- """
- if raiseExceptions and sys.stderr: # see issue 13807
- ei = sys.exc_info()
- try:
- traceback.print_exception(ei[0], ei[1], ei[2],
- None, sys.stderr)
- sys.stderr.write('Logged from file %s, line %s\n' % (
- record.filename, record.lineno))
- except IOError:
- pass # see issue 5971
- finally:
- del ei
-
-class StreamHandler(Handler):
- """
- A handler class which writes logging records, appropriately formatted,
- to a stream. Note that this class does not close the stream, as
- sys.stdout or sys.stderr may be used.
- """
-
- def __init__(self, stream=None):
- """
- Initialize the handler.
-
- If stream is not specified, sys.stderr is used.
- """
- Handler.__init__(self)
- if stream is None:
- stream = sys.stderr
- self.stream = stream
-
- def flush(self):
- """
- Flushes the stream.
- """
- self.acquire()
- try:
- if self.stream and hasattr(self.stream, "flush"):
- self.stream.flush()
- finally:
- self.release()
-
- def emit(self, record):
- """
- Emit a record.
-
- If a formatter is specified, it is used to format the record.
- The record is then written to the stream with a trailing newline. If
- exception information is present, it is formatted using
- traceback.print_exception and appended to the stream. If the stream
- has an 'encoding' attribute, it is used to determine how to do the
- output to the stream.
- """
- try:
- msg = self.format(record)
- stream = self.stream
- fs = "%s\r\n"
- if not _unicode: #if no unicode support...
- stream.write(fs % msg)
- else:
- try:
- if (isinstance(msg, unicode) and
- getattr(stream, 'encoding', None)):
- ufs = u'%s\r\n'
- try:
- stream.write(ufs % msg)
- except UnicodeEncodeError:
- #Printing to terminals sometimes fails. For example,
- #with an encoding of 'cp1251', the above write will
- #work if written to a stream opened or wrapped by
- #the codecs module, but fail when writing to a
- #terminal even when the codepage is set to cp1251.
- #An extra encoding step seems to be needed.
- stream.write((ufs % msg).encode(stream.encoding))
- else:
- stream.write(fs % msg)
- except UnicodeError:
- stream.write(fs % msg.encode("UTF-8"))
- self.flush()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class FileHandler(StreamHandler):
- """
- A handler class which writes formatted logging records to disk files.
- """
- def __init__(self, filename, mode='a', encoding=None, delay=0):
- """
- Open the specified file and use it as the stream for logging.
- """
- #keep the absolute path, otherwise derived classes which use this
- #may come a cropper when the current directory changes
- if codecs is None:
- encoding = None
- self.baseFilename = os.path.abspath(filename)
- self.mode = mode
- self.encoding = encoding
- self.delay = delay
- if delay:
- #We don't open the stream, but we still need to call the
- #Handler constructor to set level, formatter, lock etc.
- Handler.__init__(self)
- self.stream = None
- else:
- StreamHandler.__init__(self, self._open())
-
- def close(self):
- """
- Closes the stream.
- """
- self.acquire()
- try:
- try:
- if self.stream:
- try:
- self.flush()
- finally:
- stream = self.stream
- self.stream = None
- if hasattr(stream, "close"):
- stream.close()
- finally:
- # Issue #19523: call unconditionally to
- # prevent a handler leak when delay is set
- StreamHandler.close(self)
- finally:
- self.release()
-
- def _open(self):
- """
- Open the current base file with the (original) mode and encoding.
- Return the resulting stream.
- """
- if self.encoding is None:
- stream = open(self.baseFilename, self.mode)
- else:
- stream = codecs.open(self.baseFilename, self.mode, self.encoding)
- return stream
-
- def emit(self, record):
- """
- Emit a record.
-
- If the stream was not opened because 'delay' was specified in the
- constructor, open it before calling the superclass's emit.
- """
- if self.stream is None:
- self.stream = self._open()
- StreamHandler.emit(self, record)
-
-#---------------------------------------------------------------------------
-# Manager classes and functions
-#---------------------------------------------------------------------------
-
-class PlaceHolder(object):
- """
- PlaceHolder instances are used in the Manager logger hierarchy to take
- the place of nodes for which no loggers have been defined. This class is
- intended for internal use only and not as part of the public API.
- """
- def __init__(self, alogger):
- """
- Initialize with the specified logger being a child of this placeholder.
- """
- #self.loggers = [alogger]
- self.loggerMap = { alogger : None }
-
- def append(self, alogger):
- """
- Add the specified logger as a child of this placeholder.
- """
- #if alogger not in self.loggers:
- if alogger not in self.loggerMap:
- #self.loggers.append(alogger)
- self.loggerMap[alogger] = None
-
-#
-# Determine which class to use when instantiating loggers.
-#
-_loggerClass = None
-
-def setLoggerClass(klass):
- """
- Set the class to be used when instantiating a logger. The class should
- define __init__() such that only a name argument is required, and the
- __init__() should call Logger.__init__()
- """
- if klass != Logger:
- if not issubclass(klass, Logger):
- raise TypeError("logger not derived from logging.Logger: "
- + klass.__name__)
- global _loggerClass
- _loggerClass = klass
-
-def getLoggerClass():
- """
- Return the class to be used when instantiating a logger.
- """
-
- return _loggerClass
-
-class Manager(object):
- """
- There is [under normal circumstances] just one Manager instance, which
- holds the hierarchy of loggers.
- """
- def __init__(self, rootnode):
- """
- Initialize the manager with the root node of the logger hierarchy.
- """
- self.root = rootnode
- self.disable = 0
- self.emittedNoHandlerWarning = 0
- self.loggerDict = {}
- self.loggerClass = None
-
- def getLogger(self, name):
- """
- Get a logger with the specified name (channel name), creating it
- if it doesn't yet exist. This name is a dot-separated hierarchical
- name, such as "a", "a.b", "a.b.c" or similar.
-
- If a PlaceHolder existed for the specified name [i.e. the logger
- didn't exist but a child of it did], replace it with the created
- logger and fix up the parent/child references which pointed to the
- placeholder to now point to the logger.
- """
- rv = None
- if not isinstance(name, basestring):
- raise TypeError('A logger name must be string or Unicode')
- if isinstance(name, unicode):
- name = name.encode('utf-8')
- _acquireLock()
- try:
- if name in self.loggerDict:
- rv = self.loggerDict[name]
- if isinstance(rv, PlaceHolder):
- ph = rv
- rv = (self.loggerClass or _loggerClass)(name)
- rv.manager = self
- self.loggerDict[name] = rv
- self._fixupChildren(ph, rv)
- self._fixupParents(rv)
- else:
- rv = (self.loggerClass or _loggerClass)(name)
- rv.manager = self
- self.loggerDict[name] = rv
- self._fixupParents(rv)
- finally:
- _releaseLock()
- return rv
-
- def setLoggerClass(self, klass):
- """
- Set the class to be used when instantiating a logger with this Manager.
- """
- if klass != Logger:
- if not issubclass(klass, Logger):
- raise TypeError("logger not derived from logging.Logger: "
- + klass.__name__)
- self.loggerClass = klass
-
- def _fixupParents(self, alogger):
- """
- Ensure that there are either loggers or placeholders all the way
- from the specified logger to the root of the logger hierarchy.
- """
- name = alogger.name
- i = name.rfind(".")
- rv = None
- while (i > 0) and not rv:
- substr = name[:i]
- if substr not in self.loggerDict:
- self.loggerDict[substr] = PlaceHolder(alogger)
- else:
- obj = self.loggerDict[substr]
- if isinstance(obj, Logger):
- rv = obj
- else:
- assert isinstance(obj, PlaceHolder)
- obj.append(alogger)
- i = name.rfind(".", 0, i - 1)
- if not rv:
- rv = self.root
- alogger.parent = rv
-
- def _fixupChildren(self, ph, alogger):
- """
- Ensure that children of the placeholder ph are connected to the
- specified logger.
- """
- name = alogger.name
- namelen = len(name)
- for c in ph.loggerMap.keys():
- #The if means ... if not c.parent.name.startswith(nm)
- if c.parent.name[:namelen] != name:
- alogger.parent = c.parent
- c.parent = alogger
-
-#---------------------------------------------------------------------------
-# Logger classes and functions
-#---------------------------------------------------------------------------
-
-class Logger(Filterer):
- """
- Instances of the Logger class represent a single logging channel. A
- "logging channel" indicates an area of an application. Exactly how an
- "area" is defined is up to the application developer. Since an
- application can have any number of areas, logging channels are identified
- by a unique string. Application areas can be nested (e.g. an area
- of "input processing" might include sub-areas "read CSV files", "read
- XLS files" and "read Gnumeric files"). To cater for this natural nesting,
- channel names are organized into a namespace hierarchy where levels are
- separated by periods, much like the Java or Python package namespace. So
- in the instance given above, channel names might be "input" for the upper
- level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
- There is no arbitrary limit to the depth of nesting.
- """
- def __init__(self, name, level=NOTSET):
- """
- Initialize the logger with a name and an optional level.
- """
- Filterer.__init__(self)
- self.name = name
- self.level = _checkLevel(level)
- self.parent = None
- self.propagate = 1
- self.handlers = []
- self.disabled = 0
-
- def setLevel(self, level):
- """
- Set the logging level of this logger.
- """
- self.level = _checkLevel(level)
-
- def debug(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'DEBUG'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
- """
- if self.isEnabledFor(DEBUG):
- self._log(DEBUG, msg, args, **kwargs)
-
- def info(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'INFO'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
- """
- if self.isEnabledFor(INFO):
- self._log(INFO, msg, args, **kwargs)
-
- def warning(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'WARNING'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
- """
- if self.isEnabledFor(WARNING):
- self._log(WARNING, msg, args, **kwargs)
-
- warn = warning
-
- def error(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'ERROR'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.error("Houston, we have a %s", "major problem", exc_info=1)
- """
- if self.isEnabledFor(ERROR):
- self._log(ERROR, msg, args, **kwargs)
-
- def exception(self, msg, *args, **kwargs):
- """
- Convenience method for logging an ERROR with exception information.
- """
- kwargs['exc_info'] = 1
- self.error(msg, *args, **kwargs)
-
- def critical(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'CRITICAL'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
- """
- if self.isEnabledFor(CRITICAL):
- self._log(CRITICAL, msg, args, **kwargs)
-
- fatal = critical
-
- def log(self, level, msg, *args, **kwargs):
- """
- Log 'msg % args' with the integer severity 'level'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
- """
- if not isinstance(level, (int, long)):
- if raiseExceptions:
- raise TypeError("level must be an integer")
- else:
- return
- if self.isEnabledFor(level):
- self._log(level, msg, args, **kwargs)
-
- def findCaller(self):
- """
- Find the stack frame of the caller so that we can note the source
- file name, line number and function name.
- """
- f = currentframe()
- #On some versions of IronPython, currentframe() returns None if
- #IronPython isn't run with -X:Frames.
- if f is not None:
- f = f.f_back
- rv = "(unknown file)", 0, "(unknown function)"
- while hasattr(f, "f_code"):
- co = f.f_code
- filename = os.path.normcase(co.co_filename)
- if filename == _srcfile:
- f = f.f_back
- continue
- rv = (co.co_filename, f.f_lineno, co.co_name)
- break
- return rv
-
- def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
- """
- A factory method which can be overridden in subclasses to create
- specialized LogRecords.
- """
- rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
- if extra is not None:
- for key in extra:
- if (key in ["message", "asctime"]) or (key in rv.__dict__):
- raise KeyError("Attempt to overwrite %r in LogRecord" % key)
- rv.__dict__[key] = extra[key]
- return rv
-
- def _log(self, level, msg, args, exc_info=None, extra=None):
- """
- Low-level logging routine which creates a LogRecord and then calls
- all the handlers of this logger to handle the record.
- """
- if _srcfile:
- #IronPython doesn't track Python frames, so findCaller raises an
- #exception on some versions of IronPython. We trap it here so that
- #IronPython can use logging.
- try:
- fn, lno, func = self.findCaller()
- except ValueError:
- fn, lno, func = "(unknown file)", 0, "(unknown function)"
- else:
- fn, lno, func = "(unknown file)", 0, "(unknown function)"
- if exc_info:
- if not isinstance(exc_info, tuple):
- exc_info = sys.exc_info()
- record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
- self.handle(record)
-
- def handle(self, record):
- """
- Call the handlers for the specified record.
-
- This method is used for unpickled records received from a socket, as
- well as those created locally. Logger-level filtering is applied.
- """
- if (not self.disabled) and self.filter(record):
- self.callHandlers(record)
-
- def addHandler(self, hdlr):
- """
- Add the specified handler to this logger.
- """
- _acquireLock()
- try:
- if not (hdlr in self.handlers):
- self.handlers.append(hdlr)
- finally:
- _releaseLock()
-
- def removeHandler(self, hdlr):
- """
- Remove the specified handler from this logger.
- """
- _acquireLock()
- try:
- if hdlr in self.handlers:
- self.handlers.remove(hdlr)
- finally:
- _releaseLock()
-
- def callHandlers(self, record):
- """
- Pass a record to all relevant handlers.
-
- Loop through all handlers for this logger and its parents in the
- logger hierarchy. If no handler was found, output a one-off error
- message to sys.stderr. Stop searching up the hierarchy whenever a
- logger with the "propagate" attribute set to zero is found - that
- will be the last logger whose handlers are called.
- """
- c = self
- found = 0
- while c:
- for hdlr in c.handlers:
- found = found + 1
- if record.levelno >= hdlr.level:
- hdlr.handle(record)
- if not c.propagate:
- c = None #break out
- else:
- c = c.parent
- if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
- sys.stderr.write("No handlers could be found for logger"
- " \"%s\"\n" % self.name)
- self.manager.emittedNoHandlerWarning = 1
-
- def getEffectiveLevel(self):
- """
- Get the effective level for this logger.
-
- Loop through this logger and its parents in the logger hierarchy,
- looking for a non-zero logging level. Return the first one found.
- """
- logger = self
- while logger:
- if logger.level:
- return logger.level
- logger = logger.parent
- return NOTSET
-
- def isEnabledFor(self, level):
- """
- Is this logger enabled for level 'level'?
- """
- if self.manager.disable >= level:
- return 0
- return level >= self.getEffectiveLevel()
-
- def getChild(self, suffix):
- """
- Get a logger which is a descendant to this one.
-
- This is a convenience method, such that
-
- logging.getLogger('abc').getChild('def.ghi')
-
- is the same as
-
- logging.getLogger('abc.def.ghi')
-
- It's useful, for example, when the parent logger is named using
- __name__ rather than a literal string.
- """
- if self.root is not self:
- suffix = '.'.join((self.name, suffix))
- return self.manager.getLogger(suffix)
-
-class RootLogger(Logger):
- """
- A root logger is not that different to any other logger, except that
- it must have a logging level and there is only one instance of it in
- the hierarchy.
- """
- def __init__(self, level):
- """
- Initialize the logger with the name "root".
- """
- Logger.__init__(self, "root", level)
-
-_loggerClass = Logger
-
-class LoggerAdapter(object):
- """
- An adapter for loggers which makes it easier to specify contextual
- information in logging output.
- """
-
- def __init__(self, logger, extra):
- """
- Initialize the adapter with a logger and a dict-like object which
- provides contextual information. This constructor signature allows
- easy stacking of LoggerAdapters, if so desired.
-
- You can effectively pass keyword arguments as shown in the
- following example:
-
- adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
- """
- self.logger = logger
- self.extra = extra
-
- def process(self, msg, kwargs):
- """
- Process the logging message and keyword arguments passed in to
- a logging call to insert contextual information. You can either
- manipulate the message itself, the keyword args or both. Return
- the message and kwargs modified (or not) to suit your needs.
-
- Normally, you'll only need to override this one method in a
- LoggerAdapter subclass for your specific needs.
- """
- kwargs["extra"] = self.extra
- return msg, kwargs
-
- def debug(self, msg, *args, **kwargs):
- """
- Delegate a debug call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.debug(msg, *args, **kwargs)
-
- def info(self, msg, *args, **kwargs):
- """
- Delegate an info call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.info(msg, *args, **kwargs)
-
- def warning(self, msg, *args, **kwargs):
- """
- Delegate a warning call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.warning(msg, *args, **kwargs)
-
- def error(self, msg, *args, **kwargs):
- """
- Delegate an error call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.error(msg, *args, **kwargs)
-
- def exception(self, msg, *args, **kwargs):
- """
- Delegate an exception call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- kwargs["exc_info"] = 1
- self.logger.error(msg, *args, **kwargs)
-
- def critical(self, msg, *args, **kwargs):
- """
- Delegate a critical call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.critical(msg, *args, **kwargs)
-
- def log(self, level, msg, *args, **kwargs):
- """
- Delegate a log call to the underlying logger, after adding
- contextual information from this adapter instance.
- """
- msg, kwargs = self.process(msg, kwargs)
- self.logger.log(level, msg, *args, **kwargs)
-
- def isEnabledFor(self, level):
- """
- See if the underlying logger is enabled for the specified level.
- """
- return self.logger.isEnabledFor(level)
-
-root = RootLogger(WARNING)
-Logger.root = root
-Logger.manager = Manager(Logger.root)
-
-#---------------------------------------------------------------------------
-# Configuration classes and functions
-#---------------------------------------------------------------------------
-
-BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
-
-def basicConfig(**kwargs):
- """
- Do basic configuration for the logging system.
-
- This function does nothing if the root logger already has handlers
- configured. It is a convenience method intended for use by simple scripts
- to do one-shot configuration of the logging package.
-
- The default behaviour is to create a StreamHandler which writes to
- sys.stderr, set a formatter using the BASIC_FORMAT format string, and
- add the handler to the root logger.
-
- A number of optional keyword arguments may be specified, which can alter
- the default behaviour.
-
- filename Specifies that a FileHandler be created, using the specified
- filename, rather than a StreamHandler.
- filemode Specifies the mode to open the file, if filename is specified
- (if filemode is unspecified, it defaults to 'a').
- format Use the specified format string for the handler.
- datefmt Use the specified date/time format.
- level Set the root logger level to the specified level.
- stream Use the specified stream to initialize the StreamHandler. Note
- that this argument is incompatible with 'filename' - if both
- are present, 'stream' is ignored.
-
- Note that you could specify a stream created using open(filename, mode)
- rather than passing the filename and mode in. However, it should be
- remembered that StreamHandler does not close its stream (since it may be
- using sys.stdout or sys.stderr), whereas FileHandler closes its stream
- when the handler is closed.
- """
- # Add thread safety in case someone mistakenly calls
- # basicConfig() from multiple threads
- _acquireLock()
- try:
- if len(root.handlers) == 0:
- filename = kwargs.get("filename")
- if filename:
- mode = kwargs.get("filemode", 'a')
- hdlr = FileHandler(filename, mode)
- else:
- stream = kwargs.get("stream")
- hdlr = StreamHandler(stream)
- fs = kwargs.get("format", BASIC_FORMAT)
- dfs = kwargs.get("datefmt", None)
- fmt = Formatter(fs, dfs)
- hdlr.setFormatter(fmt)
- root.addHandler(hdlr)
- level = kwargs.get("level")
- if level is not None:
- root.setLevel(level)
- finally:
- _releaseLock()
-
-#---------------------------------------------------------------------------
-# Utility functions at module level.
-# Basically delegate everything to the root logger.
-#---------------------------------------------------------------------------
-
-def getLogger(name=None):
- """
- Return a logger with the specified name, creating it if necessary.
-
- If no name is specified, return the root logger.
- """
- if name:
- return Logger.manager.getLogger(name)
- else:
- return root
-
-#def getRootLogger():
-# """
-# Return the root logger.
-#
-# Note that getLogger('') now does the same thing, so this function is
-# deprecated and may disappear in the future.
-# """
-# return root
-
-def critical(msg, *args, **kwargs):
- """
- Log a message with severity 'CRITICAL' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.critical(msg, *args, **kwargs)
-
-fatal = critical
-
-def error(msg, *args, **kwargs):
- """
- Log a message with severity 'ERROR' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.error(msg, *args, **kwargs)
-
-def exception(msg, *args, **kwargs):
- """
- Log a message with severity 'ERROR' on the root logger,
- with exception information.
- """
- kwargs['exc_info'] = 1
- error(msg, *args, **kwargs)
-
-def warning(msg, *args, **kwargs):
- """
- Log a message with severity 'WARNING' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.warning(msg, *args, **kwargs)
-
-warn = warning
-
-def info(msg, *args, **kwargs):
- """
- Log a message with severity 'INFO' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.info(msg, *args, **kwargs)
-
-def debug(msg, *args, **kwargs):
- """
- Log a message with severity 'DEBUG' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.debug(msg, *args, **kwargs)
-
-def log(level, msg, *args, **kwargs):
- """
- Log 'msg % args' with the integer severity 'level' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- root.log(level, msg, *args, **kwargs)
-
-def disable(level):
- """
- Disable all logging calls of severity 'level' and below.
- """
- root.manager.disable = level
-
-def shutdown(handlerList=_handlerList):
- """
- Perform any cleanup actions in the logging system (e.g. flushing
- buffers).
-
- Should be called at application exit.
- """
- for wr in reversed(handlerList[:]):
- #errors might occur, for example, if files are locked
- #we just ignore them if raiseExceptions is not set
- try:
- h = wr()
- if h:
- try:
- h.acquire()
- h.flush()
- h.close()
- except (IOError, ValueError):
- # Ignore errors which might be caused
- # because handlers have been closed but
- # references to them are still around at
- # application exit.
- pass
- finally:
- h.release()
- except:
- if raiseExceptions:
- raise
- #else, swallow
-
-#Let's try and shutdown automatically on application exit...
-import atexit
-atexit.register(shutdown)
-
-# Null handler
-
-class NullHandler(Handler):
- """
- This handler does nothing. It's intended to be used to avoid the
- "No handlers could be found for logger XXX" one-off warning. This is
- important for library code, which may contain code to log events. If a user
- of the library does not configure logging, the one-off warning might be
- produced; to avoid this, the library developer simply needs to instantiate
- a NullHandler and add it to the top-level logger of the library module or
- package.
- """
- def handle(self, record):
- pass
-
- def emit(self, record):
- pass
-
- def createLock(self):
- self.lock = None
-
-# Warnings integration
-
-_warnings_showwarning = None
-
-def _showwarning(message, category, filename, lineno, file=None, line=None):
- """
- Implementation of showwarning which redirects to logging, which will first
- check to see if the file parameter is None. If a file is specified, it will
- delegate to the original warnings implementation of showwarning. Otherwise,
- it will call warnings.formatwarning and will log the resulting string to a
- warnings logger named "py.warnings" with level logging.WARNING.
- """
- if file is not None:
- if _warnings_showwarning is not None:
- _warnings_showwarning(message, category, filename, lineno, file, line)
- else:
- s = warnings.formatwarning(message, category, filename, lineno, line)
- logger = getLogger("py.warnings")
- if not logger.handlers:
- logger.addHandler(NullHandler())
- logger.warning("%s", s)
-
-def captureWarnings(capture):
- """
- If capture is true, redirect all warnings to the logging package.
- If capture is False, ensure that warnings are not redirected to logging
- but to their original destinations.
- """
- global _warnings_showwarning
- if capture:
- if _warnings_showwarning is None:
- _warnings_showwarning = warnings.showwarning
- warnings.showwarning = _showwarning
- else:
- if _warnings_showwarning is not None:
- warnings.showwarning = _warnings_showwarning
- _warnings_showwarning = None
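The hunk above removes the vendored copy of the Python 2 `logging` core module. Assuming the motivation is the move to Ren'Py 8 / Python 3 (whose standard library already ships `logging`), existing call sites should keep working against the stdlib without changes. Below is a minimal sketch of the two usage patterns the removed module supported, written against the stdlib replacement; the `mas.submod` logger name is purely illustrative.

import logging

# Library-style usage: attach a NullHandler so importing the package never
# emits the "No handlers could be found for logger ..." warning when the
# host application does not configure logging itself.
logging.getLogger("mas.submod").addHandler(logging.NullHandler())  # hypothetical logger name

# Application-style usage: one-shot root configuration, equivalent to the
# basicConfig() defined in the removed module.
logging.basicConfig(format="%(levelname)s:%(name)s:%(message)s", level=logging.INFO)
logging.getLogger("mas.submod").info("now served by the standard library")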
diff --git a/Monika After Story/game/python-packages/logging/config.py b/Monika After Story/game/python-packages/logging/config.py
deleted file mode 100644
index 8b3795675d..0000000000
--- a/Monika After Story/game/python-packages/logging/config.py
+++ /dev/null
@@ -1,919 +0,0 @@
-# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Configuration functions for the logging package for Python. The core package
-is based on PEP 282 and comments thereto in comp.lang.python, and influenced
-by Apache's log4j system.
-
-Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import cStringIO
-import errno
-import io
-import logging
-import logging.handlers
-import os
-import re
-import socket
-import struct
-import sys
-import traceback
-import types
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-from SocketServer import ThreadingTCPServer, StreamRequestHandler
-
-
-DEFAULT_LOGGING_CONFIG_PORT = 9030
-
-RESET_ERROR = errno.ECONNRESET
-
-#
-# The following code implements a socket listener for on-the-fly
-# reconfiguration of logging.
-#
-# _listener holds the server object doing the listening
-_listener = None
-
-def fileConfig(fname, defaults=None, disable_existing_loggers=True):
- """
- Read the logging configuration from a ConfigParser-format file.
-
- This can be called several times from an application, allowing an end user
- the ability to select from various pre-canned configurations (if the
- developer provides a mechanism to present the choices and load the chosen
- configuration).
- """
- import ConfigParser
-
- cp = ConfigParser.ConfigParser(defaults)
- if hasattr(fname, 'readline'):
- cp.readfp(fname)
- else:
- cp.read(fname)
-
- formatters = _create_formatters(cp)
-
- # critical section
- logging._acquireLock()
- try:
- logging._handlers.clear()
- del logging._handlerList[:]
- # Handlers add themselves to logging._handlers
- handlers = _install_handlers(cp, formatters)
- _install_loggers(cp, handlers, disable_existing_loggers)
- finally:
- logging._releaseLock()
-
-
-def _resolve(name):
- """Resolve a dotted name to a global object."""
- name = name.split('.')
- used = name.pop(0)
- found = __import__(used)
- for n in name:
- used = used + '.' + n
- try:
- found = getattr(found, n)
- except AttributeError:
- __import__(used)
- found = getattr(found, n)
- return found
-
-def _strip_spaces(alist):
- return map(lambda x: x.strip(), alist)
-
-def _encoded(s):
- return s if isinstance(s, str) else s.encode('utf-8')
-
-def _create_formatters(cp):
- """Create and return formatters"""
- flist = cp.get("formatters", "keys")
- if not len(flist):
- return {}
- flist = flist.split(",")
- flist = _strip_spaces(flist)
- formatters = {}
- for form in flist:
- sectname = "formatter_%s" % form
- opts = cp.options(sectname)
- if "format" in opts:
- fs = cp.get(sectname, "format", 1)
- else:
- fs = None
- if "datefmt" in opts:
- dfs = cp.get(sectname, "datefmt", 1)
- else:
- dfs = None
- c = logging.Formatter
- if "class" in opts:
- class_name = cp.get(sectname, "class")
- if class_name:
- c = _resolve(class_name)
- f = c(fs, dfs)
- formatters[form] = f
- return formatters
-
-
-def _install_handlers(cp, formatters):
- """Install and return handlers"""
- hlist = cp.get("handlers", "keys")
- if not len(hlist):
- return {}
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- handlers = {}
- fixups = [] #for inter-handler references
- for hand in hlist:
- sectname = "handler_%s" % hand
- klass = cp.get(sectname, "class")
- opts = cp.options(sectname)
- if "formatter" in opts:
- fmt = cp.get(sectname, "formatter")
- else:
- fmt = ""
- try:
- klass = eval(klass, vars(logging))
- except (AttributeError, NameError):
- klass = _resolve(klass)
- args = cp.get(sectname, "args")
- args = eval(args, vars(logging))
- h = klass(*args)
- if "level" in opts:
- level = cp.get(sectname, "level")
- h.setLevel(logging._levelNames[level])
- if len(fmt):
- h.setFormatter(formatters[fmt])
- if issubclass(klass, logging.handlers.MemoryHandler):
- if "target" in opts:
- target = cp.get(sectname,"target")
- else:
- target = ""
- if len(target): #the target handler may not be loaded yet, so keep for later...
- fixups.append((h, target))
- handlers[hand] = h
- #now all handlers are loaded, fixup inter-handler references...
- for h, t in fixups:
- h.setTarget(handlers[t])
- return handlers
-
-
-def _install_loggers(cp, handlers, disable_existing_loggers):
- """Create and install loggers"""
-
- # configure the root first
- llist = cp.get("loggers", "keys")
- llist = llist.split(",")
- llist = list(map(lambda x: x.strip(), llist))
- llist.remove("root")
- sectname = "logger_root"
- root = logging.root
- log = root
- opts = cp.options(sectname)
- if "level" in opts:
- level = cp.get(sectname, "level")
- log.setLevel(logging._levelNames[level])
- for h in root.handlers[:]:
- root.removeHandler(h)
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- for hand in hlist:
- log.addHandler(handlers[hand])
-
- #and now the others...
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- existing = list(root.manager.loggerDict.keys())
- #The list needs to be sorted so that we can
- #avoid disabling child loggers of explicitly
- #named loggers. With a sorted list it is easier
- #to find the child loggers.
- existing.sort()
- #We'll keep the list of existing loggers
- #which are children of named loggers here...
- child_loggers = []
- #now set up the new ones...
- for log in llist:
- sectname = "logger_%s" % log
- qn = cp.get(sectname, "qualname")
- opts = cp.options(sectname)
- if "propagate" in opts:
- propagate = cp.getint(sectname, "propagate")
- else:
- propagate = 1
- logger = logging.getLogger(qn)
- if qn in existing:
- i = existing.index(qn) + 1 # start with the entry after qn
- prefixed = qn + "."
- pflen = len(prefixed)
- num_existing = len(existing)
- while i < num_existing:
- if existing[i][:pflen] == prefixed:
- child_loggers.append(existing[i])
- i += 1
- existing.remove(qn)
- if "level" in opts:
- level = cp.get(sectname, "level")
- logger.setLevel(logging._levelNames[level])
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- logger.propagate = propagate
- logger.disabled = 0
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- for hand in hlist:
- logger.addHandler(handlers[hand])
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- #However, don't disable children of named loggers, as that's
- #probably not what was intended by the user.
- for log in existing:
- logger = root.manager.loggerDict[log]
- if log in child_loggers:
- logger.level = logging.NOTSET
- logger.handlers = []
- logger.propagate = 1
- else:
- logger.disabled = disable_existing_loggers
-
-
-
-IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
-
-
-def valid_ident(s):
- m = IDENTIFIER.match(s)
- if not m:
- raise ValueError('Not a valid Python identifier: %r' % s)
- return True
-
-
-class ConvertingMixin(object):
- """For ConvertingXXX's, this mixin class provides common functions"""
-
- def convert_with_key(self, key, value, replace=True):
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- if replace:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def convert(self, value):
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- return result
-
-
-# The ConvertingXXX classes are wrappers around standard Python containers,
-# and they serve to convert any suitable values in the container. The
-# conversion converts base dicts, lists and tuples to their wrapped
-# equivalents, whereas strings which match a conversion format are converted
-# appropriately.
-#
-# Each wrapper should have a configurator attribute holding the actual
-# configurator to use for conversion.
-
-class ConvertingDict(dict, ConvertingMixin):
- """A converting dictionary wrapper."""
-
- def __getitem__(self, key):
- value = dict.__getitem__(self, key)
- return self.convert_with_key(key, value)
-
- def get(self, key, default=None):
- value = dict.get(self, key, default)
- return self.convert_with_key(key, value)
-
- def pop(self, key, default=None):
- value = dict.pop(self, key, default)
- return self.convert_with_key(key, value, replace=False)
-
-class ConvertingList(list, ConvertingMixin):
- """A converting list wrapper."""
- def __getitem__(self, key):
- value = list.__getitem__(self, key)
- return self.convert_with_key(key, value)
-
- def pop(self, idx=-1):
- value = list.pop(self, idx)
- return self.convert(value)
-
-class ConvertingTuple(tuple, ConvertingMixin):
- """A converting tuple wrapper."""
- def __getitem__(self, key):
- value = tuple.__getitem__(self, key)
- # Can't replace a tuple entry.
- return self.convert_with_key(key, value, replace=False)
-
-class BaseConfigurator(object):
- """
- The configurator base class which defines some useful defaults.
- """
-
- CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
-
- WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
- DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
- INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
- DIGIT_PATTERN = re.compile(r'^\d+$')
-
- value_converters = {
- 'ext' : 'ext_convert',
- 'cfg' : 'cfg_convert',
- }
-
- # We might want to use a different one, e.g. importlib
- importer = __import__
-
- def __init__(self, config):
- self.config = ConvertingDict(config)
- self.config.configurator = self
- # Issue 12718: winpdb replaces __import__ with a Python function, which
- # ends up being treated as a bound method. To avoid problems, we
- # set the importer on the instance, but leave it defined in the class
- # so existing code doesn't break
- if type(__import__) == types.FunctionType:
- self.importer = __import__
-
- def resolve(self, s):
- """
- Resolve strings to objects using standard import and attribute
- syntax.
- """
- name = s.split('.')
- used = name.pop(0)
- try:
- found = self.importer(used)
- for frag in name:
- used += '.' + frag
- try:
- found = getattr(found, frag)
- except AttributeError:
- self.importer(used)
- found = getattr(found, frag)
- return found
- except ImportError:
- e, tb = sys.exc_info()[1:]
- v = ValueError('Cannot resolve %r: %s' % (s, e))
- v.__cause__, v.__traceback__ = e, tb
- raise v
-
- def ext_convert(self, value):
- """Default converter for the ext:// protocol."""
- return self.resolve(value)
-
- def cfg_convert(self, value):
- """Default converter for the cfg:// protocol."""
- rest = value
- m = self.WORD_PATTERN.match(rest)
- if m is None:
- raise ValueError("Unable to convert %r" % value)
- else:
- rest = rest[m.end():]
- d = self.config[m.groups()[0]]
- #print d, rest
- while rest:
- m = self.DOT_PATTERN.match(rest)
- if m:
- d = d[m.groups()[0]]
- else:
- m = self.INDEX_PATTERN.match(rest)
- if m:
- idx = m.groups()[0]
- if not self.DIGIT_PATTERN.match(idx):
- d = d[idx]
- else:
- try:
- n = int(idx) # try as number first (most likely)
- d = d[n]
- except TypeError:
- d = d[idx]
- if m:
- rest = rest[m.end():]
- else:
- raise ValueError('Unable to convert '
- '%r at %r' % (value, rest))
- #rest should be empty
- return d
-
- def convert(self, value):
- """
- Convert values to an appropriate type. dicts, lists and tuples are
- replaced by their converting alternatives. Strings are checked to
- see if they have a conversion format and are converted if they do.
- """
- if not isinstance(value, ConvertingDict) and isinstance(value, dict):
- value = ConvertingDict(value)
- value.configurator = self
- elif not isinstance(value, ConvertingList) and isinstance(value, list):
- value = ConvertingList(value)
- value.configurator = self
- elif not isinstance(value, ConvertingTuple) and\
- isinstance(value, tuple):
- value = ConvertingTuple(value)
- value.configurator = self
- elif isinstance(value, basestring): # str for py3k
- m = self.CONVERT_PATTERN.match(value)
- if m:
- d = m.groupdict()
- prefix = d['prefix']
- converter = self.value_converters.get(prefix, None)
- if converter:
- suffix = d['suffix']
- converter = getattr(self, converter)
- value = converter(suffix)
- return value
-
- def configure_custom(self, config):
- """Configure an object with a user-supplied factory."""
- c = config.pop('()')
- if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
- c = self.resolve(c)
- props = config.pop('.', None)
- # Check for valid identifiers
- kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
- result = c(**kwargs)
- if props:
- for name, value in props.items():
- setattr(result, name, value)
- return result
-
- def as_tuple(self, value):
- """Utility function which converts lists to tuples."""
- if isinstance(value, list):
- value = tuple(value)
- return value
-
-class DictConfigurator(BaseConfigurator):
- """
- Configure logging using a dictionary-like object to describe the
- configuration.
- """
-
- def configure(self):
- """Do the configuration."""
-
- config = self.config
- if 'version' not in config:
- raise ValueError("dictionary doesn't specify a version")
- if config['version'] != 1:
- raise ValueError("Unsupported version: %s" % config['version'])
- incremental = config.pop('incremental', False)
- EMPTY_DICT = {}
- logging._acquireLock()
- try:
- if incremental:
- handlers = config.get('handlers', EMPTY_DICT)
- for name in handlers:
- if name not in logging._handlers:
- raise ValueError('No handler found with '
- 'name %r' % name)
- else:
- try:
- handler = logging._handlers[name]
- handler_config = handlers[name]
- level = handler_config.get('level', None)
- if level:
- handler.setLevel(logging._checkLevel(level))
- except StandardError as e:
- raise ValueError('Unable to configure handler '
- '%r: %s' % (name, e))
- loggers = config.get('loggers', EMPTY_DICT)
- for name in loggers:
- try:
- self.configure_logger(name, loggers[name], True)
- except StandardError as e:
- raise ValueError('Unable to configure logger '
- '%r: %s' % (name, e))
- root = config.get('root', None)
- if root:
- try:
- self.configure_root(root, True)
- except StandardError as e:
- raise ValueError('Unable to configure root '
- 'logger: %s' % e)
- else:
- disable_existing = config.pop('disable_existing_loggers', True)
-
- logging._handlers.clear()
- del logging._handlerList[:]
-
- # Do formatters first - they don't refer to anything else
- formatters = config.get('formatters', EMPTY_DICT)
- for name in formatters:
- try:
- formatters[name] = self.configure_formatter(
- formatters[name])
- except StandardError as e:
- raise ValueError('Unable to configure '
- 'formatter %r: %s' % (name, e))
- # Next, do filters - they don't refer to anything else, either
- filters = config.get('filters', EMPTY_DICT)
- for name in filters:
- try:
- filters[name] = self.configure_filter(filters[name])
- except StandardError as e:
- raise ValueError('Unable to configure '
- 'filter %r: %s' % (name, e))
-
- # Next, do handlers - they refer to formatters and filters
- # As handlers can refer to other handlers, sort the keys
- # to allow a deterministic order of configuration
- handlers = config.get('handlers', EMPTY_DICT)
- deferred = []
- for name in sorted(handlers):
- try:
- handler = self.configure_handler(handlers[name])
- handler.name = name
- handlers[name] = handler
- except StandardError as e:
- if 'target not configured yet' in str(e):
- deferred.append(name)
- else:
- raise ValueError('Unable to configure handler '
- '%r: %s' % (name, e))
-
- # Now do any that were deferred
- for name in deferred:
- try:
- handler = self.configure_handler(handlers[name])
- handler.name = name
- handlers[name] = handler
- except StandardError as e:
- raise ValueError('Unable to configure handler '
- '%r: %s' % (name, e))
-
- # Next, do loggers - they refer to handlers and filters
-
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- root = logging.root
- existing = root.manager.loggerDict.keys()
- #The list needs to be sorted so that we can
- #avoid disabling child loggers of explicitly
- #named loggers. With a sorted list it is easier
- #to find the child loggers.
- existing.sort()
- #We'll keep the list of existing loggers
- #which are children of named loggers here...
- child_loggers = []
- #now set up the new ones...
- loggers = config.get('loggers', EMPTY_DICT)
- for name in loggers:
- name = _encoded(name)
- if name in existing:
- i = existing.index(name)
- prefixed = name + "."
- pflen = len(prefixed)
- num_existing = len(existing)
- i = i + 1 # look at the entry after name
- while (i < num_existing) and\
- (existing[i][:pflen] == prefixed):
- child_loggers.append(existing[i])
- i = i + 1
- existing.remove(name)
- try:
- self.configure_logger(name, loggers[name])
- except StandardError as e:
- raise ValueError('Unable to configure logger '
- '%r: %s' % (name, e))
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- #However, don't disable children of named loggers, as that's
- #probably not what was intended by the user.
- for log in existing:
- logger = root.manager.loggerDict[log]
- if log in child_loggers:
- logger.level = logging.NOTSET
- logger.handlers = []
- logger.propagate = True
- elif disable_existing:
- logger.disabled = True
-
- # And finally, do the root logger
- root = config.get('root', None)
- if root:
- try:
- self.configure_root(root)
- except StandardError as e:
- raise ValueError('Unable to configure root '
- 'logger: %s' % e)
- finally:
- logging._releaseLock()
-
- def configure_formatter(self, config):
- """Configure a formatter from a dictionary."""
- if '()' in config:
- factory = config['()'] # for use in exception handler
- try:
- result = self.configure_custom(config)
- except TypeError as te:
- if "'format'" not in str(te):
- raise
- #Name of parameter changed from fmt to format.
- #Retry with old name.
- #This is so that code can be used with older Python versions
- #(e.g. by Django)
- config['fmt'] = config.pop('format')
- config['()'] = factory
- result = self.configure_custom(config)
- else:
- fmt = config.get('format', None)
- dfmt = config.get('datefmt', None)
- result = logging.Formatter(fmt, dfmt)
- return result
-
- def configure_filter(self, config):
- """Configure a filter from a dictionary."""
- if '()' in config:
- result = self.configure_custom(config)
- else:
- name = config.get('name', '')
- result = logging.Filter(name)
- return result
-
- def add_filters(self, filterer, filters):
- """Add filters to a filterer from a list of names."""
- for f in filters:
- try:
- filterer.addFilter(self.config['filters'][f])
- except StandardError as e:
- raise ValueError('Unable to add filter %r: %s' % (f, e))
-
- def configure_handler(self, config):
- """Configure a handler from a dictionary."""
- formatter = config.pop('formatter', None)
- if formatter:
- try:
- formatter = self.config['formatters'][formatter]
- except StandardError as e:
- raise ValueError('Unable to set formatter '
- '%r: %s' % (formatter, e))
- level = config.pop('level', None)
- filters = config.pop('filters', None)
- if '()' in config:
- c = config.pop('()')
- if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
- c = self.resolve(c)
- factory = c
- else:
- cname = config.pop('class')
- klass = self.resolve(cname)
- #Special case for handler which refers to another handler
- if issubclass(klass, logging.handlers.MemoryHandler) and\
- 'target' in config:
- try:
- th = self.config['handlers'][config['target']]
- if not isinstance(th, logging.Handler):
- config['class'] = cname # restore for deferred configuration
- raise StandardError('target not configured yet')
- config['target'] = th
- except StandardError as e:
- raise ValueError('Unable to set target handler '
- '%r: %s' % (config['target'], e))
- elif issubclass(klass, logging.handlers.SMTPHandler) and\
- 'mailhost' in config:
- config['mailhost'] = self.as_tuple(config['mailhost'])
- elif issubclass(klass, logging.handlers.SysLogHandler) and\
- 'address' in config:
- config['address'] = self.as_tuple(config['address'])
- factory = klass
- kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
- try:
- result = factory(**kwargs)
- except TypeError as te:
- if "'stream'" not in str(te):
- raise
- #The argument name changed from strm to stream
- #Retry with old name.
- #This is so that code can be used with older Python versions
- #(e.g. by Django)
- kwargs['strm'] = kwargs.pop('stream')
- result = factory(**kwargs)
- if formatter:
- result.setFormatter(formatter)
- if level is not None:
- result.setLevel(logging._checkLevel(level))
- if filters:
- self.add_filters(result, filters)
- return result
-
- def add_handlers(self, logger, handlers):
- """Add handlers to a logger from a list of names."""
- for h in handlers:
- try:
- logger.addHandler(self.config['handlers'][h])
- except StandardError as e:
- raise ValueError('Unable to add handler %r: %s' % (h, e))
-
- def common_logger_config(self, logger, config, incremental=False):
- """
- Perform configuration which is common to root and non-root loggers.
- """
- level = config.get('level', None)
- if level is not None:
- logger.setLevel(logging._checkLevel(level))
- if not incremental:
- #Remove any existing handlers
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- handlers = config.get('handlers', None)
- if handlers:
- self.add_handlers(logger, handlers)
- filters = config.get('filters', None)
- if filters:
- self.add_filters(logger, filters)
-
- def configure_logger(self, name, config, incremental=False):
- """Configure a non-root logger from a dictionary."""
- logger = logging.getLogger(name)
- self.common_logger_config(logger, config, incremental)
- propagate = config.get('propagate', None)
- if propagate is not None:
- logger.propagate = propagate
-
- def configure_root(self, config, incremental=False):
- """Configure a root logger from a dictionary."""
- root = logging.getLogger()
- self.common_logger_config(root, config, incremental)
-
-dictConfigClass = DictConfigurator
-
-def dictConfig(config):
- """Configure logging using a dictionary."""
- dictConfigClass(config).configure()
-
-
-def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
- """
- Start up a socket server on the specified port, and listen for new
- configurations.
-
- These will be sent as a file suitable for processing by fileConfig().
- Returns a Thread object on which you can call start() to start the server,
- and which you can join() when appropriate. To stop the server, call
- stopListening().
- """
- if not thread:
- raise NotImplementedError("listen() needs threading to work")
-
- class ConfigStreamHandler(StreamRequestHandler):
- """
- Handler for a logging configuration request.
-
- It expects a completely new logging configuration and uses fileConfig
- to install it.
- """
- def handle(self):
- """
- Handle a request.
-
- Each request is expected to be a 4-byte length, packed using
- struct.pack(">L", n), followed by the config file.
- Uses fileConfig() to do the grunt work.
- """
- import tempfile
- try:
- conn = self.connection
- chunk = conn.recv(4)
- if len(chunk) == 4:
- slen = struct.unpack(">L", chunk)[0]
- chunk = self.connection.recv(slen)
- while len(chunk) < slen:
- chunk = chunk + conn.recv(slen - len(chunk))
- try:
- import json
- d = json.loads(chunk)
- assert isinstance(d, dict)
- dictConfig(d)
- except:
- #Apply new configuration.
-
- file = cStringIO.StringIO(chunk)
- try:
- fileConfig(file)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- traceback.print_exc()
- if self.server.ready:
- self.server.ready.set()
- except socket.error as e:
- if e.errno != RESET_ERROR:
- raise
-
- class ConfigSocketReceiver(ThreadingTCPServer):
- """
- A simple TCP socket-based logging config receiver.
- """
-
- allow_reuse_address = 1
-
- def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
- handler=None, ready=None):
- ThreadingTCPServer.__init__(self, (host, port), handler)
- logging._acquireLock()
- self.abort = 0
- logging._releaseLock()
- self.timeout = 1
- self.ready = ready
-
- def serve_until_stopped(self):
- import select
- abort = 0
- while not abort:
- rd, wr, ex = select.select([self.socket.fileno()],
- [], [],
- self.timeout)
- if rd:
- self.handle_request()
- logging._acquireLock()
- abort = self.abort
- logging._releaseLock()
- self.socket.close()
-
- class Server(threading.Thread):
-
- def __init__(self, rcvr, hdlr, port):
- super(Server, self).__init__()
- self.rcvr = rcvr
- self.hdlr = hdlr
- self.port = port
- self.ready = threading.Event()
-
- def run(self):
- server = self.rcvr(port=self.port, handler=self.hdlr,
- ready=self.ready)
- if self.port == 0:
- self.port = server.server_address[1]
- self.ready.set()
- global _listener
- logging._acquireLock()
- _listener = server
- logging._releaseLock()
- server.serve_until_stopped()
-
- return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
-
-def stopListening():
- """
- Stop the listening server which was created with a call to listen().
- """
- global _listener
- logging._acquireLock()
- try:
- if _listener:
- _listener.abort = 1
- _listener = None
- finally:
- logging._releaseLock()
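
For reference, dictConfig(), the main entry point of the module deleted above, has an identical counterpart in the Python 3 standard library, so dictionary-based configuration keeps working once this vendored Python 2 copy is removed. A minimal sketch; the logger name mas.example and the format string are illustrative, not taken from project code:

import logging
import logging.config

logging.config.dictConfig({
    "version": 1,  # required schema version
    "formatters": {
        "plain": {"format": "%(levelname)s:%(name)s:%(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "plain",
            "level": "DEBUG",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
})

logging.getLogger("mas.example").info("configured via dictConfig")
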
diff --git a/Monika After Story/game/python-packages/logging/handlers.py b/Monika After Story/game/python-packages/logging/handlers.py
deleted file mode 100644
index e0b935c878..0000000000
--- a/Monika After Story/game/python-packages/logging/handlers.py
+++ /dev/null
@@ -1,1242 +0,0 @@
-# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Additional handlers for the logging package for Python. The core package is
-based on PEP 282 and comments thereto in comp.lang.python.
-
-Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging.handlers' and log away!
-"""
-
-import errno, logging, socket, os, cPickle, struct, time, re
-from stat import ST_DEV, ST_INO, ST_MTIME
-
-try:
- import codecs
-except ImportError:
- codecs = None
-try:
- unicode
- _unicode = True
-except NameError:
- _unicode = False
-
-#
-# Some constants...
-#
-
-DEFAULT_TCP_LOGGING_PORT = 9020
-DEFAULT_UDP_LOGGING_PORT = 9021
-DEFAULT_HTTP_LOGGING_PORT = 9022
-DEFAULT_SOAP_LOGGING_PORT = 9023
-SYSLOG_UDP_PORT = 514
-SYSLOG_TCP_PORT = 514
-
-_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
-
-class BaseRotatingHandler(logging.FileHandler):
- """
- Base class for handlers that rotate log files at a certain point.
- Not meant to be instantiated directly. Instead, use RotatingFileHandler
- or TimedRotatingFileHandler.
- """
- def __init__(self, filename, mode, encoding=None, delay=0):
- """
- Use the specified filename for streamed logging
- """
- if codecs is None:
- encoding = None
- logging.FileHandler.__init__(self, filename, mode, encoding, delay)
- self.mode = mode
- self.encoding = encoding
-
- def emit(self, record):
- """
- Emit a record.
-
- Output the record to the file, catering for rollover as described
- in doRollover().
- """
- try:
- if self.shouldRollover(record):
- self.doRollover()
- logging.FileHandler.emit(self, record)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class RotatingFileHandler(BaseRotatingHandler):
- """
- Handler for logging to a set of files, which switches from one file
- to the next when the current file reaches a certain size.
- """
- def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
- """
- Open the specified file and use it as the stream for logging.
-
- By default, the file grows indefinitely. You can specify particular
- values of maxBytes and backupCount to allow the file to rollover at
- a predetermined size.
-
- Rollover occurs whenever the current log file is nearly maxBytes in
- length. If backupCount is >= 1, the system will successively create
- new files with the same pathname as the base file, but with extensions
- ".1", ".2" etc. appended to it. For example, with a backupCount of 5
- and a base file name of "app.log", you would get "app.log",
- "app.log.1", "app.log.2", ... through to "app.log.5". The file being
- written to is always "app.log" - when it gets filled up, it is closed
- and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
- exist, then they are renamed to "app.log.2", "app.log.3" etc.
- respectively.
-
- If maxBytes is zero, rollover never occurs.
- """
- # If rotation/rollover is wanted, it doesn't make sense to use another
- # mode. If for example 'w' were specified, then if there were multiple
- # runs of the calling application, the logs from previous runs would be
- # lost if the 'w' is respected, because the log file would be truncated
- # on each run.
- if maxBytes > 0:
- mode = 'a'
- BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
- self.maxBytes = maxBytes
- self.backupCount = backupCount
-
- def doRollover(self):
- """
- Do a rollover, as described in __init__().
- """
- if self.stream:
- self.stream.close()
- self.stream = None
- if self.backupCount > 0:
- for i in range(self.backupCount - 1, 0, -1):
- sfn = "%s.%d" % (self.baseFilename, i)
- dfn = "%s.%d" % (self.baseFilename, i + 1)
- if os.path.exists(sfn):
- #print "%s -> %s" % (sfn, dfn)
- if os.path.exists(dfn):
- os.remove(dfn)
- os.rename(sfn, dfn)
- dfn = self.baseFilename + ".1"
- if os.path.exists(dfn):
- os.remove(dfn)
- # Issue 18940: A file may not have been created if delay is True.
- if os.path.exists(self.baseFilename):
- os.rename(self.baseFilename, dfn)
- if not self.delay:
- self.stream = self._open()
-
- def shouldRollover(self, record):
- """
- Determine if rollover should occur.
-
- Basically, see if the supplied record would cause the file to exceed
- the size limit we have.
- """
- if self.stream is None: # delay was set...
- self.stream = self._open()
- if self.maxBytes > 0: # are we rolling over?
- msg = "%s\n" % self.format(record)
- self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
- if self.stream.tell() + len(msg) >= self.maxBytes:
- return 1
- return 0
-
-class TimedRotatingFileHandler(BaseRotatingHandler):
- """
- Handler for logging to a file, rotating the log file at certain timed
- intervals.
-
- If backupCount is > 0, when rollover is done, no more than backupCount
- files are kept - the oldest ones are deleted.
- """
- def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
- BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
- self.when = when.upper()
- self.backupCount = backupCount
- self.utc = utc
- # Calculate the real rollover interval, which is just the number of
- # seconds between rollovers. Also set the filename suffix used when
- # a rollover occurs. Current 'when' events supported:
- # S - Seconds
- # M - Minutes
- # H - Hours
- # D - Days
- # midnight - roll over at midnight
- # W{0-6} - roll over on a certain day; 0 - Monday
- #
- # Case of the 'when' specifier is not important; lower or upper case
- # will work.
- if self.when == 'S':
- self.interval = 1 # one second
- self.suffix = "%Y-%m-%d_%H-%M-%S"
- self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
- elif self.when == 'M':
- self.interval = 60 # one minute
- self.suffix = "%Y-%m-%d_%H-%M"
- self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
- elif self.when == 'H':
- self.interval = 60 * 60 # one hour
- self.suffix = "%Y-%m-%d_%H"
- self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
- elif self.when == 'D' or self.when == 'MIDNIGHT':
- self.interval = 60 * 60 * 24 # one day
- self.suffix = "%Y-%m-%d"
- self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
- elif self.when.startswith('W'):
- self.interval = 60 * 60 * 24 * 7 # one week
- if len(self.when) != 2:
- raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
- if self.when[1] < '0' or self.when[1] > '6':
- raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
- self.dayOfWeek = int(self.when[1])
- self.suffix = "%Y-%m-%d"
- self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
- else:
- raise ValueError("Invalid rollover interval specified: %s" % self.when)
-
- self.extMatch = re.compile(self.extMatch)
- self.interval = self.interval * interval # multiply by units requested
- if os.path.exists(filename):
- t = os.stat(filename)[ST_MTIME]
- else:
- t = int(time.time())
- self.rolloverAt = self.computeRollover(t)
-
- def computeRollover(self, currentTime):
- """
- Work out the rollover time based on the specified time.
- """
- result = currentTime + self.interval
- # If we are rolling over at midnight or weekly, then the interval is already known.
- # What we need to figure out is WHEN the next interval is. In other words,
- # if you are rolling over at midnight, then your base interval is 1 day,
- # but you want to start that one day clock at midnight, not now. So, we
- # have to fudge the rolloverAt value in order to trigger the first rollover
- # at the right time. After that, the regular interval will take care of
- # the rest. Note that this code doesn't care about leap seconds. :)
- if self.when == 'MIDNIGHT' or self.when.startswith('W'):
- # This could be done with less code, but I wanted it to be clear
- if self.utc:
- t = time.gmtime(currentTime)
- else:
- t = time.localtime(currentTime)
- currentHour = t[3]
- currentMinute = t[4]
- currentSecond = t[5]
- # r is the number of seconds left between now and midnight
- r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
- currentSecond)
- result = currentTime + r
- # If we are rolling over on a certain day, add in the number of days until
- # the next rollover, but offset by 1 since we just calculated the time
- # until the next day starts. There are three cases:
- # Case 1) The day to rollover is today; in this case, do nothing
- # Case 2) The day to rollover is further in the interval (i.e., today is
- # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
- # next rollover is simply 6 - 2 - 1, or 3.
- # Case 3) The day to rollover is behind us in the interval (i.e., today
- # is day 5 (Saturday) and rollover is on day 3 (Thursday).
- # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
- # number of days left in the current week (1) plus the number
- # of days in the next week until the rollover day (3).
- # The calculations described in 2) and 3) above need to have a day added.
- # This is because the above time calculation takes us to midnight on this
- # day, i.e. the start of the next day.
- if self.when.startswith('W'):
- day = t[6] # 0 is Monday
- if day != self.dayOfWeek:
- if day < self.dayOfWeek:
- daysToWait = self.dayOfWeek - day
- else:
- daysToWait = 6 - day + self.dayOfWeek + 1
- newRolloverAt = result + (daysToWait * (60 * 60 * 24))
- if not self.utc:
- dstNow = t[-1]
- dstAtRollover = time.localtime(newRolloverAt)[-1]
- if dstNow != dstAtRollover:
- if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- addend = -3600
- else: # DST bows out before next rollover, so we need to add an hour
- addend = 3600
- newRolloverAt += addend
- result = newRolloverAt
- return result
-
- def shouldRollover(self, record):
- """
- Determine if rollover should occur.
-
- record is not used, as we are just comparing times, but it is needed so
- the method signatures are the same
- """
- t = int(time.time())
- if t >= self.rolloverAt:
- return 1
- #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
- return 0
-
- def getFilesToDelete(self):
- """
- Determine the files to delete when rolling over.
-
- More specific than the earlier method, which just used glob.glob().
- """
- dirName, baseName = os.path.split(self.baseFilename)
- fileNames = os.listdir(dirName)
- result = []
- prefix = baseName + "."
- plen = len(prefix)
- for fileName in fileNames:
- if fileName[:plen] == prefix:
- suffix = fileName[plen:]
- if self.extMatch.match(suffix):
- result.append(os.path.join(dirName, fileName))
- result.sort()
- if len(result) < self.backupCount:
- result = []
- else:
- result = result[:len(result) - self.backupCount]
- return result
-
- def doRollover(self):
- """
- do a rollover; in this case, a date/time stamp is appended to the filename
- when the rollover happens. However, you want the file to be named for the
- start of the interval, not the current time. If there is a backup count,
- then we have to get a list of matching filenames, sort them and remove
- the one with the oldest suffix.
- """
- if self.stream:
- self.stream.close()
- self.stream = None
- # get the time that this sequence started at and make it a TimeTuple
- currentTime = int(time.time())
- dstNow = time.localtime(currentTime)[-1]
- t = self.rolloverAt - self.interval
- if self.utc:
- timeTuple = time.gmtime(t)
- else:
- timeTuple = time.localtime(t)
- dstThen = timeTuple[-1]
- if dstNow != dstThen:
- if dstNow:
- addend = 3600
- else:
- addend = -3600
- timeTuple = time.localtime(t + addend)
- dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
- if os.path.exists(dfn):
- os.remove(dfn)
- # Issue 18940: A file may not have been created if delay is True.
- if os.path.exists(self.baseFilename):
- os.rename(self.baseFilename, dfn)
- if self.backupCount > 0:
- for s in self.getFilesToDelete():
- os.remove(s)
- if not self.delay:
- self.stream = self._open()
- newRolloverAt = self.computeRollover(currentTime)
- while newRolloverAt <= currentTime:
- newRolloverAt = newRolloverAt + self.interval
- #If DST changes and midnight or weekly rollover, adjust for this.
- if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
- dstAtRollover = time.localtime(newRolloverAt)[-1]
- if dstNow != dstAtRollover:
- if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- addend = -3600
- else: # DST bows out before next rollover, so we need to add an hour
- addend = 3600
- newRolloverAt += addend
- self.rolloverAt = newRolloverAt
-
-class WatchedFileHandler(logging.FileHandler):
- """
- A handler for logging to a file, which watches the file
- to see if it has changed while in use. This can happen because of
- usage of programs such as newsyslog and logrotate which perform
- log file rotation. This handler, intended for use under Unix,
- watches the file to see if it has changed since the last emit.
- (A file has changed if its device or inode have changed.)
- If it has changed, the old file stream is closed, and the file
- opened to get a new stream.
-
- This handler is not appropriate for use under Windows, because
- under Windows open files cannot be moved or renamed - logging
- opens the files with exclusive locks - and so there is no need
- for such a handler. Furthermore, ST_INO is not supported under
- Windows; stat always returns zero for this value.
-
- This handler is based on a suggestion and patch by Chad J.
- Schroeder.
- """
- def __init__(self, filename, mode='a', encoding=None, delay=0):
- logging.FileHandler.__init__(self, filename, mode, encoding, delay)
- self.dev, self.ino = -1, -1
- self._statstream()
-
- def _statstream(self):
- if self.stream:
- sres = os.fstat(self.stream.fileno())
- self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
-
- def emit(self, record):
- """
- Emit a record.
-
- First check if the underlying file has changed, and if it
- has, close the old stream and reopen the file to get the
- current stream.
- """
- # Reduce the chance of race conditions by stat'ing by path only
- # once and then fstat'ing our new fd if we opened a new log stream.
- # See issue #14632: Thanks to John Mulligan for the problem report
- # and patch.
- try:
- # stat the file by path, checking for existence
- sres = os.stat(self.baseFilename)
- except OSError as err:
- if err.errno == errno.ENOENT:
- sres = None
- else:
- raise
- # compare file system stat with that of our stream file handle
- if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
- if self.stream is not None:
- # we have an open file handle, clean it up
- self.stream.flush()
- self.stream.close()
- self.stream = None # See Issue #21742: _open () might fail.
- # open a new file handle and get new stat info from that fd
- self.stream = self._open()
- self._statstream()
- logging.FileHandler.emit(self, record)
-
-class SocketHandler(logging.Handler):
- """
- A handler class which writes logging records, in pickle format, to
- a streaming socket. The socket is kept open across logging calls.
- If the peer resets it, an attempt is made to reconnect on the next call.
- The pickle which is sent is that of the LogRecord's attribute dictionary
- (__dict__), so that the receiver does not need to have the logging module
- installed in order to process the logging event.
-
- To unpickle the record at the receiving end into a LogRecord, use the
- makeLogRecord function.
- """
-
- def __init__(self, host, port):
- """
- Initializes the handler with a specific host address and port.
-
- The attribute 'closeOnError' is set to 1 - which means that if
- a socket error occurs, the socket is silently closed and then
- reopened on the next logging call.
- """
- logging.Handler.__init__(self)
- self.host = host
- self.port = port
- self.sock = None
- self.closeOnError = 0
- self.retryTime = None
- #
- # Exponential backoff parameters.
- #
- self.retryStart = 1.0
- self.retryMax = 30.0
- self.retryFactor = 2.0
-
- def makeSocket(self, timeout=1):
- """
- A factory method which allows subclasses to define the precise
- type of socket they want.
- """
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if hasattr(s, 'settimeout'):
- s.settimeout(timeout)
- s.connect((self.host, self.port))
- return s
-
- def createSocket(self):
- """
- Try to create a socket, using an exponential backoff with
- a max retry time. Thanks to Robert Olson for the original patch
- (SF #815911) which has been slightly refactored.
- """
- now = time.time()
- # Either retryTime is None, in which case this
- # is the first time back after a disconnect, or
- # we've waited long enough.
- if self.retryTime is None:
- attempt = 1
- else:
- attempt = (now >= self.retryTime)
- if attempt:
- try:
- self.sock = self.makeSocket()
- self.retryTime = None # next time, no delay before trying
- except socket.error:
- #Creation failed, so set the retry time and return.
- if self.retryTime is None:
- self.retryPeriod = self.retryStart
- else:
- self.retryPeriod = self.retryPeriod * self.retryFactor
- if self.retryPeriod > self.retryMax:
- self.retryPeriod = self.retryMax
- self.retryTime = now + self.retryPeriod
-
- def send(self, s):
- """
- Send a pickled string to the socket.
-
- This function allows for partial sends which can happen when the
- network is busy.
- """
- if self.sock is None:
- self.createSocket()
- #self.sock can be None either because we haven't reached the retry
- #time yet, or because we have reached the retry time and retried,
- #but are still unable to connect.
- if self.sock:
- try:
- if hasattr(self.sock, "sendall"):
- self.sock.sendall(s)
- else:
- sentsofar = 0
- left = len(s)
- while left > 0:
- sent = self.sock.send(s[sentsofar:])
- sentsofar = sentsofar + sent
- left = left - sent
- except socket.error:
- self.sock.close()
- self.sock = None # so we can call createSocket next time
-
- def makePickle(self, record):
- """
- Pickles the record in binary format with a length prefix, and
- returns it ready for transmission across the socket.
- """
- ei = record.exc_info
- if ei:
- # just to get traceback text into record.exc_text ...
- dummy = self.format(record)
- record.exc_info = None # to avoid Unpickleable error
- # See issue #14436: If msg or args are objects, they may not be
- # available on the receiving end. So we convert the msg % args
- # to a string, save it as msg and zap the args.
- d = dict(record.__dict__)
- d['msg'] = record.getMessage()
- d['args'] = None
- s = cPickle.dumps(d, 1)
- if ei:
- record.exc_info = ei # for next handler
- slen = struct.pack(">L", len(s))
- return slen + s
-
- def handleError(self, record):
- """
- Handle an error during logging.
-
- An error has occurred during logging. Most likely cause -
- connection lost. Close the socket so that we can retry on the
- next event.
- """
- if self.closeOnError and self.sock:
- self.sock.close()
- self.sock = None #try to reconnect next time
- else:
- logging.Handler.handleError(self, record)
-
- def emit(self, record):
- """
- Emit a record.
-
- Pickles the record and writes it to the socket in binary format.
- If there is an error with the socket, silently drop the packet.
- If there was a problem with the socket, re-establishes the
- socket.
- """
- try:
- s = self.makePickle(record)
- self.send(s)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
- def close(self):
- """
- Closes the socket.
- """
- self.acquire()
- try:
- sock = self.sock
- if sock:
- self.sock = None
- sock.close()
- finally:
- self.release()
- logging.Handler.close(self)
-
-class DatagramHandler(SocketHandler):
- """
- A handler class which writes logging records, in pickle format, to
- a datagram socket. The pickle which is sent is that of the LogRecord's
- attribute dictionary (__dict__), so that the receiver does not need to
- have the logging module installed in order to process the logging event.
-
- To unpickle the record at the receiving end into a LogRecord, use the
- makeLogRecord function.
-
- """
- def __init__(self, host, port):
- """
- Initializes the handler with a specific host address and port.
- """
- SocketHandler.__init__(self, host, port)
- self.closeOnError = 0
-
- def makeSocket(self):
- """
- The factory method of SocketHandler is here overridden to create
- a UDP socket (SOCK_DGRAM).
- """
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- return s
-
- def send(self, s):
- """
- Send a pickled string to a socket.
-
- This function no longer allows for partial sends which can happen
- when the network is busy - UDP does not guarantee delivery and
- can deliver packets out of sequence.
- """
- if self.sock is None:
- self.createSocket()
- self.sock.sendto(s, (self.host, self.port))
-
-class SysLogHandler(logging.Handler):
- """
- A handler class which sends formatted logging records to a syslog
- server. Based on Sam Rushing's syslog module:
- http://www.nightmare.com/squirl/python-ext/misc/syslog.py
- Contributed by Nicolas Untz (after which minor refactoring changes
- have been made).
- """
-
- # from :
- # ======================================================================
- # priorities/facilities are encoded into a single 32-bit quantity, where
- # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
- # facility (0-big number). Both the priorities and the facilities map
- # roughly one-to-one to strings in the syslogd(8) source code. This
- # mapping is included in this file.
- #
- # priorities (these are ordered)
-
- LOG_EMERG = 0 # system is unusable
- LOG_ALERT = 1 # action must be taken immediately
- LOG_CRIT = 2 # critical conditions
- LOG_ERR = 3 # error conditions
- LOG_WARNING = 4 # warning conditions
- LOG_NOTICE = 5 # normal but significant condition
- LOG_INFO = 6 # informational
- LOG_DEBUG = 7 # debug-level messages
-
- # facility codes
- LOG_KERN = 0 # kernel messages
- LOG_USER = 1 # random user-level messages
- LOG_MAIL = 2 # mail system
- LOG_DAEMON = 3 # system daemons
- LOG_AUTH = 4 # security/authorization messages
- LOG_SYSLOG = 5 # messages generated internally by syslogd
- LOG_LPR = 6 # line printer subsystem
- LOG_NEWS = 7 # network news subsystem
- LOG_UUCP = 8 # UUCP subsystem
- LOG_CRON = 9 # clock daemon
- LOG_AUTHPRIV = 10 # security/authorization messages (private)
- LOG_FTP = 11 # FTP daemon
-
- # other codes through 15 reserved for system use
- LOG_LOCAL0 = 16 # reserved for local use
- LOG_LOCAL1 = 17 # reserved for local use
- LOG_LOCAL2 = 18 # reserved for local use
- LOG_LOCAL3 = 19 # reserved for local use
- LOG_LOCAL4 = 20 # reserved for local use
- LOG_LOCAL5 = 21 # reserved for local use
- LOG_LOCAL6 = 22 # reserved for local use
- LOG_LOCAL7 = 23 # reserved for local use
-
- priority_names = {
- "alert": LOG_ALERT,
- "crit": LOG_CRIT,
- "critical": LOG_CRIT,
- "debug": LOG_DEBUG,
- "emerg": LOG_EMERG,
- "err": LOG_ERR,
- "error": LOG_ERR, # DEPRECATED
- "info": LOG_INFO,
- "notice": LOG_NOTICE,
- "panic": LOG_EMERG, # DEPRECATED
- "warn": LOG_WARNING, # DEPRECATED
- "warning": LOG_WARNING,
- }
-
- facility_names = {
- "auth": LOG_AUTH,
- "authpriv": LOG_AUTHPRIV,
- "cron": LOG_CRON,
- "daemon": LOG_DAEMON,
- "ftp": LOG_FTP,
- "kern": LOG_KERN,
- "lpr": LOG_LPR,
- "mail": LOG_MAIL,
- "news": LOG_NEWS,
- "security": LOG_AUTH, # DEPRECATED
- "syslog": LOG_SYSLOG,
- "user": LOG_USER,
- "uucp": LOG_UUCP,
- "local0": LOG_LOCAL0,
- "local1": LOG_LOCAL1,
- "local2": LOG_LOCAL2,
- "local3": LOG_LOCAL3,
- "local4": LOG_LOCAL4,
- "local5": LOG_LOCAL5,
- "local6": LOG_LOCAL6,
- "local7": LOG_LOCAL7,
- }
-
- #The map below appears to be trivially lowercasing the key. However,
- #there's more to it than meets the eye - in some locales, lowercasing
- #gives unexpected results. See SF #1524081: in the Turkish locale,
- #"INFO".lower() != "info"
- priority_map = {
- "DEBUG" : "debug",
- "INFO" : "info",
- "WARNING" : "warning",
- "ERROR" : "error",
- "CRITICAL" : "critical"
- }
-
- def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
- facility=LOG_USER, socktype=None):
- """
- Initialize a handler.
-
- If address is specified as a string, a UNIX socket is used. To log to a
- local syslogd, "SysLogHandler(address="/dev/log")" can be used.
- If facility is not specified, LOG_USER is used. If socktype is
- specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
- socket type will be used. For Unix sockets, you can also specify a
- socktype of None, in which case socket.SOCK_DGRAM will be used, falling
- back to socket.SOCK_STREAM.
- """
- logging.Handler.__init__(self)
-
- self.address = address
- self.facility = facility
- self.socktype = socktype
-
- if isinstance(address, basestring):
- self.unixsocket = 1
- self._connect_unixsocket(address)
- else:
- self.unixsocket = False
- if socktype is None:
- socktype = socket.SOCK_DGRAM
- host, port = address
- ress = socket.getaddrinfo(host, port, 0, socktype)
- if not ress:
- raise socket.error("getaddrinfo returns an empty list")
- for res in ress:
- af, socktype, proto, _, sa = res
- err = sock = None
- try:
- sock = socket.socket(af, socktype, proto)
- if socktype == socket.SOCK_STREAM:
- sock.connect(sa)
- break
- except socket.error as exc:
- err = exc
- if sock is not None:
- sock.close()
- if err is not None:
- raise err
- self.socket = sock
- self.socktype = socktype
-
- def _connect_unixsocket(self, address):
- use_socktype = self.socktype
- if use_socktype is None:
- use_socktype = socket.SOCK_DGRAM
- self.socket = socket.socket(socket.AF_UNIX, use_socktype)
- try:
- self.socket.connect(address)
- # it worked, so set self.socktype to the used type
- self.socktype = use_socktype
- except socket.error:
- self.socket.close()
- if self.socktype is not None:
- # user didn't specify falling back, so fail
- raise
- use_socktype = socket.SOCK_STREAM
- self.socket = socket.socket(socket.AF_UNIX, use_socktype)
- try:
- self.socket.connect(address)
- # it worked, so set self.socktype to the used type
- self.socktype = use_socktype
- except socket.error:
- self.socket.close()
- raise
-
- # curious: when talking to the unix-domain '/dev/log' socket, a
- # zero-terminator seems to be required. this string is placed
- # into a class variable so that it can be overridden if
- # necessary.
- log_format_string = '<%d>%s\000'
-
- def encodePriority(self, facility, priority):
- """
- Encode the facility and priority. You can pass in strings or
- integers - if strings are passed, the facility_names and
- priority_names mapping dictionaries are used to convert them to
- integers.
- """
- if isinstance(facility, basestring):
- facility = self.facility_names[facility]
- if isinstance(priority, basestring):
- priority = self.priority_names[priority]
- return (facility << 3) | priority
-
- def close(self):
- """
- Closes the socket.
- """
- self.acquire()
- try:
- if self.unixsocket:
- self.socket.close()
- finally:
- self.release()
- logging.Handler.close(self)
-
- def mapPriority(self, levelName):
- """
- Map a logging level name to a key in the priority_names map.
- This is useful in two scenarios: when custom levels are being
- used, and in the case where you can't do a straightforward
- mapping by lowercasing the logging level name because of locale-
- specific issues (see SF #1524081).
- """
- return self.priority_map.get(levelName, "warning")
-
- def emit(self, record):
- """
- Emit a record.
-
- The record is formatted, and then sent to the syslog server. If
- exception information is present, it is NOT sent to the server.
- """
- try:
- msg = self.format(record) + '\000'
- """
- We need to convert record level to lowercase, maybe this will
- change in the future.
- """
- prio = '<%d>' % self.encodePriority(self.facility,
- self.mapPriority(record.levelname))
- # Message is a string. Convert to bytes as required by RFC 5424
- if type(msg) is unicode:
- msg = msg.encode('utf-8')
- msg = prio + msg
- if self.unixsocket:
- try:
- self.socket.send(msg)
- except socket.error:
- self.socket.close() # See issue 17981
- self._connect_unixsocket(self.address)
- self.socket.send(msg)
- elif self.socktype == socket.SOCK_DGRAM:
- self.socket.sendto(msg, self.address)
- else:
- self.socket.sendall(msg)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class SMTPHandler(logging.Handler):
- """
- A handler class which sends an SMTP email for each logging event.
- """
- def __init__(self, mailhost, fromaddr, toaddrs, subject,
- credentials=None, secure=None):
- """
- Initialize the handler.
-
- Initialize the instance with the from and to addresses and subject
- line of the email. To specify a non-standard SMTP port, use the
- (host, port) tuple format for the mailhost argument. To specify
- authentication credentials, supply a (username, password) tuple
- for the credentials argument. To specify the use of a secure
- protocol (TLS), pass in a tuple for the secure argument. This will
- only be used when authentication credentials are supplied. The tuple
- will be either an empty tuple, or a single-value tuple with the name
- of a keyfile, or a 2-value tuple with the names of the keyfile and
- certificate file. (This tuple is passed to the `starttls` method).
- """
- logging.Handler.__init__(self)
- if isinstance(mailhost, (list, tuple)):
- self.mailhost, self.mailport = mailhost
- else:
- self.mailhost, self.mailport = mailhost, None
- if isinstance(credentials, (list, tuple)):
- self.username, self.password = credentials
- else:
- self.username = None
- self.fromaddr = fromaddr
- if isinstance(toaddrs, basestring):
- toaddrs = [toaddrs]
- self.toaddrs = toaddrs
- self.subject = subject
- self.secure = secure
- self._timeout = 5.0
-
- def getSubject(self, record):
- """
- Determine the subject for the email.
-
- If you want to specify a subject line which is record-dependent,
- override this method.
- """
- return self.subject
-
- def emit(self, record):
- """
- Emit a record.
-
- Format the record and send it to the specified addressees.
- """
- try:
- import smtplib
- from email.utils import formatdate
- port = self.mailport
- if not port:
- port = smtplib.SMTP_PORT
- smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
- msg = self.format(record)
- msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
- self.fromaddr,
- ",".join(self.toaddrs),
- self.getSubject(record),
- formatdate(), msg)
- if self.username:
- if self.secure is not None:
- smtp.ehlo()
- smtp.starttls(*self.secure)
- smtp.ehlo()
- smtp.login(self.username, self.password)
- smtp.sendmail(self.fromaddr, self.toaddrs, msg)
- smtp.quit()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class NTEventLogHandler(logging.Handler):
- """
- A handler class which sends events to the NT Event Log. Adds a
- registry entry for the specified application name. If no dllname is
- provided, win32service.pyd (which contains some basic message
- placeholders) is used. Note that use of these placeholders will make
- your event logs big, as the entire message source is held in the log.
- If you want slimmer logs, you have to pass in the name of your own DLL
- which contains the message definitions you want to use in the event log.
- """
- def __init__(self, appname, dllname=None, logtype="Application"):
- logging.Handler.__init__(self)
- try:
- import win32evtlogutil, win32evtlog
- self.appname = appname
- self._welu = win32evtlogutil
- if not dllname:
- dllname = os.path.split(self._welu.__file__)
- dllname = os.path.split(dllname[0])
- dllname = os.path.join(dllname[0], r'win32service.pyd')
- self.dllname = dllname
- self.logtype = logtype
- self._welu.AddSourceToRegistry(appname, dllname, logtype)
- self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
- self.typemap = {
- logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
- logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
- logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
- logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
- logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
- }
- except ImportError:
- print("The Python Win32 extensions for NT (service, event "\
- "logging) appear not to be available.")
- self._welu = None
-
- def getMessageID(self, record):
- """
- Return the message ID for the event record. If you are using your
- own messages, you could do this by having the msg passed to the
- logger being an ID rather than a formatting string. Then, in here,
- you could use a dictionary lookup to get the message ID. This
- version returns 1, which is the base message ID in win32service.pyd.
- """
- return 1
-
- def getEventCategory(self, record):
- """
- Return the event category for the record.
-
- Override this if you want to specify your own categories. This version
- returns 0.
- """
- return 0
-
- def getEventType(self, record):
- """
- Return the event type for the record.
-
- Override this if you want to specify your own types. This version does
- a mapping using the handler's typemap attribute, which is set up in
- __init__() to a dictionary which contains mappings for DEBUG, INFO,
- WARNING, ERROR and CRITICAL. If you are using your own levels you will
- either need to override this method or place a suitable dictionary in
- the handler's typemap attribute.
- """
- return self.typemap.get(record.levelno, self.deftype)
-
- def emit(self, record):
- """
- Emit a record.
-
- Determine the message ID, event category and event type. Then
- log the message in the NT event log.
- """
- if self._welu:
- try:
- id = self.getMessageID(record)
- cat = self.getEventCategory(record)
- type = self.getEventType(record)
- msg = self.format(record)
- self._welu.ReportEvent(self.appname, id, cat, type, [msg])
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
- def close(self):
- """
- Clean up this handler.
-
- You can remove the application name from the registry as a
- source of event log entries. However, if you do this, you will
- not be able to see the events as you intended in the Event Log
- Viewer - it needs to be able to access the registry to get the
- DLL name.
- """
- #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
- logging.Handler.close(self)
-
-class HTTPHandler(logging.Handler):
- """
- A class which sends records to a Web server, using either GET or
- POST semantics.
- """
- def __init__(self, host, url, method="GET"):
- """
- Initialize the instance with the host, the request URL, and the method
- ("GET" or "POST")
- """
- logging.Handler.__init__(self)
- method = method.upper()
- if method not in ["GET", "POST"]:
- raise ValueError("method must be GET or POST")
- self.host = host
- self.url = url
- self.method = method
-
- def mapLogRecord(self, record):
- """
- Default implementation of mapping the log record into a dict
- that is sent as the CGI data. Overwrite in your class.
- Contributed by Franz Glasner.
- """
- return record.__dict__
-
- def emit(self, record):
- """
- Emit a record.
-
- Send the record to the Web server as a percent-encoded dictionary
- """
- try:
- import httplib, urllib
- host = self.host
- h = httplib.HTTP(host)
- url = self.url
- data = urllib.urlencode(self.mapLogRecord(record))
- if self.method == "GET":
- if (url.find('?') >= 0):
- sep = '&'
- else:
- sep = '?'
- url = url + "%c%s" % (sep, data)
- h.putrequest(self.method, url)
- # support multiple hosts on one IP address...
- # need to strip optional :port from host, if present
- i = host.find(":")
- if i >= 0:
- host = host[:i]
- h.putheader("Host", host)
- if self.method == "POST":
- h.putheader("Content-type",
- "application/x-www-form-urlencoded")
- h.putheader("Content-length", str(len(data)))
- h.endheaders(data if self.method == "POST" else None)
- h.getreply() #can't do anything with the result
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class BufferingHandler(logging.Handler):
- """
- A handler class which buffers logging records in memory. Whenever each
- record is added to the buffer, a check is made to see if the buffer should
- be flushed. If it should, then flush() is expected to do what's needed.
- """
- def __init__(self, capacity):
- """
- Initialize the handler with the buffer size.
- """
- logging.Handler.__init__(self)
- self.capacity = capacity
- self.buffer = []
-
- def shouldFlush(self, record):
- """
- Should the handler flush its buffer?
-
- Returns true if the buffer is up to capacity. This method can be
- overridden to implement custom flushing strategies.
- """
- return (len(self.buffer) >= self.capacity)
-
- def emit(self, record):
- """
- Emit a record.
-
- Append the record. If shouldFlush() tells us to, call flush() to process
- the buffer.
- """
- self.buffer.append(record)
- if self.shouldFlush(record):
- self.flush()
-
- def flush(self):
- """
- Override to implement custom flushing behaviour.
-
- This version just zaps the buffer to empty.
- """
- self.acquire()
- try:
- self.buffer = []
- finally:
- self.release()
-
- def close(self):
- """
- Close the handler.
-
- This version just flushes and chains to the parent class' close().
- """
- try:
- self.flush()
- finally:
- logging.Handler.close(self)
-
-class MemoryHandler(BufferingHandler):
- """
- A handler class which buffers logging records in memory, periodically
- flushing them to a target handler. Flushing occurs whenever the buffer
- is full, or when an event of a certain severity or greater is seen.
- """
- def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
- """
- Initialize the handler with the buffer size, the level at which
- flushing should occur and an optional target.
-
- Note that without a target being set either here or via setTarget(),
- a MemoryHandler is no use to anyone!
- """
- BufferingHandler.__init__(self, capacity)
- self.flushLevel = flushLevel
- self.target = target
-
- def shouldFlush(self, record):
- """
- Check for buffer full or a record at the flushLevel or higher.
- """
- return (len(self.buffer) >= self.capacity) or \
- (record.levelno >= self.flushLevel)
-
- def setTarget(self, target):
- """
- Set the target handler for this handler.
- """
- self.target = target
-
- def flush(self):
- """
- For a MemoryHandler, flushing means just sending the buffered
- records to the target, if there is one. Override if you want
- different behaviour.
- """
- self.acquire()
- try:
- if self.target:
- for record in self.buffer:
- self.target.handle(record)
- self.buffer = []
- finally:
- self.release()
-
- def close(self):
- """
- Flush, set the target to None and lose the buffer.
- """
- try:
- self.flush()
- finally:
- self.acquire()
- try:
- self.target = None
- BufferingHandler.close(self)
- finally:
- self.release()
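
Likewise, the rotation behaviour documented in the deleted RotatingFileHandler remains available from the Python 3 standard library's logging.handlers. A minimal sketch, assuming an illustrative log file name app.log and logger name mas.example:

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger("mas.example")
handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=5)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Once app.log reaches maxBytes it is renamed app.log.1 and older backups
# shift up to app.log.5, exactly as described in the docstring above.
logger.info("some event worth logging")
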
diff --git a/Monika After Story/game/python-packages/pythoncom27.dll b/Monika After Story/game/python-packages/pythoncom27.dll
deleted file mode 100644
index d612171d33..0000000000
Binary files a/Monika After Story/game/python-packages/pythoncom27.dll and /dev/null differ
diff --git a/Monika After Story/game/python-packages/pythoncomloader27.dll b/Monika After Story/game/python-packages/pythoncomloader27.dll
deleted file mode 100644
index 1d643f97af..0000000000
Binary files a/Monika After Story/game/python-packages/pythoncomloader27.dll and /dev/null differ
diff --git a/Monika After Story/game/python-packages/pywintypes27.dll b/Monika After Story/game/python-packages/pywintypes27.dll
deleted file mode 100644
index 99f715ed80..0000000000
Binary files a/Monika After Story/game/python-packages/pywintypes27.dll and /dev/null differ
diff --git a/Monika After Story/game/python-packages/unittest/__init__.py b/Monika After Story/game/python-packages/unittest/__init__.py
new file mode 100644
index 0000000000..348dc471f4
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/__init__.py
@@ -0,0 +1,95 @@
+"""
+Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
+Smalltalk testing framework (used with permission).
+
+This module contains the core framework classes that form the basis of
+specific test cases and suites (TestCase, TestSuite etc.), and also a
+text-based utility class for running the tests and reporting the results
+(TextTestRunner).
+
+Simple usage:
+
+ import unittest
+
+ class IntegerArithmeticTestCase(unittest.TestCase):
+ def testAdd(self): # test method names begin with 'test'
+ self.assertEqual((1 + 2), 3)
+ self.assertEqual(0 + 1, 1)
+ def testMultiply(self):
+ self.assertEqual((0 * 10), 0)
+ self.assertEqual((5 * 8), 40)
+
+ if __name__ == '__main__':
+ unittest.main()
+
+Further information is available in the bundled documentation, and from
+
+ http://docs.python.org/library/unittest.html
+
+Copyright (c) 1999-2003 Steve Purcell
+Copyright (c) 2003-2010 Python Software Foundation
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+"""
+
+__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite',
+ 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
+ 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
+ 'expectedFailure', 'TextTestResult', 'installHandler',
+ 'registerResult', 'removeResult', 'removeHandler',
+ 'addModuleCleanup']
+
+# Expose obsolete functions for backwards compatibility
+__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
+
+__unittest = True
+
+from .result import TestResult
+from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip,
+ skipIf, skipUnless, expectedFailure)
+from .suite import BaseTestSuite, TestSuite
+from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
+ findTestCases)
+from .main import TestProgram, main
+from .runner import TextTestRunner, TextTestResult
+from .signals import installHandler, registerResult, removeResult, removeHandler
+# IsolatedAsyncioTestCase will be imported lazily.
+
+# deprecated
+_TextTestResult = TextTestResult
+
+# There are no tests here, so don't try to run anything discovered from
+# introspecting the symbols (e.g. FunctionTestCase). Instead, all our
+# tests come from within unittest.test.
+def load_tests(loader, tests, pattern):
+ import os.path
+ # top level directory cached on loader instance
+ this_dir = os.path.dirname(__file__)
+ return loader.discover(start_dir=this_dir, pattern=pattern)
+
+
+# Lazy import of IsolatedAsyncioTestCase from .async_case
+# It imports asyncio, which is relatively heavy, but most tests
+# do not need it.
+
+def __dir__():
+ return globals().keys() | {'IsolatedAsyncioTestCase'}
+
+def __getattr__(name):
+ if name == 'IsolatedAsyncioTestCase':
+ global IsolatedAsyncioTestCase
+ from .async_case import IsolatedAsyncioTestCase
+ return IsolatedAsyncioTestCase
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
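
A minimal sketch of two of the decorators re-exported here, skipIf and expectedFailure; the test class, the platform condition, and the deliberately failing assertion are illustrative only:

import sys
import unittest

class DecoratorExamples(unittest.TestCase):
    @unittest.skipIf(sys.platform.startswith("win"), "POSIX-only behaviour")
    def test_posix_only(self):
        self.assertTrue(True)

    @unittest.expectedFailure
    def test_known_bug(self):
        self.assertEqual(1, 2)  # reported as an expected failure, not an error

if __name__ == "__main__":
    unittest.main()
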
diff --git a/Monika After Story/game/python-packages/unittest/__main__.py b/Monika After Story/game/python-packages/unittest/__main__.py
new file mode 100644
index 0000000000..e5876f569b
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/__main__.py
@@ -0,0 +1,18 @@
+"""Main entry point"""
+
+import sys
+if sys.argv[0].endswith("__main__.py"):
+ import os.path
+ # We change sys.argv[0] to make help message more useful
+ # use executable without path, unquoted
+ # (it's just a hint anyway)
+ # (if you have spaces in your executable you get what you deserve!)
+ executable = os.path.basename(sys.executable)
+ sys.argv[0] = executable + " -m unittest"
+ del os
+
+__unittest = True
+
+from .main import main
+
+main(module=None)
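
For reference, main(module=None) is the call behind "python -m unittest"; the same entry point can be driven programmatically. A minimal sketch, with an illustrative discovery pattern:

import unittest

if __name__ == "__main__":
    # Equivalent to: python -m unittest discover -s . -p "test_*.py"
    unittest.main(
        module=None,
        argv=["unittest", "discover", "-s", ".", "-p", "test_*.py"],
    )
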
diff --git a/Monika After Story/game/python-packages/unittest/_log.py b/Monika After Story/game/python-packages/unittest/_log.py
new file mode 100644
index 0000000000..94e7e758bd
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/_log.py
@@ -0,0 +1,69 @@
+import logging
+import collections
+
+from .case import _BaseTestCaseContext
+
+
+_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
+ ["records", "output"])
+
+class _CapturingHandler(logging.Handler):
+ """
+ A logging handler capturing all (raw and formatted) logging output.
+ """
+
+ def __init__(self):
+ logging.Handler.__init__(self)
+ self.watcher = _LoggingWatcher([], [])
+
+ def flush(self):
+ pass
+
+ def emit(self, record):
+ self.watcher.records.append(record)
+ msg = self.format(record)
+ self.watcher.output.append(msg)
+
+
+class _AssertLogsContext(_BaseTestCaseContext):
+ """A context manager used to implement TestCase.assertLogs()."""
+
+ LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
+
+ def __init__(self, test_case, logger_name, level):
+ _BaseTestCaseContext.__init__(self, test_case)
+ self.logger_name = logger_name
+ if level:
+ self.level = logging._nameToLevel.get(level, level)
+ else:
+ self.level = logging.INFO
+ self.msg = None
+
+ def __enter__(self):
+ if isinstance(self.logger_name, logging.Logger):
+ logger = self.logger = self.logger_name
+ else:
+ logger = self.logger = logging.getLogger(self.logger_name)
+ formatter = logging.Formatter(self.LOGGING_FORMAT)
+ handler = _CapturingHandler()
+ handler.setFormatter(formatter)
+ self.watcher = handler.watcher
+ self.old_handlers = logger.handlers[:]
+ self.old_level = logger.level
+ self.old_propagate = logger.propagate
+ logger.handlers = [handler]
+ logger.setLevel(self.level)
+ logger.propagate = False
+ return handler.watcher
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.logger.handlers = self.old_handlers
+ self.logger.propagate = self.old_propagate
+ self.logger.setLevel(self.old_level)
+ if exc_type is not None:
+ # let unexpected exceptions pass through
+ return False
+ if len(self.watcher.records) == 0:
+ self._raiseFailure(
+ "no logs of level {} or higher triggered on {}"
+ .format(logging.getLevelName(self.level), self.logger.name))
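
A minimal sketch of the assertLogs() behaviour this context manager implements; the logger name mas.example and the message are illustrative:

import logging
import unittest

class AssertLogsExample(unittest.TestCase):
    def test_warning_is_captured(self):
        with self.assertLogs("mas.example", level="WARNING") as captured:
            logging.getLogger("mas.example").warning("disk space low")
        # Output is rendered with the LOGGING_FORMAT defined above.
        self.assertEqual(captured.output,
                         ["WARNING:mas.example:disk space low"])

if __name__ == "__main__":
    unittest.main()
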
diff --git a/Monika After Story/game/python-packages/unittest/async_case.py b/Monika After Story/game/python-packages/unittest/async_case.py
new file mode 100644
index 0000000000..520213c372
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/async_case.py
@@ -0,0 +1,160 @@
+import asyncio
+import inspect
+
+from .case import TestCase
+
+
+
+class IsolatedAsyncioTestCase(TestCase):
+ # Names intentionally have a long prefix
+ # to reduce a chance of clashing with user-defined attributes
+ # from inherited test case
+ #
+ # The class doesn't call loop.run_until_complete(self.setUp()) and family
+ # but uses a different approach:
+ # 1. create a long-running task that reads self.setUp()
+ # awaitable from queue along with a future
+ # 2. await the awaitable object passing in and set the result
+ # into the future object
+    # 2. await the awaitable object passed in and set the result
+    #    on the future object
+    # 3. Outer code puts the awaitable and the future object into the queue
+    #    and waits for the future
+ # To share contextvars between setUp(), test and tearDown() we need to execute
+ # them inside the same task.
+
+ # Note: the test case modifies event loop policy if the policy was not instantiated
+ # yet.
+ # asyncio.get_event_loop_policy() creates a default policy on demand but never
+ # returns None
+    # This should not be an issue in user-level tests, but Python's own test
+    # suite should reset the policy in every test module
+    # by calling asyncio.set_event_loop_policy(None) in tearDownModule()
+
+ def __init__(self, methodName='runTest'):
+ super().__init__(methodName)
+ self._asyncioTestLoop = None
+ self._asyncioCallsQueue = None
+
+ async def asyncSetUp(self):
+ pass
+
+ async def asyncTearDown(self):
+ pass
+
+ def addAsyncCleanup(self, func, /, *args, **kwargs):
+ # A trivial trampoline to addCleanup()
+ # the function exists because it has a different semantics
+        # the function exists because it has different semantics
+ # addCleanup() accepts regular functions
+ # but addAsyncCleanup() accepts coroutines
+ #
+ # We intentionally don't add inspect.iscoroutinefunction() check
+ # for func argument because there is no way
+ # to check for async function reliably:
+        # 1. It can be "async def func()" itself
+ # 2. Class can implement "async def __call__()" method
+ # 3. Regular "def func()" that returns awaitable object
+ self.addCleanup(*(func, *args), **kwargs)
+
+ def _callSetUp(self):
+ self.setUp()
+ self._callAsync(self.asyncSetUp)
+
+ def _callTestMethod(self, method):
+ self._callMaybeAsync(method)
+
+ def _callTearDown(self):
+ self._callAsync(self.asyncTearDown)
+ self.tearDown()
+
+ def _callCleanup(self, function, *args, **kwargs):
+ self._callMaybeAsync(function, *args, **kwargs)
+
+ def _callAsync(self, func, /, *args, **kwargs):
+ assert self._asyncioTestLoop is not None
+ ret = func(*args, **kwargs)
+ assert inspect.isawaitable(ret)
+ fut = self._asyncioTestLoop.create_future()
+ self._asyncioCallsQueue.put_nowait((fut, ret))
+ return self._asyncioTestLoop.run_until_complete(fut)
+
+ def _callMaybeAsync(self, func, /, *args, **kwargs):
+ assert self._asyncioTestLoop is not None
+ ret = func(*args, **kwargs)
+ if inspect.isawaitable(ret):
+ fut = self._asyncioTestLoop.create_future()
+ self._asyncioCallsQueue.put_nowait((fut, ret))
+ return self._asyncioTestLoop.run_until_complete(fut)
+ else:
+ return ret
+
+ async def _asyncioLoopRunner(self, fut):
+ self._asyncioCallsQueue = queue = asyncio.Queue()
+ fut.set_result(None)
+ while True:
+ query = await queue.get()
+ queue.task_done()
+ if query is None:
+ return
+ fut, awaitable = query
+ try:
+ ret = await awaitable
+ if not fut.cancelled():
+ fut.set_result(ret)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except (BaseException, asyncio.CancelledError) as ex:
+ if not fut.cancelled():
+ fut.set_exception(ex)
+
+ def _setupAsyncioLoop(self):
+ assert self._asyncioTestLoop is None
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ loop.set_debug(True)
+ self._asyncioTestLoop = loop
+ fut = loop.create_future()
+ self._asyncioCallsTask = loop.create_task(self._asyncioLoopRunner(fut))
+ loop.run_until_complete(fut)
+
+ def _tearDownAsyncioLoop(self):
+ assert self._asyncioTestLoop is not None
+ loop = self._asyncioTestLoop
+ self._asyncioTestLoop = None
+ self._asyncioCallsQueue.put_nowait(None)
+ loop.run_until_complete(self._asyncioCallsQueue.join())
+
+ try:
+ # cancel all tasks
+ to_cancel = asyncio.all_tasks(loop)
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+
+ loop.run_until_complete(
+ asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
+
+ for task in to_cancel:
+ if task.cancelled():
+ continue
+ if task.exception() is not None:
+ loop.call_exception_handler({
+ 'message': 'unhandled exception during test shutdown',
+ 'exception': task.exception(),
+ 'task': task,
+ })
+ # shutdown asyncgens
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ finally:
+ asyncio.set_event_loop(None)
+ loop.close()
+
+ def run(self, result=None):
+ self._setupAsyncioLoop()
+ try:
+ return super().run(result)
+ finally:
+ self._tearDownAsyncioLoop()
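
A minimal sketch of IsolatedAsyncioTestCase as defined above; the coroutine under test is purely illustrative:

import asyncio
import unittest

async def fetch_value():
    await asyncio.sleep(0)  # stand-in for real asynchronous work
    return 42

class AsyncExample(unittest.IsolatedAsyncioTestCase):
    async def asyncSetUp(self):
        self.expected = 42

    async def test_fetch_value(self):
        self.assertEqual(await fetch_value(), self.expected)

if __name__ == "__main__":
    unittest.main()
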
diff --git a/Monika After Story/game/python-packages/unittest/case.py b/Monika After Story/game/python-packages/unittest/case.py
new file mode 100644
index 0000000000..f8bc865ee8
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/case.py
@@ -0,0 +1,1437 @@
+"""Test case implementation"""
+
+import sys
+import functools
+import difflib
+import pprint
+import re
+import warnings
+import collections
+import contextlib
+import traceback
+import types
+
+from . import result
+from .util import (strclass, safe_repr, _count_diff_all_purpose,
+ _count_diff_hashable, _common_shorten_repr)
+
+__unittest = True
+
+_subtest_msg_sentinel = object()
+
+DIFF_OMITTED = ('\nDiff is %s characters long. '
+ 'Set self.maxDiff to None to see it.')
+
+class SkipTest(Exception):
+ """
+ Raise this exception in a test to skip it.
+
+ Usually you can use TestCase.skipTest() or one of the skipping decorators
+ instead of raising this directly.
+ """
+
+class _ShouldStop(Exception):
+ """
+ The test should stop.
+ """
+
+class _UnexpectedSuccess(Exception):
+ """
+ The test was supposed to fail, but it didn't!
+ """
+
+
+class _Outcome(object):
+ def __init__(self, result=None):
+ self.expecting_failure = False
+ self.result = result
+ self.result_supports_subtests = hasattr(result, "addSubTest")
+ self.success = True
+ self.skipped = []
+ self.expectedFailure = None
+ self.errors = []
+
+ @contextlib.contextmanager
+ def testPartExecutor(self, test_case, isTest=False):
+ old_success = self.success
+ self.success = True
+ try:
+ yield
+ except KeyboardInterrupt:
+ raise
+ except SkipTest as e:
+ self.success = False
+ self.skipped.append((test_case, str(e)))
+ except _ShouldStop:
+ pass
+ except:
+ exc_info = sys.exc_info()
+ if self.expecting_failure:
+ self.expectedFailure = exc_info
+ else:
+ self.success = False
+ self.errors.append((test_case, exc_info))
+ # explicitly break a reference cycle:
+ # exc_info -> frame -> exc_info
+ exc_info = None
+ else:
+ if self.result_supports_subtests and self.success:
+ self.errors.append((test_case, None))
+ finally:
+ self.success = self.success and old_success
+
+
+def _id(obj):
+ return obj
+
+
+_module_cleanups = []
+def addModuleCleanup(function, /, *args, **kwargs):
+ """Same as addCleanup, except the cleanup items are called even if
+ setUpModule fails (unlike tearDownModule)."""
+ _module_cleanups.append((function, args, kwargs))
+
+
+def doModuleCleanups():
+ """Execute all module cleanup functions. Normally called for you after
+ tearDownModule."""
+ exceptions = []
+ while _module_cleanups:
+ function, args, kwargs = _module_cleanups.pop()
+ try:
+ function(*args, **kwargs)
+ except Exception as exc:
+ exceptions.append(exc)
+ if exceptions:
+ # Swallows all but first exception. If a multi-exception handler
+ # gets written we should use that here instead.
+ raise exceptions[0]
+
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+ if not isinstance(test_item, type):
+ @functools.wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ if isinstance(reason, types.FunctionType):
+ test_item = reason
+ reason = ''
+ return decorator(test_item)
+ return decorator
+
+def skipIf(condition, reason):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+def skipUnless(condition, reason):
+ """
+ Skip a test unless the condition is true.
+ """
+ if not condition:
+ return skip(reason)
+ return _id
+
+def expectedFailure(test_item):
+ test_item.__unittest_expecting_failure__ = True
+ return test_item
+
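+# A minimal usage sketch of the skip decorators and expectedFailure defined
+# above. The test class and conditions are hypothetical and shown only for
+# illustration.
+#
+#   class ExampleTests(TestCase):
+#       @skip("demonstration only")
+#       def test_always_skipped(self):
+#           self.fail("never runs")
+#
+#       @skipUnless(sys.platform.startswith("linux"), "requires Linux")
+#       def test_linux_only(self):
+#           self.assertTrue(True)
+#
+#       @expectedFailure
+#       def test_known_bug(self):
+#           self.assertEqual(1, 2)
+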
+def _is_subtype(expected, basetype):
+ if isinstance(expected, tuple):
+ return all(_is_subtype(e, basetype) for e in expected)
+ return isinstance(expected, type) and issubclass(expected, basetype)
+
+class _BaseTestCaseContext:
+
+ def __init__(self, test_case):
+ self.test_case = test_case
+
+ def _raiseFailure(self, standardMsg):
+ msg = self.test_case._formatMessage(self.msg, standardMsg)
+ raise self.test_case.failureException(msg)
+
+class _AssertRaisesBaseContext(_BaseTestCaseContext):
+
+ def __init__(self, expected, test_case, expected_regex=None):
+ _BaseTestCaseContext.__init__(self, test_case)
+ self.expected = expected
+ self.test_case = test_case
+ if expected_regex is not None:
+ expected_regex = re.compile(expected_regex)
+ self.expected_regex = expected_regex
+ self.obj_name = None
+ self.msg = None
+
+ def handle(self, name, args, kwargs):
+ """
+ If args is empty, assertRaises/Warns is being used as a
+ context manager, so check for a 'msg' kwarg and return self.
+ If args is not empty, call a callable passing positional and keyword
+ arguments.
+ """
+ try:
+ if not _is_subtype(self.expected, self._base_type):
+ raise TypeError('%s() arg 1 must be %s' %
+ (name, self._base_type_str))
+ if not args:
+ self.msg = kwargs.pop('msg', None)
+ if kwargs:
+ raise TypeError('%r is an invalid keyword argument for '
+ 'this function' % (next(iter(kwargs)),))
+ return self
+
+ callable_obj, *args = args
+ try:
+ self.obj_name = callable_obj.__name__
+ except AttributeError:
+ self.obj_name = str(callable_obj)
+ with self:
+ callable_obj(*args, **kwargs)
+ finally:
+ # bpo-23890: manually break a reference cycle
+ self = None
+
+
+class _AssertRaisesContext(_AssertRaisesBaseContext):
+ """A context manager used to implement TestCase.assertRaises* methods."""
+
+ _base_type = BaseException
+ _base_type_str = 'an exception type or tuple of exception types'
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ if self.obj_name:
+ self._raiseFailure("{} not raised by {}".format(exc_name,
+ self.obj_name))
+ else:
+ self._raiseFailure("{} not raised".format(exc_name))
+ else:
+ traceback.clear_frames(tb)
+ if not issubclass(exc_type, self.expected):
+ # let unexpected exceptions pass through
+ return False
+ # store exception, without traceback, for later retrieval
+ self.exception = exc_value.with_traceback(None)
+ if self.expected_regex is None:
+ return True
+
+ expected_regex = self.expected_regex
+ if not expected_regex.search(str(exc_value)):
+ self._raiseFailure('"{}" does not match "{}"'.format(
+ expected_regex.pattern, str(exc_value)))
+ return True
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
+class _AssertWarnsContext(_AssertRaisesBaseContext):
+ """A context manager used to implement TestCase.assertWarns* methods."""
+
+ _base_type = Warning
+ _base_type_str = 'a warning type or tuple of warning types'
+
+ def __enter__(self):
+ # The __warningregistry__'s need to be in a pristine state for tests
+ # to work properly.
+ for v in sys.modules.values():
+ if getattr(v, '__warningregistry__', None):
+ v.__warningregistry__ = {}
+ self.warnings_manager = warnings.catch_warnings(record=True)
+ self.warnings = self.warnings_manager.__enter__()
+ warnings.simplefilter("always", self.expected)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.warnings_manager.__exit__(exc_type, exc_value, tb)
+ if exc_type is not None:
+ # let unexpected exceptions pass through
+ return
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ first_matching = None
+ for m in self.warnings:
+ w = m.message
+ if not isinstance(w, self.expected):
+ continue
+ if first_matching is None:
+ first_matching = w
+ if (self.expected_regex is not None and
+ not self.expected_regex.search(str(w))):
+ continue
+ # store warning for later retrieval
+ self.warning = w
+ self.filename = m.filename
+ self.lineno = m.lineno
+ return
+ # Now we simply try to choose a helpful failure message
+ if first_matching is not None:
+ self._raiseFailure('"{}" does not match "{}"'.format(
+ self.expected_regex.pattern, str(first_matching)))
+ if self.obj_name:
+ self._raiseFailure("{} not triggered by {}".format(exc_name,
+ self.obj_name))
+ else:
+ self._raiseFailure("{} not triggered".format(exc_name))
+
+
+
+class _OrderedChainMap(collections.ChainMap):
+ def __iter__(self):
+ seen = set()
+ for mapping in self.maps:
+ for k in mapping:
+ if k not in seen:
+ seen.add(k)
+ yield k
+
+
+class TestCase(object):
+ """A class whose instances are single test cases.
+
+ By default, the test code itself should be placed in a method named
+ 'runTest'.
+
+ If the fixture may be used for many test cases, create as
+ many test methods as are needed. When instantiating such a TestCase
+ subclass, specify in the constructor arguments the name of the test method
+ that the instance is to execute.
+
+ Test authors should subclass TestCase for their own tests. Construction
+ and deconstruction of the test's environment ('fixture') can be
+ implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+ If it is necessary to override the __init__ method, the base class
+ __init__ method must always be called. It is important that subclasses
+ should not change the signature of their __init__ method, since instances
+ of the classes are instantiated automatically by parts of the framework
+ in order to be run.
+
+ When subclassing TestCase, you can set these attributes:
+ * failureException: determines which exception will be raised when
+ the instance's assertion methods fail; test methods raising this
+ exception will be deemed to have 'failed' rather than 'errored'.
+ * longMessage: determines whether long messages (including repr of
+ objects used in assert methods) will be printed on failure in *addition*
+ to any explicit message passed.
+ * maxDiff: sets the maximum length of a diff in failure messages
+ by assert methods using difflib. It is looked up as an instance
+ attribute so can be configured by individual tests if required.
+ """
+
+ failureException = AssertionError
+
+ longMessage = True
+
+ maxDiff = 80*8
+
+ # If a string is longer than _diffThreshold, use normal comparison instead
+ # of difflib. See #11763.
+ _diffThreshold = 2**16
+
+ # Attribute used by TestSuite for classSetUp
+
+ _classSetupFailed = False
+
+ _class_cleanups = []
+
+ def __init__(self, methodName='runTest'):
+ """Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+ """
+ self._testMethodName = methodName
+ self._outcome = None
+ self._testMethodDoc = 'No test'
+ try:
+ testMethod = getattr(self, methodName)
+ except AttributeError:
+ if methodName != 'runTest':
+ # we allow instantiation with no explicit method name
+ # but not an *incorrect* or missing method name
+ raise ValueError("no such test method in %s: %s" %
+ (self.__class__, methodName))
+ else:
+ self._testMethodDoc = testMethod.__doc__
+ self._cleanups = []
+ self._subtest = None
+
+ # Map types to custom assertEqual functions that will compare
+ # instances of said type in more detail to generate a more useful
+ # error message.
+ self._type_equality_funcs = {}
+ self.addTypeEqualityFunc(dict, 'assertDictEqual')
+ self.addTypeEqualityFunc(list, 'assertListEqual')
+ self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
+ self.addTypeEqualityFunc(set, 'assertSetEqual')
+ self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
+ self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
+
+ def addTypeEqualityFunc(self, typeobj, function):
+ """Add a type specific assertEqual style function to compare a type.
+
+ This method is for use by TestCase subclasses that need to register
+ their own type equality functions to provide nicer error messages.
+
+ Args:
+ typeobj: The data type to call this function on when both values
+ are of the same type in assertEqual().
+ function: The callable taking two arguments and an optional
+ msg= argument that raises self.failureException with a
+ useful error message when the two arguments are not equal.
+ """
+ self._type_equality_funcs[typeobj] = function
+
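+    # A minimal usage sketch of addTypeEqualityFunc. The Point type and the
+    # comparison helper are hypothetical and shown only for illustration.
+    #
+    #   def assertPointEqual(self, first, second, msg=None):
+    #       if (first.x, first.y) != (second.x, second.y):
+    #           raise self.failureException(msg or "Points differ")
+    #
+    #   def setUp(self):
+    #       self.addTypeEqualityFunc(Point, self.assertPointEqual)
+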
+ def addCleanup(self, function, /, *args, **kwargs):
+ """Add a function, with arguments, to be called when the test is
+ completed. Functions added are called on a LIFO basis and are
+ called after tearDown on test failure or success.
+
+ Cleanup items are called even if setUp fails (unlike tearDown)."""
+ self._cleanups.append((function, args, kwargs))
+
+ @classmethod
+ def addClassCleanup(cls, function, /, *args, **kwargs):
+ """Same as addCleanup, except the cleanup items are called even if
+ setUpClass fails (unlike tearDownClass)."""
+ cls._class_cleanups.append((function, args, kwargs))
+
+ def setUp(self):
+ "Hook method for setting up the test fixture before exercising it."
+ pass
+
+ def tearDown(self):
+ "Hook method for deconstructing the test fixture after testing it."
+ pass
+
+ @classmethod
+ def setUpClass(cls):
+ "Hook method for setting up class fixture before running tests in the class."
+
+ @classmethod
+ def tearDownClass(cls):
+ "Hook method for deconstructing the class fixture after running all tests in the class."
+
+ def countTestCases(self):
+ return 1
+
+ def defaultTestResult(self):
+ return result.TestResult()
+
+ def shortDescription(self):
+ """Returns a one-line description of the test, or None if no
+ description has been provided.
+
+ The default implementation of this method returns the first line of
+ the specified test method's docstring.
+ """
+ doc = self._testMethodDoc
+ return doc.strip().split("\n")[0].strip() if doc else None
+
+
+ def id(self):
+ return "%s.%s" % (strclass(self.__class__), self._testMethodName)
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self._testMethodName == other._testMethodName
+
+ def __hash__(self):
+ return hash((type(self), self._testMethodName))
+
+ def __str__(self):
+ return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
+
+ def __repr__(self):
+ return "<%s testMethod=%s>" % \
+ (strclass(self.__class__), self._testMethodName)
+
+ def _addSkip(self, result, test_case, reason):
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None:
+ addSkip(test_case, reason)
+ else:
+ warnings.warn("TestResult has no addSkip method, skips not reported",
+ RuntimeWarning, 2)
+ result.addSuccess(test_case)
+
+ @contextlib.contextmanager
+ def subTest(self, msg=_subtest_msg_sentinel, **params):
+ """Return a context manager that will return the enclosed block
+ of code in a subtest identified by the optional message and
+ keyword parameters. A failure in the subtest marks the test
+ case as failed but resumes execution at the end of the enclosed
+ block, allowing further test code to be executed.
+ """
+ if self._outcome is None or not self._outcome.result_supports_subtests:
+ yield
+ return
+ parent = self._subtest
+ if parent is None:
+ params_map = _OrderedChainMap(params)
+ else:
+ params_map = parent.params.new_child(params)
+ self._subtest = _SubTest(self, msg, params_map)
+ try:
+ with self._outcome.testPartExecutor(self._subtest, isTest=True):
+ yield
+ if not self._outcome.success:
+ result = self._outcome.result
+ if result is not None and result.failfast:
+ raise _ShouldStop
+ elif self._outcome.expectedFailure:
+ # If the test is expecting a failure, we really want to
+ # stop now and register the expected failure.
+ raise _ShouldStop
+ finally:
+ self._subtest = parent
+
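+    # A minimal usage sketch of subTest (the test method below is hypothetical
+    # and shown only for illustration):
+    #
+    #   def test_even(self):
+    #       for n in (0, 2, 5):
+    #           with self.subTest(n=n):
+    #               self.assertEqual(n % 2, 0)
+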
+ def _feedErrorsToResult(self, result, errors):
+ for test, exc_info in errors:
+ if isinstance(test, _SubTest):
+ result.addSubTest(test.test_case, test, exc_info)
+ elif exc_info is not None:
+ if issubclass(exc_info[0], self.failureException):
+ result.addFailure(test, exc_info)
+ else:
+ result.addError(test, exc_info)
+
+ def _addExpectedFailure(self, result, exc_info):
+ try:
+ addExpectedFailure = result.addExpectedFailure
+ except AttributeError:
+ warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
+ RuntimeWarning)
+ result.addSuccess(self)
+ else:
+ addExpectedFailure(self, exc_info)
+
+ def _addUnexpectedSuccess(self, result):
+ try:
+ addUnexpectedSuccess = result.addUnexpectedSuccess
+ except AttributeError:
+ warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure",
+ RuntimeWarning)
+ # We need to pass an actual exception and traceback to addFailure,
+ # otherwise the legacy result can choke.
+ try:
+ raise _UnexpectedSuccess from None
+ except _UnexpectedSuccess:
+ result.addFailure(self, sys.exc_info())
+ else:
+ addUnexpectedSuccess(self)
+
+ def _callSetUp(self):
+ self.setUp()
+
+ def _callTestMethod(self, method):
+ method()
+
+ def _callTearDown(self):
+ self.tearDown()
+
+ def _callCleanup(self, function, /, *args, **kwargs):
+ function(*args, **kwargs)
+
+ def run(self, result=None):
+ orig_result = result
+ if result is None:
+ result = self.defaultTestResult()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+
+ result.startTest(self)
+
+ testMethod = getattr(self, self._testMethodName)
+ if (getattr(self.__class__, "__unittest_skip__", False) or
+ getattr(testMethod, "__unittest_skip__", False)):
+ # If the class or method was skipped.
+ try:
+ skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
+ or getattr(testMethod, '__unittest_skip_why__', ''))
+ self._addSkip(result, self, skip_why)
+ finally:
+ result.stopTest(self)
+ return
+ expecting_failure_method = getattr(testMethod,
+ "__unittest_expecting_failure__", False)
+ expecting_failure_class = getattr(self,
+ "__unittest_expecting_failure__", False)
+ expecting_failure = expecting_failure_class or expecting_failure_method
+ outcome = _Outcome(result)
+ try:
+ self._outcome = outcome
+
+ with outcome.testPartExecutor(self):
+ self._callSetUp()
+ if outcome.success:
+ outcome.expecting_failure = expecting_failure
+ with outcome.testPartExecutor(self, isTest=True):
+ self._callTestMethod(testMethod)
+ outcome.expecting_failure = False
+ with outcome.testPartExecutor(self):
+ self._callTearDown()
+
+ self.doCleanups()
+ for test, reason in outcome.skipped:
+ self._addSkip(result, test, reason)
+ self._feedErrorsToResult(result, outcome.errors)
+ if outcome.success:
+ if expecting_failure:
+ if outcome.expectedFailure:
+ self._addExpectedFailure(result, outcome.expectedFailure)
+ else:
+ self._addUnexpectedSuccess(result)
+ else:
+ result.addSuccess(self)
+ return result
+ finally:
+ result.stopTest(self)
+ if orig_result is None:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+
+ # explicitly break reference cycles:
+ # outcome.errors -> frame -> outcome -> outcome.errors
+ # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
+ outcome.errors.clear()
+ outcome.expectedFailure = None
+
+ # clear the outcome, no more needed
+ self._outcome = None
+
+ def doCleanups(self):
+ """Execute all cleanup functions. Normally called for you after
+ tearDown."""
+ outcome = self._outcome or _Outcome()
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop()
+ with outcome.testPartExecutor(self):
+ self._callCleanup(function, *args, **kwargs)
+
+ # return this for backwards compatibility
+ # even though we no longer use it internally
+ return outcome.success
+
+ @classmethod
+ def doClassCleanups(cls):
+ """Execute all class cleanup functions. Normally called for you after
+ tearDownClass."""
+ cls.tearDown_exceptions = []
+ while cls._class_cleanups:
+ function, args, kwargs = cls._class_cleanups.pop()
+ try:
+ function(*args, **kwargs)
+ except Exception:
+ cls.tearDown_exceptions.append(sys.exc_info())
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the test without collecting errors in a TestResult"""
+ self.setUp()
+ getattr(self, self._testMethodName)()
+ self.tearDown()
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop(-1)
+ function(*args, **kwargs)
+
+ def skipTest(self, reason):
+ """Skip this test."""
+ raise SkipTest(reason)
+
+ def fail(self, msg=None):
+ """Fail immediately, with the given message."""
+ raise self.failureException(msg)
+
+ def assertFalse(self, expr, msg=None):
+ """Check that the expression is false."""
+ if expr:
+ msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def assertTrue(self, expr, msg=None):
+ """Check that the expression is true."""
+ if not expr:
+ msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def _formatMessage(self, msg, standardMsg):
+ """Honour the longMessage attribute when generating failure messages.
+ If longMessage is False this means:
+ * Use only an explicit message if it is provided
+ * Otherwise use the standard message for the assert
+
+ If longMessage is True:
+ * Use the standard message
+ * If an explicit message is provided, plus ' : ' and the explicit message
+ """
+ if not self.longMessage:
+ return msg or standardMsg
+ if msg is None:
+ return standardMsg
+ try:
+ # don't switch to '{}' formatting in Python 2.X
+ # it changes the way unicode input is handled
+ return '%s : %s' % (standardMsg, msg)
+ except UnicodeDecodeError:
+ return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
+
+ def assertRaises(self, expected_exception, *args, **kwargs):
+ """Fail unless an exception of class expected_exception is raised
+ by the callable when invoked with specified positional and
+ keyword arguments. If a different type of exception is
+ raised, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+
+ If called with the callable and arguments omitted, will return a
+ context object used like this::
+
+ with self.assertRaises(SomeException):
+ do_something()
+
+ An optional keyword argument 'msg' can be provided when assertRaises
+ is used as a context object.
+
+ The context manager keeps a reference to the exception as
+ the 'exception' attribute. This allows you to inspect the
+ exception after the assertion::
+
+ with self.assertRaises(SomeException) as cm:
+ do_something()
+ the_exception = cm.exception
+ self.assertEqual(the_exception.error_code, 3)
+ """
+ context = _AssertRaisesContext(expected_exception, self)
+ try:
+ return context.handle('assertRaises', args, kwargs)
+ finally:
+ # bpo-23890: manually break a reference cycle
+ context = None
+
+ def assertWarns(self, expected_warning, *args, **kwargs):
+        """Fail unless a warning of class expected_warning is triggered
+ by the callable when invoked with specified positional and
+ keyword arguments. If a different type of warning is
+ triggered, it will not be handled: depending on the other
+ warning filtering rules in effect, it might be silenced, printed
+ out, or raised as an exception.
+
+ If called with the callable and arguments omitted, will return a
+ context object used like this::
+
+ with self.assertWarns(SomeWarning):
+ do_something()
+
+ An optional keyword argument 'msg' can be provided when assertWarns
+ is used as a context object.
+
+ The context manager keeps a reference to the first matching
+ warning as the 'warning' attribute; similarly, the 'filename'
+ and 'lineno' attributes give you information about the line
+ of Python code from which the warning was triggered.
+ This allows you to inspect the warning after the assertion::
+
+ with self.assertWarns(SomeWarning) as cm:
+ do_something()
+ the_warning = cm.warning
+ self.assertEqual(the_warning.some_attribute, 147)
+ """
+ context = _AssertWarnsContext(expected_warning, self)
+ return context.handle('assertWarns', args, kwargs)
+
+ def assertLogs(self, logger=None, level=None):
+ """Fail unless a log message of level *level* or higher is emitted
+        on *logger* or its children. If omitted, *level* defaults to
+ INFO and *logger* defaults to the root logger.
+
+ This method must be used as a context manager, and will yield
+ a recording object with two attributes: `output` and `records`.
+ At the end of the context manager, the `output` attribute will
+ be a list of the matching formatted log messages and the
+ `records` attribute will be a list of the corresponding LogRecord
+ objects.
+
+ Example::
+
+ with self.assertLogs('foo', level='INFO') as cm:
+ logging.getLogger('foo').info('first message')
+ logging.getLogger('foo.bar').error('second message')
+ self.assertEqual(cm.output, ['INFO:foo:first message',
+ 'ERROR:foo.bar:second message'])
+ """
+ # Lazy import to avoid importing logging if it is not needed.
+ from ._log import _AssertLogsContext
+ return _AssertLogsContext(self, logger, level)
+
+ def _getAssertEqualityFunc(self, first, second):
+ """Get a detailed comparison function for the types of the two args.
+
+ Returns: A callable accepting (first, second, msg=None) that will
+ raise a failure exception if first != second with a useful human
+ readable error message for those types.
+ """
+ #
+ # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
+ # and vice versa. I opted for the conservative approach in case
+ # subclasses are not intended to be compared in detail to their super
+ # class instances using a type equality func. This means testing
+ # subtypes won't automagically use the detailed comparison. Callers
+ # should use their type specific assertSpamEqual method to compare
+ # subclasses if the detailed comparison is desired and appropriate.
+ # See the discussion in http://bugs.python.org/issue2578.
+ #
+ if type(first) is type(second):
+ asserter = self._type_equality_funcs.get(type(first))
+ if asserter is not None:
+ if isinstance(asserter, str):
+ asserter = getattr(self, asserter)
+ return asserter
+
+ return self._baseAssertEqual
+
+ def _baseAssertEqual(self, first, second, msg=None):
+ """The default assertEqual implementation, not type specific."""
+ if not first == second:
+ standardMsg = '%s != %s' % _common_shorten_repr(first, second)
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertEqual(self, first, second, msg=None):
+ """Fail if the two objects are unequal as determined by the '=='
+ operator.
+ """
+ assertion_func = self._getAssertEqualityFunc(first, second)
+ assertion_func(first, second, msg=msg)
+
+ def assertNotEqual(self, first, second, msg=None):
+ """Fail if the two objects are equal as determined by the '!='
+ operator.
+ """
+ if not first != second:
+ msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
+ safe_repr(second)))
+ raise self.failureException(msg)
+
+ def assertAlmostEqual(self, first, second, places=None, msg=None,
+ delta=None):
+ """Fail if the two objects are unequal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+ difference between the two objects is more than the given
+ delta.
+
+ Note that decimal places (from zero) are usually not the same
+ as significant digits (measured from the most significant digit).
+
+ If the two objects compare equal then they will automatically
+ compare almost equal.
+ """
+ if first == second:
+ # shortcut
+ return
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+
+ diff = abs(first - second)
+ if delta is not None:
+ if diff <= delta:
+ return
+
+ standardMsg = '%s != %s within %s delta (%s difference)' % (
+ safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta),
+ safe_repr(diff))
+ else:
+ if places is None:
+ places = 7
+
+ if round(diff, places) == 0:
+ return
+
+ standardMsg = '%s != %s within %r places (%s difference)' % (
+ safe_repr(first),
+ safe_repr(second),
+ places,
+ safe_repr(diff))
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
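+    # A minimal usage sketch of assertAlmostEqual (illustrative values only):
+    #
+    #   self.assertAlmostEqual(0.1 + 0.2, 0.3, places=7)  # difference rounds to 0
+    #   self.assertAlmostEqual(100.0, 100.4, delta=0.5)   # difference <= delta
+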
+ def assertNotAlmostEqual(self, first, second, places=None, msg=None,
+ delta=None):
+ """Fail if the two objects are equal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+ difference between the two objects is less than the given delta.
+
+ Note that decimal places (from zero) are usually not the same
+ as significant digits (measured from the most significant digit).
+
+ Objects that are equal automatically fail.
+ """
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+ diff = abs(first - second)
+ if delta is not None:
+ if not (first == second) and diff > delta:
+ return
+ standardMsg = '%s == %s within %s delta (%s difference)' % (
+ safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta),
+ safe_repr(diff))
+ else:
+ if places is None:
+ places = 7
+ if not (first == second) and round(diff, places) != 0:
+ return
+ standardMsg = '%s == %s within %r places' % (safe_repr(first),
+ safe_repr(second),
+ places)
+
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
+ """An equality assertion for ordered sequences (like lists and tuples).
+
+ For the purposes of this function, a valid ordered sequence type is one
+ which can be indexed, has a length, and has an equality operator.
+
+ Args:
+ seq1: The first sequence to compare.
+ seq2: The second sequence to compare.
+ seq_type: The expected datatype of the sequences, or None if no
+ datatype should be enforced.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+ """
+ if seq_type is not None:
+ seq_type_name = seq_type.__name__
+ if not isinstance(seq1, seq_type):
+ raise self.failureException('First sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq1)))
+ if not isinstance(seq2, seq_type):
+ raise self.failureException('Second sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq2)))
+ else:
+ seq_type_name = "sequence"
+
+ differing = None
+ try:
+ len1 = len(seq1)
+ except (TypeError, NotImplementedError):
+ differing = 'First %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ try:
+ len2 = len(seq2)
+ except (TypeError, NotImplementedError):
+ differing = 'Second %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ if seq1 == seq2:
+ return
+
+ differing = '%ss differ: %s != %s\n' % (
+ (seq_type_name.capitalize(),) +
+ _common_shorten_repr(seq1, seq2))
+
+ for i in range(min(len1, len2)):
+ try:
+ item1 = seq1[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of first %s\n' %
+ (i, seq_type_name))
+ break
+
+ try:
+ item2 = seq2[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of second %s\n' %
+ (i, seq_type_name))
+ break
+
+ if item1 != item2:
+ differing += ('\nFirst differing element %d:\n%s\n%s\n' %
+ ((i,) + _common_shorten_repr(item1, item2)))
+ break
+ else:
+ if (len1 == len2 and seq_type is None and
+ type(seq1) != type(seq2)):
+ # The sequences are the same, but have differing types.
+ return
+
+ if len1 > len2:
+ differing += ('\nFirst %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len1 - len2))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len2, safe_repr(seq1[len2])))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of first %s\n' % (len2, seq_type_name))
+ elif len1 < len2:
+ differing += ('\nSecond %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len2 - len1))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len1, safe_repr(seq2[len1])))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of second %s\n' % (len1, seq_type_name))
+ standardMsg = differing
+ diffMsg = '\n' + '\n'.join(
+ difflib.ndiff(pprint.pformat(seq1).splitlines(),
+ pprint.pformat(seq2).splitlines()))
+
+ standardMsg = self._truncateMessage(standardMsg, diffMsg)
+ msg = self._formatMessage(msg, standardMsg)
+ self.fail(msg)
+
+ def _truncateMessage(self, message, diff):
+ max_diff = self.maxDiff
+ if max_diff is None or len(diff) <= max_diff:
+ return message + diff
+ return message + (DIFF_OMITTED % len(diff))
+
+ def assertListEqual(self, list1, list2, msg=None):
+ """A list-specific equality assertion.
+
+ Args:
+ list1: The first list to compare.
+ list2: The second list to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ """
+ self.assertSequenceEqual(list1, list2, msg, seq_type=list)
+
+ def assertTupleEqual(self, tuple1, tuple2, msg=None):
+ """A tuple-specific equality assertion.
+
+ Args:
+ tuple1: The first tuple to compare.
+ tuple2: The second tuple to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+ """
+ self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
+
+ def assertSetEqual(self, set1, set2, msg=None):
+ """A set-specific equality assertion.
+
+ Args:
+ set1: The first set to compare.
+ set2: The second set to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ assertSetEqual uses ducktyping to support different types of sets, and
+ is optimized for sets specifically (parameters must support a
+ difference method).
+ """
+ try:
+ difference1 = set1.difference(set2)
+ except TypeError as e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError as e:
+ self.fail('first argument does not support set difference: %s' % e)
+
+ try:
+ difference2 = set2.difference(set1)
+ except TypeError as e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError as e:
+ self.fail('second argument does not support set difference: %s' % e)
+
+ if not (difference1 or difference2):
+ return
+
+ lines = []
+ if difference1:
+ lines.append('Items in the first set but not the second:')
+ for item in difference1:
+ lines.append(repr(item))
+ if difference2:
+ lines.append('Items in the second set but not the first:')
+ for item in difference2:
+ lines.append(repr(item))
+
+ standardMsg = '\n'.join(lines)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a in b), but with a nicer default message."""
+ if member not in container:
+ standardMsg = '%s not found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a not in b), but with a nicer default message."""
+ if member in container:
+ standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIs(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is b), but with a nicer default message."""
+ if expr1 is not expr2:
+ standardMsg = '%s is not %s' % (safe_repr(expr1),
+ safe_repr(expr2))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNot(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is not b), but with a nicer default message."""
+ if expr1 is expr2:
+ standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictEqual(self, d1, d2, msg=None):
+ self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
+ self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
+
+ if d1 != d2:
+ standardMsg = '%s != %s' % _common_shorten_repr(d1, d2)
+ diff = ('\n' + '\n'.join(difflib.ndiff(
+ pprint.pformat(d1).splitlines(),
+ pprint.pformat(d2).splitlines())))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictContainsSubset(self, subset, dictionary, msg=None):
+ """Checks whether dictionary is a superset of subset."""
+ warnings.warn('assertDictContainsSubset is deprecated',
+ DeprecationWarning)
+ missing = []
+ mismatched = []
+ for key, value in subset.items():
+ if key not in dictionary:
+ missing.append(key)
+ elif value != dictionary[key]:
+ mismatched.append('%s, expected: %s, actual: %s' %
+ (safe_repr(key), safe_repr(value),
+ safe_repr(dictionary[key])))
+
+ if not (missing or mismatched):
+ return
+
+ standardMsg = ''
+ if missing:
+ standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
+ missing)
+ if mismatched:
+ if standardMsg:
+ standardMsg += '; '
+ standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
+
+ self.fail(self._formatMessage(msg, standardMsg))
+
+
+ def assertCountEqual(self, first, second, msg=None):
+ """Asserts that two iterables have the same elements, the same number of
+ times, without regard to order.
+
+ self.assertEqual(Counter(list(first)),
+ Counter(list(second)))
+
+ Example:
+ - [0, 1, 1] and [1, 0, 1] compare equal.
+ - [0, 0, 1] and [0, 1] compare unequal.
+
+ """
+ first_seq, second_seq = list(first), list(second)
+ try:
+ first = collections.Counter(first_seq)
+ second = collections.Counter(second_seq)
+ except TypeError:
+ # Handle case with unhashable elements
+ differences = _count_diff_all_purpose(first_seq, second_seq)
+ else:
+ if first == second:
+ return
+ differences = _count_diff_hashable(first_seq, second_seq)
+
+ if differences:
+ standardMsg = 'Element counts were not equal:\n'
+ lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
+ diffMsg = '\n'.join(lines)
+ standardMsg = self._truncateMessage(standardMsg, diffMsg)
+ msg = self._formatMessage(msg, standardMsg)
+ self.fail(msg)
+
+ def assertMultiLineEqual(self, first, second, msg=None):
+ """Assert that two multi-line strings are equal."""
+ self.assertIsInstance(first, str, 'First argument is not a string')
+ self.assertIsInstance(second, str, 'Second argument is not a string')
+
+ if first != second:
+ # don't use difflib if the strings are too long
+ if (len(first) > self._diffThreshold or
+ len(second) > self._diffThreshold):
+ self._baseAssertEqual(first, second, msg)
+ firstlines = first.splitlines(keepends=True)
+ secondlines = second.splitlines(keepends=True)
+ if len(firstlines) == 1 and first.strip('\r\n') == first:
+ firstlines = [first + '\n']
+ secondlines = [second + '\n']
+ standardMsg = '%s != %s' % _common_shorten_repr(first, second)
+ diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLess(self, a, b, msg=None):
+ """Just like self.assertTrue(a < b), but with a nicer default message."""
+ if not a < b:
+ standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLessEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a <= b), but with a nicer default message."""
+ if not a <= b:
+ standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreater(self, a, b, msg=None):
+ """Just like self.assertTrue(a > b), but with a nicer default message."""
+ if not a > b:
+ standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreaterEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a >= b), but with a nicer default message."""
+ if not a >= b:
+ standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNone(self, obj, msg=None):
+ """Same as self.assertTrue(obj is None), with a nicer default message."""
+ if obj is not None:
+ standardMsg = '%s is not None' % (safe_repr(obj),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNotNone(self, obj, msg=None):
+ """Included for symmetry with assertIsNone."""
+ if obj is None:
+ standardMsg = 'unexpectedly None'
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsInstance(self, obj, cls, msg=None):
+ """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
+ default message."""
+ if not isinstance(obj, cls):
+ standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIsInstance(self, obj, cls, msg=None):
+ """Included for symmetry with assertIsInstance."""
+ if isinstance(obj, cls):
+ standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertRaisesRegex(self, expected_exception, expected_regex,
+ *args, **kwargs):
+ """Asserts that the message in a raised exception matches a regex.
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ expected_regex: Regex (re.Pattern object or string) expected
+ to be found in error message.
+ args: Function to be called and extra positional args.
+ kwargs: Extra kwargs.
+ msg: Optional message used in case of failure. Can only be used
+ when assertRaisesRegex is used as a context manager.
+ """
+ context = _AssertRaisesContext(expected_exception, self, expected_regex)
+ return context.handle('assertRaisesRegex', args, kwargs)
+
+ def assertWarnsRegex(self, expected_warning, expected_regex,
+ *args, **kwargs):
+ """Asserts that the message in a triggered warning matches a regexp.
+ Basic functioning is similar to assertWarns() with the addition
+ that only warnings whose messages also match the regular expression
+ are considered successful matches.
+
+ Args:
+ expected_warning: Warning class expected to be triggered.
+ expected_regex: Regex (re.Pattern object or string) expected
+ to be found in error message.
+ args: Function to be called and extra positional args.
+ kwargs: Extra kwargs.
+ msg: Optional message used in case of failure. Can only be used
+ when assertWarnsRegex is used as a context manager.
+ """
+ context = _AssertWarnsContext(expected_warning, self, expected_regex)
+ return context.handle('assertWarnsRegex', args, kwargs)
+
+ def assertRegex(self, text, expected_regex, msg=None):
+ """Fail the test unless the text matches the regular expression."""
+ if isinstance(expected_regex, (str, bytes)):
+ assert expected_regex, "expected_regex must not be empty."
+ expected_regex = re.compile(expected_regex)
+ if not expected_regex.search(text):
+ standardMsg = "Regex didn't match: %r not found in %r" % (
+ expected_regex.pattern, text)
+ # _formatMessage ensures the longMessage option is respected
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertNotRegex(self, text, unexpected_regex, msg=None):
+ """Fail the test if the text matches the regular expression."""
+ if isinstance(unexpected_regex, (str, bytes)):
+ unexpected_regex = re.compile(unexpected_regex)
+ match = unexpected_regex.search(text)
+ if match:
+ standardMsg = 'Regex matched: %r matches %r in %r' % (
+ text[match.start() : match.end()],
+ unexpected_regex.pattern,
+ text)
+ # _formatMessage ensures the longMessage option is respected
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+
+ def _deprecate(original_func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn(
+ 'Please use {0} instead.'.format(original_func.__name__),
+ DeprecationWarning, 2)
+ return original_func(*args, **kwargs)
+ return deprecated_func
+
+ # see #9424
+ failUnlessEqual = assertEquals = _deprecate(assertEqual)
+ failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
+ failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
+ failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
+ failUnless = assert_ = _deprecate(assertTrue)
+ failUnlessRaises = _deprecate(assertRaises)
+ failIf = _deprecate(assertFalse)
+ assertRaisesRegexp = _deprecate(assertRaisesRegex)
+ assertRegexpMatches = _deprecate(assertRegex)
+ assertNotRegexpMatches = _deprecate(assertNotRegex)
+
+
+
+class FunctionTestCase(TestCase):
+ """A test case that wraps a test function.
+
+ This is useful for slipping pre-existing test functions into the
+ unittest framework. Optionally, set-up and tidy-up functions can be
+ supplied. As with TestCase, the tidy-up ('tearDown') function will
+ always be called if the set-up ('setUp') function ran successfully.
+ """
+
+ def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
+ super(FunctionTestCase, self).__init__()
+ self._setUpFunc = setUp
+ self._tearDownFunc = tearDown
+ self._testFunc = testFunc
+ self._description = description
+
+ def setUp(self):
+ if self._setUpFunc is not None:
+ self._setUpFunc()
+
+ def tearDown(self):
+ if self._tearDownFunc is not None:
+ self._tearDownFunc()
+
+ def runTest(self):
+ self._testFunc()
+
+ def id(self):
+ return self._testFunc.__name__
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._setUpFunc == other._setUpFunc and \
+ self._tearDownFunc == other._tearDownFunc and \
+ self._testFunc == other._testFunc and \
+ self._description == other._description
+
+ def __hash__(self):
+ return hash((type(self), self._setUpFunc, self._tearDownFunc,
+ self._testFunc, self._description))
+
+ def __str__(self):
+ return "%s (%s)" % (strclass(self.__class__),
+ self._testFunc.__name__)
+
+ def __repr__(self):
+ return "<%s tec=%s>" % (strclass(self.__class__),
+ self._testFunc)
+
+ def shortDescription(self):
+ if self._description is not None:
+ return self._description
+ doc = self._testFunc.__doc__
+ return doc and doc.split("\n")[0].strip() or None
+
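+# A minimal usage sketch of FunctionTestCase (the check function below is
+# hypothetical and shown only for illustration):
+#
+#   def check_addition():
+#       assert 1 + 1 == 2
+#
+#   test = FunctionTestCase(check_addition, description="simple arithmetic")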
+
+class _SubTest(TestCase):
+
+ def __init__(self, test_case, message, params):
+ super().__init__()
+ self._message = message
+ self.test_case = test_case
+ self.params = params
+ self.failureException = test_case.failureException
+
+ def runTest(self):
+ raise NotImplementedError("subtests cannot be run directly")
+
+ def _subDescription(self):
+ parts = []
+ if self._message is not _subtest_msg_sentinel:
+ parts.append("[{}]".format(self._message))
+ if self.params:
+ params_desc = ', '.join(
+ "{}={!r}".format(k, v)
+ for (k, v) in self.params.items())
+ parts.append("({})".format(params_desc))
+ return " ".join(parts) or '()'
+
+ def id(self):
+ return "{} {}".format(self.test_case.id(), self._subDescription())
+
+ def shortDescription(self):
+ """Returns a one-line description of the subtest, or None if no
+ description has been provided.
+ """
+ return self.test_case.shortDescription()
+
+ def __str__(self):
+ return "{} {}".format(self.test_case, self._subDescription())
diff --git a/Monika After Story/game/python-packages/unittest/loader.py b/Monika After Story/game/python-packages/unittest/loader.py
new file mode 100644
index 0000000000..ba7105e1ad
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/loader.py
@@ -0,0 +1,517 @@
+"""Loading unittests."""
+
+import os
+import re
+import sys
+import traceback
+import types
+import functools
+import warnings
+
+from fnmatch import fnmatch, fnmatchcase
+
+from . import case, suite, util
+
+__unittest = True
+
+# what about .pyc (etc)
+# we would need to avoid loading the same tests multiple times
+# from '.py', *and* '.pyc'
+VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
+
+
+class _FailedTest(case.TestCase):
+ _testMethodName = None
+
+ def __init__(self, method_name, exception):
+ self._exception = exception
+ super(_FailedTest, self).__init__(method_name)
+
+ def __getattr__(self, name):
+ if name != self._testMethodName:
+ return super(_FailedTest, self).__getattr__(name)
+ def testFailure():
+ raise self._exception
+ return testFailure
+
+
+def _make_failed_import_test(name, suiteClass):
+ message = 'Failed to import test module: %s\n%s' % (
+ name, traceback.format_exc())
+ return _make_failed_test(name, ImportError(message), suiteClass, message)
+
+def _make_failed_load_tests(name, exception, suiteClass):
+ message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),)
+ return _make_failed_test(
+ name, exception, suiteClass, message)
+
+def _make_failed_test(methodname, exception, suiteClass, message):
+ test = _FailedTest(methodname, exception)
+ return suiteClass((test,)), message
+
+def _make_skipped_test(methodname, exception, suiteClass):
+ @case.skip(str(exception))
+ def testSkipped(self):
+ pass
+ attrs = {methodname: testSkipped}
+ TestClass = type("ModuleSkipped", (case.TestCase,), attrs)
+ return suiteClass((TestClass(methodname),))
+
+def _jython_aware_splitext(path):
+ if path.lower().endswith('$py.class'):
+ return path[:-9]
+ return os.path.splitext(path)[0]
+
+
+class TestLoader(object):
+ """
+ This class is responsible for loading tests according to various criteria
+ and returning them wrapped in a TestSuite
+ """
+ testMethodPrefix = 'test'
+ sortTestMethodsUsing = staticmethod(util.three_way_cmp)
+ testNamePatterns = None
+ suiteClass = suite.TestSuite
+ _top_level_dir = None
+
+ def __init__(self):
+ super(TestLoader, self).__init__()
+ self.errors = []
+ # Tracks packages which we have called into via load_tests, to
+ # avoid infinite re-entrancy.
+ self._loading_packages = set()
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Return a suite of all test cases contained in testCaseClass"""
+ if issubclass(testCaseClass, suite.TestSuite):
+ raise TypeError("Test cases should not be derived from "
+ "TestSuite. Maybe you meant to derive from "
+ "TestCase?")
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+ loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+ return loaded_suite
+
+ # XXX After Python 3.5, remove backward compatibility hacks for
+ # use_load_tests deprecation via *args and **kws. See issue 16662.
+ def loadTestsFromModule(self, module, *args, pattern=None, **kws):
+ """Return a suite of all test cases contained in the given module"""
+ # This method used to take an undocumented and unofficial
+ # use_load_tests argument. For backward compatibility, we still
+ # accept the argument (which can also be the first position) but we
+ # ignore it and issue a deprecation warning if it's present.
+ if len(args) > 0 or 'use_load_tests' in kws:
+ warnings.warn('use_load_tests is deprecated and ignored',
+ DeprecationWarning)
+ kws.pop('use_load_tests', None)
+ if len(args) > 1:
+ # Complain about the number of arguments, but don't forget the
+ # required `module` argument.
+ complaint = len(args) + 1
+ raise TypeError('loadTestsFromModule() takes 1 positional argument but {} were given'.format(complaint))
+ if len(kws) != 0:
+ # Since the keyword arguments are unsorted (see PEP 468), just
+ # pick the alphabetically sorted first argument to complain about,
+ # if multiple were given. At least the error message will be
+ # predictable.
+ complaint = sorted(kws)[0]
+ raise TypeError("loadTestsFromModule() got an unexpected keyword argument '{}'".format(complaint))
+ tests = []
+ for name in dir(module):
+ obj = getattr(module, name)
+ if isinstance(obj, type) and issubclass(obj, case.TestCase):
+ tests.append(self.loadTestsFromTestCase(obj))
+
+ load_tests = getattr(module, 'load_tests', None)
+ tests = self.suiteClass(tests)
+ if load_tests is not None:
+ try:
+ return load_tests(self, tests, pattern)
+ except Exception as e:
+ error_case, error_message = _make_failed_load_tests(
+ module.__name__, e, self.suiteClass)
+ self.errors.append(error_message)
+ return error_case
+ return tests
+
+ def loadTestsFromName(self, name, module=None):
+ """Return a suite of all test cases given a string specifier.
+
+ The name may resolve either to a module, a test case class, a
+ test method within a test case class, or a callable object which
+ returns a TestCase or TestSuite instance.
+
+ The method optionally resolves the names relative to a given module.
+ """
+ parts = name.split('.')
+ error_case, error_message = None, None
+ if module is None:
+ parts_copy = parts[:]
+ while parts_copy:
+ try:
+ module_name = '.'.join(parts_copy)
+ module = __import__(module_name)
+ break
+ except ImportError:
+ next_attribute = parts_copy.pop()
+ # Last error so we can give it to the user if needed.
+ error_case, error_message = _make_failed_import_test(
+ next_attribute, self.suiteClass)
+ if not parts_copy:
+ # Even the top level import failed: report that error.
+ self.errors.append(error_message)
+ return error_case
+ parts = parts[1:]
+ obj = module
+ for part in parts:
+ try:
+ parent, obj = obj, getattr(obj, part)
+ except AttributeError as e:
+ # We can't traverse some part of the name.
+ if (getattr(obj, '__path__', None) is not None
+ and error_case is not None):
+ # This is a package (no __path__ per importlib docs), and we
+ # encountered an error importing something. We cannot tell
+ # the difference between package.WrongNameTestClass and
+ # package.wrong_module_name so we just report the
+ # ImportError - it is more informative.
+ self.errors.append(error_message)
+ return error_case
+ else:
+ # Otherwise, we signal that an AttributeError has occurred.
+ error_case, error_message = _make_failed_test(
+ part, e, self.suiteClass,
+ 'Failed to access attribute:\n%s' % (
+ traceback.format_exc(),))
+ self.errors.append(error_message)
+ return error_case
+
+ if isinstance(obj, types.ModuleType):
+ return self.loadTestsFromModule(obj)
+ elif isinstance(obj, type) and issubclass(obj, case.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ elif (isinstance(obj, types.FunctionType) and
+ isinstance(parent, type) and
+ issubclass(parent, case.TestCase)):
+ name = parts[-1]
+ inst = parent(name)
+ # static methods follow a different path
+ if not isinstance(getattr(inst, name), types.FunctionType):
+ return self.suiteClass([inst])
+ elif isinstance(obj, suite.TestSuite):
+ return obj
+ if callable(obj):
+ test = obj()
+ if isinstance(test, suite.TestSuite):
+ return test
+ elif isinstance(test, case.TestCase):
+ return self.suiteClass([test])
+ else:
+ raise TypeError("calling %s returned %s, not a test" %
+ (obj, test))
+ else:
+ raise TypeError("don't know how to make test from: %s" % obj)
+
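+    # A minimal usage sketch of loadTestsFromName (the dotted name below is
+    # hypothetical and shown only for illustration):
+    #
+    #   loader = TestLoader()
+    #   suite = loader.loadTestsFromName("mypackage.test_module.SomeTest.test_ok")
+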
+ def loadTestsFromNames(self, names, module=None):
+ """Return a suite of all test cases found using the given sequence
+ of string specifiers. See 'loadTestsFromName()'.
+ """
+ suites = [self.loadTestsFromName(name, module) for name in names]
+ return self.suiteClass(suites)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Return a sorted sequence of method names found within testCaseClass
+ """
+ def shouldIncludeMethod(attrname):
+ if not attrname.startswith(self.testMethodPrefix):
+ return False
+ testFunc = getattr(testCaseClass, attrname)
+ if not callable(testFunc):
+ return False
+ fullName = f'%s.%s.%s' % (
+ testCaseClass.__module__, testCaseClass.__qualname__, attrname
+ )
+ return self.testNamePatterns is None or \
+ any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns)
+ testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass)))
+ if self.sortTestMethodsUsing:
+ testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
+ return testFnNames
+
+ def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
+ """Find and return all test modules from the specified start
+ directory, recursing into subdirectories to find them and return all
+ tests found within them. Only test files that match the pattern will
+ be loaded. (Using shell style pattern matching.)
+
+ All test modules must be importable from the top level of the project.
+ If the start directory is not the top level directory then the top
+ level directory must be specified separately.
+
+ If a test package name (directory with '__init__.py') matches the
+ pattern then the package will be checked for a 'load_tests' function. If
+ this exists then it will be called with (loader, tests, pattern) unless
+ the package has already had load_tests called from the same discovery
+ invocation, in which case the package module object is not scanned for
+ tests - this ensures that when a package uses discover to further
+ discover child tests that infinite recursion does not happen.
+
+ If load_tests exists then discovery does *not* recurse into the package,
+ load_tests is responsible for loading all tests in the package.
+
+ The pattern is deliberately not stored as a loader attribute so that
+ packages can continue discovery themselves. top_level_dir is stored so
+ load_tests does not need to pass this argument in to loader.discover().
+
+ Paths are sorted before being imported to ensure reproducible execution
+ order even on filesystems with non-alphabetical ordering like ext3/4.
+ """
+ set_implicit_top = False
+ if top_level_dir is None and self._top_level_dir is not None:
+ # make top_level_dir optional if called from load_tests in a package
+ top_level_dir = self._top_level_dir
+ elif top_level_dir is None:
+ set_implicit_top = True
+ top_level_dir = start_dir
+
+ top_level_dir = os.path.abspath(top_level_dir)
+
+ if not top_level_dir in sys.path:
+ # all test modules must be importable from the top level directory
+ # should we *unconditionally* put the start directory in first
+ # in sys.path to minimise likelihood of conflicts between installed
+ # modules and development versions?
+ sys.path.insert(0, top_level_dir)
+ self._top_level_dir = top_level_dir
+
+ is_not_importable = False
+ is_namespace = False
+ tests = []
+ if os.path.isdir(os.path.abspath(start_dir)):
+ start_dir = os.path.abspath(start_dir)
+ if start_dir != top_level_dir:
+ is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
+ else:
+ # support for discovery from dotted module names
+ try:
+ __import__(start_dir)
+ except ImportError:
+ is_not_importable = True
+ else:
+ the_module = sys.modules[start_dir]
+ top_part = start_dir.split('.')[0]
+ try:
+ start_dir = os.path.abspath(
+ os.path.dirname((the_module.__file__)))
+ except AttributeError:
+ # look for namespace packages
+ try:
+ spec = the_module.__spec__
+ except AttributeError:
+ spec = None
+
+ if spec and spec.loader is None:
+ if spec.submodule_search_locations is not None:
+ is_namespace = True
+
+ for path in the_module.__path__:
+ if (not set_implicit_top and
+ not path.startswith(top_level_dir)):
+ continue
+ self._top_level_dir = \
+ (path.split(the_module.__name__
+ .replace(".", os.path.sep))[0])
+ tests.extend(self._find_tests(path,
+ pattern,
+ namespace=True))
+ elif the_module.__name__ in sys.builtin_module_names:
+ # builtin module
+ raise TypeError('Can not use builtin modules '
+ 'as dotted module names') from None
+ else:
+ raise TypeError(
+ 'don\'t know how to discover from {!r}'
+ .format(the_module)) from None
+
+ if set_implicit_top:
+ if not is_namespace:
+ self._top_level_dir = \
+ self._get_directory_containing_module(top_part)
+ sys.path.remove(top_level_dir)
+ else:
+ sys.path.remove(top_level_dir)
+
+ if is_not_importable:
+ raise ImportError('Start directory is not importable: %r' % start_dir)
+
+ if not is_namespace:
+ tests = list(self._find_tests(start_dir, pattern))
+ return self.suiteClass(tests)
+
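+    # A minimal usage sketch of discover (the start directory and pattern are
+    # hypothetical and shown only for illustration):
+    #
+    #   loader = TestLoader()
+    #   suite = loader.discover(start_dir="tests", pattern="test_*.py")
+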
+ def _get_directory_containing_module(self, module_name):
+ module = sys.modules[module_name]
+ full_path = os.path.abspath(module.__file__)
+
+ if os.path.basename(full_path).lower().startswith('__init__.py'):
+ return os.path.dirname(os.path.dirname(full_path))
+ else:
+ # here we have been given a module rather than a package - so
+ # all we can do is search the *same* directory the module is in
+ # should an exception be raised instead
+ return os.path.dirname(full_path)
+
+ def _get_name_from_path(self, path):
+ if path == self._top_level_dir:
+ return '.'
+ path = _jython_aware_splitext(os.path.normpath(path))
+
+ _relpath = os.path.relpath(path, self._top_level_dir)
+ assert not os.path.isabs(_relpath), "Path must be within the project"
+ assert not _relpath.startswith('..'), "Path must be within the project"
+
+ name = _relpath.replace(os.path.sep, '.')
+ return name
+
+ def _get_module_from_name(self, name):
+ __import__(name)
+ return sys.modules[name]
+
+ def _match_path(self, path, full_path, pattern):
+ # override this method to use alternative matching strategy
+ return fnmatch(path, pattern)
+
+ def _find_tests(self, start_dir, pattern, namespace=False):
+ """Used by discovery. Yields test suites it loads."""
+ # Handle the __init__ in this package
+ name = self._get_name_from_path(start_dir)
+ # name is '.' when start_dir == top_level_dir (and top_level_dir is by
+ # definition not a package).
+ if name != '.' and name not in self._loading_packages:
+ # name is in self._loading_packages while we have called into
+ # loadTestsFromModule with name.
+ tests, should_recurse = self._find_test_path(
+ start_dir, pattern, namespace)
+ if tests is not None:
+ yield tests
+ if not should_recurse:
+ # Either an error occurred, or load_tests was used by the
+ # package.
+ return
+ # Handle the contents.
+ paths = sorted(os.listdir(start_dir))
+ for path in paths:
+ full_path = os.path.join(start_dir, path)
+ tests, should_recurse = self._find_test_path(
+ full_path, pattern, namespace)
+ if tests is not None:
+ yield tests
+ if should_recurse:
+ # we found a package that didn't use load_tests.
+ name = self._get_name_from_path(full_path)
+ self._loading_packages.add(name)
+ try:
+ yield from self._find_tests(full_path, pattern, namespace)
+ finally:
+ self._loading_packages.discard(name)
+
+ def _find_test_path(self, full_path, pattern, namespace=False):
+ """Used by discovery.
+
+ Loads tests from a single file, or a directory's __init__.py when
+ passed the directory.
+
+ Returns a tuple (None_or_tests_from_file, should_recurse).
+ """
+ basename = os.path.basename(full_path)
+ if os.path.isfile(full_path):
+ if not VALID_MODULE_NAME.match(basename):
+ # valid Python identifiers only
+ return None, False
+ if not self._match_path(basename, full_path, pattern):
+ return None, False
+ # if the test file matches, load it
+ name = self._get_name_from_path(full_path)
+ try:
+ module = self._get_module_from_name(name)
+ except case.SkipTest as e:
+ return _make_skipped_test(name, e, self.suiteClass), False
+ except:
+ error_case, error_message = \
+ _make_failed_import_test(name, self.suiteClass)
+ self.errors.append(error_message)
+ return error_case, False
+ else:
+ mod_file = os.path.abspath(
+ getattr(module, '__file__', full_path))
+ realpath = _jython_aware_splitext(
+ os.path.realpath(mod_file))
+ fullpath_noext = _jython_aware_splitext(
+ os.path.realpath(full_path))
+ if realpath.lower() != fullpath_noext.lower():
+ module_dir = os.path.dirname(realpath)
+ mod_name = _jython_aware_splitext(
+ os.path.basename(full_path))
+ expected_dir = os.path.dirname(full_path)
+ msg = ("%r module incorrectly imported from %r. Expected "
+ "%r. Is this module globally installed?")
+ raise ImportError(
+ msg % (mod_name, module_dir, expected_dir))
+ return self.loadTestsFromModule(module, pattern=pattern), False
+ elif os.path.isdir(full_path):
+ if (not namespace and
+ not os.path.isfile(os.path.join(full_path, '__init__.py'))):
+ return None, False
+
+ load_tests = None
+ tests = None
+ name = self._get_name_from_path(full_path)
+ try:
+ package = self._get_module_from_name(name)
+ except case.SkipTest as e:
+ return _make_skipped_test(name, e, self.suiteClass), False
+ except:
+ error_case, error_message = \
+ _make_failed_import_test(name, self.suiteClass)
+ self.errors.append(error_message)
+ return error_case, False
+ else:
+ load_tests = getattr(package, 'load_tests', None)
+ # Mark this package as being in load_tests (possibly ;))
+ self._loading_packages.add(name)
+ try:
+ tests = self.loadTestsFromModule(package, pattern=pattern)
+ if load_tests is not None:
+ # loadTestsFromModule(package) has loaded tests for us.
+ return tests, False
+ return tests, True
+ finally:
+ self._loading_packages.discard(name)
+ else:
+ return None, False
+
+
+defaultTestLoader = TestLoader()
+
+
+def _makeLoader(prefix, sortUsing, suiteClass=None, testNamePatterns=None):
+ loader = TestLoader()
+ loader.sortTestMethodsUsing = sortUsing
+ loader.testMethodPrefix = prefix
+ loader.testNamePatterns = testNamePatterns
+ if suiteClass:
+ loader.suiteClass = suiteClass
+ return loader
+
+def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp, testNamePatterns=None):
+ return _makeLoader(prefix, sortUsing, testNamePatterns=testNamePatterns).getTestCaseNames(testCaseClass)
+
+def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(
+ testCaseClass)
+
+def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(\
+ module)
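
The discover() docstring above describes the load_tests protocol: when a package defines load_tests, discovery does not recurse into it, and the hook becomes responsible for returning every test in that package. A minimal sketch of such a hook, not part of the patch, with an illustrative package layout and assuming the vendored package resolves as `unittest` on the import path:

    # tests/__init__.py  (illustrative path and package name)
    import os.path

    def load_tests(loader, standard_tests, pattern):
        # top_level_dir was stored on the loader by the outer discover()
        # call, so it does not need to be passed again here.
        this_dir = os.path.dirname(__file__)
        package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
        standard_tests.addTests(package_tests)
        return standard_tests

Because this hook calls loader.discover() itself, the _loading_packages bookkeeping in _find_tests is what prevents the re-entrant call from recursing into the same package forever.
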
diff --git a/Monika After Story/game/python-packages/unittest/main.py b/Monika After Story/game/python-packages/unittest/main.py
new file mode 100644
index 0000000000..e62469aa2a
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/main.py
@@ -0,0 +1,275 @@
+"""Unittest main program"""
+
+import sys
+import argparse
+import os
+
+from . import loader, runner
+from .signals import installHandler
+
+__unittest = True
+
+MAIN_EXAMPLES = """\
+Examples:
+ %(prog)s test_module - run tests from test_module
+ %(prog)s module.TestClass - run tests from module.TestClass
+ %(prog)s module.Class.test_method - run specified test method
+ %(prog)s path/to/test_file.py - run tests from test_file.py
+"""
+
+MODULE_EXAMPLES = """\
+Examples:
+ %(prog)s - run default set of tests
+ %(prog)s MyTestSuite - run suite 'MyTestSuite'
+ %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
+ %(prog)s MyTestCase - run all 'test*' test methods
+ in MyTestCase
+"""
+
+def _convert_name(name):
+ # on Linux / Mac OS X 'foo.PY' is not importable, but on
+ # Windows it is. Simpler to do a case insensitive match
+ # a better check would be to check that the name is a
+ # valid Python module name.
+ if os.path.isfile(name) and name.lower().endswith('.py'):
+ if os.path.isabs(name):
+ rel_path = os.path.relpath(name, os.getcwd())
+ if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
+ return name
+ name = rel_path
+ # on Windows both '\' and '/' are used as path
+ # separators. Better to replace both than rely on os.path.sep
+ return name[:-3].replace('\\', '.').replace('/', '.')
+ return name
+
+def _convert_names(names):
+ return [_convert_name(name) for name in names]
+
+
+def _convert_select_pattern(pattern):
+ if not '*' in pattern:
+ pattern = '*%s*' % pattern
+ return pattern
+
+
+class TestProgram(object):
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ # defaults for testing
+ module=None
+ verbosity = 1
+ failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None
+ _discovery_parser = None
+
+ def __init__(self, module='__main__', defaultTest=None, argv=None,
+ testRunner=None, testLoader=loader.defaultTestLoader,
+ exit=True, verbosity=1, failfast=None, catchbreak=None,
+ buffer=None, warnings=None, *, tb_locals=False):
+ if isinstance(module, str):
+ self.module = __import__(module)
+ for part in module.split('.')[1:]:
+ self.module = getattr(self.module, part)
+ else:
+ self.module = module
+ if argv is None:
+ argv = sys.argv
+
+ self.exit = exit
+ self.failfast = failfast
+ self.catchbreak = catchbreak
+ self.verbosity = verbosity
+ self.buffer = buffer
+ self.tb_locals = tb_locals
+ if warnings is None and not sys.warnoptions:
+ # even if DeprecationWarnings are ignored by default
+ # print them anyway unless other warnings settings are
+ # specified by the warnings arg or the -W python flag
+ self.warnings = 'default'
+ else:
+ # here self.warnings is set either to the value passed
+ # to the warnings args or to None.
+ # If the user didn't pass a value self.warnings will
+ # be None. This means that the behavior is unchanged
+ # and depends on the values passed to -W.
+ self.warnings = warnings
+ self.defaultTest = defaultTest
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ self.progName = os.path.basename(argv[0])
+ self.parseArgs(argv)
+ self.runTests()
+
+ def usageExit(self, msg=None):
+ if msg:
+ print(msg)
+ if self._discovery_parser is None:
+ self._initArgParsers()
+ self._print_help()
+ sys.exit(2)
+
+ def _print_help(self, *args, **kwargs):
+ if self.module is None:
+ print(self._main_parser.format_help())
+ print(MAIN_EXAMPLES % {'prog': self.progName})
+ self._discovery_parser.print_help()
+ else:
+ print(self._main_parser.format_help())
+ print(MODULE_EXAMPLES % {'prog': self.progName})
+
+ def parseArgs(self, argv):
+ self._initArgParsers()
+ if self.module is None:
+ if len(argv) > 1 and argv[1].lower() == 'discover':
+ self._do_discovery(argv[2:])
+ return
+ self._main_parser.parse_args(argv[1:], self)
+ if not self.tests:
+ # this allows "python -m unittest -v" to still work for
+ # test discovery.
+ self._do_discovery([])
+ return
+ else:
+ self._main_parser.parse_args(argv[1:], self)
+
+ if self.tests:
+ self.testNames = _convert_names(self.tests)
+ if __name__ == '__main__':
+ # to support python -m unittest ...
+ self.module = None
+ elif self.defaultTest is None:
+ # createTests will load tests from self.module
+ self.testNames = None
+ elif isinstance(self.defaultTest, str):
+ self.testNames = (self.defaultTest,)
+ else:
+ self.testNames = list(self.defaultTest)
+ self.createTests()
+
+ def createTests(self, from_discovery=False, Loader=None):
+ if self.testNamePatterns:
+ self.testLoader.testNamePatterns = self.testNamePatterns
+ if from_discovery:
+ loader = self.testLoader if Loader is None else Loader()
+ self.test = loader.discover(self.start, self.pattern, self.top)
+ elif self.testNames is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames,
+ self.module)
+
+ def _initArgParsers(self):
+ parent_parser = self._getParentArgParser()
+ self._main_parser = self._getMainArgParser(parent_parser)
+ self._discovery_parser = self._getDiscoveryArgParser(parent_parser)
+
+ def _getParentArgParser(self):
+ parser = argparse.ArgumentParser(add_help=False)
+
+ parser.add_argument('-v', '--verbose', dest='verbosity',
+ action='store_const', const=2,
+ help='Verbose output')
+ parser.add_argument('-q', '--quiet', dest='verbosity',
+ action='store_const', const=0,
+ help='Quiet output')
+ parser.add_argument('--locals', dest='tb_locals',
+ action='store_true',
+ help='Show local variables in tracebacks')
+ if self.failfast is None:
+ parser.add_argument('-f', '--failfast', dest='failfast',
+ action='store_true',
+ help='Stop on first fail or error')
+ self.failfast = False
+ if self.catchbreak is None:
+ parser.add_argument('-c', '--catch', dest='catchbreak',
+ action='store_true',
+ help='Catch Ctrl-C and display results so far')
+ self.catchbreak = False
+ if self.buffer is None:
+ parser.add_argument('-b', '--buffer', dest='buffer',
+ action='store_true',
+ help='Buffer stdout and stderr during tests')
+ self.buffer = False
+ if self.testNamePatterns is None:
+ parser.add_argument('-k', dest='testNamePatterns',
+ action='append', type=_convert_select_pattern,
+ help='Only run tests which match the given substring')
+ self.testNamePatterns = []
+
+ return parser
+
+ def _getMainArgParser(self, parent):
+ parser = argparse.ArgumentParser(parents=[parent])
+ parser.prog = self.progName
+ parser.print_help = self._print_help
+
+ parser.add_argument('tests', nargs='*',
+ help='a list of any number of test modules, '
+ 'classes and test methods.')
+
+ return parser
+
+ def _getDiscoveryArgParser(self, parent):
+ parser = argparse.ArgumentParser(parents=[parent])
+ parser.prog = '%s discover' % self.progName
+ parser.epilog = ('For test discovery all test modules must be '
+ 'importable from the top level directory of the '
+ 'project.')
+
+ parser.add_argument('-s', '--start-directory', dest='start',
+ help="Directory to start discovery ('.' default)")
+ parser.add_argument('-p', '--pattern', dest='pattern',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_argument('-t', '--top-level-directory', dest='top',
+ help='Top level directory of project (defaults to '
+ 'start directory)')
+ for arg in ('start', 'pattern', 'top'):
+ parser.add_argument(arg, nargs='?',
+ default=argparse.SUPPRESS,
+ help=argparse.SUPPRESS)
+
+ return parser
+
+ def _do_discovery(self, argv, Loader=None):
+ self.start = '.'
+ self.pattern = 'test*.py'
+ self.top = None
+ if argv is not None:
+ # handle command line args for test discovery
+ if self._discovery_parser is None:
+ # for testing
+ self._initArgParsers()
+ self._discovery_parser.parse_args(argv, self)
+
+ self.createTests(from_discovery=True, Loader=Loader)
+
+ def runTests(self):
+ if self.catchbreak:
+ installHandler()
+ if self.testRunner is None:
+ self.testRunner = runner.TextTestRunner
+ if isinstance(self.testRunner, type):
+ try:
+ try:
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer,
+ warnings=self.warnings,
+ tb_locals=self.tb_locals)
+ except TypeError:
+ # didn't accept the tb_locals argument
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer,
+ warnings=self.warnings)
+ except TypeError:
+ # didn't accept the verbosity, buffer or failfast arguments
+ testRunner = self.testRunner()
+ else:
+ # it is assumed to be a TestRunner instance
+ testRunner = self.testRunner
+ self.result = testRunner.run(self.test)
+ if self.exit:
+ sys.exit(not self.result.wasSuccessful())
+
+main = TestProgram
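
Since main is just an alias for TestProgram, the vendored runner can also be driven programmatically, which can matter when tests run inside a long-lived host process (for example a Ren'Py session) rather than a shell, where sys.exit() is unwanted. A hedged sketch with hypothetical module names, assuming the vendored package imports as `unittest`:

    # illustrative driver script, not part of the patch
    import unittest

    program = unittest.main(
        module="test_mymod",   # hypothetical test module to load tests from
        argv=["run_tests"],    # argv[0] only: no CLI selection, so every test in the module runs
        exit=False,            # skip sys.exit() so the host process keeps running
        verbosity=2,
    )
    print("success:", program.result.wasSuccessful())

With exit=False the TestProgram instance is returned after runTests(), so the caller can inspect program.result instead of relying on the process exit code.
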
diff --git a/Monika After Story/game/python-packages/unittest/mock.py b/Monika After Story/game/python-packages/unittest/mock.py
new file mode 100644
index 0000000000..f03c88baca
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/mock.py
@@ -0,0 +1,2891 @@
+# mock.py
+# Test tools for mocking and patching.
+# Maintained by Michael Foord
+# Backport for other versions of Python available from
+# https://pypi.org/project/mock
+
+__all__ = (
+ 'Mock',
+ 'MagicMock',
+ 'patch',
+ 'sentinel',
+ 'DEFAULT',
+ 'ANY',
+ 'call',
+ 'create_autospec',
+ 'AsyncMock',
+ 'FILTER_DIR',
+ 'NonCallableMock',
+ 'NonCallableMagicMock',
+ 'mock_open',
+ 'PropertyMock',
+ 'seal',
+)
+
+
+import asyncio
+import contextlib
+import io
+import inspect
+import pprint
+import sys
+import builtins
+from asyncio import iscoroutinefunction
+from types import CodeType, ModuleType, MethodType
+from unittest.util import safe_repr
+from functools import wraps, partial
+
+
+_builtins = {name for name in dir(builtins) if not name.startswith('_')}
+
+FILTER_DIR = True
+
+# Workaround for issue #12370
+# Without this, the __class__ properties wouldn't be set correctly
+_safe_super = super
+
+def _is_async_obj(obj):
+ if _is_instance_mock(obj) and not isinstance(obj, AsyncMock):
+ return False
+ if hasattr(obj, '__func__'):
+ obj = getattr(obj, '__func__')
+ return iscoroutinefunction(obj) or inspect.isawaitable(obj)
+
+
+def _is_async_func(func):
+ if getattr(func, '__code__', None):
+ return iscoroutinefunction(func)
+ else:
+ return False
+
+
+def _is_instance_mock(obj):
+ # can't use isinstance on Mock objects because they override __class__
+ # The base class for all mocks is NonCallableMock
+ return issubclass(type(obj), NonCallableMock)
+
+
+def _is_exception(obj):
+ return (
+ isinstance(obj, BaseException) or
+ isinstance(obj, type) and issubclass(obj, BaseException)
+ )
+
+
+def _extract_mock(obj):
+ # Autospecced functions will return a FunctionType with "mock" attribute
+ # which is the actual mock object that needs to be used.
+ if isinstance(obj, FunctionTypes) and hasattr(obj, 'mock'):
+ return obj.mock
+ else:
+ return obj
+
+
+def _get_signature_object(func, as_instance, eat_self):
+ """
+ Given an arbitrary, possibly callable object, try to create a suitable
+ signature object.
+ Return a (reduced func, signature) tuple, or None.
+ """
+ if isinstance(func, type) and not as_instance:
+ # If it's a type and should be modelled as a type, use __init__.
+ func = func.__init__
+ # Skip the `self` argument in __init__
+ eat_self = True
+ elif not isinstance(func, FunctionTypes):
+ # If we really want to model an instance of the passed type,
+ # __call__ should be looked up, not __init__.
+ try:
+ func = func.__call__
+ except AttributeError:
+ return None
+ if eat_self:
+ sig_func = partial(func, None)
+ else:
+ sig_func = func
+ try:
+ return func, inspect.signature(sig_func)
+ except ValueError:
+ # Certain callable types are not supported by inspect.signature()
+ return None
+
+
+def _check_signature(func, mock, skipfirst, instance=False):
+ sig = _get_signature_object(func, instance, skipfirst)
+ if sig is None:
+ return
+ func, sig = sig
+ def checksig(self, /, *args, **kwargs):
+ sig.bind(*args, **kwargs)
+ _copy_func_details(func, checksig)
+ type(mock)._mock_check_sig = checksig
+ type(mock).__signature__ = sig
+
+
+def _copy_func_details(func, funcopy):
+ # we explicitly don't copy func.__dict__ into this copy as it would
+ # expose original attributes that should be mocked
+ for attribute in (
+ '__name__', '__doc__', '__text_signature__',
+ '__module__', '__defaults__', '__kwdefaults__',
+ ):
+ try:
+ setattr(funcopy, attribute, getattr(func, attribute))
+ except AttributeError:
+ pass
+
+
+def _callable(obj):
+ if isinstance(obj, type):
+ return True
+ if isinstance(obj, (staticmethod, classmethod, MethodType)):
+ return _callable(obj.__func__)
+ if getattr(obj, '__call__', None) is not None:
+ return True
+ return False
+
+
+def _is_list(obj):
+ # checks for list or tuples
+ # XXXX badly named!
+ return type(obj) in (list, tuple)
+
+
+def _instance_callable(obj):
+ """Given an object, return True if the object is callable.
+ For classes, return True if instances would be callable."""
+ if not isinstance(obj, type):
+ # already an instance
+ return getattr(obj, '__call__', None) is not None
+
+ # *could* be broken by a class overriding __mro__ or __dict__ via
+ # a metaclass
+ for base in (obj,) + obj.__mro__:
+ if base.__dict__.get('__call__') is not None:
+ return True
+ return False
+
+
+def _set_signature(mock, original, instance=False):
+ # creates a function with signature (*args, **kwargs) that delegates to a
+ # mock. It still does signature checking by calling a lambda with the same
+ # signature as the original.
+
+ skipfirst = isinstance(original, type)
+ result = _get_signature_object(original, instance, skipfirst)
+ if result is None:
+ return mock
+ func, sig = result
+ def checksig(*args, **kwargs):
+ sig.bind(*args, **kwargs)
+ _copy_func_details(func, checksig)
+
+ name = original.__name__
+ if not name.isidentifier():
+ name = 'funcopy'
+ context = {'_checksig_': checksig, 'mock': mock}
+ src = """def %s(*args, **kwargs):
+ _checksig_(*args, **kwargs)
+ return mock(*args, **kwargs)""" % name
+ exec (src, context)
+ funcopy = context[name]
+ _setup_func(funcopy, mock, sig)
+ return funcopy
+
+
+def _setup_func(funcopy, mock, sig):
+ funcopy.mock = mock
+
+ def assert_called_with(*args, **kwargs):
+ return mock.assert_called_with(*args, **kwargs)
+ def assert_called(*args, **kwargs):
+ return mock.assert_called(*args, **kwargs)
+ def assert_not_called(*args, **kwargs):
+ return mock.assert_not_called(*args, **kwargs)
+ def assert_called_once(*args, **kwargs):
+ return mock.assert_called_once(*args, **kwargs)
+ def assert_called_once_with(*args, **kwargs):
+ return mock.assert_called_once_with(*args, **kwargs)
+ def assert_has_calls(*args, **kwargs):
+ return mock.assert_has_calls(*args, **kwargs)
+ def assert_any_call(*args, **kwargs):
+ return mock.assert_any_call(*args, **kwargs)
+ def reset_mock():
+ funcopy.method_calls = _CallList()
+ funcopy.mock_calls = _CallList()
+ mock.reset_mock()
+ ret = funcopy.return_value
+ if _is_instance_mock(ret) and not ret is mock:
+ ret.reset_mock()
+
+ funcopy.called = False
+ funcopy.call_count = 0
+ funcopy.call_args = None
+ funcopy.call_args_list = _CallList()
+ funcopy.method_calls = _CallList()
+ funcopy.mock_calls = _CallList()
+
+ funcopy.return_value = mock.return_value
+ funcopy.side_effect = mock.side_effect
+ funcopy._mock_children = mock._mock_children
+
+ funcopy.assert_called_with = assert_called_with
+ funcopy.assert_called_once_with = assert_called_once_with
+ funcopy.assert_has_calls = assert_has_calls
+ funcopy.assert_any_call = assert_any_call
+ funcopy.reset_mock = reset_mock
+ funcopy.assert_called = assert_called
+ funcopy.assert_not_called = assert_not_called
+ funcopy.assert_called_once = assert_called_once
+ funcopy.__signature__ = sig
+
+ mock._mock_delegate = funcopy
+
+
+def _setup_async_mock(mock):
+ mock._is_coroutine = asyncio.coroutines._is_coroutine
+ mock.await_count = 0
+ mock.await_args = None
+ mock.await_args_list = _CallList()
+
+ # Mock is not configured yet so the attributes are set
+ # to a function and then the corresponding mock helper function
+ # is called when the helper is accessed similar to _setup_func.
+ def wrapper(attr, /, *args, **kwargs):
+ return getattr(mock.mock, attr)(*args, **kwargs)
+
+ for attribute in ('assert_awaited',
+ 'assert_awaited_once',
+ 'assert_awaited_with',
+ 'assert_awaited_once_with',
+ 'assert_any_await',
+ 'assert_has_awaits',
+ 'assert_not_awaited'):
+
+ # setattr(mock, attribute, wrapper) causes late binding
+ # hence attribute will always be the last value in the loop
+ # Use partial(wrapper, attribute) to ensure the attribute is bound
+ # correctly.
+ setattr(mock, attribute, partial(wrapper, attribute))
+
+
+def _is_magic(name):
+ return '__%s__' % name[2:-2] == name
+
+
+class _SentinelObject(object):
+ "A unique, named, sentinel object."
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self):
+ return 'sentinel.%s' % self.name
+
+ def __reduce__(self):
+ return 'sentinel.%s' % self.name
+
+
+class _Sentinel(object):
+ """Access attributes to return a named object, usable as a sentinel."""
+ def __init__(self):
+ self._sentinels = {}
+
+ def __getattr__(self, name):
+ if name == '__bases__':
+ # Without this help(unittest.mock) raises an exception
+ raise AttributeError
+ return self._sentinels.setdefault(name, _SentinelObject(name))
+
+ def __reduce__(self):
+ return 'sentinel'
+
+
+sentinel = _Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+_missing = sentinel.MISSING
+_deleted = sentinel.DELETED
+
+
+_allowed_names = {
+ 'return_value', '_mock_return_value', 'side_effect',
+ '_mock_side_effect', '_mock_parent', '_mock_new_parent',
+ '_mock_name', '_mock_new_name'
+}
+
+
+def _delegating_property(name):
+ _allowed_names.add(name)
+ _the_name = '_mock_' + name
+ def _get(self, name=name, _the_name=_the_name):
+ sig = self._mock_delegate
+ if sig is None:
+ return getattr(self, _the_name)
+ return getattr(sig, name)
+ def _set(self, value, name=name, _the_name=_the_name):
+ sig = self._mock_delegate
+ if sig is None:
+ self.__dict__[_the_name] = value
+ else:
+ setattr(sig, name, value)
+
+ return property(_get, _set)
+
+
+
+class _CallList(list):
+
+ def __contains__(self, value):
+ if not isinstance(value, list):
+ return list.__contains__(self, value)
+ len_value = len(value)
+ len_self = len(self)
+ if len_value > len_self:
+ return False
+
+ for i in range(0, len_self - len_value + 1):
+ sub_list = self[i:i+len_value]
+ if sub_list == value:
+ return True
+ return False
+
+ def __repr__(self):
+ return pprint.pformat(list(self))
+
+
+def _check_and_set_parent(parent, value, name, new_name):
+ value = _extract_mock(value)
+
+ if not _is_instance_mock(value):
+ return False
+ if ((value._mock_name or value._mock_new_name) or
+ (value._mock_parent is not None) or
+ (value._mock_new_parent is not None)):
+ return False
+
+ _parent = parent
+ while _parent is not None:
+ # setting a mock (value) as a child or return value of itself
+ # should not modify the mock
+ if _parent is value:
+ return False
+ _parent = _parent._mock_new_parent
+
+ if new_name:
+ value._mock_new_parent = parent
+ value._mock_new_name = new_name
+ if name:
+ value._mock_parent = parent
+ value._mock_name = name
+ return True
+
+# Internal class to identify if we wrapped an iterator object or not.
+class _MockIter(object):
+ def __init__(self, obj):
+ self.obj = iter(obj)
+ def __next__(self):
+ return next(self.obj)
+
+class Base(object):
+ _mock_return_value = DEFAULT
+ _mock_side_effect = None
+ def __init__(self, /, *args, **kwargs):
+ pass
+
+
+
+class NonCallableMock(Base):
+ """A non-callable version of `Mock`"""
+
+ def __new__(cls, /, *args, **kw):
+ # every instance has its own class
+ # so we can create magic methods on the
+ # class without stomping on other mocks
+ bases = (cls,)
+ if not issubclass(cls, AsyncMockMixin):
+ # Check if spec is an async object or function
+ bound_args = _MOCK_SIG.bind_partial(cls, *args, **kw).arguments
+ spec_arg = bound_args.get('spec_set', bound_args.get('spec'))
+ if spec_arg is not None and _is_async_obj(spec_arg):
+ bases = (AsyncMockMixin, cls)
+ new = type(cls.__name__, bases, {'__doc__': cls.__doc__})
+ instance = _safe_super(NonCallableMock, cls).__new__(new)
+ return instance
+
+
+ def __init__(
+ self, spec=None, wraps=None, name=None, spec_set=None,
+ parent=None, _spec_state=None, _new_name='', _new_parent=None,
+ _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs
+ ):
+ if _new_parent is None:
+ _new_parent = parent
+
+ __dict__ = self.__dict__
+ __dict__['_mock_parent'] = parent
+ __dict__['_mock_name'] = name
+ __dict__['_mock_new_name'] = _new_name
+ __dict__['_mock_new_parent'] = _new_parent
+ __dict__['_mock_sealed'] = False
+
+ if spec_set is not None:
+ spec = spec_set
+ spec_set = True
+ if _eat_self is None:
+ _eat_self = parent is not None
+
+ self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self)
+
+ __dict__['_mock_children'] = {}
+ __dict__['_mock_wraps'] = wraps
+ __dict__['_mock_delegate'] = None
+
+ __dict__['_mock_called'] = False
+ __dict__['_mock_call_args'] = None
+ __dict__['_mock_call_count'] = 0
+ __dict__['_mock_call_args_list'] = _CallList()
+ __dict__['_mock_mock_calls'] = _CallList()
+
+ __dict__['method_calls'] = _CallList()
+ __dict__['_mock_unsafe'] = unsafe
+
+ if kwargs:
+ self.configure_mock(**kwargs)
+
+ _safe_super(NonCallableMock, self).__init__(
+ spec, wraps, name, spec_set, parent,
+ _spec_state
+ )
+
+
+ def attach_mock(self, mock, attribute):
+ """
+ Attach a mock as an attribute of this one, replacing its name and
+ parent. Calls to the attached mock will be recorded in the
+ `method_calls` and `mock_calls` attributes of this one."""
+ inner_mock = _extract_mock(mock)
+
+ inner_mock._mock_parent = None
+ inner_mock._mock_new_parent = None
+ inner_mock._mock_name = ''
+ inner_mock._mock_new_name = None
+
+ setattr(self, attribute, mock)
+
+
+ def mock_add_spec(self, spec, spec_set=False):
+ """Add a spec to a mock. `spec` can either be an object or a
+ list of strings. Only attributes on the `spec` can be fetched as
+ attributes from the mock.
+
+ If `spec_set` is True then only attributes on the spec can be set."""
+ self._mock_add_spec(spec, spec_set)
+
+
+ def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False,
+ _eat_self=False):
+ _spec_class = None
+ _spec_signature = None
+ _spec_asyncs = []
+
+ for attr in dir(spec):
+ if iscoroutinefunction(getattr(spec, attr, None)):
+ _spec_asyncs.append(attr)
+
+ if spec is not None and not _is_list(spec):
+ if isinstance(spec, type):
+ _spec_class = spec
+ else:
+ _spec_class = type(spec)
+ res = _get_signature_object(spec,
+ _spec_as_instance, _eat_self)
+ _spec_signature = res and res[1]
+
+ spec = dir(spec)
+
+ __dict__ = self.__dict__
+ __dict__['_spec_class'] = _spec_class
+ __dict__['_spec_set'] = spec_set
+ __dict__['_spec_signature'] = _spec_signature
+ __dict__['_mock_methods'] = spec
+ __dict__['_spec_asyncs'] = _spec_asyncs
+
+ def __get_return_value(self):
+ ret = self._mock_return_value
+ if self._mock_delegate is not None:
+ ret = self._mock_delegate.return_value
+
+ if ret is DEFAULT:
+ ret = self._get_child_mock(
+ _new_parent=self, _new_name='()'
+ )
+ self.return_value = ret
+ return ret
+
+
+ def __set_return_value(self, value):
+ if self._mock_delegate is not None:
+ self._mock_delegate.return_value = value
+ else:
+ self._mock_return_value = value
+ _check_and_set_parent(self, value, None, '()')
+
+ __return_value_doc = "The value to be returned when the mock is called."
+ return_value = property(__get_return_value, __set_return_value,
+ __return_value_doc)
+
+
+ @property
+ def __class__(self):
+ if self._spec_class is None:
+ return type(self)
+ return self._spec_class
+
+ called = _delegating_property('called')
+ call_count = _delegating_property('call_count')
+ call_args = _delegating_property('call_args')
+ call_args_list = _delegating_property('call_args_list')
+ mock_calls = _delegating_property('mock_calls')
+
+
+ def __get_side_effect(self):
+ delegated = self._mock_delegate
+ if delegated is None:
+ return self._mock_side_effect
+ sf = delegated.side_effect
+ if (sf is not None and not callable(sf)
+ and not isinstance(sf, _MockIter) and not _is_exception(sf)):
+ sf = _MockIter(sf)
+ delegated.side_effect = sf
+ return sf
+
+ def __set_side_effect(self, value):
+ value = _try_iter(value)
+ delegated = self._mock_delegate
+ if delegated is None:
+ self._mock_side_effect = value
+ else:
+ delegated.side_effect = value
+
+ side_effect = property(__get_side_effect, __set_side_effect)
+
+
+ def reset_mock(self, visited=None,*, return_value=False, side_effect=False):
+ "Restore the mock object to its initial state."
+ if visited is None:
+ visited = []
+ if id(self) in visited:
+ return
+ visited.append(id(self))
+
+ self.called = False
+ self.call_args = None
+ self.call_count = 0
+ self.mock_calls = _CallList()
+ self.call_args_list = _CallList()
+ self.method_calls = _CallList()
+
+ if return_value:
+ self._mock_return_value = DEFAULT
+ if side_effect:
+ self._mock_side_effect = None
+
+ for child in self._mock_children.values():
+ if isinstance(child, _SpecState) or child is _deleted:
+ continue
+ child.reset_mock(visited, return_value=return_value, side_effect=side_effect)
+
+ ret = self._mock_return_value
+ if _is_instance_mock(ret) and ret is not self:
+ ret.reset_mock(visited)
+
+
+ def configure_mock(self, /, **kwargs):
+ """Set attributes on the mock through keyword arguments.
+
+ Attributes plus return values and side effects can be set on child
+ mocks using standard dot notation and unpacking a dictionary in the
+ method call:
+
+ >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
+ >>> mock.configure_mock(**attrs)"""
+ for arg, val in sorted(kwargs.items(),
+ # we sort on the number of dots so that
+ # attributes are set before we set attributes on
+ # attributes
+ key=lambda entry: entry[0].count('.')):
+ args = arg.split('.')
+ final = args.pop()
+ obj = self
+ for entry in args:
+ obj = getattr(obj, entry)
+ setattr(obj, final, val)
+
+
+ def __getattr__(self, name):
+ if name in {'_mock_methods', '_mock_unsafe'}:
+ raise AttributeError(name)
+ elif self._mock_methods is not None:
+ if name not in self._mock_methods or name in _all_magics:
+ raise AttributeError("Mock object has no attribute %r" % name)
+ elif _is_magic(name):
+ raise AttributeError(name)
+ if not self._mock_unsafe:
+ if name.startswith(('assert', 'assret')):
+ raise AttributeError("Attributes cannot start with 'assert' "
+ "or 'assret'")
+
+ result = self._mock_children.get(name)
+ if result is _deleted:
+ raise AttributeError(name)
+ elif result is None:
+ wraps = None
+ if self._mock_wraps is not None:
+ # XXXX should we get the attribute without triggering code
+ # execution?
+ wraps = getattr(self._mock_wraps, name)
+
+ result = self._get_child_mock(
+ parent=self, name=name, wraps=wraps, _new_name=name,
+ _new_parent=self
+ )
+ self._mock_children[name] = result
+
+ elif isinstance(result, _SpecState):
+ result = create_autospec(
+ result.spec, result.spec_set, result.instance,
+ result.parent, result.name
+ )
+ self._mock_children[name] = result
+
+ return result
+
+
+ def _extract_mock_name(self):
+ _name_list = [self._mock_new_name]
+ _parent = self._mock_new_parent
+ last = self
+
+ dot = '.'
+ if _name_list == ['()']:
+ dot = ''
+
+ while _parent is not None:
+ last = _parent
+
+ _name_list.append(_parent._mock_new_name + dot)
+ dot = '.'
+ if _parent._mock_new_name == '()':
+ dot = ''
+
+ _parent = _parent._mock_new_parent
+
+ _name_list = list(reversed(_name_list))
+ _first = last._mock_name or 'mock'
+ if len(_name_list) > 1:
+ if _name_list[1] not in ('()', '().'):
+ _first += '.'
+ _name_list[0] = _first
+ return ''.join(_name_list)
+
+ def __repr__(self):
+ name = self._extract_mock_name()
+
+ name_string = ''
+ if name not in ('mock', 'mock.'):
+ name_string = ' name=%r' % name
+
+ spec_string = ''
+ if self._spec_class is not None:
+ spec_string = ' spec=%r'
+ if self._spec_set:
+ spec_string = ' spec_set=%r'
+ spec_string = spec_string % self._spec_class.__name__
+ return "<%s%s%s id='%s'>" % (
+ type(self).__name__,
+ name_string,
+ spec_string,
+ id(self)
+ )
+
+
+ def __dir__(self):
+ """Filter the output of `dir(mock)` to only useful members."""
+ if not FILTER_DIR:
+ return object.__dir__(self)
+
+ extras = self._mock_methods or []
+ from_type = dir(type(self))
+ from_dict = list(self.__dict__)
+ from_child_mocks = [
+ m_name for m_name, m_value in self._mock_children.items()
+ if m_value is not _deleted]
+
+ from_type = [e for e in from_type if not e.startswith('_')]
+ from_dict = [e for e in from_dict if not e.startswith('_') or
+ _is_magic(e)]
+ return sorted(set(extras + from_type + from_dict + from_child_mocks))
+
+
+ def __setattr__(self, name, value):
+ if name in _allowed_names:
+ # property setters go through here
+ return object.__setattr__(self, name, value)
+ elif (self._spec_set and self._mock_methods is not None and
+ name not in self._mock_methods and
+ name not in self.__dict__):
+ raise AttributeError("Mock object has no attribute '%s'" % name)
+ elif name in _unsupported_magics:
+ msg = 'Attempting to set unsupported magic method %r.' % name
+ raise AttributeError(msg)
+ elif name in _all_magics:
+ if self._mock_methods is not None and name not in self._mock_methods:
+ raise AttributeError("Mock object has no attribute '%s'" % name)
+
+ if not _is_instance_mock(value):
+ setattr(type(self), name, _get_method(name, value))
+ original = value
+ value = lambda *args, **kw: original(self, *args, **kw)
+ else:
+ # only set _new_name and not name so that mock_calls is tracked
+ # but not method calls
+ _check_and_set_parent(self, value, None, name)
+ setattr(type(self), name, value)
+ self._mock_children[name] = value
+ elif name == '__class__':
+ self._spec_class = value
+ return
+ else:
+ if _check_and_set_parent(self, value, name, name):
+ self._mock_children[name] = value
+
+ if self._mock_sealed and not hasattr(self, name):
+ mock_name = f'{self._extract_mock_name()}.{name}'
+ raise AttributeError(f'Cannot set {mock_name}')
+
+ return object.__setattr__(self, name, value)
+
+
+ def __delattr__(self, name):
+ if name in _all_magics and name in type(self).__dict__:
+ delattr(type(self), name)
+ if name not in self.__dict__:
+ # for magic methods that are still MagicProxy objects and
+ # not set on the instance itself
+ return
+
+ obj = self._mock_children.get(name, _missing)
+ if name in self.__dict__:
+ _safe_super(NonCallableMock, self).__delattr__(name)
+ elif obj is _deleted:
+ raise AttributeError(name)
+ if obj is not _missing:
+ del self._mock_children[name]
+ self._mock_children[name] = _deleted
+
+
+ def _format_mock_call_signature(self, args, kwargs):
+ name = self._mock_name or 'mock'
+ return _format_call_signature(name, args, kwargs)
+
+
+ def _format_mock_failure_message(self, args, kwargs, action='call'):
+ message = 'expected %s not found.\nExpected: %s\nActual: %s'
+ expected_string = self._format_mock_call_signature(args, kwargs)
+ call_args = self.call_args
+ actual_string = self._format_mock_call_signature(*call_args)
+ return message % (action, expected_string, actual_string)
+
+
+ def _get_call_signature_from_name(self, name):
+ """
+ * If call objects are asserted against a method/function like obj.meth1
+ then there could be no name for the call object to lookup. Hence just
+ return the spec_signature of the method/function being asserted against.
+ * If the name is not empty then remove () and split by '.' to get
+ list of names to iterate through the children until a potential
+ match is found. A child mock is created only during attribute access
+ so if we get a _SpecState then no attributes of the spec were accessed
+ and the lookup can safely stop.
+ """
+ if not name:
+ return self._spec_signature
+
+ sig = None
+ names = name.replace('()', '').split('.')
+ children = self._mock_children
+
+ for name in names:
+ child = children.get(name)
+ if child is None or isinstance(child, _SpecState):
+ break
+ else:
+ # If an autospecced object is attached using attach_mock the
+ # child would be a function with mock object as attribute from
+ # which signature has to be derived.
+ child = _extract_mock(child)
+ children = child._mock_children
+ sig = child._spec_signature
+
+ return sig
+
+
+ def _call_matcher(self, _call):
+ """
+ Given a call (or simply an (args, kwargs) tuple), return a
+ comparison key suitable for matching with other calls.
+ This is a best effort method which relies on the spec's signature,
+ if available, or falls back on the arguments themselves.
+ """
+
+ if isinstance(_call, tuple) and len(_call) > 2:
+ sig = self._get_call_signature_from_name(_call[0])
+ else:
+ sig = self._spec_signature
+
+ if sig is not None:
+ if len(_call) == 2:
+ name = ''
+ args, kwargs = _call
+ else:
+ name, args, kwargs = _call
+ try:
+ bound_call = sig.bind(*args, **kwargs)
+ return call(name, bound_call.args, bound_call.kwargs)
+ except TypeError as e:
+ return e.with_traceback(None)
+ else:
+ return _call
+
+ def assert_not_called(self):
+ """assert that the mock was never called.
+ """
+ if self.call_count != 0:
+ msg = ("Expected '%s' to not have been called. Called %s times.%s"
+ % (self._mock_name or 'mock',
+ self.call_count,
+ self._calls_repr()))
+ raise AssertionError(msg)
+
+ def assert_called(self):
+ """assert that the mock was called at least once
+ """
+ if self.call_count == 0:
+ msg = ("Expected '%s' to have been called." %
+ (self._mock_name or 'mock'))
+ raise AssertionError(msg)
+
+ def assert_called_once(self):
+ """assert that the mock was called only once.
+ """
+ if not self.call_count == 1:
+ msg = ("Expected '%s' to have been called once. Called %s times.%s"
+ % (self._mock_name or 'mock',
+ self.call_count,
+ self._calls_repr()))
+ raise AssertionError(msg)
+
+ def assert_called_with(self, /, *args, **kwargs):
+ """assert that the last call was made with the specified arguments.
+
+ Raises an AssertionError if the args and keyword args passed in are
+ different to the last call to the mock."""
+ if self.call_args is None:
+ expected = self._format_mock_call_signature(args, kwargs)
+ actual = 'not called.'
+ error_message = ('expected call not found.\nExpected: %s\nActual: %s'
+ % (expected, actual))
+ raise AssertionError(error_message)
+
+ def _error_message():
+ msg = self._format_mock_failure_message(args, kwargs)
+ return msg
+ expected = self._call_matcher(_Call((args, kwargs), two=True))
+ actual = self._call_matcher(self.call_args)
+ if actual != expected:
+ cause = expected if isinstance(expected, Exception) else None
+ raise AssertionError(_error_message()) from cause
+
+
+ def assert_called_once_with(self, /, *args, **kwargs):
+ """assert that the mock was called exactly once and that that call was
+ with the specified arguments."""
+ if not self.call_count == 1:
+ msg = ("Expected '%s' to be called once. Called %s times.%s"
+ % (self._mock_name or 'mock',
+ self.call_count,
+ self._calls_repr()))
+ raise AssertionError(msg)
+ return self.assert_called_with(*args, **kwargs)
+
+
+ def assert_has_calls(self, calls, any_order=False):
+ """assert the mock has been called with the specified calls.
+ The `mock_calls` list is checked for the calls.
+
+ If `any_order` is False (the default) then the calls must be
+ sequential. There can be extra calls before or after the
+ specified calls.
+
+ If `any_order` is True then the calls can be in any order, but
+ they must all appear in `mock_calls`."""
+ expected = [self._call_matcher(c) for c in calls]
+ cause = next((e for e in expected if isinstance(e, Exception)), None)
+ all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls)
+ if not any_order:
+ if expected not in all_calls:
+ if cause is None:
+ problem = 'Calls not found.'
+ else:
+ problem = ('Error processing expected calls.\n'
+ 'Errors: {}').format(
+ [e if isinstance(e, Exception) else None
+ for e in expected])
+ raise AssertionError(
+ f'{problem}\n'
+ f'Expected: {_CallList(calls)}'
+ f'{self._calls_repr(prefix="Actual").rstrip(".")}'
+ ) from cause
+ return
+
+ all_calls = list(all_calls)
+
+ not_found = []
+ for kall in expected:
+ try:
+ all_calls.remove(kall)
+ except ValueError:
+ not_found.append(kall)
+ if not_found:
+ raise AssertionError(
+ '%r does not contain all of %r in its call list, '
+ 'found %r instead' % (self._mock_name or 'mock',
+ tuple(not_found), all_calls)
+ ) from cause
+
+
+ def assert_any_call(self, /, *args, **kwargs):
+ """assert the mock has been called with the specified arguments.
+
+ The assert passes if the mock has *ever* been called, unlike
+ `assert_called_with` and `assert_called_once_with` that only pass if
+ the call is the most recent one."""
+ expected = self._call_matcher(_Call((args, kwargs), two=True))
+ cause = expected if isinstance(expected, Exception) else None
+ actual = [self._call_matcher(c) for c in self.call_args_list]
+ if cause or expected not in _AnyComparer(actual):
+ expected_string = self._format_mock_call_signature(args, kwargs)
+ raise AssertionError(
+ '%s call not found' % expected_string
+ ) from cause
+
+
+ def _get_child_mock(self, /, **kw):
+ """Create the child mocks for attributes and return value.
+ By default child mocks will be the same type as the parent.
+ Subclasses of Mock may want to override this to customize the way
+ child mocks are made.
+
+ For non-callable mocks the callable variant will be used (rather than
+ any custom subclass)."""
+ _new_name = kw.get("_new_name")
+ if _new_name in self.__dict__['_spec_asyncs']:
+ return AsyncMock(**kw)
+
+ _type = type(self)
+ if issubclass(_type, MagicMock) and _new_name in _async_method_magics:
+ # Any asynchronous magic becomes an AsyncMock
+ klass = AsyncMock
+ elif issubclass(_type, AsyncMockMixin):
+ if (_new_name in _all_sync_magics or
+ self._mock_methods and _new_name in self._mock_methods):
+ # Any synchronous method on AsyncMock becomes a MagicMock
+ klass = MagicMock
+ else:
+ klass = AsyncMock
+ elif not issubclass(_type, CallableMixin):
+ if issubclass(_type, NonCallableMagicMock):
+ klass = MagicMock
+ elif issubclass(_type, NonCallableMock):
+ klass = Mock
+ else:
+ klass = _type.__mro__[1]
+
+ if self._mock_sealed:
+ attribute = "." + kw["name"] if "name" in kw else "()"
+ mock_name = self._extract_mock_name() + attribute
+ raise AttributeError(mock_name)
+
+ return klass(**kw)
+
+
+ def _calls_repr(self, prefix="Calls"):
+ """Renders self.mock_calls as a string.
+
+ Example: "\nCalls: [call(1), call(2)]."
+
+ If self.mock_calls is empty, an empty string is returned. The
+ output will be truncated if very long.
+ """
+ if not self.mock_calls:
+ return ""
+ return f"\n{prefix}: {safe_repr(self.mock_calls)}."
+
+
+_MOCK_SIG = inspect.signature(NonCallableMock.__init__)
+
+
+class _AnyComparer(list):
+ """A list which checks if it contains a call which may have an
+ argument of ANY, flipping the components of item and self from
+ their traditional locations so that ANY is guaranteed to be on
+ the left."""
+ def __contains__(self, item):
+ for _call in self:
+ assert len(item) == len(_call)
+ if all([
+ expected == actual
+ for expected, actual in zip(item, _call)
+ ]):
+ return True
+ return False
+
+
+def _try_iter(obj):
+ if obj is None:
+ return obj
+ if _is_exception(obj):
+ return obj
+ if _callable(obj):
+ return obj
+ try:
+ return iter(obj)
+ except TypeError:
+ # XXXX backwards compatibility
+ # but this will blow up on first call - so maybe we should fail early?
+ return obj
+
+
+class CallableMixin(Base):
+
+ def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+ wraps=None, name=None, spec_set=None, parent=None,
+ _spec_state=None, _new_name='', _new_parent=None, **kwargs):
+ self.__dict__['_mock_return_value'] = return_value
+ _safe_super(CallableMixin, self).__init__(
+ spec, wraps, name, spec_set, parent,
+ _spec_state, _new_name, _new_parent, **kwargs
+ )
+
+ self.side_effect = side_effect
+
+
+ def _mock_check_sig(self, /, *args, **kwargs):
+ # stub method that can be replaced with one with a specific signature
+ pass
+
+
+ def __call__(self, /, *args, **kwargs):
+ # can't use self in case a function / method we are mocking uses self
+ # in the signature
+ self._mock_check_sig(*args, **kwargs)
+ self._increment_mock_call(*args, **kwargs)
+ return self._mock_call(*args, **kwargs)
+
+
+ def _mock_call(self, /, *args, **kwargs):
+ return self._execute_mock_call(*args, **kwargs)
+
+ def _increment_mock_call(self, /, *args, **kwargs):
+ self.called = True
+ self.call_count += 1
+
+ # handle call_args
+ # needs to be set here so assertions on call arguments pass before
+ # execution in the case of awaited calls
+ _call = _Call((args, kwargs), two=True)
+ self.call_args = _call
+ self.call_args_list.append(_call)
+
+ # initial stuff for method_calls:
+ do_method_calls = self._mock_parent is not None
+ method_call_name = self._mock_name
+
+ # initial stuff for mock_calls:
+ mock_call_name = self._mock_new_name
+ is_a_call = mock_call_name == '()'
+ self.mock_calls.append(_Call(('', args, kwargs)))
+
+ # follow up the chain of mocks:
+ _new_parent = self._mock_new_parent
+ while _new_parent is not None:
+
+ # handle method_calls:
+ if do_method_calls:
+ _new_parent.method_calls.append(_Call((method_call_name, args, kwargs)))
+ do_method_calls = _new_parent._mock_parent is not None
+ if do_method_calls:
+ method_call_name = _new_parent._mock_name + '.' + method_call_name
+
+ # handle mock_calls:
+ this_mock_call = _Call((mock_call_name, args, kwargs))
+ _new_parent.mock_calls.append(this_mock_call)
+
+ if _new_parent._mock_new_name:
+ if is_a_call:
+ dot = ''
+ else:
+ dot = '.'
+ is_a_call = _new_parent._mock_new_name == '()'
+ mock_call_name = _new_parent._mock_new_name + dot + mock_call_name
+
+ # follow the parental chain:
+ _new_parent = _new_parent._mock_new_parent
+
+ def _execute_mock_call(self, /, *args, **kwargs):
+ # separate from _increment_mock_call so that awaited functions are
+ # executed separately from their call, also AsyncMock overrides this method
+
+ effect = self.side_effect
+ if effect is not None:
+ if _is_exception(effect):
+ raise effect
+ elif not _callable(effect):
+ result = next(effect)
+ if _is_exception(result):
+ raise result
+ else:
+ result = effect(*args, **kwargs)
+
+ if result is not DEFAULT:
+ return result
+
+ if self._mock_return_value is not DEFAULT:
+ return self.return_value
+
+ if self._mock_wraps is not None:
+ return self._mock_wraps(*args, **kwargs)
+
+ return self.return_value
+
+
+
+class Mock(CallableMixin, NonCallableMock):
+ """
+ Create a new `Mock` object. `Mock` takes several optional arguments
+ that specify the behaviour of the Mock object:
+
+ * `spec`: This can be either a list of strings or an existing object (a
+ class or instance) that acts as the specification for the mock object. If
+ you pass in an object then a list of strings is formed by calling dir on
+ the object (excluding unsupported magic attributes and methods). Accessing
+ any attribute not in this list will raise an `AttributeError`.
+
+ If `spec` is an object (rather than a list of strings) then
+ `mock.__class__` returns the class of the spec object. This allows mocks
+ to pass `isinstance` tests.
+
+ * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
+ or get an attribute on the mock that isn't on the object passed as
+ `spec_set` will raise an `AttributeError`.
+
+ * `side_effect`: A function to be called whenever the Mock is called. See
+ the `side_effect` attribute. Useful for raising exceptions or
+ dynamically changing return values. The function is called with the same
+ arguments as the mock, and unless it returns `DEFAULT`, the return
+ value of this function is used as the return value.
+
+ If `side_effect` is an iterable then each call to the mock will return
+ the next value from the iterable. If any of the members of the iterable
+ are exceptions they will be raised instead of returned.
+
+ * `return_value`: The value returned when the mock is called. By default
+ this is a new Mock (created on first access). See the
+ `return_value` attribute.
+
+ * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
+ calling the Mock will pass the call through to the wrapped object
+ (returning the real result). Attribute access on the mock will return a
+ Mock object that wraps the corresponding attribute of the wrapped object
+ (so attempting to access an attribute that doesn't exist will raise an
+ `AttributeError`).
+
+ If the mock has an explicit `return_value` set then calls are not passed
+ to the wrapped object and the `return_value` is returned instead.
+
+ * `name`: If the mock has a name then it will be used in the repr of the
+ mock. This can be useful for debugging. The name is propagated to child
+ mocks.
+
+ Mocks can also be called with arbitrary keyword arguments. These will be
+ used to set attributes on the mock after it is created.
+ """
+
+
+def _dot_lookup(thing, comp, import_path):
+ try:
+ return getattr(thing, comp)
+ except AttributeError:
+ __import__(import_path)
+ return getattr(thing, comp)
+
+
+def _importer(target):
+ components = target.split('.')
+ import_path = components.pop(0)
+ thing = __import__(import_path)
+
+ for comp in components:
+ import_path += ".%s" % comp
+ thing = _dot_lookup(thing, comp, import_path)
+ return thing
+
+
+class _patch(object):
+
+ attribute_name = None
+ _active_patches = []
+
+ def __init__(
+ self, getter, attribute, new, spec, create,
+ spec_set, autospec, new_callable, kwargs
+ ):
+ if new_callable is not None:
+ if new is not DEFAULT:
+ raise ValueError(
+ "Cannot use 'new' and 'new_callable' together"
+ )
+ if autospec is not None:
+ raise ValueError(
+ "Cannot use 'autospec' and 'new_callable' together"
+ )
+
+ self.getter = getter
+ self.attribute = attribute
+ self.new = new
+ self.new_callable = new_callable
+ self.spec = spec
+ self.create = create
+ self.has_local = False
+ self.spec_set = spec_set
+ self.autospec = autospec
+ self.kwargs = kwargs
+ self.additional_patchers = []
+
+
+ def copy(self):
+ patcher = _patch(
+ self.getter, self.attribute, self.new, self.spec,
+ self.create, self.spec_set,
+ self.autospec, self.new_callable, self.kwargs
+ )
+ patcher.attribute_name = self.attribute_name
+ patcher.additional_patchers = [
+ p.copy() for p in self.additional_patchers
+ ]
+ return patcher
+
+
+ def __call__(self, func):
+ if isinstance(func, type):
+ return self.decorate_class(func)
+ if inspect.iscoroutinefunction(func):
+ return self.decorate_async_callable(func)
+ return self.decorate_callable(func)
+
+
+ def decorate_class(self, klass):
+ for attr in dir(klass):
+ if not attr.startswith(patch.TEST_PREFIX):
+ continue
+
+ attr_value = getattr(klass, attr)
+ if not hasattr(attr_value, "__call__"):
+ continue
+
+ patcher = self.copy()
+ setattr(klass, attr, patcher(attr_value))
+ return klass
+
+
+ @contextlib.contextmanager
+ def decoration_helper(self, patched, args, keywargs):
+ extra_args = []
+ with contextlib.ExitStack() as exit_stack:
+ for patching in patched.patchings:
+ arg = exit_stack.enter_context(patching)
+ if patching.attribute_name is not None:
+ keywargs.update(arg)
+ elif patching.new is DEFAULT:
+ extra_args.append(arg)
+
+ args += tuple(extra_args)
+ yield (args, keywargs)
+
+
+ def decorate_callable(self, func):
+ # NB. Keep the method in sync with decorate_async_callable()
+ if hasattr(func, 'patchings'):
+ func.patchings.append(self)
+ return func
+
+ @wraps(func)
+ def patched(*args, **keywargs):
+ with self.decoration_helper(patched,
+ args,
+ keywargs) as (newargs, newkeywargs):
+ return func(*newargs, **newkeywargs)
+
+ patched.patchings = [self]
+ return patched
+
+
+ def decorate_async_callable(self, func):
+ # NB. Keep the method in sync with decorate_callable()
+ if hasattr(func, 'patchings'):
+ func.patchings.append(self)
+ return func
+
+ @wraps(func)
+ async def patched(*args, **keywargs):
+ with self.decoration_helper(patched,
+ args,
+ keywargs) as (newargs, newkeywargs):
+ return await func(*newargs, **newkeywargs)
+
+ patched.patchings = [self]
+ return patched
+
+
+ def get_original(self):
+ target = self.getter()
+ name = self.attribute
+
+ original = DEFAULT
+ local = False
+
+ try:
+ original = target.__dict__[name]
+ except (AttributeError, KeyError):
+ original = getattr(target, name, DEFAULT)
+ else:
+ local = True
+
+ if name in _builtins and isinstance(target, ModuleType):
+ self.create = True
+
+ if not self.create and original is DEFAULT:
+ raise AttributeError(
+ "%s does not have the attribute %r" % (target, name)
+ )
+ return original, local
+
+
+ def __enter__(self):
+ """Perform the patch."""
+ new, spec, spec_set = self.new, self.spec, self.spec_set
+ autospec, kwargs = self.autospec, self.kwargs
+ new_callable = self.new_callable
+ self.target = self.getter()
+
+ # normalise False to None
+ if spec is False:
+ spec = None
+ if spec_set is False:
+ spec_set = None
+ if autospec is False:
+ autospec = None
+
+ if spec is not None and autospec is not None:
+ raise TypeError("Can't specify spec and autospec")
+ if ((spec is not None or autospec is not None) and
+ spec_set not in (True, None)):
+ raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
+
+ original, local = self.get_original()
+
+ if new is DEFAULT and autospec is None:
+ inherit = False
+ if spec is True:
+ # set spec to the object we are replacing
+ spec = original
+ if spec_set is True:
+ spec_set = original
+ spec = None
+ elif spec is not None:
+ if spec_set is True:
+ spec_set = spec
+ spec = None
+ elif spec_set is True:
+ spec_set = original
+
+ if spec is not None or spec_set is not None:
+ if original is DEFAULT:
+ raise TypeError("Can't use 'spec' with create=True")
+ if isinstance(original, type):
+ # If we're patching out a class and there is a spec
+ inherit = True
+ if spec is None and _is_async_obj(original):
+ Klass = AsyncMock
+ else:
+ Klass = MagicMock
+ _kwargs = {}
+ if new_callable is not None:
+ Klass = new_callable
+ elif spec is not None or spec_set is not None:
+ this_spec = spec
+ if spec_set is not None:
+ this_spec = spec_set
+ if _is_list(this_spec):
+ not_callable = '__call__' not in this_spec
+ else:
+ not_callable = not callable(this_spec)
+ if _is_async_obj(this_spec):
+ Klass = AsyncMock
+ elif not_callable:
+ Klass = NonCallableMagicMock
+
+ if spec is not None:
+ _kwargs['spec'] = spec
+ if spec_set is not None:
+ _kwargs['spec_set'] = spec_set
+
+ # add a name to mocks
+ if (isinstance(Klass, type) and
+ issubclass(Klass, NonCallableMock) and self.attribute):
+ _kwargs['name'] = self.attribute
+
+ _kwargs.update(kwargs)
+ new = Klass(**_kwargs)
+
+ if inherit and _is_instance_mock(new):
+ # we can only tell if the instance should be callable if the
+ # spec is not a list
+ this_spec = spec
+ if spec_set is not None:
+ this_spec = spec_set
+ if (not _is_list(this_spec) and not
+ _instance_callable(this_spec)):
+ Klass = NonCallableMagicMock
+
+ _kwargs.pop('name')
+ new.return_value = Klass(_new_parent=new, _new_name='()',
+ **_kwargs)
+ elif autospec is not None:
+ # spec is ignored, new *must* be default, spec_set is treated
+ # as a boolean. Should we check spec is not None and that spec_set
+ # is a bool?
+ if new is not DEFAULT:
+ raise TypeError(
+ "autospec creates the mock for you. Can't specify "
+ "autospec and new."
+ )
+ if original is DEFAULT:
+ raise TypeError("Can't use 'autospec' with create=True")
+ spec_set = bool(spec_set)
+ if autospec is True:
+ autospec = original
+
+ new = create_autospec(autospec, spec_set=spec_set,
+ _name=self.attribute, **kwargs)
+ elif kwargs:
+ # can't set keyword args when we aren't creating the mock
+ # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
+ raise TypeError("Can't pass kwargs to a mock we aren't creating")
+
+ new_attr = new
+
+ self.temp_original = original
+ self.is_local = local
+ self._exit_stack = contextlib.ExitStack()
+ try:
+ setattr(self.target, self.attribute, new_attr)
+ if self.attribute_name is not None:
+ extra_args = {}
+ if self.new is DEFAULT:
+ extra_args[self.attribute_name] = new
+ for patching in self.additional_patchers:
+ arg = self._exit_stack.enter_context(patching)
+ if patching.new is DEFAULT:
+ extra_args.update(arg)
+ return extra_args
+
+ return new
+ except:
+ if not self.__exit__(*sys.exc_info()):
+ raise
+
+ def __exit__(self, *exc_info):
+ """Undo the patch."""
+ if self.is_local and self.temp_original is not DEFAULT:
+ setattr(self.target, self.attribute, self.temp_original)
+ else:
+ delattr(self.target, self.attribute)
+ if not self.create and (not hasattr(self.target, self.attribute) or
+ self.attribute in ('__doc__', '__module__',
+ '__defaults__', '__annotations__',
+ '__kwdefaults__')):
+ # needed for proxy objects like django settings
+ setattr(self.target, self.attribute, self.temp_original)
+
+ del self.temp_original
+ del self.is_local
+ del self.target
+ exit_stack = self._exit_stack
+ del self._exit_stack
+ return exit_stack.__exit__(*exc_info)
+
+
+ def start(self):
+ """Activate a patch, returning any created mock."""
+ result = self.__enter__()
+ self._active_patches.append(self)
+ return result
+
+
+ def stop(self):
+ """Stop an active patch."""
+ try:
+ self._active_patches.remove(self)
+ except ValueError:
+ # If the patch hasn't been started this will fail
+ return None
+
+ return self.__exit__(None, None, None)
+
+
+
+def _get_target(target):
+ try:
+ target, attribute = target.rsplit('.', 1)
+ except (TypeError, ValueError):
+ raise TypeError("Need a valid target to patch. You supplied: %r" %
+ (target,))
+ getter = lambda: _importer(target)
+ return getter, attribute
+
+
+def _patch_object(
+ target, attribute, new=DEFAULT, spec=None,
+ create=False, spec_set=None, autospec=None,
+ new_callable=None, **kwargs
+ ):
+ """
+ patch the named member (`attribute`) on an object (`target`) with a mock
+ object.
+
+ `patch.object` can be used as a decorator, class decorator or a context
+ manager. Arguments `new`, `spec`, `create`, `spec_set`,
+ `autospec` and `new_callable` have the same meaning as for `patch`. Like
+ `patch`, `patch.object` takes arbitrary keyword arguments for configuring
+ the mock object it creates.
+
+ When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
+ for choosing which methods to wrap.
+ """
+ if type(target) is str:
+ raise TypeError(
+ f"{target!r} must be the actual object to be patched, not a str"
+ )
+ getter = lambda: target
+ return _patch(
+ getter, attribute, new, spec, create,
+ spec_set, autospec, new_callable, kwargs
+ )
+
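+# A minimal usage sketch (illustrative only, not part of the upstream module);
+# `SomeClass` and `method` are hypothetical names standing in for the object
+# and attribute being patched:
+#
+#     with patch.object(SomeClass, 'method', return_value=3) as mock_method:
+#         assert SomeClass().method(1, 2) == 3
+#     mock_method.assert_called_once_with(1, 2)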
+
+def _patch_multiple(target, spec=None, create=False, spec_set=None,
+ autospec=None, new_callable=None, **kwargs):
+ """Perform multiple patches in a single call. It takes the object to be
+ patched (either as an object or a string to fetch the object by importing)
+ and keyword arguments for the patches::
+
+ with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
+ ...
+
+ Use `DEFAULT` as the value if you want `patch.multiple` to create
+ mocks for you. In this case the created mocks are passed into a decorated
+ function by keyword, and a dictionary is returned when `patch.multiple` is
+ used as a context manager.
+
+ `patch.multiple` can be used as a decorator, class decorator or a context
+ manager. The arguments `spec`, `spec_set`, `create`,
+ `autospec` and `new_callable` have the same meaning as for `patch`. These
+ arguments will be applied to *all* patches done by `patch.multiple`.
+
+ When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
+ for choosing which methods to wrap.
+ """
+ if type(target) is str:
+ getter = lambda: _importer(target)
+ else:
+ getter = lambda: target
+
+ if not kwargs:
+ raise ValueError(
+ 'Must supply at least one keyword argument with patch.multiple'
+ )
+ # need to wrap in a list for python 3, where items is a view
+ items = list(kwargs.items())
+ attribute, new = items[0]
+ patcher = _patch(
+ getter, attribute, new, spec, create, spec_set,
+ autospec, new_callable, {}
+ )
+ patcher.attribute_name = attribute
+ for attribute, new in items[1:]:
+ this_patcher = _patch(
+ getter, attribute, new, spec, create, spec_set,
+ autospec, new_callable, {}
+ )
+ this_patcher.attribute_name = attribute
+ patcher.additional_patchers.append(this_patcher)
+ return patcher
+
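+# A small sketch (illustrative only) of the DEFAULT form described above, where
+# the created mocks are passed into the decorated function by keyword;
+# `SomeClass`, `thing` and `other` are hypothetical names:
+#
+#     @patch.multiple(SomeClass, thing=DEFAULT, other=DEFAULT)
+#     def test_stuff(thing, other):
+#         assert isinstance(thing, MagicMock)
+#         assert isinstance(other, MagicMock)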
+
+def patch(
+ target, new=DEFAULT, spec=None, create=False,
+ spec_set=None, autospec=None, new_callable=None, **kwargs
+ ):
+ """
+ `patch` acts as a function decorator, class decorator or a context
+ manager. Inside the body of the function or with statement, the `target`
+ is patched with a `new` object. When the function/with statement exits
+ the patch is undone.
+
+ If `new` is omitted, then the target is replaced with an
+    `AsyncMock` if the patched object is an async function or a
+ `MagicMock` otherwise. If `patch` is used as a decorator and `new` is
+ omitted, the created mock is passed in as an extra argument to the
+ decorated function. If `patch` is used as a context manager the created
+ mock is returned by the context manager.
+
+ `target` should be a string in the form `'package.module.ClassName'`. The
+ `target` is imported and the specified object replaced with the `new`
+ object, so the `target` must be importable from the environment you are
+ calling `patch` from. The target is imported when the decorated function
+ is executed, not at decoration time.
+
+ The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
+ if patch is creating one for you.
+
+ In addition you can pass `spec=True` or `spec_set=True`, which causes
+ patch to pass in the object being mocked as the spec/spec_set object.
+
+ `new_callable` allows you to specify a different class, or callable object,
+ that will be called to create the `new` object. By default `AsyncMock` is
+ used for async functions and `MagicMock` for the rest.
+
+ A more powerful form of `spec` is `autospec`. If you set `autospec=True`
+ then the mock will be created with a spec from the object being replaced.
+ All attributes of the mock will also have the spec of the corresponding
+ attribute of the object being replaced. Methods and functions being
+ mocked will have their arguments checked and will raise a `TypeError` if
+ they are called with the wrong signature. For mocks replacing a class,
+ their return value (the 'instance') will have the same spec as the class.
+
+ Instead of `autospec=True` you can pass `autospec=some_object` to use an
+ arbitrary object as the spec instead of the one being replaced.
+
+ By default `patch` will fail to replace attributes that don't exist. If
+ you pass in `create=True`, and the attribute doesn't exist, patch will
+ create the attribute for you when the patched function is called, and
+ delete it again afterwards. This is useful for writing tests against
+ attributes that your production code creates at runtime. It is off by
+ default because it can be dangerous. With it switched on you can write
+ passing tests against APIs that don't actually exist!
+
+ Patch can be used as a `TestCase` class decorator. It works by
+ decorating each test method in the class. This reduces the boilerplate
+ code when your test methods share a common patchings set. `patch` finds
+ tests by looking for method names that start with `patch.TEST_PREFIX`.
+ By default this is `test`, which matches the way `unittest` finds tests.
+ You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
+
+ Patch can be used as a context manager, with the with statement. Here the
+ patching applies to the indented block after the with statement. If you
+ use "as" then the patched object will be bound to the name after the
+ "as"; very useful if `patch` is creating a mock object for you.
+
+ `patch` takes arbitrary keyword arguments. These will be passed to
+ `AsyncMock` if the patched object is asynchronous, to `MagicMock`
+ otherwise or to `new_callable` if specified.
+
+ `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
+ available for alternate use-cases.
+ """
+ getter, attribute = _get_target(target)
+ return _patch(
+ getter, attribute, new, spec, create,
+ spec_set, autospec, new_callable, kwargs
+ )
+
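+# A minimal sketch (illustrative only) of the decorator form and the explicit
+# start()/stop() form documented above; 'package.module.ClassName' is a
+# placeholder target string:
+#
+#     @patch('package.module.ClassName')
+#     def test_something(MockClass):
+#         assert package.module.ClassName is MockClass
+#
+#     patcher = patch('package.module.ClassName')
+#     mock_class = patcher.start()    # activate the patch explicitly
+#     try:
+#         ...
+#     finally:
+#         patcher.stop()              # always undo the patch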
+
+class _patch_dict(object):
+ """
+ Patch a dictionary, or dictionary like object, and restore the dictionary
+ to its original state after the test.
+
+ `in_dict` can be a dictionary or a mapping like container. If it is a
+ mapping then it must at least support getting, setting and deleting items
+ plus iterating over keys.
+
+ `in_dict` can also be a string specifying the name of the dictionary, which
+ will then be fetched by importing it.
+
+ `values` can be a dictionary of values to set in the dictionary. `values`
+ can also be an iterable of `(key, value)` pairs.
+
+ If `clear` is True then the dictionary will be cleared before the new
+ values are set.
+
+ `patch.dict` can also be called with arbitrary keyword arguments to set
+ values in the dictionary::
+
+ with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
+ ...
+
+ `patch.dict` can be used as a context manager, decorator or class
+ decorator. When used as a class decorator `patch.dict` honours
+ `patch.TEST_PREFIX` for choosing which methods to wrap.
+ """
+
+ def __init__(self, in_dict, values=(), clear=False, **kwargs):
+ self.in_dict = in_dict
+ # support any argument supported by dict(...) constructor
+ self.values = dict(values)
+ self.values.update(kwargs)
+ self.clear = clear
+ self._original = None
+
+
+ def __call__(self, f):
+ if isinstance(f, type):
+ return self.decorate_class(f)
+ @wraps(f)
+ def _inner(*args, **kw):
+ self._patch_dict()
+ try:
+ return f(*args, **kw)
+ finally:
+ self._unpatch_dict()
+
+ return _inner
+
+
+ def decorate_class(self, klass):
+ for attr in dir(klass):
+ attr_value = getattr(klass, attr)
+ if (attr.startswith(patch.TEST_PREFIX) and
+ hasattr(attr_value, "__call__")):
+ decorator = _patch_dict(self.in_dict, self.values, self.clear)
+ decorated = decorator(attr_value)
+ setattr(klass, attr, decorated)
+ return klass
+
+
+ def __enter__(self):
+ """Patch the dict."""
+ self._patch_dict()
+ return self.in_dict
+
+
+ def _patch_dict(self):
+ values = self.values
+ if isinstance(self.in_dict, str):
+ self.in_dict = _importer(self.in_dict)
+ in_dict = self.in_dict
+ clear = self.clear
+
+ try:
+ original = in_dict.copy()
+ except AttributeError:
+ # dict like object with no copy method
+ # must support iteration over keys
+ original = {}
+ for key in in_dict:
+ original[key] = in_dict[key]
+ self._original = original
+
+ if clear:
+ _clear_dict(in_dict)
+
+ try:
+ in_dict.update(values)
+ except AttributeError:
+ # dict like object with no update method
+ for key in values:
+ in_dict[key] = values[key]
+
+
+ def _unpatch_dict(self):
+ in_dict = self.in_dict
+ original = self._original
+
+ _clear_dict(in_dict)
+
+ try:
+ in_dict.update(original)
+ except AttributeError:
+ for key in original:
+ in_dict[key] = original[key]
+
+
+ def __exit__(self, *args):
+ """Unpatch the dict."""
+ if self._original is not None:
+ self._unpatch_dict()
+ return False
+
+
+ def start(self):
+ """Activate a patch, returning any created mock."""
+ result = self.__enter__()
+ _patch._active_patches.append(self)
+ return result
+
+
+ def stop(self):
+ """Stop an active patch."""
+ try:
+ _patch._active_patches.remove(self)
+ except ValueError:
+ # If the patch hasn't been started this will fail
+ return None
+
+ return self.__exit__(None, None, None)
+
+
+def _clear_dict(in_dict):
+ try:
+ in_dict.clear()
+ except AttributeError:
+ keys = list(in_dict)
+ for key in keys:
+ del in_dict[key]
+
+
+def _patch_stopall():
+ """Stop all active patches. LIFO to unroll nested patches."""
+ for patch in reversed(_patch._active_patches):
+ patch.stop()
+
+
+patch.object = _patch_object
+patch.dict = _patch_dict
+patch.multiple = _patch_multiple
+patch.stopall = _patch_stopall
+patch.TEST_PREFIX = 'test'
+
+magic_methods = (
+ "lt le gt ge eq ne "
+ "getitem setitem delitem "
+ "len contains iter "
+ "hash str sizeof "
+ "enter exit "
+ # we added divmod and rdivmod here instead of numerics
+ # because there is no idivmod
+ "divmod rdivmod neg pos abs invert "
+ "complex int float index "
+ "round trunc floor ceil "
+ "bool next "
+ "fspath "
+ "aiter "
+)
+
+numerics = (
+ "add sub mul matmul div floordiv mod lshift rshift and xor or pow truediv"
+)
+inplace = ' '.join('i%s' % n for n in numerics.split())
+right = ' '.join('r%s' % n for n in numerics.split())
+
+# not including __prepare__, __instancecheck__, __subclasscheck__
+# (as they are metaclass methods)
+# __del__ is not supported at all as it causes problems if it exists
+
+_non_defaults = {
+ '__get__', '__set__', '__delete__', '__reversed__', '__missing__',
+ '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__',
+ '__getstate__', '__setstate__', '__getformat__', '__setformat__',
+ '__repr__', '__dir__', '__subclasses__', '__format__',
+ '__getnewargs_ex__',
+}
+
+
+def _get_method(name, func):
+ "Turns a callable object (like a mock) into a real function"
+ def method(self, /, *args, **kw):
+ return func(self, *args, **kw)
+ method.__name__ = name
+ return method
+
+
+_magics = {
+ '__%s__' % method for method in
+ ' '.join([magic_methods, numerics, inplace, right]).split()
+}
+
+# Magic methods used for async `with` statements
+_async_method_magics = {"__aenter__", "__aexit__", "__anext__"}
+# Magic methods that are only used with async calls but are synchronous functions themselves
+_sync_async_magics = {"__aiter__"}
+_async_magics = _async_method_magics | _sync_async_magics
+
+_all_sync_magics = _magics | _non_defaults
+_all_magics = _all_sync_magics | _async_magics
+
+_unsupported_magics = {
+ '__getattr__', '__setattr__',
+ '__init__', '__new__', '__prepare__',
+ '__instancecheck__', '__subclasscheck__',
+ '__del__'
+}
+
+_calculate_return_value = {
+ '__hash__': lambda self: object.__hash__(self),
+ '__str__': lambda self: object.__str__(self),
+ '__sizeof__': lambda self: object.__sizeof__(self),
+ '__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}",
+}
+
+_return_values = {
+ '__lt__': NotImplemented,
+ '__gt__': NotImplemented,
+ '__le__': NotImplemented,
+ '__ge__': NotImplemented,
+ '__int__': 1,
+ '__contains__': False,
+ '__len__': 0,
+ '__exit__': False,
+ '__complex__': 1j,
+ '__float__': 1.0,
+ '__bool__': True,
+ '__index__': 1,
+ '__aexit__': False,
+}
+
+
+def _get_eq(self):
+ def __eq__(other):
+ ret_val = self.__eq__._mock_return_value
+ if ret_val is not DEFAULT:
+ return ret_val
+ if self is other:
+ return True
+ return NotImplemented
+ return __eq__
+
+def _get_ne(self):
+ def __ne__(other):
+ if self.__ne__._mock_return_value is not DEFAULT:
+ return DEFAULT
+ if self is other:
+ return False
+ return NotImplemented
+ return __ne__
+
+def _get_iter(self):
+ def __iter__():
+ ret_val = self.__iter__._mock_return_value
+ if ret_val is DEFAULT:
+ return iter([])
+ # if ret_val was already an iterator, then calling iter on it should
+ # return the iterator unchanged
+ return iter(ret_val)
+ return __iter__
+
+def _get_async_iter(self):
+ def __aiter__():
+ ret_val = self.__aiter__._mock_return_value
+ if ret_val is DEFAULT:
+ return _AsyncIterator(iter([]))
+ return _AsyncIterator(iter(ret_val))
+ return __aiter__
+
+_side_effect_methods = {
+ '__eq__': _get_eq,
+ '__ne__': _get_ne,
+ '__iter__': _get_iter,
+ '__aiter__': _get_async_iter
+}
+
+
+
+def _set_return_value(mock, method, name):
+ fixed = _return_values.get(name, DEFAULT)
+ if fixed is not DEFAULT:
+ method.return_value = fixed
+ return
+
+ return_calculator = _calculate_return_value.get(name)
+ if return_calculator is not None:
+ return_value = return_calculator(mock)
+ method.return_value = return_value
+ return
+
+ side_effector = _side_effect_methods.get(name)
+ if side_effector is not None:
+ method.side_effect = side_effector(mock)
+
+
+
+class MagicMixin(Base):
+ def __init__(self, /, *args, **kw):
+ self._mock_set_magics() # make magic work for kwargs in init
+ _safe_super(MagicMixin, self).__init__(*args, **kw)
+ self._mock_set_magics() # fix magic broken by upper level init
+
+
+ def _mock_set_magics(self):
+ orig_magics = _magics | _async_method_magics
+ these_magics = orig_magics
+
+ if getattr(self, "_mock_methods", None) is not None:
+ these_magics = orig_magics.intersection(self._mock_methods)
+
+ remove_magics = set()
+ remove_magics = orig_magics - these_magics
+
+ for entry in remove_magics:
+ if entry in type(self).__dict__:
+ # remove unneeded magic methods
+ delattr(self, entry)
+
+ # don't overwrite existing attributes if called a second time
+ these_magics = these_magics - set(type(self).__dict__)
+
+ _type = type(self)
+ for entry in these_magics:
+ setattr(_type, entry, MagicProxy(entry, self))
+
+
+
+class NonCallableMagicMock(MagicMixin, NonCallableMock):
+ """A version of `MagicMock` that isn't callable."""
+ def mock_add_spec(self, spec, spec_set=False):
+ """Add a spec to a mock. `spec` can either be an object or a
+ list of strings. Only attributes on the `spec` can be fetched as
+ attributes from the mock.
+
+ If `spec_set` is True then only attributes on the spec can be set."""
+ self._mock_add_spec(spec, spec_set)
+ self._mock_set_magics()
+
+
+class AsyncMagicMixin(MagicMixin):
+ def __init__(self, /, *args, **kw):
+ self._mock_set_magics() # make magic work for kwargs in init
+ _safe_super(AsyncMagicMixin, self).__init__(*args, **kw)
+ self._mock_set_magics() # fix magic broken by upper level init
+
+class MagicMock(MagicMixin, Mock):
+ """
+ MagicMock is a subclass of Mock with default implementations
+ of most of the magic methods. You can use MagicMock without having to
+ configure the magic methods yourself.
+
+ If you use the `spec` or `spec_set` arguments then *only* magic
+ methods that exist in the spec will be created.
+
+ Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
+ """
+ def mock_add_spec(self, spec, spec_set=False):
+ """Add a spec to a mock. `spec` can either be an object or a
+ list of strings. Only attributes on the `spec` can be fetched as
+ attributes from the mock.
+
+ If `spec_set` is True then only attributes on the spec can be set."""
+ self._mock_add_spec(spec, spec_set)
+ self._mock_set_magics()
+
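+# A small sketch (illustrative only) of the preconfigured magic methods
+# described in the docstring above:
+#
+#     m = MagicMock()
+#     assert len(m) == 0            # __len__ returns 0 by default
+#     assert list(m) == []          # __iter__ returns an empty iterator
+#     m.__len__.return_value = 3    # magic methods are themselves mocks
+#     assert len(m) == 3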
+
+
+class MagicProxy(Base):
+ def __init__(self, name, parent):
+ self.name = name
+ self.parent = parent
+
+ def create_mock(self):
+ entry = self.name
+ parent = self.parent
+ m = parent._get_child_mock(name=entry, _new_name=entry,
+ _new_parent=parent)
+ setattr(parent, entry, m)
+ _set_return_value(parent, m, entry)
+ return m
+
+ def __get__(self, obj, _type=None):
+ return self.create_mock()
+
+
+class AsyncMockMixin(Base):
+ await_count = _delegating_property('await_count')
+ await_args = _delegating_property('await_args')
+ await_args_list = _delegating_property('await_args_list')
+
+ def __init__(self, /, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # iscoroutinefunction() checks _is_coroutine property to say if an
+ # object is a coroutine. Without this check it looks to see if it is a
+ # function/method, which in this case it is not (since it is an
+ # AsyncMock).
+ # It is set through __dict__ because when spec_set is True, this
+ # attribute is likely undefined.
+ self.__dict__['_is_coroutine'] = asyncio.coroutines._is_coroutine
+ self.__dict__['_mock_await_count'] = 0
+ self.__dict__['_mock_await_args'] = None
+ self.__dict__['_mock_await_args_list'] = _CallList()
+ code_mock = NonCallableMock(spec_set=CodeType)
+ code_mock.co_flags = inspect.CO_COROUTINE
+ self.__dict__['__code__'] = code_mock
+
+ async def _execute_mock_call(self, /, *args, **kwargs):
+ # This is nearly just like super(), except for special handling
+ # of coroutines
+
+ _call = _Call((args, kwargs), two=True)
+ self.await_count += 1
+ self.await_args = _call
+ self.await_args_list.append(_call)
+
+ effect = self.side_effect
+ if effect is not None:
+ if _is_exception(effect):
+ raise effect
+ elif not _callable(effect):
+ try:
+ result = next(effect)
+ except StopIteration:
+                    # It is impossible to propagate a StopIteration
+                    # through coroutines because of PEP 479
+ raise StopAsyncIteration
+ if _is_exception(result):
+ raise result
+ elif iscoroutinefunction(effect):
+ result = await effect(*args, **kwargs)
+ else:
+ result = effect(*args, **kwargs)
+
+ if result is not DEFAULT:
+ return result
+
+ if self._mock_return_value is not DEFAULT:
+ return self.return_value
+
+ if self._mock_wraps is not None:
+ if iscoroutinefunction(self._mock_wraps):
+ return await self._mock_wraps(*args, **kwargs)
+ return self._mock_wraps(*args, **kwargs)
+
+ return self.return_value
+
+ def assert_awaited(self):
+ """
+ Assert that the mock was awaited at least once.
+ """
+ if self.await_count == 0:
+ msg = f"Expected {self._mock_name or 'mock'} to have been awaited."
+ raise AssertionError(msg)
+
+ def assert_awaited_once(self):
+ """
+ Assert that the mock was awaited exactly once.
+ """
+ if not self.await_count == 1:
+ msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once."
+ f" Awaited {self.await_count} times.")
+ raise AssertionError(msg)
+
+ def assert_awaited_with(self, /, *args, **kwargs):
+ """
+ Assert that the last await was with the specified arguments.
+ """
+ if self.await_args is None:
+ expected = self._format_mock_call_signature(args, kwargs)
+ raise AssertionError(f'Expected await: {expected}\nNot awaited')
+
+ def _error_message():
+ msg = self._format_mock_failure_message(args, kwargs, action='await')
+ return msg
+
+ expected = self._call_matcher(_Call((args, kwargs), two=True))
+ actual = self._call_matcher(self.await_args)
+ if actual != expected:
+ cause = expected if isinstance(expected, Exception) else None
+ raise AssertionError(_error_message()) from cause
+
+ def assert_awaited_once_with(self, /, *args, **kwargs):
+ """
+ Assert that the mock was awaited exactly once and with the specified
+ arguments.
+ """
+ if not self.await_count == 1:
+ msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once."
+ f" Awaited {self.await_count} times.")
+ raise AssertionError(msg)
+ return self.assert_awaited_with(*args, **kwargs)
+
+ def assert_any_await(self, /, *args, **kwargs):
+ """
+ Assert the mock has ever been awaited with the specified arguments.
+ """
+ expected = self._call_matcher(_Call((args, kwargs), two=True))
+ cause = expected if isinstance(expected, Exception) else None
+ actual = [self._call_matcher(c) for c in self.await_args_list]
+ if cause or expected not in _AnyComparer(actual):
+ expected_string = self._format_mock_call_signature(args, kwargs)
+ raise AssertionError(
+ '%s await not found' % expected_string
+ ) from cause
+
+ def assert_has_awaits(self, calls, any_order=False):
+ """
+ Assert the mock has been awaited with the specified calls.
+ The :attr:`await_args_list` list is checked for the awaits.
+
+ If `any_order` is False (the default) then the awaits must be
+ sequential. There can be extra calls before or after the
+ specified awaits.
+
+ If `any_order` is True then the awaits can be in any order, but
+ they must all appear in :attr:`await_args_list`.
+ """
+ expected = [self._call_matcher(c) for c in calls]
+ cause = next((e for e in expected if isinstance(e, Exception)), None)
+ all_awaits = _CallList(self._call_matcher(c) for c in self.await_args_list)
+ if not any_order:
+ if expected not in all_awaits:
+ if cause is None:
+ problem = 'Awaits not found.'
+ else:
+ problem = ('Error processing expected awaits.\n'
+ 'Errors: {}').format(
+ [e if isinstance(e, Exception) else None
+ for e in expected])
+ raise AssertionError(
+ f'{problem}\n'
+ f'Expected: {_CallList(calls)}\n'
+ f'Actual: {self.await_args_list}'
+ ) from cause
+ return
+
+ all_awaits = list(all_awaits)
+
+ not_found = []
+ for kall in expected:
+ try:
+ all_awaits.remove(kall)
+ except ValueError:
+ not_found.append(kall)
+ if not_found:
+ raise AssertionError(
+ '%r not all found in await list' % (tuple(not_found),)
+ ) from cause
+
+ def assert_not_awaited(self):
+ """
+ Assert that the mock was never awaited.
+ """
+ if self.await_count != 0:
+ msg = (f"Expected {self._mock_name or 'mock'} to not have been awaited."
+ f" Awaited {self.await_count} times.")
+ raise AssertionError(msg)
+
+ def reset_mock(self, /, *args, **kwargs):
+ """
+ See :func:`.Mock.reset_mock()`
+ """
+ super().reset_mock(*args, **kwargs)
+ self.await_count = 0
+ self.await_args = None
+ self.await_args_list = _CallList()
+
+
+class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock):
+ """
+    Enhance :class:`Mock` with features that allow mocking
+ an async function.
+
+ The :class:`AsyncMock` object will behave so the object is
+ recognized as an async function, and the result of a call is an awaitable:
+
+ >>> mock = AsyncMock()
+ >>> iscoroutinefunction(mock)
+ True
+ >>> inspect.isawaitable(mock())
+ True
+
+
+ The result of ``mock()`` is an async function which will have the outcome
+ of ``side_effect`` or ``return_value``:
+
+ - if ``side_effect`` is a function, the async function will return the
+ result of that function,
+ - if ``side_effect`` is an exception, the async function will raise the
+ exception,
+ - if ``side_effect`` is an iterable, the async function will return the
+      next value of the iterable; however, if the sequence of results is
+      exhausted, ``StopAsyncIteration`` is raised immediately,
+ - if ``side_effect`` is not defined, the async function will return the
+ value defined by ``return_value``, hence, by default, the async function
+ returns a new :class:`AsyncMock` object.
+
+ If the outcome of ``side_effect`` or ``return_value`` is an async function,
+ the mock async function obtained when the mock object is called will be this
+ async function itself (and not an async function returning an async
+ function).
+
+ The test author can also specify a wrapped object with ``wraps``. In this
+    case, the :class:`AsyncMock` object behavior is the same as with a
+    :class:`Mock` object: the wrapped object may have methods
+    defined as async functions.
+
+ Based on Martin Richard's asynctest project.
+ """
+
+
+class _ANY(object):
+ "A helper object that compares equal to everything."
+
+ def __eq__(self, other):
+ return True
+
+ def __ne__(self, other):
+ return False
+
+ def __repr__(self):
+        return '<ANY>'
+
+ANY = _ANY()
+
+
+
+def _format_call_signature(name, args, kwargs):
+ message = '%s(%%s)' % name
+ formatted_args = ''
+ args_string = ', '.join([repr(arg) for arg in args])
+ kwargs_string = ', '.join([
+ '%s=%r' % (key, value) for key, value in kwargs.items()
+ ])
+ if args_string:
+ formatted_args = args_string
+ if kwargs_string:
+ if formatted_args:
+ formatted_args += ', '
+ formatted_args += kwargs_string
+
+ return message % formatted_args
+
+
+
+class _Call(tuple):
+ """
+ A tuple for holding the results of a call to a mock, either in the form
+ `(args, kwargs)` or `(name, args, kwargs)`.
+
+ If args or kwargs are empty then a call tuple will compare equal to
+ a tuple without those values. This makes comparisons less verbose::
+
+ _Call(('name', (), {})) == ('name',)
+ _Call(('name', (1,), {})) == ('name', (1,))
+ _Call(((), {'a': 'b'})) == ({'a': 'b'},)
+
+ The `_Call` object provides a useful shortcut for comparing with call::
+
+ _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
+ _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
+
+ If the _Call has no name then it will match any name.
+ """
+ def __new__(cls, value=(), name='', parent=None, two=False,
+ from_kall=True):
+ args = ()
+ kwargs = {}
+ _len = len(value)
+ if _len == 3:
+ name, args, kwargs = value
+ elif _len == 2:
+ first, second = value
+ if isinstance(first, str):
+ name = first
+ if isinstance(second, tuple):
+ args = second
+ else:
+ kwargs = second
+ else:
+ args, kwargs = first, second
+ elif _len == 1:
+ value, = value
+ if isinstance(value, str):
+ name = value
+ elif isinstance(value, tuple):
+ args = value
+ else:
+ kwargs = value
+
+ if two:
+ return tuple.__new__(cls, (args, kwargs))
+
+ return tuple.__new__(cls, (name, args, kwargs))
+
+
+ def __init__(self, value=(), name=None, parent=None, two=False,
+ from_kall=True):
+ self._mock_name = name
+ self._mock_parent = parent
+ self._mock_from_kall = from_kall
+
+
+ def __eq__(self, other):
+ try:
+ len_other = len(other)
+ except TypeError:
+ return NotImplemented
+
+ self_name = ''
+ if len(self) == 2:
+ self_args, self_kwargs = self
+ else:
+ self_name, self_args, self_kwargs = self
+
+ if (getattr(self, '_mock_parent', None) and getattr(other, '_mock_parent', None)
+ and self._mock_parent != other._mock_parent):
+ return False
+
+ other_name = ''
+ if len_other == 0:
+ other_args, other_kwargs = (), {}
+ elif len_other == 3:
+ other_name, other_args, other_kwargs = other
+ elif len_other == 1:
+ value, = other
+ if isinstance(value, tuple):
+ other_args = value
+ other_kwargs = {}
+ elif isinstance(value, str):
+ other_name = value
+ other_args, other_kwargs = (), {}
+ else:
+ other_args = ()
+ other_kwargs = value
+ elif len_other == 2:
+ # could be (name, args) or (name, kwargs) or (args, kwargs)
+ first, second = other
+ if isinstance(first, str):
+ other_name = first
+ if isinstance(second, tuple):
+ other_args, other_kwargs = second, {}
+ else:
+ other_args, other_kwargs = (), second
+ else:
+ other_args, other_kwargs = first, second
+ else:
+ return False
+
+ if self_name and other_name != self_name:
+ return False
+
+ # this order is important for ANY to work!
+ return (other_args, other_kwargs) == (self_args, self_kwargs)
+
+
+ __ne__ = object.__ne__
+
+
+ def __call__(self, /, *args, **kwargs):
+ if self._mock_name is None:
+ return _Call(('', args, kwargs), name='()')
+
+ name = self._mock_name + '()'
+ return _Call((self._mock_name, args, kwargs), name=name, parent=self)
+
+
+ def __getattr__(self, attr):
+ if self._mock_name is None:
+ return _Call(name=attr, from_kall=False)
+ name = '%s.%s' % (self._mock_name, attr)
+ return _Call(name=name, parent=self, from_kall=False)
+
+
+ def __getattribute__(self, attr):
+ if attr in tuple.__dict__:
+ raise AttributeError
+ return tuple.__getattribute__(self, attr)
+
+
+ def _get_call_arguments(self):
+ if len(self) == 2:
+ args, kwargs = self
+ else:
+ name, args, kwargs = self
+
+ return args, kwargs
+
+ @property
+ def args(self):
+ return self._get_call_arguments()[0]
+
+ @property
+ def kwargs(self):
+ return self._get_call_arguments()[1]
+
+ def __repr__(self):
+ if not self._mock_from_kall:
+ name = self._mock_name or 'call'
+ if name.startswith('()'):
+ name = 'call%s' % name
+ return name
+
+ if len(self) == 2:
+ name = 'call'
+ args, kwargs = self
+ else:
+ name, args, kwargs = self
+ if not name:
+ name = 'call'
+ elif not name.startswith('()'):
+ name = 'call.%s' % name
+ else:
+ name = 'call%s' % name
+ return _format_call_signature(name, args, kwargs)
+
+
+ def call_list(self):
+ """For a call object that represents multiple calls, `call_list`
+ returns a list of all the intermediate calls as well as the
+ final call."""
+ vals = []
+ thing = self
+ while thing is not None:
+ if thing._mock_from_kall:
+ vals.append(thing)
+ thing = thing._mock_parent
+ return _CallList(reversed(vals))
+
+
+call = _Call(from_kall=False)
+
+
+def create_autospec(spec, spec_set=False, instance=False, _parent=None,
+ _name=None, **kwargs):
+ """Create a mock object using another object as a spec. Attributes on the
+ mock will use the corresponding attribute on the `spec` object as their
+ spec.
+
+ Functions or methods being mocked will have their arguments checked
+    to ensure that they are called with the correct signature.
+
+ If `spec_set` is True then attempting to set attributes that don't exist
+ on the spec object will raise an `AttributeError`.
+
+ If a class is used as a spec then the return value of the mock (the
+ instance of the class) will have the same spec. You can use a class as the
+ spec for an instance object by passing `instance=True`. The returned mock
+ will only be callable if instances of the mock are callable.
+
+ `create_autospec` also takes arbitrary keyword arguments that are passed to
+ the constructor of the created mock."""
+ if _is_list(spec):
+ # can't pass a list instance to the mock constructor as it will be
+ # interpreted as a list of strings
+ spec = type(spec)
+
+ is_type = isinstance(spec, type)
+ is_async_func = _is_async_func(spec)
+ _kwargs = {'spec': spec}
+ if spec_set:
+ _kwargs = {'spec_set': spec}
+ elif spec is None:
+ # None we mock with a normal mock without a spec
+ _kwargs = {}
+ if _kwargs and instance:
+ _kwargs['_spec_as_instance'] = True
+
+ _kwargs.update(kwargs)
+
+ Klass = MagicMock
+ if inspect.isdatadescriptor(spec):
+ # descriptors don't have a spec
+ # because we don't know what type they return
+ _kwargs = {}
+ elif is_async_func:
+ if instance:
+ raise RuntimeError("Instance can not be True when create_autospec "
+ "is mocking an async function")
+ Klass = AsyncMock
+ elif not _callable(spec):
+ Klass = NonCallableMagicMock
+ elif is_type and instance and not _instance_callable(spec):
+ Klass = NonCallableMagicMock
+
+ _name = _kwargs.pop('name', _name)
+
+ _new_name = _name
+ if _parent is None:
+ # for a top level object no _new_name should be set
+ _new_name = ''
+
+ mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
+ name=_name, **_kwargs)
+
+ if isinstance(spec, FunctionTypes):
+ # should only happen at the top level because we don't
+ # recurse for functions
+ mock = _set_signature(mock, spec)
+ if is_async_func:
+ _setup_async_mock(mock)
+ else:
+ _check_signature(spec, mock, is_type, instance)
+
+ if _parent is not None and not instance:
+ _parent._mock_children[_name] = mock
+
+ if is_type and not instance and 'return_value' not in kwargs:
+ mock.return_value = create_autospec(spec, spec_set, instance=True,
+ _name='()', _parent=mock)
+
+ for entry in dir(spec):
+ if _is_magic(entry):
+ # MagicMock already does the useful magic methods for us
+ continue
+
+ # XXXX do we need a better way of getting attributes without
+ # triggering code execution (?) Probably not - we need the actual
+ # object to mock it so we would rather trigger a property than mock
+ # the property descriptor. Likewise we want to mock out dynamically
+ # provided attributes.
+ # XXXX what about attributes that raise exceptions other than
+ # AttributeError on being fetched?
+ # we could be resilient against it, or catch and propagate the
+ # exception when the attribute is fetched from the mock
+ try:
+ original = getattr(spec, entry)
+ except AttributeError:
+ continue
+
+ kwargs = {'spec': original}
+ if spec_set:
+ kwargs = {'spec_set': original}
+
+ if not isinstance(original, FunctionTypes):
+ new = _SpecState(original, spec_set, mock, entry, instance)
+ mock._mock_children[entry] = new
+ else:
+ parent = mock
+ if isinstance(spec, FunctionTypes):
+ parent = mock.mock
+
+ skipfirst = _must_skip(spec, entry, is_type)
+ kwargs['_eat_self'] = skipfirst
+ if iscoroutinefunction(original):
+ child_klass = AsyncMock
+ else:
+ child_klass = MagicMock
+ new = child_klass(parent=parent, name=entry, _new_name=entry,
+ _new_parent=parent,
+ **kwargs)
+ mock._mock_children[entry] = new
+ _check_signature(original, new, skipfirst=skipfirst)
+
+ # so functions created with _set_signature become instance attributes,
+ # *plus* their underlying mock exists in _mock_children of the parent
+ # mock. Adding to _mock_children may be unnecessary where we are also
+ # setting as an instance attribute?
+ if isinstance(new, FunctionTypes):
+ setattr(mock, entry, new)
+
+ return mock
+
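+# A minimal sketch (illustrative only) of autospeccing; `SomeClass` is a
+# hypothetical class with a `method(self, x)` attribute:
+#
+#     mock = create_autospec(SomeClass, spec_set=True)
+#     mock.method(1)
+#     mock.method.assert_called_once_with(1)
+#     # mock.method(1, 2, 3) would raise TypeError (signature is checked)
+#     # mock.does_not_exist = 1 would raise AttributeError (spec_set=True)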
+
+def _must_skip(spec, entry, is_type):
+ """
+ Return whether we should skip the first argument on spec's `entry`
+ attribute.
+ """
+ if not isinstance(spec, type):
+ if entry in getattr(spec, '__dict__', {}):
+ # instance attribute - shouldn't skip
+ return False
+ spec = spec.__class__
+
+ for klass in spec.__mro__:
+ result = klass.__dict__.get(entry, DEFAULT)
+ if result is DEFAULT:
+ continue
+ if isinstance(result, (staticmethod, classmethod)):
+ return False
+ elif isinstance(result, FunctionTypes):
+ # Normal method => skip if looked up on type
+ # (if looked up on instance, self is already skipped)
+ return is_type
+ else:
+ return False
+
+ # function is a dynamically provided attribute
+ return is_type
+
+
+class _SpecState(object):
+
+ def __init__(self, spec, spec_set=False, parent=None,
+ name=None, ids=None, instance=False):
+ self.spec = spec
+ self.ids = ids
+ self.spec_set = spec_set
+ self.parent = parent
+ self.instance = instance
+ self.name = name
+
+
+FunctionTypes = (
+ # python function
+ type(create_autospec),
+ # instance method
+ type(ANY.__eq__),
+)
+
+
+file_spec = None
+
+
+def _to_stream(read_data):
+ if isinstance(read_data, bytes):
+ return io.BytesIO(read_data)
+ else:
+ return io.StringIO(read_data)
+
+
+def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+ `read_data` is a string for the `read`, `readline` and `readlines` of the
+ file handle to return. This is an empty string by default.
+ """
+ _read_data = _to_stream(read_data)
+ _state = [_read_data, None]
+
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return _state[0].readlines(*args, **kwargs)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return _state[0].read(*args, **kwargs)
+
+ def _readline_side_effect(*args, **kwargs):
+ yield from _iter_side_effect()
+ while True:
+ yield _state[0].readline(*args, **kwargs)
+
+ def _iter_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _state[0]:
+ yield line
+
+ def _next_side_effect():
+ if handle.readline.return_value is not None:
+ return handle.readline.return_value
+ return next(_state[0])
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ _state[1] = _readline_side_effect()
+ handle.readline.side_effect = _state[1]
+ handle.readlines.side_effect = _readlines_side_effect
+ handle.__iter__.side_effect = _iter_side_effect
+ handle.__next__.side_effect = _next_side_effect
+
+ def reset_data(*args, **kwargs):
+ _state[0] = _to_stream(read_data)
+ if handle.readline.side_effect == _state[1]:
+ # Only reset the side effect if the user hasn't overridden it.
+ _state[1] = _readline_side_effect()
+ handle.readline.side_effect = _state[1]
+ return DEFAULT
+
+ mock.side_effect = reset_data
+ mock.return_value = handle
+ return mock
+
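+# A small sketch (illustrative only) of the usual pairing with `patch`;
+# 'builtins.open' is the standard target when replacing the built-in `open`:
+#
+#     m = mock_open(read_data='line1\nline2\n')
+#     with patch('builtins.open', m):
+#         with open('any/path') as f:
+#             first = f.readline()    # 'line1\n', served from read_data
+#     m.assert_called_once_with('any/path')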
+
+class PropertyMock(Mock):
+ """
+ A mock intended to be used as a property, or other descriptor, on a class.
+ `PropertyMock` provides `__get__` and `__set__` methods so you can specify
+ a return value when it is fetched.
+
+ Fetching a `PropertyMock` instance from an object calls the mock, with
+ no args. Setting it calls the mock with the value being set.
+ """
+ def _get_child_mock(self, /, **kwargs):
+ return MagicMock(**kwargs)
+
+ def __get__(self, obj, obj_type=None):
+ return self()
+ def __set__(self, obj, val):
+ self(val)
+
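+# A short sketch (illustrative only); `Thing` is a hypothetical class whose
+# `value` attribute is replaced with a PropertyMock via `new_callable`:
+#
+#     with patch.object(Thing, 'value', new_callable=PropertyMock) as mock_value:
+#         mock_value.return_value = 42
+#         assert Thing().value == 42    # fetching the property calls the mock
+#     mock_value.assert_called_once_with()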
+
+def seal(mock):
+ """Disable the automatic generation of child mocks.
+
+ Given an input Mock, seals it to ensure no further mocks will be generated
+ when accessing an attribute that was not already defined.
+
+ The operation recursively seals the mock passed in, meaning that
+ the mock itself, any mocks generated by accessing one of its attributes,
+ and all assigned mocks without a name or spec will be sealed.
+ """
+ mock._mock_sealed = True
+ for attr in dir(mock):
+ try:
+ m = getattr(mock, attr)
+ except AttributeError:
+ continue
+ if not isinstance(m, NonCallableMock):
+ continue
+ if m._mock_new_parent is mock:
+ seal(m)
+
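+# A brief sketch (illustrative only) of sealing a mock:
+#
+#     m = MagicMock()
+#     m.existing.attribute = 1    # defined before sealing, still accessible
+#     seal(m)
+#     assert m.existing.attribute == 1
+#     # accessing m.new_attribute would now raise AttributeError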
+
+class _AsyncIterator:
+ """
+ Wraps an iterator in an asynchronous iterator.
+ """
+ def __init__(self, iterator):
+ self.iterator = iterator
+ code_mock = NonCallableMock(spec_set=CodeType)
+ code_mock.co_flags = inspect.CO_ITERABLE_COROUTINE
+ self.__dict__['__code__'] = code_mock
+
+ async def __anext__(self):
+ try:
+ return next(self.iterator)
+ except StopIteration:
+ pass
+ raise StopAsyncIteration
diff --git a/Monika After Story/game/python-packages/unittest/result.py b/Monika After Story/game/python-packages/unittest/result.py
new file mode 100644
index 0000000000..111317b329
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/result.py
@@ -0,0 +1,216 @@
+"""Test result object"""
+
+import io
+import sys
+import traceback
+
+from . import util
+from functools import wraps
+
+__unittest = True
+
+def failfast(method):
+ @wraps(method)
+ def inner(self, *args, **kw):
+ if getattr(self, 'failfast', False):
+ self.stop()
+ return method(self, *args, **kw)
+ return inner
+
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
+
+
+class TestResult(object):
+ """Holder for test result information.
+
+ Test results are automatically managed by the TestCase and TestSuite
+ classes, and do not need to be explicitly manipulated by writers of tests.
+
+ Each instance holds the total number of tests run, and collections of
+ failures and errors that occurred among those test runs. The collections
+ contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
+ formatted traceback of the error that occurred.
+ """
+ _previousTestClass = None
+ _testRunEntered = False
+ _moduleSetUpFailed = False
+ def __init__(self, stream=None, descriptions=None, verbosity=None):
+ self.failfast = False
+ self.failures = []
+ self.errors = []
+ self.testsRun = 0
+ self.skipped = []
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ self.shouldStop = False
+ self.buffer = False
+ self.tb_locals = False
+ self._stdout_buffer = None
+ self._stderr_buffer = None
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+ self._mirrorOutput = False
+
+ def printErrors(self):
+ "Called by TestRunner after test run"
+
+ def startTest(self, test):
+ "Called when the given test is about to be run"
+ self.testsRun += 1
+ self._mirrorOutput = False
+ self._setupStdout()
+
+ def _setupStdout(self):
+ if self.buffer:
+ if self._stderr_buffer is None:
+ self._stderr_buffer = io.StringIO()
+ self._stdout_buffer = io.StringIO()
+ sys.stdout = self._stdout_buffer
+ sys.stderr = self._stderr_buffer
+
+ def startTestRun(self):
+ """Called once before any tests are executed.
+
+ See startTest for a method called before each test.
+ """
+
+ def stopTest(self, test):
+ """Called when the given test has been run"""
+ self._restoreStdout()
+ self._mirrorOutput = False
+
+ def _restoreStdout(self):
+ if self.buffer:
+ if self._mirrorOutput:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ self._original_stdout.write(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ self._original_stderr.write(STDERR_LINE % error)
+
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
+ self._stdout_buffer.seek(0)
+ self._stdout_buffer.truncate()
+ self._stderr_buffer.seek(0)
+ self._stderr_buffer.truncate()
+
+ def stopTestRun(self):
+ """Called once after all tests are executed.
+
+ See stopTest for a method called after each test.
+ """
+
+ @failfast
+ def addError(self, test, err):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+ """
+ self.errors.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ @failfast
+ def addFailure(self, test, err):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info()."""
+ self.failures.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ def addSubTest(self, test, subtest, err):
+ """Called at the end of a subtest.
+ 'err' is None if the subtest ended successfully, otherwise it's a
+ tuple of values as returned by sys.exc_info().
+ """
+ # By default, we don't do anything with successful subtests, but
+ # more sophisticated test results might want to record them.
+ if err is not None:
+ if getattr(self, 'failfast', False):
+ self.stop()
+ if issubclass(err[0], test.failureException):
+ errors = self.failures
+ else:
+ errors = self.errors
+ errors.append((subtest, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ def addSuccess(self, test):
+ "Called when a test has completed successfully"
+ pass
+
+ def addSkip(self, test, reason):
+ """Called when a test is skipped."""
+ self.skipped.append((test, reason))
+
+ def addExpectedFailure(self, test, err):
+ """Called when an expected failure/error occurred."""
+ self.expectedFailures.append(
+ (test, self._exc_info_to_string(err, test)))
+
+ @failfast
+ def addUnexpectedSuccess(self, test):
+ """Called when a test was expected to fail, but succeed."""
+ self.unexpectedSuccesses.append(test)
+
+ def wasSuccessful(self):
+ """Tells whether or not this result was a success."""
+ # The hasattr check is for test_result's OldResult test. That
+ # way this method works on objects that lack the attribute.
+ # (where would such result instances come from? old stored pickles?)
+ return ((len(self.failures) == len(self.errors) == 0) and
+ (not hasattr(self, 'unexpectedSuccesses') or
+ len(self.unexpectedSuccesses) == 0))
+
+ def stop(self):
+ """Indicates that the tests should be aborted."""
+ self.shouldStop = True
+
+ def _exc_info_to_string(self, err, test):
+ """Converts a sys.exc_info()-style tuple of values into a string."""
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ while tb and self._is_relevant_tb_level(tb):
+ tb = tb.tb_next
+
+ if exctype is test.failureException:
+ # Skip assert*() traceback levels
+ length = self._count_relevant_tb_levels(tb)
+ else:
+ length = None
+ tb_e = traceback.TracebackException(
+ exctype, value, tb, limit=length, capture_locals=self.tb_locals)
+ msgLines = list(tb_e.format())
+
+ if self.buffer:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ msgLines.append(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ msgLines.append(STDERR_LINE % error)
+ return ''.join(msgLines)
+
+
+ def _is_relevant_tb_level(self, tb):
+ return '__unittest' in tb.tb_frame.f_globals
+
+ def _count_relevant_tb_levels(self, tb):
+ length = 0
+ while tb and not self._is_relevant_tb_level(tb):
+ length += 1
+ tb = tb.tb_next
+ return length
+
+ def __repr__(self):
+ return ("<%s run=%i errors=%i failures=%i>" %
+ (util.strclass(self.__class__), self.testsRun, len(self.errors),
+ len(self.failures)))
diff --git a/Monika After Story/game/python-packages/unittest/runner.py b/Monika After Story/game/python-packages/unittest/runner.py
new file mode 100644
index 0000000000..45e7e4c045
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/runner.py
@@ -0,0 +1,221 @@
+"""Running tests"""
+
+import sys
+import time
+import warnings
+
+from . import result
+from .signals import registerResult
+
+__unittest = True
+
+
+class _WritelnDecorator(object):
+ """Used to decorate file-like objects with a handy 'writeln' method"""
+ def __init__(self,stream):
+ self.stream = stream
+
+ def __getattr__(self, attr):
+ if attr in ('stream', '__getstate__'):
+ raise AttributeError(attr)
+ return getattr(self.stream,attr)
+
+ def writeln(self, arg=None):
+ if arg:
+ self.write(arg)
+ self.write('\n') # text-mode streams translate to \r\n if needed
+
+
+class TextTestResult(result.TestResult):
+ """A test result class that can print formatted text results to a stream.
+
+ Used by TextTestRunner.
+ """
+ separator1 = '=' * 70
+ separator2 = '-' * 70
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+ self.stream = stream
+ self.showAll = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return '\n'.join((str(test), doc_first_line))
+ else:
+ return str(test)
+
+ def startTest(self, test):
+ super(TextTestResult, self).startTest(test)
+ if self.showAll:
+ self.stream.write(self.getDescription(test))
+ self.stream.write(" ... ")
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super(TextTestResult, self).addSuccess(test)
+ if self.showAll:
+ self.stream.writeln("ok")
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ def addError(self, test, err):
+ super(TextTestResult, self).addError(test, err)
+ if self.showAll:
+ self.stream.writeln("ERROR")
+ elif self.dots:
+ self.stream.write('E')
+ self.stream.flush()
+
+ def addFailure(self, test, err):
+ super(TextTestResult, self).addFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("FAIL")
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ if self.showAll:
+ self.stream.writeln("skipped {0!r}".format(reason))
+ elif self.dots:
+ self.stream.write("s")
+ self.stream.flush()
+
+ def addExpectedFailure(self, test, err):
+ super(TextTestResult, self).addExpectedFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("expected failure")
+ elif self.dots:
+ self.stream.write("x")
+ self.stream.flush()
+
+ def addUnexpectedSuccess(self, test):
+ super(TextTestResult, self).addUnexpectedSuccess(test)
+ if self.showAll:
+ self.stream.writeln("unexpected success")
+ elif self.dots:
+ self.stream.write("u")
+ self.stream.flush()
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavour, errors):
+ for test, err in errors:
+ self.stream.writeln(self.separator1)
+ self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln("%s" % err)
+
+
+class TextTestRunner(object):
+ """A test runner class that displays results in textual form.
+
+ It prints out the names of tests as they are run, errors as they
+ occur, and a summary of the results at the end of the test run.
+ """
+ resultclass = TextTestResult
+
+ def __init__(self, stream=None, descriptions=True, verbosity=1,
+ failfast=False, buffer=False, resultclass=None, warnings=None,
+ *, tb_locals=False):
+ """Construct a TextTestRunner.
+
+ Subclasses should accept **kwargs to ensure compatibility as the
+ interface changes.
+ """
+ if stream is None:
+ stream = sys.stderr
+ self.stream = _WritelnDecorator(stream)
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.buffer = buffer
+ self.tb_locals = tb_locals
+ self.warnings = warnings
+ if resultclass is not None:
+ self.resultclass = resultclass
+
+ def _makeResult(self):
+ return self.resultclass(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = self._makeResult()
+ registerResult(result)
+ result.failfast = self.failfast
+ result.buffer = self.buffer
+ result.tb_locals = self.tb_locals
+ with warnings.catch_warnings():
+ if self.warnings:
+ # if self.warnings is set, use it to filter all the warnings
+ warnings.simplefilter(self.warnings)
+ # if the filter is 'default' or 'always', special-case the
+ # warnings from the deprecated unittest methods to show them
+ # no more than once per module, because they can be fairly
+ # noisy. The -Wd and -Wa flags can be used to bypass this
+ # only when self.warnings is None.
+ if self.warnings in ['default', 'always']:
+ warnings.filterwarnings('module',
+ category=DeprecationWarning,
+ message=r'Please use assert\w+ instead.')
+ startTime = time.perf_counter()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+ try:
+ test(result)
+ finally:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+ stopTime = time.perf_counter()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ if hasattr(result, 'separator2'):
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+
+ expectedFails = unexpectedSuccesses = skipped = 0
+ try:
+ results = map(len, (result.expectedFailures,
+ result.unexpectedSuccesses,
+ result.skipped))
+ except AttributeError:
+ pass
+ else:
+ expectedFails, unexpectedSuccesses, skipped = results
+
+ infos = []
+ if not result.wasSuccessful():
+ self.stream.write("FAILED")
+ failed, errored = len(result.failures), len(result.errors)
+ if failed:
+ infos.append("failures=%d" % failed)
+ if errored:
+ infos.append("errors=%d" % errored)
+ else:
+ self.stream.write("OK")
+ if skipped:
+ infos.append("skipped=%d" % skipped)
+ if expectedFails:
+ infos.append("expected failures=%d" % expectedFails)
+ if unexpectedSuccesses:
+ infos.append("unexpected successes=%d" % unexpectedSuccesses)
+ if infos:
+ self.stream.writeln(" (%s)" % (", ".join(infos),))
+ else:
+ self.stream.write("\n")
+ return result
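+
+# A minimal sketch (illustrative only) of driving a suite through this runner;
+# `SomeTestCase` is a hypothetical TestCase subclass and TestLoader is assumed
+# to be provided by the rest of this package:
+#
+#     suite = TestLoader().loadTestsFromTestCase(SomeTestCase)
+#     runner = TextTestRunner(verbosity=2)
+#     result = runner.run(suite)
+#     assert result.wasSuccessful()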
diff --git a/Monika After Story/game/python-packages/unittest/signals.py b/Monika After Story/game/python-packages/unittest/signals.py
new file mode 100644
index 0000000000..e6a5fc5243
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/signals.py
@@ -0,0 +1,71 @@
+import signal
+import weakref
+
+from functools import wraps
+
+__unittest = True
+
+
+class _InterruptHandler(object):
+ def __init__(self, default_handler):
+ self.called = False
+ self.original_handler = default_handler
+ if isinstance(default_handler, int):
+ if default_handler == signal.SIG_DFL:
+ # Pretend it's signal.default_int_handler instead.
+ default_handler = signal.default_int_handler
+ elif default_handler == signal.SIG_IGN:
+ # Not quite the same thing as SIG_IGN, but the closest we
+ # can make it: do nothing.
+ def default_handler(unused_signum, unused_frame):
+ pass
+ else:
+ raise TypeError("expected SIGINT signal handler to be "
+ "signal.SIG_IGN, signal.SIG_DFL, or a "
+ "callable object")
+ self.default_handler = default_handler
+
+ def __call__(self, signum, frame):
+ installed_handler = signal.getsignal(signal.SIGINT)
+ if installed_handler is not self:
+ # if we aren't the installed handler, then delegate immediately
+ # to the default handler
+ self.default_handler(signum, frame)
+
+ if self.called:
+ self.default_handler(signum, frame)
+ self.called = True
+ for result in _results.keys():
+ result.stop()
+
+_results = weakref.WeakKeyDictionary()
+def registerResult(result):
+ _results[result] = 1
+
+def removeResult(result):
+ return bool(_results.pop(result, None))
+
+_interrupt_handler = None
+def installHandler():
+ global _interrupt_handler
+ if _interrupt_handler is None:
+ default_handler = signal.getsignal(signal.SIGINT)
+ _interrupt_handler = _InterruptHandler(default_handler)
+ signal.signal(signal.SIGINT, _interrupt_handler)
+
+
+def removeHandler(method=None):
+ if method is not None:
+ @wraps(method)
+ def inner(*args, **kwargs):
+ initial = signal.getsignal(signal.SIGINT)
+ removeHandler()
+ try:
+ return method(*args, **kwargs)
+ finally:
+ signal.signal(signal.SIGINT, initial)
+ return inner
+
+ global _interrupt_handler
+ if _interrupt_handler is not None:
+ signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
diff --git a/Monika After Story/game/python-packages/unittest/suite.py b/Monika After Story/game/python-packages/unittest/suite.py
new file mode 100644
index 0000000000..41993f9cf6
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/suite.py
@@ -0,0 +1,361 @@
+"""TestSuite"""
+
+import sys
+
+from . import case
+from . import util
+
+__unittest = True
+
+
+def _call_if_exists(parent, attr):
+ func = getattr(parent, attr, lambda: None)
+ func()
+
+
+class BaseTestSuite(object):
+ """A simple test suite that doesn't provide class or module shared fixtures.
+ """
+ _cleanup = True
+
+ def __init__(self, tests=()):
+ self._tests = []
+ self._removed_tests = 0
+ self.addTests(tests)
+
+ def __repr__(self):
+ return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return list(self) == list(other)
+
+ def __iter__(self):
+ return iter(self._tests)
+
+ def countTestCases(self):
+ cases = self._removed_tests
+ for test in self:
+ if test:
+ cases += test.countTestCases()
+ return cases
+
+ def addTest(self, test):
+ # sanity checks
+ if not callable(test):
+ raise TypeError("{} is not callable".format(repr(test)))
+ if isinstance(test, type) and issubclass(test,
+ (case.TestCase, TestSuite)):
+ raise TypeError("TestCases and TestSuites must be instantiated "
+ "before passing them to addTest()")
+ self._tests.append(test)
+
+ def addTests(self, tests):
+ if isinstance(tests, str):
+ raise TypeError("tests must be an iterable of tests, not a string")
+ for test in tests:
+ self.addTest(test)
+
+ def run(self, result):
+ for index, test in enumerate(self):
+ if result.shouldStop:
+ break
+ test(result)
+ if self._cleanup:
+ self._removeTestAtIndex(index)
+ return result
+
+ def _removeTestAtIndex(self, index):
+ """Stop holding a reference to the TestCase at index."""
+ try:
+ test = self._tests[index]
+ except TypeError:
+ # support for suite implementations that have overridden self._tests
+ pass
+ else:
+ # Some unittest tests add non TestCase/TestSuite objects to
+ # the suite.
+ if hasattr(test, 'countTestCases'):
+ self._removed_tests += test.countTestCases()
+ self._tests[index] = None
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ for test in self:
+ test.debug()
+
+
+class TestSuite(BaseTestSuite):
+ """A test suite is a composite test consisting of a number of TestCases.
+
+ For use, create an instance of TestSuite, then add test case instances.
+ When all tests have been added, the suite can be passed to a test
+ runner, such as TextTestRunner. It will run the individual test cases
+ in the order in which they were added, aggregating the results. When
+ subclassing, do not forget to call the base class constructor.
+ """
+
+ def run(self, result, debug=False):
+ topLevel = False
+ if getattr(result, '_testRunEntered', False) is False:
+ result._testRunEntered = topLevel = True
+
+ for index, test in enumerate(self):
+ if result.shouldStop:
+ break
+
+ if _isnotsuite(test):
+ self._tearDownPreviousClass(test, result)
+ self._handleModuleFixture(test, result)
+ self._handleClassSetUp(test, result)
+ result._previousTestClass = test.__class__
+
+ if (getattr(test.__class__, '_classSetupFailed', False) or
+ getattr(result, '_moduleSetUpFailed', False)):
+ continue
+
+ if not debug:
+ test(result)
+ else:
+ test.debug()
+
+ if self._cleanup:
+ self._removeTestAtIndex(index)
+
+ if topLevel:
+ self._tearDownPreviousClass(None, result)
+ self._handleModuleTearDown(result)
+ result._testRunEntered = False
+ return result
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ debug = _DebugResult()
+ self.run(debug, True)
+
+ ################################
+
+ def _handleClassSetUp(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if result._moduleSetUpFailed:
+ return
+ if getattr(currentClass, "__unittest_skip__", False):
+ return
+
+ try:
+ currentClass._classSetupFailed = False
+ except TypeError:
+ # test may actually be a function
+ # so its class will be a builtin-type
+ pass
+
+ setUpClass = getattr(currentClass, 'setUpClass', None)
+ if setUpClass is not None:
+ _call_if_exists(result, '_setupStdout')
+ try:
+ setUpClass()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ currentClass._classSetupFailed = True
+ className = util.strclass(currentClass)
+ self._createClassOrModuleLevelException(result, e,
+ 'setUpClass',
+ className)
+ finally:
+ _call_if_exists(result, '_restoreStdout')
+ if currentClass._classSetupFailed is True:
+ currentClass.doClassCleanups()
+ if len(currentClass.tearDown_exceptions) > 0:
+ for exc in currentClass.tearDown_exceptions:
+ self._createClassOrModuleLevelException(
+ result, exc[1], 'setUpClass', className,
+ info=exc)
+
+ def _get_previous_module(self, result):
+ previousModule = None
+ previousClass = getattr(result, '_previousTestClass', None)
+ if previousClass is not None:
+ previousModule = previousClass.__module__
+ return previousModule
+
+
+ def _handleModuleFixture(self, test, result):
+ previousModule = self._get_previous_module(result)
+ currentModule = test.__class__.__module__
+ if currentModule == previousModule:
+ return
+
+ self._handleModuleTearDown(result)
+
+
+ result._moduleSetUpFailed = False
+ try:
+ module = sys.modules[currentModule]
+ except KeyError:
+ return
+ setUpModule = getattr(module, 'setUpModule', None)
+ if setUpModule is not None:
+ _call_if_exists(result, '_setupStdout')
+ try:
+ setUpModule()
+ except Exception as e:
+ try:
+ case.doModuleCleanups()
+ except Exception as exc:
+ self._createClassOrModuleLevelException(result, exc,
+ 'setUpModule',
+ currentModule)
+ if isinstance(result, _DebugResult):
+ raise
+ result._moduleSetUpFailed = True
+ self._createClassOrModuleLevelException(result, e,
+ 'setUpModule',
+ currentModule)
+ finally:
+ _call_if_exists(result, '_restoreStdout')
+
+ def _createClassOrModuleLevelException(self, result, exc, method_name,
+ parent, info=None):
+ errorName = f'{method_name} ({parent})'
+ self._addClassOrModuleLevelException(result, exc, errorName, info)
+
+ def _addClassOrModuleLevelException(self, result, exception, errorName,
+ info=None):
+ error = _ErrorHolder(errorName)
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None and isinstance(exception, case.SkipTest):
+ addSkip(error, str(exception))
+ else:
+ if not info:
+ result.addError(error, sys.exc_info())
+ else:
+ result.addError(error, info)
+
+ def _handleModuleTearDown(self, result):
+ previousModule = self._get_previous_module(result)
+ if previousModule is None:
+ return
+ if result._moduleSetUpFailed:
+ return
+
+ try:
+ module = sys.modules[previousModule]
+ except KeyError:
+ return
+
+ tearDownModule = getattr(module, 'tearDownModule', None)
+ if tearDownModule is not None:
+ _call_if_exists(result, '_setupStdout')
+ try:
+ tearDownModule()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ self._createClassOrModuleLevelException(result, e,
+ 'tearDownModule',
+ previousModule)
+ finally:
+ _call_if_exists(result, '_restoreStdout')
+ try:
+ case.doModuleCleanups()
+ except Exception as e:
+ self._createClassOrModuleLevelException(result, e,
+ 'tearDownModule',
+ previousModule)
+
+ def _tearDownPreviousClass(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if getattr(previousClass, '_classSetupFailed', False):
+ return
+ if getattr(result, '_moduleSetUpFailed', False):
+ return
+ if getattr(previousClass, "__unittest_skip__", False):
+ return
+
+ tearDownClass = getattr(previousClass, 'tearDownClass', None)
+ if tearDownClass is not None:
+ _call_if_exists(result, '_setupStdout')
+ try:
+ tearDownClass()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ className = util.strclass(previousClass)
+ self._createClassOrModuleLevelException(result, e,
+ 'tearDownClass',
+ className)
+ finally:
+ _call_if_exists(result, '_restoreStdout')
+ previousClass.doClassCleanups()
+ if len(previousClass.tearDown_exceptions) > 0:
+ for exc in previousClass.tearDown_exceptions:
+ className = util.strclass(previousClass)
+ self._createClassOrModuleLevelException(result, exc[1],
+ 'tearDownClass',
+ className,
+ info=exc)
+
+
+class _ErrorHolder(object):
+ """
+ Placeholder for a TestCase inside a result. As far as a TestResult
+ is concerned, this looks exactly like a unit test. Used to insert
+ arbitrary errors into a test suite run.
+ """
+ # Inspired by the ErrorHolder from Twisted:
+ # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
+
+ # attribute used by TestResult._exc_info_to_string
+ failureException = None
+
+ def __init__(self, description):
+ self.description = description
+
+ def id(self):
+ return self.description
+
+ def shortDescription(self):
+ return None
+
+ def __repr__(self):
+ return "<ErrorHolder description=%r>" % (self.description,)
+
+ def __str__(self):
+ return self.id()
+
+ def run(self, result):
+ # could call result.addError(...) - but this test-like object
+ # shouldn't be run anyway
+ pass
+
+ def __call__(self, result):
+ return self.run(result)
+
+ def countTestCases(self):
+ return 0
+
+def _isnotsuite(test):
+ "A crude way to tell apart testcases and suites with duck-typing"
+ try:
+ iter(test)
+ except TypeError:
+ return True
+ return False
+
+
+class _DebugResult(object):
+ "Used by the TestSuite to hold previous class when running in debug."
+ _previousTestClass = None
+ _moduleSetUpFailed = False
+ shouldStop = False
diff --git a/Monika After Story/game/python-packages/unittest/util.py b/Monika After Story/game/python-packages/unittest/util.py
new file mode 100644
index 0000000000..050eaed0b3
--- /dev/null
+++ b/Monika After Story/game/python-packages/unittest/util.py
@@ -0,0 +1,170 @@
+"""Various utility functions."""
+
+from collections import namedtuple, Counter
+from os.path import commonprefix
+
+__unittest = True
+
+_MAX_LENGTH = 80
+_PLACEHOLDER_LEN = 12
+_MIN_BEGIN_LEN = 5
+_MIN_END_LEN = 5
+_MIN_COMMON_LEN = 5
+_MIN_DIFF_LEN = _MAX_LENGTH - \
+ (_MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN +
+ _PLACEHOLDER_LEN + _MIN_END_LEN)
+assert _MIN_DIFF_LEN >= 0
+
+def _shorten(s, prefixlen, suffixlen):
+ skip = len(s) - prefixlen - suffixlen
+ if skip > _PLACEHOLDER_LEN:
+ s = '%s[%d chars]%s' % (s[:prefixlen], skip, s[len(s) - suffixlen:])
+ return s
+
+def _common_shorten_repr(*args):
+ args = tuple(map(safe_repr, args))
+ maxlen = max(map(len, args))
+ if maxlen <= _MAX_LENGTH:
+ return args
+
+ prefix = commonprefix(args)
+ prefixlen = len(prefix)
+
+ common_len = _MAX_LENGTH - \
+ (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
+ if common_len > _MIN_COMMON_LEN:
+ assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
+ (maxlen - prefixlen) < _MAX_LENGTH
+ prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
+ return tuple(prefix + s[prefixlen:] for s in args)
+
+ prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
+ return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
+ for s in args)
+
+def safe_repr(obj, short=False):
+ try:
+ result = repr(obj)
+ except Exception:
+ result = object.__repr__(obj)
+ if not short or len(result) < _MAX_LENGTH:
+ return result
+ return result[:_MAX_LENGTH] + ' [truncated]...'
+
+def strclass(cls):
+ return "%s.%s" % (cls.__module__, cls.__qualname__)
+
+def sorted_list_difference(expected, actual):
+ """Finds elements in only one or the other of two, sorted input lists.
+
+ Returns a two-element tuple of lists. The first list contains those
+ elements in the "expected" list but not in the "actual" list, and the
+ second contains those elements in the "actual" list but not in the
+ "expected" list. Duplicate elements in either input list are ignored.
+ """
+ i = j = 0
+ missing = []
+ unexpected = []
+ while True:
+ try:
+ e = expected[i]
+ a = actual[j]
+ if e < a:
+ missing.append(e)
+ i += 1
+ while expected[i] == e:
+ i += 1
+ elif e > a:
+ unexpected.append(a)
+ j += 1
+ while actual[j] == a:
+ j += 1
+ else:
+ i += 1
+ try:
+ while expected[i] == e:
+ i += 1
+ finally:
+ j += 1
+ while actual[j] == a:
+ j += 1
+ except IndexError:
+ missing.extend(expected[i:])
+ unexpected.extend(actual[j:])
+ break
+ return missing, unexpected
+
+
+def unorderable_list_difference(expected, actual):
+ """Same behavior as sorted_list_difference but
+ for lists of unorderable items (like dicts).
+
+ As it does a linear search per item (remove) it
+ has O(n*n) performance."""
+ missing = []
+ while expected:
+ item = expected.pop()
+ try:
+ actual.remove(item)
+ except ValueError:
+ missing.append(item)
+
+ # anything left in actual is unexpected
+ return missing, actual
+
+def three_way_cmp(x, y):
+ """Return -1 if x < y, 0 if x == y and 1 if x > y"""
+ return (x > y) - (x < y)
+
+_Mismatch = namedtuple('Mismatch', 'actual expected value')
+
+def _count_diff_all_purpose(actual, expected):
+ 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
+ # elements need not be hashable
+ s, t = list(actual), list(expected)
+ m, n = len(s), len(t)
+ NULL = object()
+ result = []
+ for i, elem in enumerate(s):
+ if elem is NULL:
+ continue
+ cnt_s = cnt_t = 0
+ for j in range(i, m):
+ if s[j] == elem:
+ cnt_s += 1
+ s[j] = NULL
+ for j, other_elem in enumerate(t):
+ if other_elem == elem:
+ cnt_t += 1
+ t[j] = NULL
+ if cnt_s != cnt_t:
+ diff = _Mismatch(cnt_s, cnt_t, elem)
+ result.append(diff)
+
+ for i, elem in enumerate(t):
+ if elem is NULL:
+ continue
+ cnt_t = 0
+ for j in range(i, n):
+ if t[j] == elem:
+ cnt_t += 1
+ t[j] = NULL
+ diff = _Mismatch(0, cnt_t, elem)
+ result.append(diff)
+ return result
+
+def _count_diff_hashable(actual, expected):
+ 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
+ # elements must be hashable
+ s, t = Counter(actual), Counter(expected)
+ result = []
+ for elem, cnt_s in s.items():
+ cnt_t = t.get(elem, 0)
+ if cnt_s != cnt_t:
+ diff = _Mismatch(cnt_s, cnt_t, elem)
+ result.append(diff)
+ for elem, cnt_t in t.items():
+ if elem not in s:
+ diff = _Mismatch(0, cnt_t, elem)
+ result.append(diff)
+ return result
diff --git a/Monika After Story/game/python-packages/win32api.pyd b/Monika After Story/game/python-packages/win32api.pyd
deleted file mode 100644
index 6efa006706..0000000000
Binary files a/Monika After Story/game/python-packages/win32api.pyd and /dev/null differ
diff --git a/Monika After Story/game/python-packages/win32gui.pyd b/Monika After Story/game/python-packages/win32gui.pyd
deleted file mode 100644
index 63d2734c7d..0000000000
Binary files a/Monika After Story/game/python-packages/win32gui.pyd and /dev/null differ
diff --git a/Monika After Story/game/python-packages/winnie32api/__init__.py b/Monika After Story/game/python-packages/winnie32api/__init__.py
new file mode 100644
index 0000000000..73c74a7124
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/__init__.py
@@ -0,0 +1,15 @@
+"""
+winnie32api - minimalistic Windows API
+
+Provides a small set of utilities so that heavy dependencies like pywin32/win32api don't have to be bundled
+"""
+
+from __future__ import annotations
+
+__title__ = "winnie32api"
+__author__ = "Booplicate"
+__version__ = "0.1.2"
+
+from .mouse import *
+from .windows import *
+from .notifs import *
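+
+# Illustrative usage sketch (not part of the API; assumes a Windows host and
+# that the star imports above re-export the public helpers; the window title
+# is a hypothetical example):
+#
+#     import winnie32api
+#
+#     hwnd = winnie32api.get_hwnd_by_title("Some Window Title")
+#     if hwnd is not None:
+#         winnie32api.flash_window(hwnd, count=2)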
diff --git a/Monika After Story/game/python-packages/winnie32api/common.py b/Monika After Story/game/python-packages/winnie32api/common.py
new file mode 100644
index 0000000000..9df75297bf
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/common.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import ctypes
+from ctypes.wintypes import (
+ INT
+)
+from dataclasses import dataclass
+
+from typing import (
+ Any,
+ NamedTuple
+)
+
+
+user32 = ctypes.windll.user32
+kernel32 = ctypes.windll.kernel32
+
+
+Coord = int
+
+class Point(NamedTuple):
+ """
+ Represents a point on a screen
+ """
+ x: Coord
+ y: Coord
+
+class Rect(NamedTuple):
+ """
+ Represents a rectangle on a screen
+ """
+ top_left: Point
+ bottom_right: Point
+
+ @classmethod
+ def from_coords(cls, left: Coord, top: Coord, right: Coord, bottom: Coord) -> Rect:
+ """
+ Constructs a rect from 4 coordinates
+ """
+ return cls(
+ Point(left, top),
+ Point(right, bottom)
+ )
+
+
+@dataclass
+class Pack():
+ """
+ A small dataclass used as a mutable pointer to an inner value
+ """
+ value: Any
+
+
+def _reset_last_err():
+ """
+ Clears the last error
+ """
+ kernel32.SetLastError(INT(0))
+
+def _get_last_err() -> int:
+ """
+ Returns the last error code
+ """
+ return kernel32.GetLastError()
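+
+# Illustrative example of the coordinate helpers above (values are arbitrary):
+#
+#     rect = Rect.from_coords(0, 0, 1920, 1080)
+#     rect.top_left       # Point(x=0, y=0)
+#     rect.bottom_right   # Point(x=1920, y=1080)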
diff --git a/Monika After Story/game/python-packages/winnie32api/errors.py b/Monika After Story/game/python-packages/winnie32api/errors.py
new file mode 100644
index 0000000000..48fed9f17d
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/errors.py
@@ -0,0 +1,27 @@
+class Winnie32APIError(Exception):
+ """
+ The base class for all exceptions in winnie32api
+ """
+
+class WinAPIError(Winnie32APIError):
+ """
+ Represents an error in win API
+ """
+ def __init__(self, msg: str, code: int):# pylint: disable=super-init-not-called
+ self.msg = msg
+ self.code = code
+
+ def __str__(self) -> str:
+ return f"{self.msg}. Status code: {self.code}"
+
+class NotifError(Winnie32APIError):
+ """
+ The base class for notification-related exceptions
+ """
+
+class ManagerAlreadyExistsError(NotifError):
+ """
+ An error raised when trying to create more than one manager
+ """
+ def __str__(self) -> str:
+ return "notification manager has already been defined for this app"
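+
+# Illustrative example (the hwnd is hypothetical; wrappers in this package
+# raise WinAPIError with the last win32 error code when a call fails):
+#
+#     try:
+#         title = get_window_title(some_hwnd)
+#     except WinAPIError as e:
+#         print(e)   # "<message>. Status code: <code>"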
diff --git a/Monika After Story/game/python-packages/winnie32api/mouse.py b/Monika After Story/game/python-packages/winnie32api/mouse.py
new file mode 100644
index 0000000000..eab4b835d3
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/mouse.py
@@ -0,0 +1,28 @@
+__all__ = (
+ "get_screen_mouse_pos",
+)
+
+import ctypes
+import ctypes.wintypes as wt
+
+from .common import Point, _get_last_err
+from .errors import WinAPIError
+
+
+user32 = ctypes.windll.user32
+
+
+user32.GetCursorPos.argtypes = (wt.LPPOINT,)
+user32.GetCursorPos.restype = wt.BOOL
+
+
+def get_screen_mouse_pos() -> Point:
+ """
+ Returns mouse position in screen coords
+ """
+ c_point = wt.POINT()
+ result = user32.GetCursorPos(ctypes.byref(c_point))
+ if not result:
+ raise WinAPIError("failed to get mouse position", _get_last_err())
+
+ return Point(c_point.x, c_point.y)# type: ignore
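+
+# Illustrative usage (assumes a Windows host):
+#
+#     pos = get_screen_mouse_pos()
+#     print(pos.x, pos.y)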
diff --git a/Monika After Story/game/python-packages/winnie32api/notifs.py b/Monika After Story/game/python-packages/winnie32api/notifs.py
new file mode 100644
index 0000000000..e211c0d614
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/notifs.py
@@ -0,0 +1,853 @@
+# pylint: disable=attribute-defined-outside-init
+# pylint: disable=invalid-name
+from __future__ import annotations
+
+__all__ = (
+ "NotifManager",
+)
+
+import ctypes
+import ctypes.wintypes as wt
+import weakref
+import threading
+import atexit
+from collections.abc import Callable
+
+from .common import _get_last_err
+from .errors import WinAPIError, ManagerAlreadyExistsError
+
+
+user32 = ctypes.windll.user32
+kernel32 = ctypes.windll.kernel32
+shell32 = ctypes.windll.shell32
+
+
+APP_ID = 922
+
+
+# This is missing from wintypes
+LRESULT = wt.LPARAM#ctypes.c_long
+# This has an undocumented value, but it seems to work
+CW_USEDEFAULT = -2147483648
+# There's literally only one place on the internet where it says the value is -3
+# and it's not microsoft docs. At least it seems to work...
+HWND_MESSAGE = -3
+# Undocumented, but probably correct
+NOTIFYICON_VERSION_4 = 4
+# The base value of user-defined msgs
+WM_USER = 0x0400
+
+
+WNDPROC = ctypes.WINFUNCTYPE(LRESULT, wt.HWND, wt.UINT, wt.WPARAM, wt.LPARAM)
+
+class NotifyIconDataW(ctypes.Structure):
+ """
+ Docs: https://docs.microsoft.com/en-us/windows/win32/api/shellapi/ns-shellapi-notifyicondataw#syntax
+ """
+ _fields_ = [
+ ("cbSize", wt.DWORD),
+ ("hWnd", wt.HWND),
+ ("uID", wt.UINT),
+ ("uFlags", wt.UINT),
+ ("uCallbackMessage", wt.UINT),
+ ("hIcon", wt.HICON),
+ ("szTip", wt.WCHAR * 128),
+ ("dwState", wt.DWORD),
+ ("dwStateMask", wt.DWORD),
+ ("szInfo", wt.WCHAR * 256),
+ ("uVersion", wt.UINT),
+ ("szInfoTitle", wt.WCHAR * 64),
+ ("dwInfoFlags", wt.DWORD),
+ ("guidItem", ctypes.c_char * 16),
+ ("hBalloonIcon", wt.HICON)
+ ]
+
+class WndClassExw(ctypes.Structure):
+ """
+ Docs: https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-wndclassexw#syntax
+ """
+ _fields_ = [
+ ("cbSize", wt.UINT),
+ ("style", wt.UINT),
+ ("lpfnWndProc", WNDPROC),
+ ("cbClsExtra", wt.INT),
+ ("cbWndExtra", wt.INT),
+ ("hInstance", wt.HINSTANCE),
+ ("hIcon", wt.HICON),
+ ("hCursor", wt.HANDLE),
+ ("hbrBackground", wt.HBRUSH),
+ ("lpszMenuName", wt.LPCWSTR),
+ ("lpszClassName", wt.LPCWSTR),
+ ("hIconSm", wt.HICON),
+ ]
+
+class Msg(ctypes.Structure):
+ """
+ Docs: https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-msg
+ """
+ _fields_ = [
+ ("hwnd", wt.HWND),
+ ("message", wt.UINT),
+ ("wParam", wt.WPARAM),
+ ("lParam", wt.LPARAM),
+ ("time", wt.DWORD),
+ ("pt", wt.POINT),
+ ("lPrivate", wt.DWORD)
+ ]
+
+
+class NIF():
+ """
+ 0x00000001. The uCallbackMessage member is valid.
+ 0x00000002. The hIcon member is valid.
+ 0x00000004. The szTip member is valid.
+ 0x00000008. The dwState and dwStateMask members are valid.
+ 0x00000010. Display a balloon notification.
+ The szInfo, szInfoTitle, dwInfoFlags, and uTimeout members are valid.
+ Note that uTimeout is valid only in Windows 2000 and Windows XP.
+ To display the balloon notification, specify NIF_INFO and provide text in szInfo.
+ To remove a balloon notification, specify NIF_INFO and provide an empty
+ string through szInfo.
+ To add a notification area icon without displaying a notification,
+ do not set the NIF_INFO flag.
+ 0x00000020.
+ Windows 7 and later: The guidItem is valid.
+ Windows Vista and earlier: Reserved.
+ 0x00000040. Windows Vista and later.
+ If the balloon notification cannot be displayed immediately, discard it.
+ 0x00000080. Windows Vista and later. Use the standard tooltip.
+ """
+ MESSAGE = 0x00000001
+ ICON = 0x00000002
+ TIP = 0x00000004
+ STATE = 0x00000008
+ INFO = 0x00000010
+ GUID = 0x00000020
+ REALTIME = 0x00000040
+ SHOWTIP = 0x00000080
+
+class NIS():
+ """
+ The state of the icon. One or both of the following values
+ 0x00000001. The icon is hidden.
+ 0x00000002. The icon resource is shared between multiple icons.
+ """
+ HIDDEN = 0x00000001
+ SHAREDICON = 0x00000002
+
+class NIIF():
+ """
+ 0x00000000. No icon.
+ 0x00000001. An information icon.
+ 0x00000002. A warning icon.
+ 0x00000003. An error icon.
+ 0x00000004. Windows XP SP2 and later.
+ Windows XP: Use the icon identified in hIcon
+ as the notification balloon's title icon.
+ Windows Vista and later: Use the icon identified in hBalloonIcon
+ as the notification balloon's title icon.
+ 0x00000010. Windows XP and later.
+ Do not play the associated sound. Applies only to notifications.
+ 0x00000020. Windows Vista and later.
+ The large version of the icon should be used as the notification icon.
+ 0x00000080. Windows 7 and later.
+ Do not display the balloon notification if the current user is in "quiet time"
+ 0x0000000F. Windows XP and later. Reserved.
+ """
+ NONE = 0x00000000
+ INFO = 0x00000001
+ WARNING = 0x00000002
+ ERROR = 0x00000003
+ USER = 0x00000004
+ NOSOUND = 0x00000010
+ LARGE_ICON = 0x00000020
+ RESPECT_QUIET_TIME = 0x00000080
+ ICON_MASK = 0x0000000F
+
+class NIM():
+ """
+ 0x00000000. Adds an icon to the status area.
+ 0x00000001. Modifies an icon in the status area.
+ 0x00000002. Deletes an icon from the status area.
+ 0x00000003. Shell32.dll version 5.0 and later only.
+ Returns focus to the taskbar notification area.
+ 0x00000004. Shell32.dll version 5.0 and later only.
+ Instructs the notification area to behave according to the version number
+ specified in the uVersion member of the structure pointed to by lpdata.
+ """
+ ADD = 0x00000000
+ MODIFY = 0x00000001
+ DELETE = 0x00000002
+ SETFOCUS = 0x00000003
+ SETVERSION = 0x00000004
+
+
+class LR():
+ """
+ 0x00002000. When the uType parameter specifies IMAGE_BITMAP,
+ causes the function to return a DIB section bitmap rather than a compatible bitmap.
+ This flag is useful for loading a bitmap without mapping it
+ to the colors of the display device.
+ 0x00000000. The default flag; it does nothing. All it means is "not LR_MONOCHROME".
+ 0x00000040. Uses the width or height specified by the system metric values
+ for cursors or icons, if the cxDesired or cyDesired values are set to zero.
+ 0x00000010. Loads the stand-alone image from the file specified by lpszName
+ (icon, cursor, or bitmap file).
+ 0x00001000. Searches the color table for the image and replaces
+ the following shades of gray with the corresponding 3-D color.
+ Dk Gray, RGB(128,128,128) with COLOR_3DSHADOW
+ Gray, RGB(192,192,192) with COLOR_3DFACE
+ Lt Gray, RGB(223,223,223) with COLOR_3DLIGHT
+ Do not use this option if you are loading a bitmap with a color depth greater than 8bpp.
+ 0x00000020. Retrieves the color value of the first pixel in the image
+ and replaces the corresponding entry in the color table with
+ the default window color (COLOR_WINDOW).
+ 0x00000001. Loads the image in black and white.
+ 0x00008000. Shares the image handle if the image is loaded multiple times.
+ 0x00000080. Uses true VGA colors.
+ """
+ CREATEDIBSECTION = 0x00002000
+ DEFAULTCOLOR = 0x00000000
+ DEFAULTSIZE = 0x00000040
+ LOADFROMFILE = 0x00000010
+ LOADMAP3DCOLORS = 0x00001000
+ LOADTRANSPARENT = 0x00000020
+ MONOCHROME = 0x00000001
+ SHARED = 0x00008000
+ VGACOLOR = 0x00000080
+
+class WS():
+ """
+ Docs: https://docs.microsoft.com/en-us/windows/win32/winmsg/window-styles
+ """
+ BORDER = 0x00800000
+ CAPTION = 0x00C00000
+ CHILD = 0x40000000
+ CHILDWINDOW = 0x40000000
+ CLIPCHILDREN = 0x02000000
+ CLIPSIBLINGS = 0x04000000
+ DISABLED = 0x08000000
+ DLGFRAME = 0x00400000
+ GROUP = 0x00020000
+ HSCROLL = 0x00100000
+ ICONIC = 0x20000000
+ MAXIMIZE = 0x01000000
+ MAXIMIZEBOX = 0x00010000
+ MINIMIZE = 0x20000000
+ MINIMIZEBOX = 0x00020000
+ OVERLAPPED = 0x00000000
+ SYSMENU = 0x00080000
+ THICKFRAME = 0x00040000
+ OVERLAPPEDWINDOW = (
+ OVERLAPPED | CAPTION | SYSMENU | THICKFRAME | MINIMIZEBOX | MAXIMIZEBOX
+ )
+ POPUP = 0x80000000
+ POPUPWINDOW = POPUP | BORDER | SYSMENU
+ SIZEBOX = 0x00040000
+ TABSTOP = 0x00010000
+ TILED = 0x00000000
+ TILEDWINDOW = (
+ OVERLAPPED | CAPTION | SYSMENU | THICKFRAME | MINIMIZEBOX | MAXIMIZEBOX
+ )
+ VISIBLE = 0x10000000
+ VSCROLL = 0x00200000
+
+class IMAGE():
+ """
+ 0. Copies a bitmap.
+ 2. Copies a cursor.
+ 1. Copies an icon.
+ """
+ BITMAP = 0
+ CURSOR = 2
+ ICON = 1
+
+# class WM():
+# """
+# The documentation is too scattered for these
+# constants, so I implemented only the minimal set
+# Docs: https://docs.microsoft.com/en-us/windows/win32/winmsg/window-notifications
+# """
+# CREATE = 0x0001
+# DESTROY = 0x0002
+# GETMINMAXINFO = 0x0024
+# NCCALCSIZE = 0x0083
+# NCCREATE = 0x0081
+# NCDESTROY = 0x0082
+# CLOSE = 0x0010
+# QUIT = 0x0012
+
+# class PM():
+# """
+# NOREMOVE. Messages are not removed from the queue
+# after processing by PeekMessage
+# REMOVE. Messages are removed from the queue after processing
+# NOYIELD. Prevents the system from releasing any thread that is waiting
+# for the caller to go idle (see WaitForInputIdle).
+# Combine this value with either PM_NOREMOVE or PM_REMOVE
+# """
+# NOREMOVE = 0x0000
+# REMOVE = 0x0001
+# NOYIELD = 0x0002
+
+class MsgValue():
+ """
+ A namespace for message value constants
+ """
+ TRAY_ICON_EVENT = WM_USER + 1
+ SHUTDOWN_THREAD = WM_USER + 999
+
+class LParamValue():
+ """
+ A namespace for LPARAM value constants.
+ These are only the ones I was able to find via trial and error;
+ sadly they have no documented values
+ """
+ NOTIF_SHOW = 60425218
+ NOTIF_HIDE = 60425220
+ # Not sure about this one
+ NOTIF_DISMISS = 60425221
+ HOVER = 60424704
+ LMB_PRESS = 60424705
+ LMB_DPRESS = 60424707# double press
+ # Not sure about this one
+ LMB_HOLD = 60425216
+ LMB_RELEASE = 60424706
+ MMB_PRESS = 60424711
+ MMB_RELEASE = 60424712
+ RMB_PRESS = 60424708
+ RMB_DPRESS = 60424710# double press
+ # Not sure about this one
+ RMB_HOLD = 60424315
+ RMB_RELEASE = 60424709
+
+
+user32.LoadImageW.argtypes = (
+ wt.HINSTANCE, wt.LPCWSTR, wt.UINT, wt.INT, wt.INT, wt.UINT
+)
+user32.LoadImageW.restype = wt.HANDLE
+
+user32.DestroyIcon.argtypes = (wt.HICON,)
+user32.DestroyIcon.restype = wt.BOOL
+
+kernel32.GetModuleHandleW.argtypes = (wt.LPCWSTR,)
+kernel32.GetModuleHandleW.restype = wt.HMODULE
+
+user32.DefWindowProcW.argtypes = (wt.HWND, wt.UINT, wt.WPARAM, wt.LPARAM)
+user32.DefWindowProcW.restype = LRESULT
+
+user32.RegisterClassExW.argtypes = (ctypes.POINTER(WndClassExw),)
+user32.RegisterClassExW.restype = wt.ATOM
+
+user32.UnregisterClassW.argtypes = (wt.LPCWSTR, wt.HINSTANCE)
+user32.UnregisterClassW.restype = wt.BOOL
+
+user32.CreateWindowExW.argtypes = (
+ wt.DWORD,
+ wt.ATOM,# This could be LPCWSTR instead of ATOM, but we'd have to use cls name
+ wt.LPCWSTR, wt.DWORD,
+ wt.INT, wt.INT, wt.INT, wt.INT,
+ wt.HWND, wt.HMENU, wt.HINSTANCE, wt.LPVOID
+)
+user32.CreateWindowExW.restype = wt.HWND
+
+user32.UpdateWindow.argtypes = (wt.HWND,)
+user32.UpdateWindow.restype = wt.BOOL
+
+user32.DestroyWindow.argtypes = (wt.HWND,)
+user32.DestroyWindow.restype = wt.BOOL
+
+shell32.Shell_NotifyIconW.argtypes = (wt.DWORD, ctypes.POINTER(NotifyIconDataW))
+shell32.Shell_NotifyIconW.restype = wt.BOOL
+
+user32.GetMessageW.argtypes = (ctypes.POINTER(Msg), wt.HWND, wt.UINT, wt.UINT)
+user32.GetMessageW.restype = wt.INT
+
+# user32.PeekMessageW.argtypes = (ctypes.POINTER(Msg), wt.HWND, wt.UINT, wt.UINT, wt.UINT)
+# user32.PeekMessageW.restype = wt.BOOL
+
+user32.TranslateMessage.argtypes = (ctypes.POINTER(Msg),)
+user32.TranslateMessage.restype = wt.BOOL
+
+user32.DispatchMessageW.argtypes = (ctypes.POINTER(Msg),)
+user32.DispatchMessageW.restype = LRESULT
+
+kernel32.GetCurrentThreadId.argtypes = ()
+kernel32.GetCurrentThreadId.restype = wt.DWORD
+
+user32.AttachThreadInput.argtypes = (wt.DWORD, wt.DWORD, wt.BOOL)
+user32.AttachThreadInput.restype = wt.BOOL
+
+user32.PostMessageW.argtypes = (wt.HWND, wt.UINT, wt.WPARAM, wt.LPARAM)
+user32.PostMessageW.restype = wt.BOOL
+
+
+NotifCallback = Callable[[], None]
+
+class _App():
+ """
+ Private class to represent an app
+ """
+ def __init__(
+ self,
+ name: str,
+ icon_path: str|None,
+ on_show: NotifCallback|None,
+ on_hide: NotifCallback|None,
+ on_dismiss: NotifCallback|None,
+ on_hover: NotifCallback|None,
+ on_lmb_click: NotifCallback|None,
+ on_lmb_dclick: NotifCallback|None,
+ on_mmb_click: NotifCallback|None,
+ on_rmb_click: NotifCallback|None,
+ on_rmb_dclick: NotifCallback|None
+ ):
+ """
+ Constructor
+
+ IN:
+ name - the name of the app
+ icon_path - path to an optional icon for this notif
+ on_show - on notif show event callback
+ on_hide - on notif hide event callback
+ on_dismiss - on notif dismiss event callback
+ on_hover - on hover event callback
+ on_lmb_click - on left click event callback
+ on_lmb_dclick - on left double click event callback
+ on_mmb_click - on middle click event callback
+ on_rmb_click - on right click event callback
+ on_rmb_dclick - on right double click event callback
+ """
+ self._name = name
+ self._icon_path = icon_path
+
+ self._callback_map = {
+ LParamValue.NOTIF_SHOW: on_show,
+ LParamValue.NOTIF_HIDE: on_hide,
+ LParamValue.NOTIF_DISMISS: on_dismiss,
+ LParamValue.HOVER: on_hover,
+ LParamValue.LMB_PRESS: on_lmb_click,
+ LParamValue.LMB_DPRESS: on_lmb_dclick,
+ LParamValue.MMB_PRESS: on_mmb_click,
+ LParamValue.RMB_PRESS: on_rmb_click,
+ LParamValue.RMB_DPRESS: on_rmb_dclick
+ }
+
+ self._thread: threading.Thread | None = None
+ self._is_shown = False
+
+ self._hinstance = None
+ self._win_cls = None
+ self._cls_atom = None
+ self._hicon = None
+ self._hwnd = None
+
+ def _init(self):
+ """
+ Allocates resources for the app
+ """
+ self._set_hinstance()
+ self._register_win_cls()
+ self._load_icon()
+ self._create_win()
+ self._show_tray_icon()
+
+ def _deinit(self):
+ """
+ Deallocates the app's resources
+ """
+ self._hide_tray_icon()
+ self._destroy_win()
+ self._unload_icon()
+ self._unregister_win_cls()
+ self._unset_hinstance()
+
+ def __del__(self):
+ """
+ Cleanup on gc
+ """
+ self.stop()
+ # Just in case
+ self._deinit()
+
+ def _load_icon(self):
+ """
+ Loads the app icon
+ """
+ if self._icon_path:
+ icon_flags = LR.DEFAULTCOLOR | LR.LOADFROMFILE | LR.DEFAULTSIZE | LR.SHARED
+ hicon = user32.LoadImageW(
+ None,# Use NULL since we're loading a "stand-alone" resource
+ self._icon_path,
+ IMAGE.ICON,
+ 0,
+ 0,
+ icon_flags
+ )
+ if not hicon:
+ raise WinAPIError("failed to load icon", _get_last_err())
+
+ else:
+ hicon = 0# TODO: doesn't work
+
+ self._hicon = hicon
+
+ def _unload_icon(self):
+ """
+ Unloads the app icon
+ """
+ if self._hicon:
+ user32.DestroyIcon(self._hicon)
+ self._hicon = None
+
+ def _set_hinstance(self):
+ """
+ Gets the module handle for the current process
+ """
+ handle = kernel32.GetModuleHandleW(None)
+ if not handle:
+ raise WinAPIError("failed to get module handle", _get_last_err())
+ self._hinstance = handle
+
+ def _unset_hinstance(self):
+ """
+ Clears the stored module handle
+ """
+ self._hinstance = None
+
+ def _register_win_cls(self):
+ """
+ Registers a window class
+ """
+ def winproc(hwnd: wt.HWND, msg: wt.UINT, wparam: wt.WPARAM, lparam: wt.LPARAM) -> LRESULT:
+ cb = self._callback_map.get(lparam, None)# type: ignore
+ if cb:
+ cb()
+ return 1# type: ignore
+ # print(f"{hex(msg)}: {wparam} | {lparam}")# type: ignore
+ return user32.DefWindowProcW(hwnd, msg, wparam, lparam)
+
+ self._win_cls = win_cls = WndClassExw()
+ win_cls.cbSize = ctypes.sizeof(win_cls)
+ win_cls.style = 0
+ win_cls.lpfnWndProc = WNDPROC(winproc)
+ win_cls.cbClsExtra = 0
+ win_cls.cbWndExtra = 0
+ win_cls.hInstance = self._hinstance
+ win_cls.hIcon = 0
+ win_cls.hCursor = 0
+ win_cls.hbrBackground = 0
+ win_cls.lpszClassName = self._name
+
+ cls_atom = user32.RegisterClassExW(ctypes.byref(win_cls))
+ if not cls_atom:
+ raise WinAPIError("failed to create class ATOM", _get_last_err())
+ self._cls_atom = cls_atom
+
+ def _unregister_win_cls(self):
+ """
+ Unregisters the window class
+ """
+ if self._win_cls:
+ user32.UnregisterClassW(self._win_cls.lpszClassName, self._hinstance)
+ self._win_cls = None
+ self._cls_atom = None
+
+ def _create_win(self):
+ """
+ Creates a tray window
+ """
+ win_style = WS.OVERLAPPED | WS.SYSMENU
+ hwnd = user32.CreateWindowExW(
+ 0,
+ self._cls_atom,
+ # self._win_cls.lpszClassName,
+ # self._name,
+ self._win_cls.lpszClassName,
+ win_style,
+ CW_USEDEFAULT,
+ CW_USEDEFAULT,
+ CW_USEDEFAULT,
+ CW_USEDEFAULT,
+ HWND_MESSAGE,
+ None,
+ self._hinstance,
+ None
+ )
+ if not hwnd:
+ raise WinAPIError("failed to create a window", _get_last_err())
+ user32.UpdateWindow(hwnd)
+ self._hwnd = hwnd
+
+ def _destroy_win(self):
+ """
+ Destroys the tray window
+ """
+ if self._hwnd:
+ user32.DestroyWindow(self._hwnd)
+ self._hwnd = None
+
+ def _get_base_nid(self) -> NotifyIconDataW:
+ """
+ Constructs and returns "base" version of NotifyIconDataW
+ """
+ nid = NotifyIconDataW()
+ nid.cbSize = ctypes.sizeof(nid)
+ nid.hWnd = self._hwnd
+ nid.uID = APP_ID
+ nid.uFlags = NIF.ICON | NIF.STATE | NIF.MESSAGE | NIF.TIP
+ nid.uCallbackMessage = MsgValue.TRAY_ICON_EVENT
+ nid.hIcon = self._hicon
+ nid.szTip = self._name[:128]
+ # nid.szInfo = body[:256]
+ nid.uVersion = NOTIFYICON_VERSION_4
+ # nid.szInfoTitle = title[:64]
+ nid.dwInfoFlags = NIIF.NOSOUND | NIIF.USER | NIIF.LARGE_ICON | NIIF.RESPECT_QUIET_TIME
+
+ return nid
+
+ def _run_event_loop(self):
+ msg = Msg()
+ msg_p = ctypes.byref(msg)
+ hwnd = self._hwnd
+ # both 0s == no filter
+ filter_min = 0
+ filter_max = 0
+ # print("starting")
+ while (rv := user32.GetMessageW(msg_p, hwnd, filter_min, filter_max)) != 0:
+ # print("pumped")
+ if rv == -1:
+ raise WinAPIError("GetMessageW returned an error code", _get_last_err())
+ if msg.message == MsgValue.SHUTDOWN_THREAD:
+ # print("shutting down")
+ break
+ user32.TranslateMessage(msg_p)
+ user32.DispatchMessageW(msg_p)
+
+ # print("exiting")
+
+ def _run(self, main_th_id: int):
+ """
+ Shows the app + runs the event loop + hides the app
+ NOTE: Blocking call
+ """
+ child_th_id = kernel32.GetCurrentThreadId()
+ user32.AttachThreadInput(main_th_id, child_th_id, True)
+ self._init()
+ try:
+ self._run_event_loop()
+ finally:
+ self._deinit()
+ user32.AttachThreadInput(main_th_id, child_th_id, False)
+
+ def start(self):
+ """
+ Runs the app
+ """
+ if not self._thread:
+ main_th_id = kernel32.GetCurrentThreadId()
+ self._thread = thread = threading.Thread(
+ target=self._run,
+ args=(main_th_id,),
+ daemon=True
+ )
+ thread.start()
+
+ def stop(self):
+ """
+ Stops the app
+ NOTE: this will block until the app is stopped
+ """
+ if self._hwnd:
+ user32.PostMessageW(self._hwnd, MsgValue.SHUTDOWN_THREAD, 0, 0)
+ if self._thread:
+ self._thread.join()
+ self._thread = None
+
+ def _show_tray_icon(self) -> bool:
+ """
+ Shows the app's tray icon
+ """
+ rv = False
+
+ if not self._is_shown:
+ nid = self._get_base_nid()
+ rv = bool(shell32.Shell_NotifyIconW(NIM.ADD, ctypes.byref(nid)))
+ if rv:
+ shell32.Shell_NotifyIconW(NIM.SETVERSION, ctypes.byref(nid))
+ self._is_shown = True
+
+ return rv
+
+ def _hide_tray_icon(self) -> bool:
+ """
+ Hides the app's tray icon
+ """
+ rv = False
+
+ if self._is_shown:
+ nid = self._get_base_nid()
+ rv = bool(shell32.Shell_NotifyIconW(NIM.DELETE, ctypes.byref(nid)))
+
+ self._is_shown = False
+
+ return rv
+
+ def send_notif(self, title: str, body: str) -> bool:
+ """
+ Sends a notification
+
+ IN:
+ title - the title of the notification
+ body - the body of the notification
+ """
+ if not self._is_shown:
+ return False
+
+ nid = self._get_base_nid()
+
+ nid.uFlags |= NIF.INFO | NIF.SHOWTIP
+ nid.szInfo = body[:256]
+ nid.szInfoTitle = title[:64]
+
+ return bool(shell32.Shell_NotifyIconW(NIM.MODIFY, ctypes.byref(nid)))
+
+ def clear_notifs(self):
+ """
+ Clears notifications
+ """
+ if not self._is_shown:
+ return
+
+ # According to microsoft docs this should work
+ # but it works 1 out of 20 times...
+ # nid = self._get_base_nid()
+ # nid.uFlags = NIF.INFO
+ # nid.szInfo = ""
+ # nid.szInfoTitle = ""
+ # shell32.Shell_NotifyIconW(NIM.MODIFY, ctypes.byref(nid))
+
+ # So we're doing this hack
+ self._hide_tray_icon()
+ self._show_tray_icon()
+
+
+class NotifManager():
+ """
+ Notification manager
+ """
+ _instance = None
+
+ def __new__(cls, *args, **kwargs) -> NotifManager:# pylint: disable=unused-argument
+ """
+ Singleton implementation
+ """
+ if cls._instance is not None:
+ raise ManagerAlreadyExistsError()
+
+ self = super().__new__(cls)
+ cls._instance = weakref.ref(self)
+
+ return self
+
+ def __init__(
+ self,
+ app_name: str,
+ icon_path: str|None = None,
+ on_show: NotifCallback|None = None,
+ on_hide: NotifCallback|None = None,
+ on_dismiss: NotifCallback|None = None,
+ on_hover: NotifCallback|None = None,
+ on_lmb_click: NotifCallback|None = None,
+ on_lmb_dclick: NotifCallback|None = None,
+ on_mmb_click: NotifCallback|None = None,
+ on_rmb_click: NotifCallback|None = None,
+ on_rmb_dclick: NotifCallback|None = None
+ ):
+ """
+ Constructor
+
+ IN:
+ app_name - the app name shared by the notifs
+ icon_path - the path to the icon shared by the notifs
+ on_show - on notif show event callback
+ (Default: None)
+ on_hide - on notif hide event callback
+ (Default: None)
+ on_dismiss - on notif dismiss event callback
+ if a dismiss event has been fired, hide won't be fired
+ (Default: None)
+ on_hover - on hover event callback
+ NOTE: the hover callback may run even during click events
+ (Default: None)
+ on_lmb_click - on left click event callback
+ (Default: None)
+ on_lmb_dclick - on left double click event callback
+ NOTE: before a double click event, a click event will still be fired
+ (Default: None)
+ on_mmb_click - on middle click event callback
+ (Default: None)
+ on_rmb_click - on right click event callback
+ (Default: None)
+ on_rmb_dclick - on right double click event callback
+ NOTE: before a double click event, a click event will still be fired
+ (Default: None)
+ """
+ # Ask the interpreter for cleanup
+ atexit.register(self.shutdown)
+
+ self._app: _App|None = _App(
+ app_name,
+ icon_path,
+ on_show=on_show,
+ on_hide=on_hide,
+ on_dismiss=on_dismiss,
+ on_hover=on_hover,
+ on_lmb_click=on_lmb_click,
+ on_lmb_dclick=on_lmb_dclick,
+ on_mmb_click=on_mmb_click,
+ on_rmb_click=on_rmb_click,
+ on_rmb_dclick=on_rmb_dclick
+ )
+ self._app.start()
+
+ def __del__(self):
+ self.shutdown()
+
+ def is_ready(self) -> bool:
+ """
+ Checks if the manager and app are ready to send notifications
+ """
+ return self._app is not None and self._app._is_shown# pylint: disable=protected-access
+
+ def send(self, title: str, body: str) -> bool:
+ """
+ Sends a notification
+
+ IN:
+ title - the title of the notification
+ body - the body of the notification
+
+ OUT:
+ boolean - success status
+ """
+ if not self._app:
+ return False
+ return self._app.send_notif(title, body)
+
+ def clear(self):
+ """
+ Clears all notifications this manager has access to.
+ To completely free the resources on quit, use
+ the 'shutdown' method
+ """
+ if self._app:
+ self._app.clear_notifs()
+
+ def shutdown(self):
+ """
+ A method to call on shutdown of your app
+ Gracefully clears notifs, hides the icon, frees the resources
+ """
+ if self._app:
+ self._app.stop()
+ # If shutdown was already called manually, unregister the atexit hook so it doesn't run again
+ atexit.unregister(self.shutdown)
+ self._app = None
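+
+# Illustrative usage sketch (the app name and icon path are hypothetical;
+# the tray icon is created on a background thread, so is_ready() may still
+# be False for a short moment after construction):
+#
+#     manager = NotifManager("MyApp", icon_path="icon.ico")
+#     if manager.is_ready():
+#         manager.send("Hello", "This is a balloon notification")
+#     manager.clear()
+#     manager.shutdown()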
diff --git a/Monika After Story/game/python-packages/winnie32api/windows.py b/Monika After Story/game/python-packages/winnie32api/windows.py
new file mode 100644
index 0000000000..93b7d8d490
--- /dev/null
+++ b/Monika After Story/game/python-packages/winnie32api/windows.py
@@ -0,0 +1,238 @@
+# pylint: disable=attribute-defined-outside-init
+# pylint: disable=invalid-name
+from __future__ import annotations
+
+__all__ = (
+ "get_hwnd_by_title",
+ "get_window_title",
+ "get_window_rect",
+ "flash_window",
+ "unflash_window",
+ "set_active_window",
+ "get_active_window_hwnd",
+ "get_active_window_title",
+ "get_active_window_rect"
+)
+
+import ctypes
+import ctypes.wintypes as wt
+
+from .common import (
+ Rect,
+ Pack,
+ _get_last_err,
+ _reset_last_err
+)
+from .errors import WinAPIError
+
+
+user32 = ctypes.windll.user32
+kernel32 = ctypes.windll.kernel32
+
+
+WNDENUMPROC = ctypes.WINFUNCTYPE(wt.BOOL, wt.HWND, wt.LPARAM)
+
+
+class FlashWInfo(ctypes.Structure):
+ _fields_ = [
+ ("cbSize", wt.UINT),
+ ("hwnd", wt.HWND),
+ ("dwFlags", wt.DWORD),
+ ("uCount", wt.UINT),
+ ("dwTimeout", wt.DWORD)
+ ]
+
+class FLASHW():
+ """
+ 0x00000003. Flash both the window caption and taskbar button.
+ 0x00000001. Flash the window caption.
+ 0. Stop flashing. The system restores the window to its original state.
+ 0x00000004. Flash continuously, until the FLASHW_STOP flag is set.
+ 0x0000000C. Flash continuously until the window comes to the foreground.
+ 0x00000002. Flash the taskbar button.
+ """
+ ALL = 0x00000003
+ CAPTION = 0x00000001
+ STOP = 0
+ TIMER = 0x00000004
+ TIMERNOFG = 0x0000000C
+ TRAY = 0x00000002
+
+
+user32.IsWindowVisible.argtypes = (wt.HWND,)
+user32.IsWindowVisible.restype = wt.BOOL
+
+user32.GetWindowTextLengthW.argtypes = (wt.HWND,)
+user32.GetWindowTextLengthW.restype = wt.INT
+
+user32.GetWindowTextW.argtypes = (wt.HWND, wt.LPWSTR, wt.INT)
+user32.GetWindowTextW.restype = wt.INT
+
+user32.EnumWindows.argtypes = (WNDENUMPROC, wt.LPARAM)
+user32.EnumWindows.restype = wt.BOOL
+
+user32.GetWindowRect.argtypes = (wt.HWND, wt.LPRECT)
+user32.GetWindowRect.restype = wt.BOOL
+
+user32.FlashWindowEx.argtypes = (ctypes.POINTER(FlashWInfo),)
+user32.FlashWindowEx.restype = wt.BOOL
+
+user32.GetForegroundWindow.argtypes = ()
+user32.GetForegroundWindow.restype = wt.HWND
+
+
+def get_hwnd_by_title(title: str) -> int|None:
+ """
+ Returns first window hwnd with the given title
+ """
+ pack = Pack(None)
+
+ def callback(hwnd: int, lparam: int) -> bool:
+ c_hwnd = wt.HWND(hwnd)
+
+ if user32.IsWindowVisible(c_hwnd):
+ rv = get_window_title(hwnd)
+ if title == rv:
+ pack.value = hwnd
+ return False
+
+ return True
+
+ user32.EnumWindows(WNDENUMPROC(callback), wt.LPARAM(0))
+ return pack.value
+
+def get_window_title(hwnd: int) -> str:
+ """
+ Returns a window title as a str
+ """
+ _reset_last_err()
+
+ title_len = user32.GetWindowTextLengthW(hwnd)
+ if not title_len:
+ last_err = _get_last_err()
+ if last_err:
+ raise WinAPIError("failed to get title length", last_err)
+
+ buffer = ctypes.create_unicode_buffer(title_len + 1)
+ result = user32.GetWindowTextW(
+ hwnd,
+ buffer,
+ title_len + 1
+ )
+ if result != title_len:
+ last_err = _get_last_err()
+ if last_err:
+ raise WinAPIError("failed to get title", last_err)
+
+ return buffer.value
+
+def get_window_rect(hwnd: int) -> Rect:
+ """
+ Returns a window rect
+ """
+ c_rect = wt.RECT()
+ result = user32.GetWindowRect(hwnd, ctypes.byref(c_rect))
+ if not result:
+ raise WinAPIError("failed to get window rect", _get_last_err())
+
+ return Rect.from_coords(c_rect.left, c_rect.top, c_rect.right, c_rect.bottom)# type: ignore
+
+
+def flash_window(
+ hwnd: int,
+ count: int|None = 1,
+ caption: bool = True,
+ tray: bool = True
+):
+ """
+ Flashes a window
+
+ IN:
+ hwnd - the window hwnd
+ count - the number of flashes
+ -1 means flash infinitely until asked to stop
+ None means flash infinitely until the window becomes focused
+ caption - whether to flash the window caption
+ tray - whether to flash the tray icon
+ """
+ flash_info = FlashWInfo()
+ flash_info.cbSize = ctypes.sizeof(flash_info)
+ flash_info.hwnd = hwnd
+
+ flags = 0
+ if caption:
+ flags |= FLASHW.CAPTION
+ if tray:
+ flags |= FLASHW.TRAY
+ if count is None:
+ flags |= FLASHW.TIMERNOFG
+ count = 0
+ elif count == -1:
+ flags |= FLASHW.TIMER
+ count = 0
+
+ flash_info.dwFlags = flags
+ flash_info.uCount = count
+ flash_info.dwTimeout = 0
+
+ user32.FlashWindowEx(ctypes.byref(flash_info))
+
+def unflash_window(hwnd: int):
+ """
+ Stops window flashing
+ """
+ flash_info = FlashWInfo()
+ flash_info.cbSize = ctypes.sizeof(flash_info)
+ flash_info.hwnd = hwnd
+ flash_info.dwFlags = FLASHW.STOP
+ flash_info.uCount = 0
+ flash_info.dwTimeout = 0
+ user32.FlashWindowEx(ctypes.byref(flash_info))
+
+
+def set_active_window(hwnd: int):
+ """
+ Sets focus to a new window
+ NOTE:
+ A process may or may not be allowed to change the "foreground" window
+ of another process.
+ It's impossible to "activate" a window that belongs to another process.
+ """
+ # Also tried:
+ # - linking threads, no result
+ # - emulating input, no result
+ user32.SetFocus(hwnd)# err 5
+ user32.BringWindowToTop(hwnd)# no err, but no result
+ user32.SetForegroundWindow(hwnd)# err 1400
+ # user32.SetActiveWindow(hwnd)# err 1400
+
+
+def get_active_window_hwnd() -> int|None:
+ """
+ Returns the active window hwnd (id)
+ """
+ active_win_hwnd = user32.GetForegroundWindow()
+ if not active_win_hwnd or not user32.IsWindowVisible(active_win_hwnd):
+ return None
+
+ return active_win_hwnd
+
+def get_active_window_title() -> str|None:
+ """
+ Returns active window title as a str
+ """
+ hwnd = get_active_window_hwnd()
+ if hwnd is None:
+ return None
+
+ return get_window_title(hwnd)
+
+def get_active_window_rect() -> Rect|None:
+ """
+ Returns active window rect
+ """
+ hwnd = get_active_window_hwnd()
+ if hwnd is None:
+ return None
+
+ return get_window_rect(hwnd)
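+
+# Illustrative usage sketch (the window title is hypothetical):
+#
+#     hwnd = get_hwnd_by_title("Some Window Title")
+#     if hwnd is not None:
+#         print(get_window_title(hwnd), get_window_rect(hwnd))
+#         flash_window(hwnd, count=3)   # flash caption and tray three times
+#         unflash_window(hwnd)          # stop flashing early if needed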
diff --git a/Monika After Story/game/screens.rpy b/Monika After Story/game/screens.rpy
index 8a4b37273c..0e87a24a78 100644
--- a/Monika After Story/game/screens.rpy
+++ b/Monika After Story/game/screens.rpy
@@ -768,7 +768,7 @@ style quick_button_text_dark:
## to other menus, and to start the game.
init 4 python:
- def FinishEnterName():
+ def _finishEnterName():
global player
if not player:
@@ -900,7 +900,7 @@ screen navigation():
if main_menu:
- textbutton _("Just Monika") action If(persistent.playername, true=Start(), false=Show(screen="name_input", message="Please enter your name", ok_action=Function(FinishEnterName)))
+ textbutton _("Just Monika") action If(persistent.playername, true=Start(), false=Show(screen="name_input", message="Please enter your name", ok_action=Function(_finishEnterName)))
else:
@@ -3096,6 +3096,17 @@ screen mas_generic_restart:
textbutton _("OK") action Return(True)
+style poem_vbox:
+ xalign 0.5
+style poem_viewport:
+ xanchor 0
+ xsize 720
+ xpos 280
+style poem_vbar is vscrollbar:
+ xpos 1000
+ yalign 0.5
+ ysize 700
+
# Partial generic showpoem screen
# IN:
# _poem - Poem object to show
diff --git a/Monika After Story/game/script-affection.rpy b/Monika After Story/game/script-affection.rpy
index 3d4eb94c87..f7e71b9970 100644
--- a/Monika After Story/game/script-affection.rpy
+++ b/Monika After Story/game/script-affection.rpy
@@ -1701,6 +1701,7 @@ init 15 python in mas_affection:
Initializes the talk quiplists
"""
global talk_menu_quips
+
def save_quips(_aff, quiplist):
mas_ql = store.MASQuipList(allow_label=False)
for _quip in quiplist:
@@ -1838,6 +1839,7 @@ init 15 python in mas_affection:
Initializes the play quipliust
"""
global play_menu_quips
+
def save_quips(_aff, quiplist):
mas_ql = store.MASQuipList(allow_label=False)
for _quip in quiplist:
@@ -3078,7 +3080,7 @@ label mas_player_nickname_loop(check_scrollable_text, nickname_pool):
python:
done = False
- acceptable_nicknames = _return.keys()
+ acceptable_nicknames = list(_return.keys())
if acceptable_nicknames:
dlg_line = "Is there anything else you'd like me to call you?"
@@ -3332,7 +3334,7 @@ label mas_finalfarewell_start:
allow_dialogue = False
store.songs.enabled = False
mas_in_finalfarewell_mode = True
- layout.QUIT = glitchtext(20)
+ layout.QUIT = mas_glitchText(20)
#Console is not going to save you.
config.keymap["console"] = []
diff --git a/Monika After Story/game/script-apologies.rpy b/Monika After Story/game/script-apologies.rpy
index a052af5cb0..6892f13d71 100644
--- a/Monika After Story/game/script-apologies.rpy
+++ b/Monika After Story/game/script-apologies.rpy
@@ -26,7 +26,7 @@ init python:
_today = datetime.date.today()
#Iter thru the stuffs in the apology time tb
- for ev_label in persistent._mas_apology_time_db.keys():
+ for ev_label in tuple(persistent._mas_apology_time_db.keys()):
if current_total_playtime >= persistent._mas_apology_time_db[ev_label][0] or _today >= persistent._mas_apology_time_db[ev_label][1]:
#Pop the ev_label from the time db and lock the event label. You just lost your chance
store.mas_lockEVL(ev_label,'APL')
@@ -94,7 +94,7 @@ label monika_playerapologizes:
python:
apologylist = [
(ev.prompt, ev.eventlabel, False, False)
- for ev_label, ev in store.mas_apology.apology_db.iteritems()
+ for ev_label, ev in store.mas_apology.apology_db.items()
if ev.unlocked and (ev.prompt != "...for something." and ev.prompt != "...for something else.")
]
diff --git a/Monika After Story/game/script-ch30.rpy b/Monika After Story/game/script-ch30.rpy
index 5420101213..f9f3153acd 100644
--- a/Monika After Story/game/script-ch30.rpy
+++ b/Monika After Story/game/script-ch30.rpy
@@ -9,9 +9,7 @@ default persistent.monika_kill = None
# Whether or not you launched the mod before
default persistent.first_run = True
default persistent.rejected_monika = None
-default initial_monika_file_check = None
define modoorg.CHANCE = 20
-define mas_battery_supported = False
define mas_in_intro_flow = False
# True means disable animations, False means enable
@@ -39,7 +37,7 @@ init -890 python in mas_globals:
store.persistent._mas_pm_has_went_back_in_time = True
#Internal renpy version check
- is_r7 = renpy.version(True)[0] == 7
+ is_at_least_r7 = renpy.version(True)[0] >= 7
# Check whether or not the user uses a steam install
is_steam = "steamapps" in renpy.config.basedir.lower()
@@ -308,51 +306,24 @@ define MAS_PRONOUN_GENDER_MAP = {
"hero": {"M": "hero", "F": "heroine", "X": "hero"}
}
+init 1 python:
+ currentuser = mas_get_user()
+ # name changes if necessary
+ if not currentuser:
+ currentuser = persistent.playername
+ if not persistent.mcname:
+ persistent.mcname = currentuser
+
+ mcname = persistent.mcname
+
init python:
import subprocess
import os
- import eliza # mod specific
- import datetime # mod specific
- import battery # mod specific
+ import datetime
import re
import store.songs as songs
import store.hkb_button as hkb_button
import store.mas_globals as mas_globals
- therapist = eliza.eliza()
- process_list = []
- currentuser = None # start if with no currentuser
- if renpy.windows:
- try:
- process_list = subprocess.check_output("wmic process get Description", shell=True).lower().replace("\r", "").replace(" ", "").split("\n")
- except:
- pass
- try:
- for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
- user = os.environ.get(name)
- if user:
- currentuser = user
- except:
- pass
-
- try:
- renpy.file("../characters/monika.chr")
- initial_monika_file_check = True
- except:
- #Monika will mention that you don't have a char file in ch30_main instead
- pass
-
-
- # name changes if necessary
- if not currentuser or len(currentuser) == 0:
- currentuser = persistent.playername
- if not persistent.mcname or len(persistent.mcname) == 0:
- persistent.mcname = currentuser
- mcname = currentuser
- else:
- mcname = persistent.mcname
-
- # check for battery support
- mas_battery_supported = battery.is_supported()
# we need a new music channel for background audio (like rain!)
# this uses the amb (ambient) mixer.
@@ -387,7 +358,7 @@ init python:
renpy.jump("mas_pick_a_game")
- def mas_getuser():
+ def mas_get_user():
"""
Attempts to get the current user
@@ -437,7 +408,6 @@ init python:
"""
Draws the appropriate masks according to the current state of the
game.
-
IN:
dissolve_masks - True will dissolve masks, False will not
(Default; True)
@@ -522,19 +492,6 @@ init python:
# config.keymap['dismiss'] = dismiss_keys
# renpy.display.behavior.clear_keymap_cache()
- @store.mas_utils.deprecated(use_instead="mas_isDayNow", should_raise=True)
- def mas_isMorning():
- """DEPRECATED
- Checks if it is day or night via suntimes
-
- NOTE: the wording of this function is bad. This does not literally
- mean that it is morning. USE mas_isDayNow
-
- RETURNS: True if day, false if not
- """
- return mas_isDayNow()
-
-
def mas_progressFilter():
"""
Changes filter according to rules.
@@ -549,13 +506,6 @@ init python:
return curr_flt != new_flt
- @store.mas_utils.deprecated(should_raise=True)
- def mas_shouldChangeTime():
- """DEPRECATED
- This no longer makes sense with the filtering system.
- """
- return False
-
def mas_shouldRain():
"""
@@ -968,7 +918,7 @@ init 999 python in mas_reset:
else:
store.mas_lockGame("nou")
- for game_name, game_startlabel in game_unlock_db.iteritems():
+ for game_name, game_startlabel in game_unlock_db.items():
# unlock if we've seen the label
if store.mas_getEVL_shown_count(game_startlabel) > 0:
store.mas_unlockGame(game_name)
@@ -1352,7 +1302,7 @@ init 999 python in mas_reset:
Runs reset code for window reactions
"""
#set MAS window global
- mas_windowutils._setMASWindow()
+ mas_windowutils._setMASWindow_Linux()
@ch30_reset(-600)
@@ -1585,10 +1535,10 @@ label spaceroom(start_bg=None, hide_mask=None, hide_monika=False, dissolve_all=F
# add show/hide statements for decos
if bg_change_info is not None:
if not scene_change:
- for h_adf in bg_change_info.hides.itervalues():
+ for h_adf in bg_change_info.hides.values():
h_adf.hide()
- for s_tag, s_info in bg_change_info.shows.iteritems():
+ for s_tag, s_info in bg_change_info.shows.items():
s_tag_real, s_adf = s_info
s_adf.show(s_tag_real)
@@ -1744,8 +1694,8 @@ label ch30_autoload:
mas_cleanEventList()
- # set the gender
- call mas_set_gender
+ # set the gender
+ mas_set_pronouns()
# call reset stuff
call ch30_reset
@@ -2187,15 +2137,6 @@ label ch30_post_mid_loop_eval:
window auto
-# python:
-# if (
-# mas_battery_supported
-# and battery.is_battery_present()
-# and not battery.is_charging()
-# and battery.get_level() < 20
-# ):
-# pushEvent("monika_battery")
-
if (
store.mas_globals.in_idle_mode
or (
diff --git a/Monika After Story/game/script-compliments.rpy b/Monika After Story/game/script-compliments.rpy
index b81b859430..e9396d97b1 100644
--- a/Monika After Story/game/script-compliments.rpy
+++ b/Monika After Story/game/script-compliments.rpy
@@ -91,7 +91,7 @@ label monika_compliments:
# build menu list
compliments_menu_items = [
(ev.prompt, ev_label, not seen_event(ev_label), False)
- for ev_label, ev in mas_compliments.compliment_database.iteritems()
+ for ev_label, ev in mas_compliments.compliment_database.items()
if (
Event._filterEvent(ev, unlocked=True, aff=mas_curr_affection, flag_ban=EV_FLAG_HFM)
and ev.checkConditional()
diff --git a/Monika After Story/game/script-easter-eggs.rpy b/Monika After Story/game/script-easter-eggs.rpy
index a27b519472..d6fed7ea07 100644
--- a/Monika After Story/game/script-easter-eggs.rpy
+++ b/Monika After Story/game/script-easter-eggs.rpy
@@ -196,10 +196,10 @@ label natsuki_name_scare_hungry:
#play special music and display glitch text.
$ adjusted_6g = "bgm/6g.ogg"
$ renpy.play(adjusted_6g, channel="sound")
- $ ntext = glitchtext(96)
+ $ ntext = mas_glitchText(96)
$ style.say_dialogue = style.edited
n "{cps=*2}{color=#000}[ntext]{/color}{/cps}{nw}"
- $ ntext = glitchtext(96)
+ $ ntext = mas_glitchText(96)
n "{cps=*2}{color=#000}[ntext]{/color}{/cps}{nw}"
# tear screen and glitch sound to mark end of glitch.
diff --git a/Monika After Story/game/script-farewells.rpy b/Monika After Story/game/script-farewells.rpy
index 589f4b714a..2e0e5040c0 100644
--- a/Monika After Story/game/script-farewells.rpy
+++ b/Monika After Story/game/script-farewells.rpy
@@ -143,7 +143,7 @@ init -1 python in mas_farewells:
check_time = datetime.datetime.now()
# now filter
- for ev_label, ev in fare_db.iteritems():
+ for ev_label, ev in fare_db.items():
if _filterFarewell(
ev,
curr_priority,
@@ -199,7 +199,7 @@ label mas_farewell_start:
# build a prompt list
bye_prompt_list = sorted([
(ev.prompt, ev, False, False)
- for k,ev in bye_pool_events.iteritems()
+ for k,ev in bye_pool_events.items()
])
most_used_fare = sorted(bye_pool_events.values(), key=Event.getSortShownCount)[-1]
@@ -694,10 +694,10 @@ label bye_prompt_sleep:
# show screen mas_background_timed_jump(4, "bye_prompt_sleep.reglitch")
# $ _history_list.pop()
# menu:
- # m "[glitchtext(41)]{fast}"
- # "[glitchtext(15)]":
+ # m "[mas_glitchText(41)]{fast}"
+ # "[mas_glitchText(15)]":
# pass
- # "[glitchtext(12)]":
+ # "[mas_glitchText(12)]":
# pass
# hide screen mas_background_timed_jump
diff --git a/Monika After Story/game/script-fun-facts.rpy b/Monika After Story/game/script-fun-facts.rpy
index 549bcd5461..4acf78d553 100644
--- a/Monika After Story/game/script-fun-facts.rpy
+++ b/Monika After Story/game/script-fun-facts.rpy
@@ -14,7 +14,7 @@ init -10 python in mas_fun_facts:
"""
return [
fun_fact_evl
- for fun_fact_evl, ev in fun_fact_db.iteritems()
+ for fun_fact_evl, ev in fun_fact_db.items()
if not ev.unlocked
]
@@ -25,7 +25,7 @@ init -10 python in mas_fun_facts:
OUT:
List of all fun fact eventlabels
"""
- return fun_fact_db.keys()
+ return list(fun_fact_db.keys())
#Whether or not the last fun fact seen was a good fact
diff --git a/Monika After Story/game/script-greetings.rpy b/Monika After Story/game/script-greetings.rpy
index 6903acfc12..e05ffeb019 100644
--- a/Monika After Story/game/script-greetings.rpy
+++ b/Monika After Story/game/script-greetings.rpy
@@ -232,7 +232,7 @@ init -1 python in mas_greetings:
check_time = datetime.datetime.now()
# now filter
- for ev_label, ev in gre_db.iteritems():
+ for ev_label, ev in gre_db.items():
if _filterGreeting(
ev,
curr_priority,
diff --git a/Monika After Story/game/script-holidays.rpy b/Monika After Story/game/script-holidays.rpy
index 4d0e57b249..5a5db1a5a4 100644
--- a/Monika After Story/game/script-holidays.rpy
+++ b/Monika After Story/game/script-holidays.rpy
@@ -31,7 +31,6 @@ init 10 python:
if key is None:
key = datetime.date.today()
-
persistent._mas_event_clothes_map[key] = clothes.name
#We also unlock the event clothes selector here
@@ -340,9 +339,9 @@ init 501 python:
)
init python:
- MAS_O31_COSTUME_CG_MAP = {
- mas_clothes_marisa: "o31mcg",
- mas_clothes_rin: "o31rcg"
+ MAS_O31_COSTUME_CG_MAP: dict[str, str] = {
+ mas_clothes_marisa.name: "o31mcg",
+ mas_clothes_rin.name: "o31rcg"
}
#Functions
@@ -535,8 +534,8 @@ init -10 python:
if wearing_costume:
#Check if the current costume is in the cg map, and if so, prep the cg
- if monika_chr.clothes in MAS_O31_COSTUME_CG_MAP:
- store.mas_o31_event.cg_decoded = store.mas_o31_event.decodeImage(MAS_O31_COSTUME_CG_MAP[monika_chr.clothes])
+ if monika_chr.clothes.name in MAS_O31_COSTUME_CG_MAP:
+ store.mas_o31_event.cg_decoded = store.mas_o31_event.decodeImage(MAS_O31_COSTUME_CG_MAP[monika_chr.clothes.name])
return monika_chr.clothes
return None
@@ -561,8 +560,8 @@ init -10 python:
random_outfit = random.choice(selection_pool)
#Setup the image decode
- if random_outfit in MAS_O31_COSTUME_CG_MAP:
- store.mas_o31_event.cg_decoded = store.mas_o31_event.decodeImage(MAS_O31_COSTUME_CG_MAP[random_outfit])
+ if random_outfit.name in MAS_O31_COSTUME_CG_MAP:
+ store.mas_o31_event.cg_decoded = store.mas_o31_event.decodeImage(MAS_O31_COSTUME_CG_MAP[random_outfit.name])
#And return the outfit
return random_outfit
@@ -1296,7 +1295,7 @@ label mas_o31_lingerie:
python:
curr_song = songs.current_track
mas_play_song(None)
- mas_display_notif("M̷̢͘ô̴͎ṇ̵͐i̴͎͂k̸̗̂ả̴̫", ["C̸̳̓ą̵́n̷̳̎ ̸̖̊y̴̦͝õ̷̯ų̷͌ ̴̼͘h̷̭̚e̴̪͝a̴̙̐ŕ̵̖ ̴̠́m̸̰̂ě̵̬?̷̮̐"], "Topic Alerts")
+ mas_display_notif("M̷̢͘ô̴͎ṇ̵͐i̴͎͂k̸̗̂ả̴̫", ["C̸̳̓ą̵́n̷̳̎ ̸̖̊y̴̦͝õ̷̯ų̷͌ ̴̼͘h̷̭̚e̴̪͝a̴̙̐ŕ̵̖ ̴̠́m̸̰̂ě̵̬?̷̮̐"], "Topic Alerts", flash_window=True)
scene black
pause 2.0
@@ -1350,7 +1349,7 @@ label mas_o31_lingerie_end:
mas_lockEVL("greeting_o31_lingerie", "GRE")
# restart song/sounds that were playing before event
- if globals().get("curr_song", -1) is not -1 and curr_song != store.songs.FP_MONIKA_LULLABY:
+ if globals().get("curr_song", -1) != -1 and curr_song != store.songs.FP_MONIKA_LULLABY:
mas_play_song(curr_song, 1.0)
else:
mas_play_song(None, 1.0)
@@ -2128,7 +2127,7 @@ init -10 python in mas_d25_utils:
# save remaining d25 gifts and delete the packages
# they will be reacted to later
- for c_gift_name, gift_name in d25_map.iteritems():
+ for c_gift_name, gift_name in d25_map.items():
#Only add if the gift isn't already stored under the tree
if c_gift_name not in store.persistent._mas_d25_gifts_given:
store.persistent._mas_d25_gifts_given.append(c_gift_name)
@@ -2137,7 +2136,7 @@ init -10 python in mas_d25_utils:
store.mas_docking_station.destroyPackage(gift_name)
# set all excluded and generic gifts to react now
- for c_gift_name, mas_gift in found_map.iteritems():
+ for c_gift_name, mas_gift in found_map.items():
store.persistent._mas_filereacts_reacted_map[c_gift_name] = mas_gift
# register these gifts
diff --git a/Monika After Story/game/script-islands-event.rpy b/Monika After Story/game/script-islands-event.rpy
index 1530c3a9cd..2ddf86e1f3 100644
--- a/Monika After Story/game/script-islands-event.rpy
+++ b/Monika After Story/game/script-islands-event.rpy
@@ -330,7 +330,7 @@ init -20 python in mas_island_event:
# FIXME: py3 update
return {
id_: data.default_unlocked
- for id_, data in cls._data_map.iteritems()
+ for id_, data in cls._data_map.items()
}
@classmethod
@@ -344,7 +344,7 @@ init -20 python in mas_island_event:
# FIXME: py3 update
return {
id_: data.fp_map
- for id_, data in cls._data_map.iteritems()
+ for id_, data in cls._data_map.items()
if data.type == type_ and data.fp_map
}
@@ -1125,10 +1125,10 @@ init -25 python in mas_island_event:
map_ - the map to get filenames from, and which will be overriden
"""
# FIXME: py3 update
- for name, path_map in map_.iteritems():
- for sprite_type, path in path_map.iteritems():
+ for name, path_map in map_.items():
+ for sprite_type, path in path_map.items():
raw_data = zip_file.read(path)
- img = store.MASImageData(raw_data, "{}_{}.png".format(name, sprite_type))
+ img = store.im.Data(raw_data, "{}_{}.png".format(name, sprite_type))
path_map[sprite_type] = img
try:
@@ -1144,7 +1144,7 @@ init -25 python in mas_island_event:
# Anim frames are handled a bit differently
glitch_frames = tuple(
- (store.MASImageData(zip_file.read(fn), fn + ".png") for fn in GLITCH_FPS)
+ (store.im.Data(zip_file.read(fn), fn + ".png") for fn in GLITCH_FPS)
)
tree_lights_imgs = {}
@@ -1156,8 +1156,8 @@ init -25 python in mas_island_event:
# Audio is being loaded right away
isly_data = IslandsDataDefinition.getDataFor("other_isly")
if isly_data:
- for fn, fp in isly_data.fp_map.iteritems():
- audio_data = store.MASAudioData(zip_file.read(fp), fp + ".ogg")
+ for fn, fp in isly_data.fp_map.items():
+ audio_data = AudioData(zip_file.read(fp), fp + ".ogg")
setattr(store.audio, "isld_isly_" + fn, audio_data)
except Exception as e:
@@ -1196,7 +1196,7 @@ init -25 python in mas_island_event:
precip_map = {}
- for p_type, suffix in precip_to_suffix_map.iteritems():
+ for p_type, suffix in precip_to_suffix_map.items():
k = main_key + suffix
if k in img_map:
precip_map[p_type] = img_map[k]
@@ -1271,25 +1271,25 @@ init -25 python in mas_island_event:
global bg_disp_map, overlay_disp_map, interior_disp_map
# Build the islands
- for island_name, img_map in island_imgs_maps.iteritems():
+ for island_name, img_map in island_imgs_maps.items():
disp = _build_ifwd(img_map)
partial_disp = IslandsDataDefinition.getDataFor(island_name).partial_disp
island_disp_map[island_name] = partial_disp(disp)
# Build the decals
- for decal_name, img_map in decal_imgs_maps.iteritems():
+ for decal_name, img_map in decal_imgs_maps.items():
disp = _build_ifwd(img_map)
partial_disp = IslandsDataDefinition.getDataFor(decal_name).partial_disp
decal_disp_map[decal_name] = partial_disp(disp)
# Build the bg
- for bg_name, img_map in bg_imgs_maps.iteritems():
+ for bg_name, img_map in bg_imgs_maps.items():
disp = _build_ifwd(img_map)
partial_disp = IslandsDataDefinition.getDataFor(bg_name).partial_disp
bg_disp_map[bg_name] = partial_disp(disp)
# Build the overlays
- for overlay_name, img_map in overlay_imgs_maps.iteritems():
+ for overlay_name, img_map in overlay_imgs_maps.items():
disp = _build_fwd(img_map)
partial_disp = IslandsDataDefinition.getDataFor(overlay_name).partial_disp
if partial_disp is not None:
@@ -1297,7 +1297,7 @@ init -25 python in mas_island_event:
overlay_disp_map[overlay_name] = disp
# Build the interior
- for name, img_map in interior_imgs_map.iteritems():
+ for name, img_map in interior_imgs_map.items():
interior_disp_map[name] = img_map
if interior_disp_map:
@@ -1753,7 +1753,7 @@ init -25 python in mas_island_event:
# Add all unlocked islands
sub_displayables = [
_reset_parallax_disp(disp)
- for key, disp in island_disp_map.iteritems()
+ for key, disp in island_disp_map.items()
if _is_unlocked(key)
]
@@ -1785,7 +1785,7 @@ init -25 python in mas_island_event:
),
"island_5": ("decal_gravestones",)
}
- for isld, decals in isld_to_decals_map.iteritems():
+ for isld, decals in isld_to_decals_map.items():
island_disp_map[isld].add_decals(
*(decal_disp_map[key] for key in decals if _is_unlocked(key))
)
diff --git a/Monika After Story/game/script-moods.rpy b/Monika After Story/game/script-moods.rpy
index e3dc546155..b0319e282c 100644
--- a/Monika After Story/game/script-moods.rpy
+++ b/Monika After Story/game/script-moods.rpy
@@ -649,8 +649,7 @@ label mas_mood_bored:
unlocked_games = {
# use display name, or prompt as backup
ev_label: game_ev.rules.get("display_name", game_ev.prompt)
-
- for ev_label, game_ev in mas_games.game_db.iteritems()
+ for ev_label, game_ev in mas_games.game_db.items()
if mas_isGameUnlocked(game_ev.prompt)
}
diff --git a/Monika After Story/game/script-python.rpy b/Monika After Story/game/script-python.rpy
index 22375fdb65..de627b001b 100644
--- a/Monika After Story/game/script-python.rpy
+++ b/Monika After Story/game/script-python.rpy
@@ -1024,11 +1024,11 @@ init -1 python in mas_ptod:
IN:
cmd - the command to write to the console
"""
+ global cn_line, cn_cmd, state, stack_level
+
if state == STATE_OFF:
return
- global cn_line, cn_cmd, state, stack_level
-
if state == STATE_MULTI:
# this is bad! You should execute the previous command first!
# in this case, we will clear your current command and reset
@@ -1203,14 +1203,14 @@ init -1 python in mas_ptod:
locals here.
If None, then we use the local_ctx.
"""
+ global cn_cmd, cn_line, state, stack_level, blk_cmd
+
if state == STATE_OFF:
return
if context is None:
context = local_ctx
- global cn_cmd, cn_line, state, stack_level, blk_cmd
-
################### setup some initial conditions ################
# block mode just means we are in a block
diff --git a/Monika After Story/game/script-songs.rpy b/Monika After Story/game/script-songs.rpy
index dd5b2e9bd6..3ebbb9bcb8 100644
--- a/Monika After Story/game/script-songs.rpy
+++ b/Monika After Story/game/script-songs.rpy
@@ -76,14 +76,14 @@ init python in mas_songs:
if length is None:
return [
(ev.prompt, ev_label, False, False)
- for ev_label, ev in song_db.iteritems()
+ for ev_label, ev in song_db.items()
if ev.unlocked
]
else:
return [
(ev.prompt, ev_label, False, False)
- for ev_label, ev in song_db.iteritems()
+ for ev_label, ev in song_db.items()
if ev.unlocked and length in ev.category
]
@@ -100,7 +100,7 @@ init python in mas_songs:
if unseen_only:
return [
ev_label
- for ev_label, ev in song_db.iteritems()
+ for ev_label, ev in song_db.items()
if (
not store.seen_event(ev_label)
and ev.random
@@ -111,7 +111,7 @@ init python in mas_songs:
return [
ev_label
- for ev_label, ev in song_db.iteritems()
+ for ev_label, ev in song_db.items()
if ev.random and TYPE_SHORT in ev.category and ev.checkAffection(store.mas_curr_affection)
]
@@ -144,7 +144,7 @@ init python in mas_songs:
return [
(ev.prompt, ev_label, False, False)
- for ev_label, ev in song_db.iteritems()
+ for ev_label, ev in song_db.items()
if ev.unlocked and TYPE_ANALYSIS in ev.category and ev.checkAffection(curr_aff)
]
diff --git a/Monika After Story/game/script-stories.rpy b/Monika After Story/game/script-stories.rpy
index 7ecb1eb6c5..1d41716a27 100644
--- a/Monika After Story/game/script-stories.rpy
+++ b/Monika After Story/game/script-stories.rpy
@@ -100,7 +100,7 @@ init -1 python in mas_stories:
story_type - story type to get
OUT:
- list of locked stories for the given story type
+ dict of locked stories for the given story type
"""
return store.Event.filterEvents(
story_database,
@@ -123,7 +123,7 @@ init -1 python in mas_stories:
stories = get_new_stories_for_type(story_type)
#Grab one of the stories
- story = renpy.random.choice(stories.values())
+ story = renpy.random.choice(tuple(stories.values()))
#Unlock and return its eventlabel
story.unlocked = True
@@ -173,7 +173,7 @@ label monika_short_stories_menu:
# build menu list
stories_menu_items = [
(story_ev.prompt, story_evl, False, False)
- for story_evl, story_ev in mas_stories.story_database.iteritems()
+ for story_evl, story_ev in mas_stories.story_database.items()
if Event._filterEvent(
story_ev,
pool=False,
diff --git a/Monika After Story/game/script-story-events.rpy b/Monika After Story/game/script-story-events.rpy
index 1812cb8747..c168593309 100644
--- a/Monika After Story/game/script-story-events.rpy
+++ b/Monika After Story/game/script-story-events.rpy
@@ -60,7 +60,7 @@ label mas_gender:
#Unlock the gender redo event
$ mas_unlockEVL("monika_gender_redo","EVE")
# set pronouns
- call mas_set_gender
+ $ mas_set_pronouns()
#Set up the preferredname topic
python:
@@ -161,7 +161,7 @@ label monika_gender_redo:
m 5hubsa "I'll always love you for who you are~"
# set pronouns
- call mas_set_gender
+ $ mas_set_pronouns()
return "love"
label mas_gender_neither:
@@ -1066,7 +1066,7 @@ init 5 python:
label mas_random_limit_reached:
#Notif so people don't get stuck here
- $ mas_display_notif(m_name, ["Hey [player]..."], "Topic Alerts")
+ $ mas_display_notif(m_name, ["Hey [player]..."], "Topic Alerts", flash_window=True)
python:
limit_quips = [
@@ -1640,7 +1640,7 @@ label mas_corrupted_persistent:
call mas_showpoem(mas_note_backups_all_good)
window auto
- $ _gtext = glitchtext(7)
+ $ _gtext = mas_glitchText(7)
m 1ekc "Do you know what this is about?{nw}"
$ _history_list.pop()
diff --git a/Monika After Story/game/script-topics.rpy b/Monika After Story/game/script-topics.rpy
index b6f0113265..8ffaa0abd0 100644
--- a/Monika After Story/game/script-topics.rpy
+++ b/Monika After Story/game/script-topics.rpy
@@ -8,7 +8,6 @@ define mas_rev_unseen = []
define mas_rev_seen = []
define mas_rev_mostseen = []
define testitem = 0
-define mas_did_monika_battery = False
define mas_sensitive_limit = 3
init -2 python in mas_topics:
@@ -663,7 +662,7 @@ init python in mas_bookmarks_derand:
#Firstly, let's get our derandom keys
derand_keys = [
label_prefix_data["derand_persist_key"]
- for label_prefix_data in label_prefix_map.itervalues()
+ for label_prefix_data in label_prefix_map.values()
if "derand_persist_key" in label_prefix_data
]
@@ -756,7 +755,7 @@ label mas_rerandom:
show monika at t11
python:
- for ev_label in topics_to_rerandom.iterkeys():
+ for ev_label in topics_to_rerandom.keys():
#Get the ev
rerand_ev = mas_getEV(ev_label)
@@ -8426,35 +8425,35 @@ label monika_wolf:
m 1ekbsa "You're my [hero] after all~"
return
-label monika_battery:
- if mas_did_monika_battery:
- jump monika_close_game_battery
- else:
- jump monika_complain_battery
-
-label monika_complain_battery:
- $ mas_did_monika_battery = True
- m 1euc "Umm, [player]..."
- m 1eua "It looks like your computer's battery is about to run out..."
- m 1eka "Can you charge it for me?"
- m 1lksdlc "I don't want us to be separated, or worse..."
- m 2ekc "It'd be really unpleasant for me if I suddenly lose consciousness."
- m 2eka "So please charge your computer, okay?"
- m 3eka "...or at least let me know when you're going."
- m 1hua "Thank you, [player]~"
- return
-
-label monika_close_game_battery:
- $ mas_loseAffection()
- m 1lksdlc "[player]..."
- m 1ekc "I'm sorry, but I'm gonna have to close the game before the battery runs out."
- m 3eka "So...I'll just close the game for now until you can charge your computer.{w=3.0} {nw}"
-
- $ is_charging = battery.is_charging()
- if is_charging:
- jump monika_system_charging
- $ persistent.closed_self = True
- jump _quit
+# label monika_battery:
+# if mas_did_monika_battery:
+# jump monika_close_game_battery
+# else:
+# jump monika_complain_battery
+
+# label monika_complain_battery:
+# $ mas_did_monika_battery = True
+# m 1euc "Umm, [player]..."
+# m 1eua "It looks like your computer's battery is about to run out..."
+# m 1eka "Can you charge it for me?"
+# m 1lksdlc "I don't want us to be separated, or worse..."
+# m 2ekc "It'd be really unpleasant for me if I suddenly lose consciousness."
+# m 2eka "So please charge your computer, okay?"
+# m 3eka "...or at least let me know when you're going."
+# m 1hua "Thank you, [player]~"
+# return
+
+# label monika_close_game_battery:
+# $ mas_loseAffection()
+# m 1lksdlc "[player]..."
+# m 1ekc "I'm sorry, but I'm gonna have to close the game before the battery runs out."
+# m 3eka "So...I'll just close the game for now until you can charge your computer.{w=3.0} {nw}"
+
+# $ is_charging = battery.is_charging()
+# if is_charging:
+# jump monika_system_charging
+# $ persistent.closed_self = True
+# jump _quit
label monika_system_charging:
$ mas_gainAffection()
diff --git a/Monika After Story/game/script-windowreacts.rpy b/Monika After Story/game/script-windowreacts.rpy
index 994c3bd901..2d14e7921b 100644
--- a/Monika After Story/game/script-windowreacts.rpy
+++ b/Monika After Story/game/script-windowreacts.rpy
@@ -1,17 +1,22 @@
+init python:
+ # 99% of the wr use the same set of event rules
+ # 99% of the window reacts (WRS) use the same set of event rules,
+ # so define them once here and reuse them below
+ "notif-group": "Window Reactions",
+ "skip alert": None,
+ "keep_idle_exp": None,
+ "skip_pause": None
+ }
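+ # NOTE: every addEvent below passes dict(__DEFAULT_WR_RULES) so each event gets its
+ # own shallow copy; per-event tweaks (e.g. the r34m rules popping "keep_idle_exp")
+ # never mutate this shared default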
init 5 python:
addEvent(
Event(
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_pinterest",
category=["Pinterest"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -37,14 +42,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_duolingo",
category=["Duolingo"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -70,14 +71,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_wikipedia",
category=["- Wikipedia"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -121,14 +118,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_virtualpiano",
category=["^Virtual Piano"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -158,14 +151,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_youtube",
category=["- YouTube"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -185,25 +174,25 @@ label mas_wrs_youtube:
return
init 5 python:
+ rules = dict(__DEFAULT_WR_RULES)
+ rules.pop("keep_idle_exp")
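+ # "keep_idle_exp" is dropped for this one (presumably so the reaction may change
+ # Monika's idle expression, unlike the other window reacts)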
addEvent(
Event(
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_r34m",
category=[r"(?i)(((r34|rule\s?34).*monika)|(post \d+:[\w\s]+monika)|(monika.*(r34|rule\s?34)))"],
aff_range=(mas_aff.AFFECTIONATE, None),
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "skip_pause": None
- },
+ rules=rules,
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
+ del rules
label mas_wrs_r34m:
python:
- mas_display_notif(m_name, ["Hey, [player]...what are you looking at?"],'Window Reactions')
+ mas_display_notif(m_name, ["Hey, [player]...what are you looking at?"], "Window Reactions", flash_window=True)
choice = random.randint(1,10)
@@ -238,14 +227,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_monikamoddev",
category=["MonikaModDev"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -270,14 +255,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_twitter",
category=["/ Twitter"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -292,7 +273,7 @@ label mas_wrs_twitter:
"Anything interesting to share, [player]?": False,
"280 characters? I only need [temp_len]...\n[temp_line]": True
}
- quip = renpy.random.choice(ily_quips_map.keys())
+ quip = renpy.random.choice(tuple(ily_quips_map.keys()))
wrs_success = mas_display_notif(
m_name,
@@ -312,14 +293,10 @@ label mas_wrs_twitter:
# persistent._mas_windowreacts_database,
# eventlabel="mas_wrs_monikatwitter",
# category=['twitter', 'lilmonix3'],
-# rules={
-# "notif-group": "Window Reactions",
-# "skip alert": None,
-# "keep_idle_exp": None,
-# "skip_pause": None
-# },
+# rules=dict(__DEFAULT_WR_RULES),
# show_in_idle=True
# ),
+# restartBlacklist=True,
# code="WRS"
# )
@@ -345,14 +322,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_4chan",
category=["- 4chan"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -380,14 +353,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_pixiv",
category=["- pixiv"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -429,14 +398,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_reddit",
category=[r"(?i)reddit"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -462,14 +427,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_mal",
category=["MyAnimeList"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -496,14 +457,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_deviantart",
category=["DeviantArt"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -528,14 +485,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_netflix",
category=["Netflix"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -561,14 +514,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_twitch",
category=["- Twitch"],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
@@ -594,14 +543,10 @@ init 5 python:
persistent._mas_windowreacts_database,
eventlabel="mas_wrs_word_processor",
category=['Google Docs|LibreOffice Writer|Microsoft Word'],
- rules={
- "notif-group": "Window Reactions",
- "skip alert": None,
- "keep_idle_exp": None,
- "skip_pause": None
- },
+ rules=dict(__DEFAULT_WR_RULES),
show_in_idle=True
),
+ restartBlacklist=True,
code="WRS"
)
diff --git a/Monika After Story/game/special-effects.rpy b/Monika After Story/game/special-effects.rpy
index f87e30535a..810a270b9f 100644
--- a/Monika After Story/game/special-effects.rpy
+++ b/Monika After Story/game/special-effects.rpy
@@ -183,6 +183,7 @@ init -500 python in mas_parallax:
@property
def children(self):
+ # "is not" here because identity/instance checks are mandatory (the base itself must not count as a child)
return [decal for decal in self._decals if decal is not self._base]
@property
@@ -834,6 +835,53 @@ init -500 python in mas_parallax:
return [child for child in self.children]
+init -500 python:
+ import random
+ import math
+
+ # Backported from DDLC, used in splash screen
+ class ParticleBurst(object):
+ def __init__(self, theDisplayable, explodeTime=0, numParticles=20, particleTime = 0.500, particleXSpeed = 3, particleYSpeed = 5):
+ self.sm = SpriteManager(update=self.update)
+
+ self.stars = [ ]
+ self.displayable = theDisplayable
+ self.explodeTime = explodeTime
+ self.numParticles = numParticles
+ self.particleTime = particleTime
+ self.particleXSpeed = particleXSpeed
+ self.particleYSpeed = particleYSpeed
+ self.gravity = 240
+ self.timePassed = 0
+
+ for i in range(self.numParticles):
+ self.add(self.displayable, 1)
+
+ def add(self, d, speed):
+ s = self.sm.create(d)
+ speed = random.random()
+ angle = random.random() * 3.14159 * 2
+ xSpeed = speed * math.cos(angle) * self.particleXSpeed
+ ySpeed = speed * math.sin(angle) * self.particleYSpeed - 1
+ s.x = xSpeed * 24
+ s.y = ySpeed * 24
+ pTime = self.particleTime
+ self.stars.append((s, ySpeed, xSpeed, pTime))
+
+ def update(self, st):
+ sindex=0
+ for s, ySpeed, xSpeed, particleTime in self.stars:
+ if (st < particleTime):
+ s.x = xSpeed * 120 * (st + .20)
+ s.y = (ySpeed * 120 * (st + .20) + (self.gravity * st * st))
+ else:
+ s.destroy()
+ self.stars.pop(sindex)
+ sindex += 1
+ return 0
+
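+ # Usage sketch (not part of the backport): the .sm SpriteManager is the actual
+ # displayable to show; the asset path below is only a placeholder, e.g.
+ #   $ burst = ParticleBurst(Image("gui/star.png"))
+ #   show expression burst.sm as burst_fx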
+
+
image yuri dragon2:
parallel:
"yuri/dragon1.png"
@@ -1312,7 +1360,7 @@ label mas_timed_text_events_wrapup:
mas_DropShield_timedtext()
# restart song/sounds that were playing before event
- if globals().get("curr_song", -1) is not -1 and curr_song != store.songs.FP_MONIKA_LULLABY:
+ if globals().get("curr_song", -1) != -1 and curr_song != store.songs.FP_MONIKA_LULLABY:
mas_play_song(curr_song, 1.0)
else:
mas_play_song(None, 1.0)
diff --git a/Monika After Story/game/splash.rpy b/Monika After Story/game/splash.rpy
index 451957f5f5..58b06aa1fe 100644
--- a/Monika After Story/game/splash.rpy
+++ b/Monika After Story/game/splash.rpy
@@ -2,12 +2,12 @@
##
## Before load, check to be sure that the archive files were found.
## If not, display an error message and quit.
-init -100 python:
- #Check for each archive needed
- for archive in ['audio','images','scripts','fonts']:
- if not archive in config.archives:
- #If one is missing, throw an error and chlose
- renpy.error("DDLC archive files not found in /game folder. Check installation and try again.")
+# init -100 python:
+# #Check for each archive needed
+# for archive in ['audio','images','scripts','fonts']:
+# if not archive in config.archives:
+# #If one is missing, throw an error and close
+# renpy.error("DDLC archive files not found in /game folder. Check installation and try again.")
## First, a disclaimer declaring this is a mod is shown, then there is a
## check for the original DDLC assets in the install folder. If those are
diff --git a/Monika After Story/game/sprite-chart-matrix.rpy b/Monika After Story/game/sprite-chart-matrix.rpy
index 61f762d625..ee9e696618 100644
--- a/Monika After Story/game/sprite-chart-matrix.rpy
+++ b/Monika After Story/game/sprite-chart-matrix.rpy
@@ -285,7 +285,7 @@ python early:
img = renpy.substitute(img)
args = []
- for flt in store.mas_sprites.FILTERS.iterkeys():
+ for flt in store.mas_sprites.FILTERS.keys():
# condition
args.append("store.mas_sprites.get_filter() == '{0}'".format(flt))
@@ -352,7 +352,7 @@ python early:
if filterize_def:
# default should be filterized
- for flt in store.mas_sprites.FILTERS.iterkeys():
+ for flt in store.mas_sprites.FILTERS.keys():
# only use the filtesr we have not already added
if flt not in flt_pairs:
@@ -589,7 +589,7 @@ init 1 python in mas_sprites:
Raises all errors.
"""
- for mfwm_id, mfwm in FW_DB.iteritems():
+ for mfwm_id, mfwm in FW_DB.items():
_verify_mfwm(mfwm_id, mfwm)
@@ -868,12 +868,6 @@ init -99 python in mas_sprites:
FILTERS[flt_enum] = imx
- @store.mas_utils.deprecated(use_instead="get_filter", should_raise=True)
- def _decide_filter():
- """DEPRECATED
- Please use get_filter
- """
- return get_filter()
def get_filter():
@@ -1083,11 +1077,11 @@ init -4 python in mas_sprites:
"""
Clears all caches
"""
- for cid, cache in CACHE_TABLE.iteritems():
- for key in cache.keys():
+ for cid, cache in CACHE_TABLE.items():
+ for key in tuple(cache.keys()):
cache.pop(key)
- for key in MFM_CACHE.keys():
+ for key in tuple(MFM_CACHE.keys()):
MFM_CACHE.pop(key)
@@ -3117,7 +3111,7 @@ init -50 python:
RETURNS: list of all filter names in this map
"""
- return self.__mfm.map.keys()
+ return list(self.__mfm.map.keys())
def get(self, flt, defval=None):
"""
diff --git a/Monika After Story/game/sprite-chart.rpy b/Monika After Story/game/sprite-chart.rpy
index 6a1400ad53..091f56ca2b 100644
--- a/Monika After Story/game/sprite-chart.rpy
+++ b/Monika After Story/game/sprite-chart.rpy
@@ -1042,7 +1042,7 @@ init -5 python in mas_sprites:
# reverse map for eaiser lookup
ARMS_LEAN = {}
- for lean, values in LEAN_ARMS.iteritems():
+ for lean, values in LEAN_ARMS.items():
for value in values:
ARMS_LEAN[value] = lean
@@ -1093,27 +1093,6 @@ init -5 python in mas_sprites:
return allow_none
return _verify_uprightpose(val) or _verify_leaningpose(val)
- @store.mas_utils.deprecated(should_raise=True)
- def acs_lean_mode(sprite_list, lean):
- """
- NOTE: DEPRECATED
-
- Adds the appropriate accessory prefix dpenedong on lean
-
- IN:
- sprite_list - list to add sprites to
- lean - type of lean
- """
- if lean:
- sprite_list.extend((
- PREFIX_ACS_LEAN,
- lean,
- ART_DLM
- ))
-
- else:
- sprite_list.append(PREFIX_ACS)
-
def face_lean_mode(lean):
"""
@@ -1185,7 +1164,7 @@ init -5 python in mas_sprites:
if no ACS of the given type
"""
return [
- acs for acs in ACS_MAP.itervalues()
+ acs for acs in ACS_MAP.values()
if acs.acs_type == acs_type
]
@@ -1380,7 +1359,7 @@ init -5 python in mas_sprites:
if predicate:
return [
spr_object
- for spr_object in sprite_map.itervalues()
+ for spr_object in sprite_map.values()
if predicate(spr_object)
]
@@ -2829,10 +2808,18 @@ init -3 python:
startup - True if we are loading on start, False if not
(Default: False)
"""
+ clothes = store.mas_sprites.CLOTH_MAP.get(_clothes_name, None)
+ if clothes is None:
+ store.mas_utils.mas_log.warning(f"Failed to find clothes '{_clothes_name}', restoring default")
+ clothes = store.mas_clothes_def
+ hair = store.mas_sprites.HAIR_MAP.get(_hair_name, None)
+ if hair is None:
+ store.mas_utils.mas_log.warning(f"Failed to find hair '{_hair_name}', restoring default")
+ hair = store.mas_hair_def
# clothes and hair
self.change_outfit(
- store.mas_sprites.CLOTH_MAP.get(_clothes_name, store.mas_clothes_def),
- store.mas_sprites.HAIR_MAP.get(_hair_name, store.mas_hair_def),
+ clothes,
+ hair,
startup=startup
)
@@ -3506,7 +3493,7 @@ init -3 python:
IN:
exprop - exprop to check for
"""
- for acs_name in self.acs_list_map.keys():
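+ # iterate over a snapshot of the keys: remove_acs_in() mutates acs_list_map, which
+ # py3 would reject as "dictionary changed size during iteration"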
+ for acs_name in tuple(self.acs_list_map.keys()):
_acs = store.mas_sprites.ACS_MAP.get(acs_name, None)
if _acs and _acs.hasprop(exprop):
self.remove_acs_in(_acs, self.acs_list_map[acs_name])
@@ -3520,7 +3507,7 @@ init -3 python:
"""
for mux_type in mux_types:
acs_with_mux = self._acs_type_map.get(mux_type, {})
- for acs_name in acs_with_mux.keys():
+ for acs_name in tuple(acs_with_mux.keys()):
self.remove_acs(store.mas_sprites.get_acs(acs_name))
def remove_acs_in(self, accessory, acs_layer):
@@ -4295,7 +4282,7 @@ init -3 python:
vhl_data,
msg_log,
ind_lvl,
- layer_map.keys()
+ list(layer_map.keys())
):
# success
hl_data = vhl_data.get("hl_data", None)
@@ -4366,7 +4353,7 @@ init -3 python:
IN:
mapping - mapping to clean
"""
- for map_key in mapping.keys():
+ for map_key in tuple(mapping.keys()):
if map_key not in self.__MPA_KEYS:
mapping.pop(map_key)
@@ -4569,7 +4556,7 @@ init -3 python:
arm_data - cleaned arm data
"""
# first validate the arm data
- for arm_key in arm_data.keys():
+ for arm_key in tuple(arm_data.keys()):
# then check
if arm_key in store.mas_sprites.NUM_ARMS:
@@ -4651,7 +4638,7 @@ init -3 python:
# loop over valid arm data
isbad = False
- for arm_id, arm_sid in store.mas_sprites.NUM_ARMS.iteritems():
+ for arm_id, arm_sid in store.mas_sprites.NUM_ARMS.items():
if arm_sid in json_obj:
arm_obj = json_obj.pop(arm_sid)
@@ -5195,7 +5182,7 @@ init -3 python:
# verify other params
isbad = False
- for prop_name in json_obj.keys():
+ for prop_name in tuple(json_obj.keys()):
prop_val = json_obj.pop(prop_name)
if prop_name in cls.CONS_PARAM_NAMES:
if not cls._verify_mpm_item(
@@ -5274,13 +5261,13 @@ init -3 python:
"""
try:
values = []
- for value in self.__all_map.itervalues():
+ for value in self.__all_map.values():
if value is not None and value not in values:
values.append(value)
return values
except:
- return self.values()
+ return list(self.values())
def values(self):
"""
@@ -5290,7 +5277,7 @@ init -3 python:
"""
return [
value
- for value in self.__all_map.itervalues()
+ for value in self.__all_map.values()
if value is not None
]
@@ -5605,7 +5592,7 @@ init -3 python:
if self.hl_map is None:
return []
- return self.hl_map.keys()
+ return list(self.hl_map.keys())
def is_dynamic(self):
"""
@@ -8395,7 +8382,7 @@ python early:
exps = (exps,)
for exp in exps:
- for aff_lvl, exp_list in self.exp_map.iteritems():
+ for aff_lvl, exp_list in self.exp_map.items():
if exp.check_aff(aff_lvl):
exp_list.append(exp)
@@ -8447,7 +8434,7 @@ python early:
"""
need_redraw = self.current_exp is exp
- for exp_list in self.exp_map.itervalues():
+ for exp_list in self.exp_map.values():
if exp in exp_list:
exp_list.remove(exp)
need_redraw = True
@@ -8482,7 +8469,7 @@ python early:
need_redraw = True
break
- for exp_list in self.exp_map.itervalues():
+ for exp_list in self.exp_map.values():
for exp_id in range(len(exp_list)-1, -1, -1):
if exp_list[exp_id].tag == tag:
exp_list.pop(exp_id)
@@ -9420,7 +9407,7 @@ python early:
"""
Adds 4 new params
"""
- super(_MASMoniFollowTransformDissolve, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
# KEY CHANGES
self.new_widget_st = None
@@ -9452,16 +9439,31 @@ python early:
bottom = renpy.display.transition.render(self.old_widget, width, height, old_widget_st, old_widget_at)
top = renpy.display.transition.render(self.new_widget, width, height, new_widget_st, new_widget_at)
+ # END KEY CHANGES
width = min(top.width, bottom.width)
height = min(top.height, bottom.height)
- rv = renpy.display.render.Render(width, height, opaque=not self.alpha)
+ rv = renpy.display.render.Render(width, height)
rv.operation = renpy.display.render.DISSOLVE
- rv.operation_alpha = self.alpha
+ rv.operation_alpha = self.alpha or renpy.config.dissolve_force_alpha
rv.operation_complete = complete
+ if renpy.display.render.models:
+
+ target = rv.get_size()
+
+ if top.get_size() != target:
+ top = top.subsurface((0, 0, width, height))
+ if bottom.get_size() != target:
+ bottom = bottom.subsurface((0, 0, width, height))
+
+ rv.mesh = True
+ rv.add_shader("renpy.dissolve")
+ rv.add_uniform("u_renpy_dissolve", complete)
+ rv.add_property("mipmap", renpy.config.mipmap_dissolves if (self.style.mipmap is None) else self.style.mipmap)
+
rv.blit(bottom, (0, 0), focus=False, main=False)
rv.blit(top, (0, 0), focus=True, main=True)
@@ -9519,8 +9521,9 @@ python early:
# self.up_eyes_code: self.up_eyes_img,
# self.down_eyes_code: self.down_eyes_img
}
- for first_img_code in img_map.iterkeys():
- for second_img_code in img_map.iterkeys():
+
+ for first_img_code in img_map.keys():
+ for second_img_code in img_map.keys():
if first_img_code != second_img_code:
self.transform_map[(first_img_code, second_img_code)] = _MASMoniFollowTransformDissolve(
time=MASMoniFollowTransform.DIS_DUR,
diff --git a/Monika After Story/game/sprite-decoder.rpy b/Monika After Story/game/sprite-decoder.rpy
index f04b7aadb6..858ada6d96 100644
--- a/Monika After Story/game/sprite-decoder.rpy
+++ b/Monika After Story/game/sprite-decoder.rpy
@@ -56,14 +56,14 @@ init python in mas_sprite_decoder:
SWEAT_MAP = jobj["sweat"]
MOD_MAP = jobj["MOD_MAP"]
# Convert lists into sets for speed
- for sub_map in MOD_MAP.itervalues():
- for key, value in sub_map.iteritems():
+ for sub_map in MOD_MAP.values():
+ for key, value in sub_map.items():
sub_map[key] = set(value)
#Since tuples aren't supported in json, we need to do some conversion here
ARM_MAP["5"] = tuple(ARM_MAP["5"])
- for side_key, side_list in SIDES_MAP.iteritems():
+ for side_key, side_list in SIDES_MAP.items():
SIDES_MAP[side_key] = tuple(side_list)
#I don't really like this but it's a cleaner way of bringing up this exception once instead of multiple times
diff --git a/Monika After Story/game/sprite-generator.rpy b/Monika After Story/game/sprite-generator.rpy
index a08da1643a..d9e9b5ddc8 100644
--- a/Monika After Story/game/sprite-generator.rpy
+++ b/Monika After Story/game/sprite-generator.rpy
@@ -12,7 +12,7 @@ init python in mas_sprites:
name - tuple of strings (tag, attributes)
d - displayables
"""
- if store.mas_globals.is_r7:
+ if store.mas_globals.is_at_least_r7:
renpy.display.image.register_image(name, d)
else:
diff --git a/Monika After Story/game/styles.rpy b/Monika After Story/game/styles.rpy
index ac16a2c9bb..a5a2033ebe 100644
--- a/Monika After Story/game/styles.rpy
+++ b/Monika After Story/game/styles.rpy
@@ -172,7 +172,7 @@ init python:
# FIXME: could be done on startup for some speedup
new_aliases = {}
- for style_tuple, style_ptr in renpy.style.styles.iteritems():
+ for style_tuple, style_ptr in renpy.style.styles.items():
style_name = style_tuple[0]
if mas_isTextDarkStyle(style_name):
text_dark_suffix = "_text" + mas_ui.dark_suffix
@@ -181,7 +181,7 @@ init python:
if not style.exists(alias_name):
new_aliases[alias_name] = style_ptr
- for alias_name, alias_style_ptr in new_aliases.iteritems():
+ for alias_name, alias_style_ptr in new_aliases.items():
setattr(style, alias_name, alias_style_ptr)
# Automagically switch every style which has a dark variant
@@ -251,6 +251,8 @@ init python in mas_settings:
"""
Handles the toggling of fields so the menu options become mutually exclusive
"""
+ global dark_mode_clicked
+
if _persistent._mas_dark_mode_enabled:
_persistent._mas_dark_mode_enabled = False
@@ -258,7 +260,6 @@ init python in mas_settings:
_persistent._mas_dark_mode_enabled = True
_persistent._mas_auto_mode_enabled = False
- global dark_mode_clicked
dark_mode_clicked = True
def _ui_change_wrapper(*args):
@@ -269,6 +270,7 @@ init python in mas_settings:
*args - values to pass to dark mode
"""
global ui_changed
+
ui_changed = True
store.mas_darkMode(*args)
@@ -437,7 +439,7 @@ init 25 python in mas_ui:
OUT:
dict of key-value pairs
"""
- return {item[0]: item[1]["return_value"] for item in buttons_data.iteritems() if item[1]["return_value"] == item[1]["true_value"] or return_all}
+ return {item[0]: item[1]["return_value"] for item in buttons_data.items() if item[1]["return_value"] == item[1]["true_value"] or return_all}
def check_scr_menu_choose_prompt(buttons_data, selected_prompt, default_prompt):
"""
@@ -451,7 +453,7 @@ init 25 python in mas_ui:
OUT:
string with prompt
"""
- for data in buttons_data.itervalues():
+ for data in buttons_data.values():
if data["return_value"] == data["true_value"]:
return selected_prompt
return default_prompt
@@ -459,10 +461,10 @@ init 25 python in mas_ui:
# Methods for twopane_scrollable_menu
TWOPANE_MENU_MAX_FLT_ITEMS = 50
TWOPANE_MENU_SEARCH_DBS = (
- store.mas_all_ev_db_map["EVE"].values()
+ list(store.mas_all_ev_db_map["EVE"].values())
# + store.mas_all_ev_db_map["BYE"].values()
# + store.mas_all_ev_db_map["STY"].values()
- + store.mas_all_ev_db_map["CMP"].values()
+ + list(store.mas_all_ev_db_map["CMP"].values())
# + store.mas_all_ev_db_map["SNG"].values()
)
TWOPANE_MENU_DELEGATES_CALLBACK_MAP = {
@@ -681,3 +683,14 @@ init 25 python in mas_ui:
scr.scope["flt_evs"] = _twopane_menu_search_events(search_query)
# Update the screen
renpy.restart_interaction()
+
+##BASE DDLC Console Styles
+style console_text:
+ font "gui/font/F25_Bank_Printer.ttf"
+ color "#fff"
+ size 18
+ outlines []
+
+
+style console_text_console is console_text:
+ slow_cps 30
diff --git a/Monika After Story/game/updater.rpy b/Monika After Story/game/updater.rpy
index 948c5dbcf8..4c1536c4fd 100644
--- a/Monika After Story/game/updater.rpy
+++ b/Monika After Story/game/updater.rpy
@@ -12,6 +12,7 @@ default persistent._mas_just_updated = False
# new s3 links
define mas_updater.regular = "http://d2vycydjjutzqv.cloudfront.net/updates.json"
define mas_updater.unstable = "http://dzfsgufpiee38.cloudfront.net/updates.json"
+define mas_updater.r7 = "http://d1j8x24k8p6koi.cloudfront.net/updates.json"
define mas_updater.force = False
define mas_updater.timeout = 10 # timeout default
@@ -328,14 +329,12 @@ init -1 python:
new_url - the redirect we want to connect to
Returns read_json if we got a connection, Nnone otherwise
"""
- import httplib
+ from http.client import HTTPConnection, HTTPException
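+ # py3: httplib was renamed to http.client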
_http, double_slash, url = new_url.partition("//")
url, single_slash, req_uri = url.partition("/")
read_json = None
- h_conn = httplib.HTTPConnection(
- url
- )
+ h_conn = HTTPConnection(url)
try:
# make connection
@@ -351,7 +350,7 @@ init -1 python:
read_json = server_response.read()
- except httplib.HTTPException:
+ except HTTPException:
# we assume a timeout / connection error
return None
@@ -381,7 +380,7 @@ init -1 python:
_thread_result
appends appropriate state for use
"""
- import httplib
+ from http.client import HTTPConnection, HTTPException
import json
# separate the update link parts
@@ -389,9 +388,7 @@ init -1 python:
_http, double_slash, url = update_link.partition("//")
url, single_slash, json_file = url.partition("/")
read_json = None
- h_conn = httplib.HTTPConnection(
- url
- )
+ h_conn = HTTPConnection(url)
try:
# make connection and attempt to connect
@@ -429,7 +426,7 @@ init -1 python:
# good status, lets get the value
read_json = server_response.read()
- except httplib.HTTPException:
+ except HTTPException:
# we assume a timeout / connection error
thread_result.append(MASUpdaterDisplayable.STATE_TIMEOUT)
return
@@ -671,7 +668,7 @@ init -1 python:
init python in mas_updater:
-
+ import store
def checkUpdate():
"""
diff --git a/Monika After Story/game/updates.rpy b/Monika After Story/game/updates.rpy
index a5d0ca52db..847a1849be 100644
--- a/Monika After Story/game/updates.rpy
+++ b/Monika After Story/game/updates.rpy
@@ -332,7 +332,7 @@ init 10 python:
]
store.mas_versions.init()
- ver_list = store.updates.version_updates.keys()
+ ver_list = list(store.updates.version_updates.keys())
if "-" in config.version:
working_version = config.version[:config.version.index("-")]
@@ -374,6 +374,12 @@ label v0_3_1(version=version): # 0.3.1
# non generic updates go here
+# 0.13.0 aka RenPy8/Python3
+label v0_13_0(version="v0_13_0"):
+ python hide:
+ pass
+ return
+
# 0.12.15
label v0_12_15(version="v0_12_15"):
python hide:
@@ -939,7 +945,7 @@ label v0_11_9_1(version="v0_11_9_1"):
# We don't use this var anymore
safeDel("chess_strength")
- for story_type, story_last_seen in persistent._mas_last_seen_new_story.iteritems():
+ for story_type, story_last_seen in persistent._mas_last_seen_new_story.items():
if story_last_seen is not None:
persistent._mas_last_seen_new_story[story_type] = datetime.datetime.combine(
story_last_seen, datetime.time()
@@ -1205,7 +1211,7 @@ label v0_11_4(version="v0_11_4"):
label v0_11_3(version="v0_11_3"):
python:
#Rerandom all songs which aren't d25 exclusive
- for song_ev in mas_songs.song_db.itervalues():
+ for song_ev in mas_songs.song_db.values():
if (
song_ev.eventlabel not in ["mas_song_aiwfc", "mas_song_merry_christmas_baby"]
and mas_songs.TYPE_LONG not in song_ev.category
@@ -1221,7 +1227,7 @@ label v0_11_3(version="v0_11_3"):
persistent._mas_pool_unlocks += store.mas_xp.level() * 4
#Adjust consumables to be at their max stock amount
- for consumable_id in persistent._mas_consumable_map.iterkeys():
+ for consumable_id in persistent._mas_consumable_map.keys():
cons = mas_getConsumable(consumable_id)
if cons and cons.getStock() > cons.max_stock_amount:
@@ -1456,7 +1462,7 @@ label v0_11_1(version="v0_11_1"):
label v0_11_0(version="v0_11_0"):
python:
#First, we're fixing the consumables map
- for cons_id in persistent._mas_consumable_map.iterkeys():
+ for cons_id in persistent._mas_consumable_map.keys():
persistent._mas_consumable_map[cons_id]["has_restock_warned"] = False
#Let's stock current users on some consumables (assuming they've gifted before)
@@ -1522,7 +1528,7 @@ label v0_11_0(version="v0_11_0"):
"greeting_hamlet": "store.mas_getAbsenceLength() >= datetime.timedelta(days=7)"
}
- for gr_label, conditional in new_greetings_conditions.iteritems():
+ for gr_label, conditional in new_greetings_conditions.items():
gr_ev = mas_getEV(gr_label)
if gr_ev:
gr_ev.conditional = conditional
@@ -1561,7 +1567,7 @@ label v0_11_0(version="v0_11_0"):
"monika_changename": "mas_preferredname"
}
- for new_evl, old_evl in topic_transfer_map.iteritems():
+ for new_evl, old_evl in topic_transfer_map.items():
mas_transferTopicData(new_evl, old_evl, persistent.event_database)
#If we've seen this event before, then we shouldn't allow its conditions to be true again
@@ -1742,7 +1748,7 @@ label v0_10_6(version="v0_10_6"):
persistent._mas_history_archives[year]["player_bday.saw_surprise"] = True
#Give unseen fun facts the unlocked prop
- for ev in mas_fun_facts.fun_fact_db.itervalues():
+ for ev in mas_fun_facts.fun_fact_db.values():
if ev.shown_count:
ev.unlocked = True
@@ -1833,7 +1839,7 @@ label v0_10_5(version="v0_10_5"):
"mas_bad_facts_4": "mas_bad_fact_tree_moss",
}
- for old_evl, new_evl in fun_facts_evls.iteritems():
+ for old_evl, new_evl in fun_facts_evls.items():
mas_transferTopicData(
new_evl,
old_evl,
@@ -1860,7 +1866,7 @@ label v0_10_5(version="v0_10_5"):
"mas_monika_daynight2": "mas_island_daynight2"
}
- for old_label, new_label in islands_evs.iteritems():
+ for old_label, new_label in islands_evs.items():
mas_transferTopicSeen(old_label, new_label)
#Fix these persist vars
@@ -2065,10 +2071,10 @@ label v0_10_3(version="v0_10_3"):
python:
#Convert fav/derand dicts to lists based on their keys if needed
if isinstance(persistent._mas_player_bookmarked, dict):
- persistent._mas_player_bookmarked = persistent._mas_player_bookmarked.keys()
+ persistent._mas_player_bookmarked = list(persistent._mas_player_bookmarked.keys())
if isinstance(persistent._mas_player_derandomed, dict):
- persistent._mas_player_derandomed = persistent._mas_player_derandomed.keys()
+ persistent._mas_player_derandomed = list(persistent._mas_player_derandomed.keys())
return
@@ -2585,7 +2591,7 @@ label v0_9_0(version="v0_9_0"):
mas_bd_ev.action = EV_ACT_QUEUE
# remove random props from all greetings
- for gre_label, gre_ev in store.evhand.greeting_database.iteritems():
+ for gre_label, gre_ev in store.evhand.greeting_database.items():
# hopefully we never use random in greetings ever
gre_ev.random = False
diff --git a/Monika After Story/game/updates_topics.rpy b/Monika After Story/game/updates_topics.rpy
index 82b014f9bb..63cd20cecb 100644
--- a/Monika After Story/game/updates_topics.rpy
+++ b/Monika After Story/game/updates_topics.rpy
@@ -29,16 +29,6 @@ init -1 python in mas_db_merging:
)
-# preeerything
-init -1 python:
- @store.mas_utils.deprecated(use_instead="mas_versions.clear", should_raise=True)
- def clearUpdateStructs():
- """DEPRECATED
- Use mas_versions.clear instead
- """
- store.mas_versions.clear()
-
-
init 9 python:
store.mas_versions.init()
@@ -101,6 +91,7 @@ init -2 python in mas_versions:
# use dot notation to separate the parts of a version
add_steps({
+ # "0.13.0": "0.12.15",
#"0.12.15": ("0.12.14", "0.12.13"),
"0.12.13": "0.12.12",
"0.12.12": ("0.12.11", "0.12.10"),
diff --git a/Monika After Story/game/zz_apikeys.rpy b/Monika After Story/game/zz_apikeys.rpy
index 87766fa635..b659c43f88 100644
--- a/Monika After Story/game/zz_apikeys.rpy
+++ b/Monika After Story/game/zz_apikeys.rpy
@@ -494,8 +494,22 @@ init -980 python in mas_api_keys:
# null key is not counted
return
- # clear newlines
- new_key = clean_key(new_key)
+ try:
+ # clear newlines
+ new_key = clean_key(new_key.decode("utf-8"))
+
+ except UnicodeDecodeError as e:
+ # log the error
+ store.mas_utils.mas_log.error("Failed to decode API key: {}".format(e))
+
+ # show message box
+ store.renpy.show_screen(
+ "dialog",
+ message="Failed to decode API key.",
+ ok_action=store.Hide("dialog")
+ )
+ # can't get a clean key, return here
+ return
# on change
onchange_rv = _run_on_change(feature, new_key)
diff --git a/Monika After Story/game/zz_backgrounds.rpy b/Monika After Story/game/zz_backgrounds.rpy
index 8e0fa23dba..6c648f5f7b 100644
--- a/Monika After Story/game/zz_backgrounds.rpy
+++ b/Monika After Story/game/zz_backgrounds.rpy
@@ -846,7 +846,7 @@ init -10 python:
for sl_data in self._slices:
filters[sl_data.flt_slice.name] = None
- return filters.keys()
+ return list(filters.keys())
def first_flt(self):
"""
@@ -1233,11 +1233,11 @@ init -10 python:
def __repr__(self):
day_f = self._day_filters
if day_f is not None:
- day_f = day_f.keys()
+ day_f = list(day_f.keys())
night_f = self._night_filters
if night_f is not None:
- night_f = night_f.keys()
+ night_f = list(night_f.keys())
return (
" 0
and seen_images_key[0] == "monika"
diff --git a/Monika After Story/game/zz_calendar.rpy b/Monika After Story/game/zz_calendar.rpy
index 7c80cff205..db41f35d3d 100644
--- a/Monika After Story/game/zz_calendar.rpy
+++ b/Monika After Story/game/zz_calendar.rpy
@@ -1312,8 +1312,9 @@ init -1 python in mas_calendar:
the database
- database a dict containing the events
"""
- with open(renpy.config.savedir + '/db.mcal', 'w') as fp:
- json.dump(calendar_database, fp, cls=encoder)
+ #TODO: Remove the unicode conversion once r8 is here
+ with open(renpy.config.savedir + '/db.mcal', 'w', encoding="utf-8") as fp:
+ fp.write(unicode(json.dumps(calendar_database, cls=encoder, ensure_ascii=False)))
def loadCalendarDatabase():
diff --git a/Monika After Story/game/zz_cardgames.rpy b/Monika After Story/game/zz_cardgames.rpy
index 4c12de4669..7a7e433e6f 100644
--- a/Monika After Story/game/zz_cardgames.rpy
+++ b/Monika After Story/game/zz_cardgames.rpy
@@ -1095,6 +1095,7 @@ init 5 python in mas_nou:
player - the player we check
"""
global winner
+
if player.hand:
return
@@ -2442,7 +2443,7 @@ init 5 python in mas_nou:
return rv
sorted_list = sorted(
- cards_data.iteritems(),
+ cards_data.items(),
key=lambda item: sortKey(
item,
keys_sort_order=keys_sort_order,
@@ -5357,7 +5358,7 @@ init -10 python in mas_cardgames:
# Fill the map with the sprites (or use the def as a fallback)
fb = sprites_map.get(store.mas_background.MBG_DEF)
- for bg_id in store.mas_background.BACKGROUND_MAP.iterkeys():
+ for bg_id in store.mas_background.BACKGROUND_MAP.keys():
if bg_id not in DESK_SPRITES_MAP:
filename = sprites_map.get(bg_id, fb)
DESK_SPRITES_MAP[bg_id] = MASFilterSwitch(DESK_SPRITES_PATH + filename)
@@ -5592,7 +5593,7 @@ init -10 python in mas_cardgames:
layer - the layer we'll render our table on
(Default: "minigames")
"""
- for v in self.cards.itervalues():
+ for v in self.cards.values():
v._offset = __Fixed(0, 0)
ui.layer(layer)
@@ -5801,8 +5802,8 @@ init -10 python in mas_cardgames:
Returns a list of all displayable objects we use
"""
stacks_bases = [stack.base for stack in self.stacks]
- cards_faces = [card.face for card in self.cards.itervalues()]
- cards_backs = [card.back for card in self.cards.itervalues()]
+ cards_faces = [card.face for card in self.cards.values()]
+ cards_backs = [card.back for card in self.cards.values()]
return stacks_bases + cards_faces + cards_backs
diff --git a/Monika After Story/game/zz_consumables.rpy b/Monika After Story/game/zz_consumables.rpy
index 9098b112c6..af58a5614d 100644
--- a/Monika After Story/game/zz_consumables.rpy
+++ b/Monika After Story/game/zz_consumables.rpy
@@ -708,7 +708,7 @@ init 5 python:
list of all consumables Monika is low on (or critical on)
"""
low_cons = []
- for _type in store.mas_consumables.consumable_map.iterkeys():
+ for _type in store.mas_consumables.consumable_map.keys():
low_cons += MASConsumable._getLowConsType(_type, critical)
return low_cons
@@ -726,7 +726,7 @@ init 5 python:
list of all consumables Monika
"""
low_cons = []
- for _type in store.mas_consumables.consumable_map.iterkeys():
+ for _type in store.mas_consumables.consumable_map.keys():
low_cons += MASConsumable._getLowConsType(_type, critical, exclude_restock_warned=True)
return low_cons
@@ -753,14 +753,14 @@ init 5 python:
if exclude_restock_warned:
return [
cons
- for cons in store.mas_consumables.consumable_map[_type].itervalues()
+ for cons in store.mas_consumables.consumable_map[_type].values()
if cons.enabled() and cons.should_restock_warn and cons.isCriticalLow() and not cons.hasRestockWarned()
]
else:
return [
cons
- for cons in store.mas_consumables.consumable_map[_type].itervalues()
+ for cons in store.mas_consumables.consumable_map[_type].values()
if cons.enabled() and cons.should_restock_warn and cons.isCriticalLow()
]
@@ -768,14 +768,14 @@ init 5 python:
if exclude_restock_warned:
return [
cons
- for cons in store.mas_consumables.consumable_map[_type].itervalues()
+ for cons in store.mas_consumables.consumable_map[_type].values()
if cons.enabled() and cons.should_restock_warn and cons.isLow() and not cons.hasRestockWarned()
]
else:
return [
cons
- for cons in store.mas_consumables.consumable_map[_type].itervalues()
+ for cons in store.mas_consumables.consumable_map[_type].values()
if cons.enabled() and cons.should_restock_warn and cons.isLow()
]
@@ -920,7 +920,7 @@ init 5 python:
return [
cons
- for cons in mas_consumables.consumable_map[_type].itervalues()
+ for cons in mas_consumables.consumable_map[_type].values()
if cons.enabled() and cons.hasServing() and cons.checkCanHave() and cons.isConsTime()
]
@@ -1516,7 +1516,7 @@ label mas_consumables_generic_finish_having(consumable):
and mas_getEV("mas_consumables_generic_queued_running_out").timePassedSinceLastSeen_d(datetime.timedelta(days=7))
and len(MASConsumable._getLowCons()) > 0
):
- $ mas_display_notif(m_name, ("Hey, [player]...",), "Topic Alerts")
+ $ mas_display_notif(m_name, ("Hey, [player]...",), "Topic Alerts", flash_window=True)
$ MASEventList.queue("mas_consumables_generic_queued_running_out")
#Only have one left
diff --git a/Monika After Story/game/zz_dockingstation.rpy b/Monika After Story/game/zz_dockingstation.rpy
index 6f66fa1d54..fcc2855885 100644
--- a/Monika After Story/game/zz_dockingstation.rpy
+++ b/Monika After Story/game/zz_dockingstation.rpy
@@ -71,8 +71,7 @@ init -45 python:
"""
import hashlib # sha256 signatures
import base64 # "packing" shipments involve base64
- from StringIO import StringIO as slowIO
- from cStringIO import StringIO as fastIO
+ from io import BytesIO, StringIO
import store.mas_utils as mas_utils # logging
@@ -340,12 +339,12 @@ init -45 python:
RETURNS:
tuple of the following format:
- [0] - base64 version of the given data, in a cStringIO buffer
+ [0] - base64 version of the given data, in a BytesIO buffer
[1] - sha256 checksum if pkg_slip is True, None otherwise
"""
box = None
try:
- box = self.fastIO()
+ box = self.BytesIO()
return (box, self._pack(contents, box, True, pkg_slip))
@@ -482,8 +481,8 @@ init -45 python:
### we have a package, lets unpack it
if keep_contents:
- # use slowIO since we dont know contents unpacked
- contents = slowIO()
+ # use StringIO since we don't know what the unpacked contents are
+ contents = StringIO()
# we always want a package slip in this case
# we only want to unpack if we are keeping contents
@@ -591,7 +590,7 @@ init -45 python:
# internalize contents so we can do proper file closing
if contents is None:
- _contents = self.slowIO()
+ _contents = self.BytesIO()
else:
_contents = contents
@@ -699,7 +698,7 @@ init -45 python:
def unpackPackage(self, package, pkg_slip=None):
"""
Unpacks a package
- (decodes a base64 file into a regular StringIO buffer)
+ (decodes a base64 file into a regular BytesIO buffer)
NOTE: may throw exceptions
@@ -712,7 +711,7 @@ init -45 python:
(Default: None)
RETURNS:
- StringIO buffer containing the package decoded
+ BytesIO buffer containing the package decoded
Or None if pkg_slip checksum was passed in and the given
package failed the checksum
"""
@@ -722,7 +721,7 @@ init -45 python:
contents = None
try:
- # NOTE: we use regular StringIO in case of unicode
+ # NOTE: decoded data is handled as bytes here, so use BytesIO
- contents = self.slowIO()
+ contents = self.BytesIO()
_pkg_slip = self._unpack(
package,
@@ -1083,7 +1082,7 @@ init -11 python in mas_dockstat:
Returns TRUE upon success, False otherwise
"""
if len(selective) == 0:
- selective = image_dict.keys()
+ selective = list(image_dict.keys())
for b64_name in selective:
real_name, chksum = image_dict[b64_name]
@@ -1170,7 +1169,7 @@ init -11 python in mas_dockstat:
AKA quitting
"""
if len(selective) == 0:
- selective = image_dict.keys()
+ selective = list(image_dict.keys())
for b64_name in selective:
real_name, chksum = image_dict[b64_name]
@@ -1179,7 +1178,7 @@ init -11 python in mas_dockstat:
init python in mas_dockstat:
import store
- import cPickle
+ import renpy.compat.pickle as pickle
import math
# previous vars dict
@@ -1211,7 +1210,7 @@ init 200 python in mas_dockstat:
import store.mas_greetings as mas_greetings
import store.mas_ics as mas_ics
import store.evhand as evhand
- from cStringIO import StringIO as fastIO
+ from io import StringIO
import codecs
import re
import os
@@ -1283,7 +1282,7 @@ init 200 python in mas_dockstat:
END_DELIM = "|||per|"
try:
- _outbuffer.write(codecs.encode(cPickle.dumps(store.persistent), "base64"))
+ _outbuffer.write(codecs.encode(pickle.dumps(store.persistent), "base64"))
_outbuffer.write(END_DELIM)
return True
@@ -1469,7 +1468,7 @@ init 200 python in mas_dockstat:
### other stuff we need
# inital buffer
- moni_buffer = fastIO()
+ moni_buffer = StringIO()
moni_buffer = codecs.getwriter("utf8")(moni_buffer)
# number deliemter
@@ -1533,7 +1532,7 @@ init 200 python in mas_dockstat:
moni_buffer,
blocksize
)
- moni_tbuffer = fastIO()
+ moni_tbuffer = StringIO()
moni_tbuffer = codecs.getwriter("utf8")(moni_tbuffer)
moni_tbuffer.write(str(lines) + NUM_DELIM)
for _line in moni_buffer_iter:
@@ -1832,8 +1831,8 @@ init 200 python in mas_dockstat:
# TODO: change separator to a very large delimeter so we can handle persistents larger than 4MB
splitted = data_line.split("|||per|")
if(len(splitted)>0):
- return cPickle.loads(codecs.decode(splitted[0] + b'='*4, "base64"))
- return cPickle.loads(codecs.decode(data_line + b'='*4, "base64"))
+ return pickle.loads(codecs.decode(splitted[0] + b'='*4, "base64"))
+ return pickle.loads(codecs.decode(data_line + b'='*4, "base64"))
except Exception as e:
log.error(
diff --git a/Monika After Story/game/zz_dump.rpy b/Monika After Story/game/zz_dump.rpy
index cf41f7ab2c..3f0552ff36 100644
--- a/Monika After Story/game/zz_dump.rpy
+++ b/Monika After Story/game/zz_dump.rpy
@@ -1,7 +1,6 @@
## dumps file for unstablers
init 999 python:
-
def mas_eventDataDump():
"""
Data dump for purely events stats
@@ -86,7 +85,6 @@ init 999 python:
def calcAvgs(self):
"""
Calculates averages
-
Returns tuple:
[0]: show count avg
[1]: pool show count avg
@@ -111,7 +109,6 @@ init 999 python:
elif self.most_seen_ev.shown_count < ev.shown_count:
self.most_seen_ev = ev
-
def inDB(self, ev):
"""
returns true if the given ev is in this db
@@ -159,7 +156,6 @@ init 999 python:
return _seen
-
def __str__(self):
"""
to String
@@ -297,7 +293,8 @@ init 999 python:
last_sesh_ed = persistent.sessions.get("last_session_end", "N/A")
if total_sesh and total_playtime is not None:
- avg_sesh = total_playtime / total_sesh
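+ # average session length = total playtime (as seconds) / session count, rebuilt as a timedelta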
+ total_playtime = total_playtime.total_seconds()
+ avg_sesh = datetime.timedelta(seconds=total_playtime / total_sesh)
else:
avg_sesh = "N/A"
@@ -333,7 +330,6 @@ init 999 python:
return outstr.format(*output)
-
def mas_varDataDump():
"""
Dumps other kinds of data.
diff --git a/Monika After Story/game/zz_games.rpy b/Monika After Story/game/zz_games.rpy
index 782b5134a5..d6f1af27a8 100644
--- a/Monika After Story/game/zz_games.rpy
+++ b/Monika After Story/game/zz_games.rpy
@@ -28,7 +28,7 @@ init 1 python in mas_games:
global game_db
total_shown_count = 0
- for ev in game_db.itervalues():
+ for ev in game_db.values():
if ev.eventlabel not in exclude_list:
total_shown_count += ev.shown_count
@@ -51,7 +51,7 @@ init 7 python in mas_games:
gamename = gamename.lower()
#Now search
- for ev in game_db.itervalues():
+ for ev in game_db.values():
if renpy.substitute(ev.prompt).lower() == gamename:
return ev
return None
@@ -201,7 +201,7 @@ label mas_pick_a_game:
#Now let's get all of the unlocked games at the aff level
game_menuitems = sorted([
(ev.prompt, ev.eventlabel, False, False)
- for ev in mas_games.game_db.itervalues()
+ for ev in mas_games.game_db.values()
if mas_isGameUnlocked(renpy.substitute(ev.prompt))
], key=lambda x:renpy.substitute(x[0]))
diff --git a/Monika After Story/game/zz_hangman.rpy b/Monika After Story/game/zz_hangman.rpy
index ce919dc6fe..342a129a46 100644
--- a/Monika After Story/game/zz_hangman.rpy
+++ b/Monika After Story/game/zz_hangman.rpy
@@ -188,10 +188,11 @@ init -1 python in mas_hangman:
def _add_monika_words(wordlist):
for word in MONI_WORDS:
- wordlist.append(renpy.store.PoemWord(glitch=False,sPoint=0,yPoint=0,nPoint=0,word=word))
+ wordlist.append(renpy.store.MASPoemWord(sPoint=0, yPoint=0, nPoint=0, mPoint=4, word=word))
# file names
+ EASY_LIST = "mod_assets/games/hangman/poemwords.txt"
NORMAL_LIST = "mod_assets/games/hangman/MASpoemwords.txt"
HARD_LIST = "mod_assets/games/hangman/1000poemwords.txt"
@@ -244,10 +245,24 @@ init -1 python in mas_hangman:
easy_list = all_hm_words[EASY_MODE]
# lets start with Non Monika words
- easy_list[:] = [
- store.MASPoemWord._build(word, 0)._hangman()
- for word in store.full_wordlist
- ]
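+ # read the easy list from file; each non-comment line is expected to be comma-separated:
+ # a word followed by at least three numeric point values (same layout as the stock poemwords file)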
+ with open(renpy.config.gamedir + "/" + EASY_LIST, "r") as poemwords:
+ for line in poemwords:
+ line = line.strip()
+
+ #Ignore line if commented/empty
+ if line == '' or line[0] == '#':
+ continue
+
+ # add the word
+ splitword = line.split(',')
+ easy_list.append(store.MASPoemWord(
+ splitword[0],
+ float(splitword[1]),
+ float(splitword[2]),
+ float(splitword[3]),
+ 0
+ ))
+
# now for monika words
moni_list = list()
@@ -257,7 +272,6 @@ init -1 python in mas_hangman:
copyWordsList(EASY_MODE)
-
def buildNormalList():
"""
Builds the normal word list
@@ -523,7 +537,7 @@ label mas_hangman_game_loop:
$ mas_RaiseShield_core()
# setup glitch text
- $ hm_glitch_word = glitchtext(40) + "?"
+ $ hm_glitch_word = mas_glitchText(40) + "?"
$ style.say_dialogue = style.edited
# show hanging sayori
diff --git a/Monika After Story/game/zz_history.rpy b/Monika After Story/game/zz_history.rpy
index e762a11274..9cc7f83935 100644
--- a/Monika After Story/game/zz_history.rpy
+++ b/Monika After Story/game/zz_history.rpy
@@ -187,7 +187,7 @@ init -860 python in mas_history:
found_data = lookup_otl(key, years_list)
years_found = []
- for year, data_tuple in found_data.iteritems():
+ for year, data_tuple in found_data.items():
status, _data = data_tuple
if status == L_FOUND and _data == _verify:
@@ -246,7 +246,7 @@ init -860 python in mas_history:
ASSUMES: the mhs database is already filled
"""
- for mhs_id, mhs_data in store.persistent._mas_history_mhs_data.iteritems():
+ for mhs_id, mhs_data in store.persistent._mas_history_mhs_data.items():
mhs = mhs_db.get(mhs_id, None)
if mhs is not None:
mhs.fromTuple(mhs_data)
@@ -259,7 +259,7 @@ init -860 python in mas_history:
"""
Saves MASHistorySaver data from mhs_db into persistent
"""
- for mhs_id, mhs in mhs_db.iteritems():
+ for mhs_id, mhs in mhs_db.items():
store.persistent._mas_history_mhs_data[mhs_id] = mhs.toTuple()
@@ -822,7 +822,7 @@ init -850 python:
save_year -= 1
# go through mapping and save data
- for p_key, data_key in self.mapping.iteritems():
+ for p_key, data_key in self.mapping.items():
# retrieve and save
dest._store(source.get(p_key, None), data_key, save_year)
@@ -876,7 +876,7 @@ init -800 python in mas_history:
# is past today.
_now = datetime.datetime.now()
-# for mhs in mhs_db.itervalues():
+# for mhs in mhs_db.values():
for mhs in mhs_sorted_list:
# trigger rules:
# current date must be past trigger
diff --git a/Monika After Story/game/zz_hotkeys.rpy b/Monika After Story/game/zz_hotkeys.rpy
index 6f0d2e6745..fa00e5319b 100644
--- a/Monika After Story/game/zz_hotkeys.rpy
+++ b/Monika After Story/game/zz_hotkeys.rpy
@@ -25,8 +25,9 @@ init -10 python in mas_hotkeys:
IN:
value - True will allow dismiss, False will not
"""
+ global allow_dismiss
+
if not lock_dismiss:
- global allow_dismiss
allow_dismiss = value
diff --git a/Monika After Story/game/zz_interactions.rpy b/Monika After Story/game/zz_interactions.rpy
index 3a7fc7ad59..387cd54b62 100644
--- a/Monika After Story/game/zz_interactions.rpy
+++ b/Monika After Story/game/zz_interactions.rpy
@@ -100,8 +100,8 @@ init -10 python in mas_interactions:
[1] zoom level
[2] clickzone
"""
- for zl, zl_d in self._zoom_cz.iteritems():
- for zone_key, cz in zl_d.iteritems():
+ for zl, zl_d in self._zoom_cz.items():
+ for zone_key, cz in zl_d.items():
yield zone_key, zl, cz
def _debug(self, value):
@@ -147,7 +147,7 @@ init -10 python in mas_interactions:
self._zones.pop(zone_key)
# remove from zoom levels
- for zone_d in self._zoom_cz.itervalues():
+ for zone_d in self._zoom_cz.values():
if zone_key in zone_d:
zone_d.pop(zone_key)
@@ -159,7 +159,7 @@ init -10 python in mas_interactions:
zone_key - key of the clickzone to change
value - value to set disabled to
"""
- for zl_d in self._zoom_cz.itervalues():
+ for zl_d in self._zoom_cz.values():
cz = zl_d.get(zone_key, None)
if cz is not None:
cz.disabled = value
@@ -174,7 +174,7 @@ init -10 python in mas_interactions:
# get zoom level dict containing clickzones
zl_set = self._zoom_cz.get(zoom_level, {})
- for zone_key, cz in self._zones.iteritems():
+ for zone_key, cz in self._zones.items():
# only add clickzones that dont already exist
if zone_key not in zl_set:
@@ -221,10 +221,10 @@ init -10 python in mas_interactions:
],
ZONE_CHEST_1_R: [
(514, 453), # (her) right top
- (491, 509),
+ (491, 509),
(489, 533),
(493, 551),
- (498, 555), # (her) right to arm
+ (498, 555), # (her) right to arm
(508, 498),
(515, 453),
],
@@ -467,7 +467,7 @@ init -9 python:
start_zoom - pass this in if the clickzones are startnig at
a zoom level that is not the current.
(Default: None)
- """
+ """
if zone_actions is None:
zone_actions = {}
if zone_order is None:
@@ -482,7 +482,7 @@ init -9 python:
self._zones_unorder = {}
self._last_zoom_level = start_zoom
-
+
self._end_int = None
self._rst_int = False
self._jump_to = None
@@ -592,7 +592,7 @@ init -9 python:
def event(self, ev, x, y, st):
"""
- By default, we process events in order and return/jump as
+ By default, we process events in order and return/jump as
appropriate.
"""
self.event_begin(ev, x, y, st)
@@ -659,7 +659,7 @@ init -9 python:
return r
renders = []
-
+
# render in reverse zone order for visual clarity
for zone_key, cz in self.zone_iter_r():
if not cz.disabled:
diff --git a/Monika After Story/game/zz_music_selector.rpy b/Monika After Story/game/zz_music_selector.rpy
index 26e073e23c..2ff92b6e04 100644
--- a/Monika After Story/game/zz_music_selector.rpy
+++ b/Monika After Story/game/zz_music_selector.rpy
@@ -160,8 +160,8 @@ init -1 python in songs:
# sayori - True if the player name is sayori, which means only
# allow Surprise in the player
- global music_choices
- global music_pages
+ global music_choices, music_pages
+
music_choices = list()
# SONGS:
# if you want to add a song, add it to this list as a tuple, where:
diff --git a/Monika After Story/game/zz_pianokeys.rpy b/Monika After Story/game/zz_pianokeys.rpy
index 1fab614afd..95bb7a6bf1 100644
--- a/Monika After Story/game/zz_pianokeys.rpy
+++ b/Monika After Story/game/zz_pianokeys.rpy
@@ -1958,7 +1958,7 @@ init 800 python in mas_piano_keys:
# We only include stock songs if the player's played them successfully before
song_list = [
(pnml.name, pnml, False, False)
- for pnml in pnml_db.itervalues()
+ for pnml in pnml_db.values()
if (pnml.name not in STOCK_SONG_NAMES or pnml.wins > 0)
]
@@ -2663,7 +2663,7 @@ init 810 python:
# NOTE: highly recommend not adding too many detections
self.pnml_list = []
if self.mode == self.MODE_FREE:
- for _pnml in mas_piano_keys.pnml_db.itervalues():
+ for _pnml in mas_piano_keys.pnml_db.values():
if _pnml.wins == 0:
self.pnml_list.append(_pnml)
_pnml._gen_pnm_sprites()
@@ -2862,7 +2862,7 @@ init 810 python:
self.live_keymap = dict(mas_piano_keys.KEYMAP)
# now apply adjustments
- for key,real_key in persistent._mas_piano_keymaps.iteritems():
+ for key,real_key in persistent._mas_piano_keymaps.items():
if (
real_key in self.live_keymap
and real_key == self.live_keymap[real_key]
diff --git a/Monika After Story/game/zz_poemgame.rpy b/Monika After Story/game/zz_poemgame.rpy
index c6a34b8d02..5ee09e5e4f 100644
--- a/Monika After Story/game/zz_poemgame.rpy
+++ b/Monika After Story/game/zz_poemgame.rpy
@@ -142,24 +142,26 @@ init -4 python:
def readInFile(self, wordfile):
- # copied from poemgame (with adjustments)
- #
- # Reads in a file into the wordlist
- #
- # NOTE: On the poemwords file
- # The file must consist of the following format:
- # word,#1,#2,#3,#4
- # WHERE:
- # word - the word we want in the poem
- # #1 - the points this word gives to sayori
- # #2 - the points this word gives to natsuki
- # #3 - the points this word gives to yuri
- # #4 - the points this word gives to monika
- # (LINES that strat with # are ignored)
- #
- # IN:
- # wordfile - the filename/path of the file to read words
- with renpy.file(wordfile) as words:
+ """
+ copied from poemgame (with adjustments)
+
+ Reads in a file into the wordlist
+
+ NOTE: On the poemwords file
+ The file must consist of the following format:
+ word,#1,#2,#3,#4
+ WHERE:
+ word - the word we want in the poem
+ #1 - the points this word gives to sayori
+ #2 - the points this word gives to natsuki
+ #3 - the points this word gives to yuri
+ #4 - the points this word gives to monika
+ (LINES that start with # are ignored)
+
+ IN:
+ wordfile - the filename/path of the file to read words
+ """
+ with open(f"{renpy.config.gamedir}/{wordfile}", "r") as words:
for line in words:
line = line.strip()
@@ -1246,7 +1248,7 @@ label mas_poem_minigame (flow,music_filename=audio.t4,show_monika=True,
random.randint(1,glitch_wordscare[1]) == 1
)):
- word = MASPoemWord(glitchtext(7), 0, 0, 0, 0, True)
+ word = MASPoemWord(mas_glitchText(7), 0, 0, 0, 0, True)
# are we displaying a glitched Monika word
elif glitch_words:
@@ -1275,7 +1277,7 @@ label mas_poem_minigame (flow,music_filename=audio.t4,show_monika=True,
random.randint(1,glitch_wordscare[1]) == 1
)):
- word.word = glitchtext(len(word.word))
+ word.word = mas_glitchText(len(word.word))
word.glitch = True
# glitchy words (visual)
@@ -1408,7 +1410,7 @@ label mas_poem_minigame (flow,music_filename=audio.t4,show_monika=True,
# figure out the winner
largest = ""
largestVal = 0
- for girl,points in points.iteritems():
+ for girl,points in points.items():
if points > largestVal:
largest = girl
largestVal = points
diff --git a/Monika After Story/game/zz_poems.rpy b/Monika After Story/game/zz_poems.rpy
index 419a03f571..7d4ea3dacb 100644
--- a/Monika After Story/game/zz_poems.rpy
+++ b/Monika After Story/game/zz_poems.rpy
@@ -59,14 +59,14 @@ init 11 python in mas_poems:
if unseen:
return [
poem
- for poem in poem_map.itervalues()
+ for poem in poem_map.values()
if not poem.is_seen() and poem.category == category
]
#Otherwise we just get all
return [
poem
- for poem in poem_map.itervalues()
+ for poem in poem_map.values()
if poem.category == category
]
@@ -76,7 +76,7 @@ init 11 python in mas_poems:
"""
return sorted([
poem
- for poem in poem_map.itervalues()
+ for poem in poem_map.values()
if poem.is_seen()
], key=poem_sort_key)
@@ -86,7 +86,7 @@ init 11 python in mas_poems:
"""
return sorted([
poem
- for poem in poem_map.itervalues()
+ for poem in poem_map.values()
if not poem.is_seen()
], key=poem_sort_key)
@@ -112,7 +112,7 @@ init 11 python in mas_poems:
"""
return sorted([
(poem.prompt, poem, False, False)
- for poem in poem_map.itervalues()
+ for poem in poem_map.values()
if poem.is_seen()
], key=poem_menu_sort_key)
@@ -232,6 +232,167 @@ init 10 python:
"""
return store.persistent._mas_poems_seen.get(self.poem_id, 0)
+
+## Stock DDLC poems
+init 20 python:
+ poem_m1 = MASPoem(
+ poem_id="poem_m1",
+ category="ddlc",
+ prompt="Hole in Wall (Part 1)",
+ title="Hole in Wall",
+ text="""\
+It couldn't have been me.
+See, the direction the spackle protrudes.
+A noisy neighbor? An angry boyfriend? I'll never know. I wasn't home.
+I peer inside for a clue.
+No! I can't see. I reel, blind, like a film left out in the sun.
+But it's too late. My retinas.
+Already scorched with a permanent copy of the meaningless image.
+It's just a little hole. It wasn't too bright.
+It was too deep.
+Stretching forever into everything.
+A hole of infinite choices.
+I realize now, that I wasn't looking in.
+I was looking out.
+And he, on the other side, was looking in.\
+"""
+ )
+
+ poem_m21 = MASPoem(
+ poem_id="poem_m21",
+ category="ddlc",
+ prompt="Hole in Wall (Part 2)",
+ title="Hole in Wall",
+ text="""\
+But he wasn't looking at me.
+Confused, I frantically glance at my surroundings.
+But my burned eyes can no longer see color.
+Are there others in this room? Are they talking?
+Or are they simply poems on flat sheets of paper,
+The sound of frantic scrawling playing tricks on my ears?
+The room begins to crinkle.
+Closing in on me.
+The air I breathe dissipates before it reaches my lungs.
+I panic. There must be a way out.
+It's right there. He's right there.
+
+Swallowing my fears, I brandish my pen.\
+"""
+ )
+
+ poem_m2 = MASPoem(
+ poem_id="poem_m2",
+ category="ddlc",
+ prompt="Save Me",
+ title="Save Me",
+ text="""\
+The colors, they won't stop.
+Bright, beautiful colors
+Flashing, expanding, piercing
+Red, green, blue
+An endless
+cacophony
+Of meaningless
+noise
+
+
+The noise, it won't stop.
+Violent, grating waveforms
+Squeaking, screeching, piercing
+Sine, cosine, tangent
+ Like playing a chalkboard on a turntable
+ Like playing a vinyl on a pizza crust
+An endless
+poem
+Of meaningless\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
+Load Me
+ \
+"""
+ )
+
+ poem_m3 = MASPoem(
+ poem_id="poem_m3",
+ category="ddlc",
+ prompt="The Lady who Knows Everything",
+ title="The Lady who Knows Everything",
+ text="""\
+An old tale tells of a lady who wanders Earth.
+The Lady who Knows Everything.
+A beautiful lady who has found every answer,
+All meaning,
+All purpose,
+And all that was ever sought.
+
+And here I am,
+
+
+ a feather
+
+
+Lost adrift the sky, victim of the currents of the wind.
+
+Day after day, I search.
+I search with little hope, knowing legends don't exist.
+But when all else has failed me,
+When all others have turned away,
+The legend is all that remains - the last dim star glimmering in the twilit sky.
+
+Until one day, the wind ceases to blow.
+I fall.
+And I fall and fall, and fall even more.
+Gentle as a feather.
+A dry quill, expressionless.
+
+But a hand catches me between the thumb and forefinger.
+The hand of a beautiful lady.
+I look at her eyes and find no end to her gaze.
+
+The Lady who Knows Everything knows what I am thinking.
+Before I can speak, she responds in a hollow voice.
+"I have found every answer, all of which amount to nothing.
+There is no meaning.
+There is no purpose.
+And we seek only the impossible.
+I am not your legend.
+Your legend does not exist."
+
+And with a breath, she blows me back afloat, and I pick up a gust of wind.\
+"""
+ )
+
+ poem_m4 = MASPoem(
+ poem_id="poem_m4",
+ category="ddlc",
+ prompt="Happy End",
+ title="Happy End",
+ text="""\
+Pen in hand, I find my strength.
+The courage endowed upon me by my one and only love.
+Together, let us dismantle this crumbling world
+And write a novel of our own fantasies.
+
+With a flick of her pen, the lost finds her way.
+In a world of infinite choices, behold this special day.
+
+After all,
+Not all good times must come to an end.\
+"""
+ )
+
+# Backport some poem images from DDLC. NOTE: these still use DDLC assets
+image paper = "images/bg/poem.jpg"
+# image paper_glitch = Composite((1280, 720), (0, 0), "paper_glitch1", (0, 0), "paper_glitch2")
+# image paper_glitch1 = "images/bg/poem-glitch1.png"
+image paper_glitch2:
+ "images/bg/poem-glitch2.png"
+ block:
+ yoffset 0
+ 0.05
+ yoffset 20
+ 0.05
+ repeat
+
+
#### mas_showpoem ####
#Handles showing poems and automatically incrementing the shown counts of MASPoems
#Can also show normal poems
diff --git a/Monika After Story/game/zz_reactions.rpy b/Monika After Story/game/zz_reactions.rpy
index 87e3ad90dd..59b6ea8e9a 100644
--- a/Monika After Story/game/zz_reactions.rpy
+++ b/Monika After Story/game/zz_reactions.rpy
@@ -313,7 +313,7 @@ init -11 python in mas_filereacts:
"""
return [
giftname
- for giftname, react_ev in filereact_map.iteritems()
+ for giftname, react_ev in filereact_map.items()
if _key in react_ev.rules
]
@@ -481,7 +481,7 @@ init -11 python in mas_filereacts:
return []
# put the gifts in the reacted map
- for c_gift_name, mas_gift in found_map.iteritems():
+ for c_gift_name, mas_gift in found_map.items():
store.persistent._mas_filereacts_reacted_map[c_gift_name] = mas_gift
found_gifts.sort()
@@ -567,7 +567,7 @@ init -11 python in mas_filereacts:
# otherwise check for random deletion
if _filename is None:
- _filename = random.choice(_map.keys())
+ _filename = random.choice(tuple(_map.keys()))
file_to_delete = _map.get(_filename, None)
if file_to_delete is None:
@@ -679,7 +679,7 @@ init -11 python in mas_filereacts:
IN:
_map - map to delete all
"""
- _map_keys = _map.keys()
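+ # copy the keys first since _core_delete() removes entries from _map as we iterate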
+ _map_keys = tuple(_map.keys())
for _key in _map_keys:
_core_delete(_key, _map)
@@ -842,7 +842,7 @@ init python:
return (None, None, None, None, None)
elif len(persistent._mas_filereacts_sprite_reacted) > 0:
- sp_data = persistent._mas_filereacts_sprite_reacted.keys()[0]
+ sp_data = tuple(persistent._mas_filereacts_sprite_reacted.keys())[0]
giftname = persistent._mas_filereacts_sprite_reacted[sp_data]
else:
@@ -922,7 +922,7 @@ init python:
"""
return sorted([
_date
- for _date, giftstat in persistent._mas_filereacts_historic.iteritems()
+ for _date, giftstat in persistent._mas_filereacts_historic.items()
if giftlabel in giftstat
])
diff --git a/Monika After Story/game/zz_seasons.rpy b/Monika After Story/game/zz_seasons.rpy
index 3f18659b5b..d23edb624b 100644
--- a/Monika After Story/game/zz_seasons.rpy
+++ b/Monika After Story/game/zz_seasons.rpy
@@ -244,7 +244,7 @@ init 10 python in mas_seasons:
"""
Determins the current season and returns appropriate season ID
"""
- for _id, logic in _season_logic_map.iteritems():
+ for _id, logic in _season_logic_map.items():
if logic():
return _id
diff --git a/Monika After Story/game/zz_selector.rpy b/Monika After Story/game/zz_selector.rpy
index 48a8cc0b46..459e46c85a 100644
--- a/Monika After Story/game/zz_selector.rpy
+++ b/Monika After Story/game/zz_selector.rpy
@@ -1058,18 +1058,18 @@ init -10 python in mas_selspr:
(Default: False)
"""
if select_type == SELECT_ACS:
- new_map_view = new_map.viewkeys()
+ new_map_view = new_map.keys()
# determine which map is the "old" and which is "new"
# we want to remove what is excess from the desired map
if use_old:
- old_map_view = old_map.viewkeys()
+ old_map_view = old_map.keys()
remove_keys = new_map_view - old_map_view
remove_map = new_map
add_map = old_map
else:
- old_map_view = prev_map.viewkeys()
+ old_map_view = prev_map.keys()
remove_keys = old_map_view - new_map_view
remove_map = prev_map
add_map = new_map
@@ -1082,7 +1082,7 @@ init -10 python in mas_selspr:
# then readd everything that was previous
# EXCEPT removers
- for item in add_map.itervalues():
+ for item in add_map.values():
if not item.selectable.remover:
moni_chr.wear_acs(item.selectable.get_sprobj())
@@ -1096,7 +1096,7 @@ init -10 python in mas_selspr:
select_map = new_map
# change to that map
- for item in select_map.itervalues():
+ for item in select_map.values():
if use_old or item.selected:
prev_hair = moni_chr.hair
new_hair = item.selectable.get_sprobj()
@@ -1130,7 +1130,7 @@ init -10 python in mas_selspr:
select_map = new_map
# change to that map
- for item in select_map.itervalues():
+ for item in select_map.values():
if use_old or item.selected:
prev_cloth = moni_chr.clothes
new_cloth = item.selectable.get_sprobj()
@@ -1148,11 +1148,11 @@ init -10 python in mas_selspr:
except Exception as e:
store.mas_utils.mas_log.warning("BAD CLOTHES: " + repr(e))
moni_chr.change_clothes(prev_cloth)
-
+
# undo the selection
prev_sel = prev_map[prev_cloth.name]
prev_sel._core_select() # re-adds the old clothing to select map
- prev_sel._send_select_dlg()
+ prev_sel._send_select_dlg()
new_map.pop(new_cloth.name)
return # quit early since you can only have 1 clothes
@@ -1260,7 +1260,7 @@ init -10 python in mas_selspr:
OUT:
select_map - select map cleaned of non-selectd items
"""
- for item_name in select_map.keys():
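+ # snapshot the keys first: entries are popped from select_map inside the loop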
+ for item_name in tuple(select_map.keys()):
if force or not select_map[item_name].selected:
item = select_map.pop(item_name)
item.selected = False # force deselection
@@ -1292,8 +1292,8 @@ init -10 python in mas_selspr:
map.
IN:
- old_map_view - viewkeys view of the old map
- new_map_view - viewkeys view of the new map
+ old_map_view - dict_keys view of the old map
+ new_map_view - dict_keys view of the new map
RETURNS:
True if the maps are the same, false if different.
@@ -1316,7 +1316,7 @@ init -10 python in mas_selspr:
source - source data to read
dest - data place to save
"""
- for item_name, item in source.iteritems():
+ for item_name, item in source.items():
dest[item_name] = item.toTuple()
@@ -1343,7 +1343,7 @@ init -10 python in mas_selspr:
source - source data to load from
dest - data to save the loaded data into
"""
- for item_name, item_tuple in source.iteritems():
+ for item_name, item_tuple in source.items():
if item_name in dest:
dest[item_name].fromTuple(item_tuple)
@@ -1681,16 +1681,6 @@ init -10 python in mas_selspr:
"""
_unlock_item(hair, SELECT_HAIR)
- @store.mas_utils.deprecated(use_instead="unlock_prompt", should_raise=True)
- def unlock_selector(group):
- """DEPRECATED - Use unlock_prompt instead
- Unlocks the selector of the given group.
-
- IN:
- group - group to unlock selector topic.
- """
- unlock_prompt(group)
-
def json_sprite_unlock(sp_obj, unlock_label=True):
"""RUNTIME ONLY
@@ -2728,7 +2718,7 @@ init -1 python:
if not self.multi_select:
# must clean select map
- for item in self.select_map.itervalues():
+ for item in self.select_map.values():
# setting to False will queue for removal of item
# NOTE: the caller must handle teh removal
item.selected = False
@@ -3675,8 +3665,8 @@ label mas_selector_sidebar_select(items, select_type, preview_selections=True, o
prev_select_map = dict(select_map)
# also create views that we use for comparisons
- old_view = old_select_map.viewkeys()
- new_view = select_map.viewkeys()
+ old_view = old_select_map.keys()
+ new_view = select_map.keys()
# disable menu interactions to prevent bugs
disable_esc()
@@ -3813,7 +3803,7 @@ label mas_selector_sidebar_select_confirm:
monika_chr.restore(prev_moni_state)
# If monika is wearing a remover ACS, remove it.
- for item_name in select_map.keys():
+ for item_name in tuple(select_map.keys()):
sel_obj = select_map[item_name].selectable
if sel_obj.remover:
spr_obj = sel_obj.get_sprobj()
diff --git a/Monika After Story/game/zz_spritedeco.rpy b/Monika After Story/game/zz_spritedeco.rpy
index 0c85dae336..106dfdf5d8 100644
--- a/Monika After Story/game/zz_spritedeco.rpy
+++ b/Monika After Story/game/zz_spritedeco.rpy
@@ -1,4 +1,3 @@
-
# large rewrite incoming
init -700 python in mas_deco:
diff --git a/Monika After Story/game/zz_spritejsons.rpy b/Monika After Story/game/zz_spritejsons.rpy
index 5df303533f..92793697eb 100644
--- a/Monika After Story/game/zz_spritejsons.rpy
+++ b/Monika After Story/game/zz_spritejsons.rpy
@@ -394,7 +394,7 @@ default persistent._mas_sprites_json_gifted_sprites = {}
init -21 python in mas_sprites_json:
- import __builtin__
+ import builtins
import json
import store
import store.mas_utils as mas_utils
@@ -448,14 +448,12 @@ init -21 python in mas_sprites_json:
RETURNS: string to be logged
"""
self.update_levelname(record)
- return self.replace_lf(
- self.apply_newline_prefix(
- record,
- "[{0}]: {1}{2}".format(
- record.levelname,
- " " * (record.indent_lvl * 2),
- record.msg
- )
+ return self.apply_newline_prefix(
+ record,
+ "[{0}]: {1}{2}".format(
+ record.levelname,
+ " " * (record.indent_lvl * 2),
+ record.msg
)
)
@@ -467,8 +465,8 @@ init -21 python in mas_sprites_json:
adapter_ctor=SpriteJsonLogAdapter
)
- py_list = __builtin__.list
- py_dict = __builtin__.dict
+ py_list = builtins.list
+ py_dict = builtins.dict
sprite_station = store.MASDockingStation(
renpy.config.basedir + "/game/mod_assets/monika/j/"
@@ -936,12 +934,12 @@ init 189 python in mas_sprites_json:
_sel_list = sml.CLOTH_SEL_SL
# remvoe from sprite object map
- if sp_name in _item_map:
+ if sp_name in tuple(_item_map.keys()):
_item_map.pop(sp_name)
if sml.get_sel(sp_obj) is not None:
# remove from selectable map
- if sp_name in _sel_map:
+ if sp_name in tuple(_sel_map.keys()):
_sel_map.pop(sp_name)
# remove from selectable list
@@ -1350,7 +1348,7 @@ init 189 python in mas_sprites_json:
allow_none = not required
is_bad = False
- for param_name, verifier_info in param_dict.iteritems():
+ for param_name, verifier_info in param_dict.items():
if param_name in jobj:
param_val = jobj.pop(param_name)
desired_type, verifier = verifier_info
@@ -1822,7 +1820,7 @@ init 189 python in mas_sprites_json:
hair_map = obj_based.pop("hair_map")
is_bad = False
- for hair_key,hair_value in hair_map.iteritems():
+ for hair_key, hair_value in hair_map.items():
# start with type validations
# key
@@ -1964,7 +1962,7 @@ init 189 python in mas_sprites_json:
ex_props = obj_based.pop("ex_props")
isbad = False
- for ep_key,ep_val in ex_props.iteritems():
+ for ep_key,ep_val in ex_props.items():
if not _verify_str(ep_key):
msg_log.append((
MSG_ERR_T,
@@ -2262,7 +2260,7 @@ init 189 python in mas_sprites_json:
dry_run = True
# get rid of __keys
- for jkey in jobj.keys():
+ for jkey in tuple(jobj.keys()):
if jkey.startswith("__"):
jobj.pop(jkey)
@@ -2730,7 +2728,7 @@ init 189 python in mas_sprites_json:
frs_gifts = store.persistent._mas_filereacts_sprite_gifts
msj_gifts = store.persistent._mas_sprites_json_gifted_sprites
- for giftname in frs_gifts.keys():
+ for giftname in tuple(frs_gifts.keys()):
if giftname in giftname_map:
# overwrite the gift data if in here
frs_gifts[giftname] = giftname_map[giftname]
diff --git a/Monika After Story/game/zz_submods.rpy b/Monika After Story/game/zz_submods.rpy
index 498775f2a8..7be68be663 100644
--- a/Monika After Story/game/zz_submods.rpy
+++ b/Monika After Story/game/zz_submods.rpy
@@ -11,7 +11,7 @@ init -989 python:
mas_submod_utils.submod_log.info(
"\nINSTALLED SUBMODS:\n{0}".format(
",\n".join(
- [" '{0}' v{1}".format(submod.name, submod.version) for submod in store.mas_submod_utils.submod_map.itervalues()]
+ [" '{0}' v{1}".format(submod.name, submod.version) for submod in store.mas_submod_utils.submod_map.values()]
)
)
)
@@ -64,7 +64,7 @@ init -991 python in mas_submod_utils:
FB_VERS_STR = "0.0.0"
#Regular expression representing a valid author and name
- AN_REGEXP = re.compile(ur'^[ a-zA-Z_\u00a0-\ufffd][ 0-9a-zA-Z_\u00a0-\ufffd]*$')
+ AN_REGEXP = re.compile(r'^[ a-zA-Z_\u00a0-\ufffd][ 0-9a-zA-Z_\u00a0-\ufffd]*$')
def __init__(
self,
@@ -119,8 +119,8 @@ init -991 python in mas_submod_utils:
#Now we verify that the version number is something proper
try:
- map(int, version.split('.'))
- except:
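+ # NOTE: map() is lazy in Python 3; force it with tuple() so a bad component raises ValueError inside this try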
+ tuple(map(int, version.split('.')))
+ except ValueError:
raise SubmodError("Version number '{0}' is invalid.".format(version))
#Make sure author and name are proper label names
@@ -159,7 +159,7 @@ init -991 python in mas_submod_utils:
OUT:
List of integers representing the version number
"""
- return map(int, self.version.split('.'))
+ return list(map(int, self.version.split('.')))
def hasUpdated(self):
"""
@@ -177,7 +177,7 @@ init -991 python in mas_submod_utils:
return False
try:
- old_vers = map(int, old_vers.split('.'))
+ old_vers = list(map(int, old_vers.split('.')))
#Persist data was bad, we'll replace it with something safe and return False as we need not check more
except:
@@ -226,7 +226,7 @@ init -991 python in mas_submod_utils:
Checks if submods have updated and sets the appropriate update scripts for them to run
"""
#Iter thru all submods we've got stored
- for submod in submod_map.itervalues():
+ for submod in submod_map.values():
#If it has updated, we need to call their update scripts and adjust the version data value
if submod.hasUpdated():
submod.updateFrom(
@@ -257,10 +257,10 @@ init -991 python in mas_submod_utils:
NOTE: Does not handle errors as to get here, formats must be correct regardless
"""
- return map(int, version.split('.'))
+ return tuple(map(int, version.split('.')))
- for submod in submod_map.itervalues():
- for dependency, minmax_version_tuple in submod.dependencies.iteritems():
+ for submod in submod_map.values():
+ for dependency, minmax_version_tuple in submod.dependencies.items():
dependency_submod = Submod._getSubmod(dependency)
if dependency_submod is not None:
@@ -600,7 +600,7 @@ init -980 python in mas_submod_utils:
#First, we need to convert the functions into a list of tuples
func_list = [
(_function, data_tuple)
- for _function, data_tuple in function_plugins[_label].iteritems()
+ for _function, data_tuple in function_plugins[_label].items()
]
return sorted(func_list, key=PRIORITY_SORT_KEY)
@@ -653,5 +653,5 @@ init 999 python:
Populates a lookup dict for all label overrides which are in effect
"""
#Let's loop here to update our label overrides map
- for overridden_label, label_override in config.label_overrides.iteritems():
+ for overridden_label, label_override in config.label_overrides.items():
_OVERRIDE_LABEL_TO_BASE_LABEL_MAP[label_override] = overridden_label
diff --git a/Monika After Story/game/zz_threading.rpy b/Monika After Story/game/zz_threading.rpy
index 445d761e5f..56ab560834 100644
--- a/Monika After Story/game/zz_threading.rpy
+++ b/Monika After Story/game/zz_threading.rpy
@@ -42,7 +42,7 @@ init -2000 python in mas_threading:
def __init__(self,
async_fun,
- async_args=[],
+ async_args=[],  # FIXME: mutable obj as default
async_kwargs={}
):
"""
diff --git a/Monika After Story/game/zz_transforms.rpy b/Monika After Story/game/zz_transforms.rpy
index 864d03f1a3..8df9ee3add 100644
--- a/Monika After Story/game/zz_transforms.rpy
+++ b/Monika After Story/game/zz_transforms.rpy
@@ -213,3 +213,73 @@ transform streaming_tears_transform(open_eyes_img, closed_eyes_img):
closed_eyes_img
0.15
repeat
+
+transform tcommon(x=640, z=0.80):
+ yanchor 1.0 subpixel True
+ on show:
+ ypos 1.03
+ zoom z*0.95 alpha 0.00
+ xcenter x yoffset -20
+ easein .25 yoffset 0 zoom z*1.00 alpha 1.00
+ on replace:
+
+ alpha 1.00
+ parallel:
+ easein .25 xcenter x zoom z*1.00
+ parallel:
+ easein .15 yoffset 0 ypos 1.03
+
+transform tinstant(x=640, z=0.80):
+ xcenter x yoffset 0 zoom z*1.00 alpha 1.00 yanchor 1.0 ypos 1.03
+
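+# Placement transforms: tNM positions a sprite in slot M of an N-across layout (x values below),
+# while the matching iNM applies the same placement instantly, without the show/replace easing.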
+transform t41:
+ tcommon(200)
+transform t42:
+ tcommon(493)
+transform t43:
+ tcommon(786)
+transform t44:
+ tcommon(1080)
+transform t31:
+ tcommon(240)
+transform t32:
+ tcommon(640)
+transform t33:
+ tcommon(1040)
+transform t21:
+ tcommon(400)
+transform t22:
+ tcommon(880)
+transform t11:
+ tcommon(640)
+
+transform i41:
+ tinstant(200)
+transform i42:
+ tinstant(493)
+transform i43:
+ tinstant(786)
+transform i44:
+ tinstant(1080)
+transform i31:
+ tinstant(240)
+transform i32:
+ tinstant(640)
+transform i33:
+ tinstant(1040)
+transform i21:
+ tinstant(400)
+transform i22:
+ tinstant(880)
+transform i11:
+ tinstant(640)
+
+transform sticker_hop:
+ easein_quad .18 yoffset -80
+ easeout_quad .18 yoffset 0
+ easein_quad .18 yoffset -80
+ easeout_quad .18 yoffset 0
+
+transform sticker_move_n:
+ easein_quad .08 yoffset -15
+ easeout_quad .08 yoffset 0
diff --git a/Monika After Story/game/zz_weather.rpy b/Monika After Story/game/zz_weather.rpy
index 332826addb..9d72dc0c79 100644
--- a/Monika After Story/game/zz_weather.rpy
+++ b/Monika After Story/game/zz_weather.rpy
@@ -231,6 +231,7 @@ init -99 python in mas_weather:
RETURNS: the created image tag
"""
global old_weather_id
+
tag = old_weather_tag.format(old_weather_id)
store.renpy.image(tag, disp)
OLD_WEATHER_OBJ[old_weather_id] = tag
@@ -259,13 +260,13 @@ init -20 python in mas_weather:
RETURNS:
- True or false on whether or not to call spaceroom
"""
+ global weather_change_time
#If the player forced weather or we're not in a background that supports weather, we do nothing
if force_weather or store.mas_current_background.disable_progressive:
return False
#Otherwise we do stuff
- global weather_change_time
#Set a time for startup
if not weather_change_time:
# TODO: make this a function so init can set the weather_change _time and prevent double weather setting
@@ -316,7 +317,7 @@ init -20 python in mas_weather:
if store.persistent._mas_weather_MWdata is None:
return
- for mw_id, mw_data in store.persistent._mas_weather_MWdata.iteritems():
+ for mw_id, mw_data in store.persistent._mas_weather_MWdata.items():
mw_obj = WEATHER_MAP.get(mw_id, None)
if mw_obj is not None:
mw_obj.fromTuple(mw_data)
@@ -326,7 +327,7 @@ init -20 python in mas_weather:
"""
Saves MASWeather data from weather map into persistent
"""
- for mw_id, mw_obj in WEATHER_MAP.iteritems():
+ for mw_id, mw_obj in WEATHER_MAP.items():
store.persistent._mas_weather_MWdata[mw_id] = mw_obj.toTuple()
@@ -335,7 +336,7 @@ init -20 python in mas_weather:
Returns number of unlocked weather items
"""
count = 0
- for mw_id, mw_obj in WEATHER_MAP.iteritems():
+ for mw_id, mw_obj in WEATHER_MAP.items():
if mw_obj.unlocked:
count += 1
@@ -652,21 +653,6 @@ init -50 python:
"""
self.unlocked = data_tuple[0]
- @store.mas_utils.deprecated(use_instead="get_mask", should_raise=True)
- def sp_window(self, day):
- """DEPRECATED
- Use get_mask instead.
- This returns whatever get_mask returns.
- """
- return self.get_mask()
-
- @store.mas_utils.deprecated(should_raise=True)
- def isbg_window(self, day, no_frame):
- """DEPRECATED
- Islands are now separate images. See script-islands-event.
- """
- return ""
-
def toTuple(self):
"""
Converts this MASWeather object into a tuple
@@ -786,7 +772,7 @@ init -50 python:
ignored. Values should be MASWeatherMap objects.
"""
# validate MASWeatherMap objects
- for wmap in filter_pairs.itervalues():
+ for wmap in filter_pairs.values():
if not isinstance(wmap, MASWeatherMap):
raise TypeError(
"Expected MASWeatherMap object, not {0}".format(
@@ -1004,6 +990,7 @@ init 799 python:
_weather - weather to set to.
"""
global mas_current_weather
+
old_weather = mas_current_weather
mas_current_weather = _weather
mas_current_weather.entry(old_weather)
@@ -1142,7 +1129,7 @@ label monika_change_weather:
# build other weather list
other_weathers = [
(mw_obj.prompt, mw_obj, False, False)
- for mw_id, mw_obj in mas_weather.WEATHER_MAP.iteritems()
+ for mw_id, mw_obj in mas_weather.WEATHER_MAP.items()
if mw_id != "def" and mw_obj.unlocked
]
diff --git a/Monika After Story/game/zz_windowutils.rpy b/Monika After Story/game/zz_windowutils.rpy
index a9c4dbf2a3..dcfb472f22 100644
--- a/Monika After Story/game/zz_windowutils.rpy
+++ b/Monika After Story/game/zz_windowutils.rpy
@@ -43,9 +43,16 @@ init python in mas_windowutils:
from store import mas_utils
#The initial setup
+ ## Linux
# The window object, used on Linux systems, otherwise always None
MAS_WINDOW = None
+ ## Windows
+ # The notification manager
+ WIN_NOTIF_MANAGER = None
+ # Window handler
+ HWND = None
+
#We can only do this on windows
if renpy.windows:
#We need to extend the sys path to see our packages
@@ -54,22 +61,27 @@ init python in mas_windowutils:
#We try/catch/except to make sure the game can run if load fails here
try:
- #Going to import win32gui for use in destroying notifs
- import win32gui
- #Import win32api so we know if we can or cannot use notifs
- import win32api
-
- #Since importing the required libs was successful, we can move onto importing and initializing a balloontip
- import balloontip
-
- #And finally, import the internal functions to make getting window handle easier
- from win32gui import GetWindowText, GetForegroundWindow
+ import winnie32api
#Now we initialize the notification class
- __tip = balloontip.WindowsBalloonTip()
-
- #Now we set the hwnd of this temporarily
- __tip.hwnd = None
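+ # dismiss and left-click callbacks refocus the game window and stop the tray flash;
+ # right-click only stops the flash; all three clear() the manager afterwards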
+ WIN_NOTIF_MANAGER = winnie32api.NotifManager(
+ renpy.config.name,
+ os.path.join(renpy.config.gamedir, "mod_assets/mas_icon.ico"),
+ on_dismiss=lambda: (
+ focusMASWindow(),
+ _unflashMASWindow_Windows(),
+ WIN_NOTIF_MANAGER.clear()
+ ),
+ on_lmb_click=lambda: (
+ focusMASWindow(),
+ _unflashMASWindow_Windows(),
+ WIN_NOTIF_MANAGER.clear()
+ ),
+ on_rmb_click=lambda: (
+ _unflashMASWindow_Windows(),
+ WIN_NOTIF_MANAGER.clear()
+ )
+ )
except Exception as e:
#If we fail to import, then we're going to have to make sure nothing can run.
@@ -78,7 +90,7 @@ init python in mas_windowutils:
#Log this
store.mas_utils.mas_log.warning(
- "win32api/win32gui failed to be imported, disabling notifications: {}".format(e)
+ f"winnie32api failed to be imported, disabling notifications: {e}"
)
elif renpy.linux:
@@ -86,13 +98,13 @@ init python in mas_windowutils:
session_type = os.environ.get("XDG_SESSION_TYPE")
#Wayland is not supported, disable wrs
- if session_type == "wayland":
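+ # XDG_SESSION_TYPE may be unset, so also check WAYLAND_DISPLAY / DISPLAY to detect the session type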
+ if session_type in ("wayland", None) or os.environ.get("WAYLAND_DISPLAY"):
store.mas_windowreacts.can_show_notifs = False
store.mas_windowreacts.can_do_windowreacts = False
store.mas_utils.mas_log.warning("Wayland is not yet supported, disabling notifications.")
#X11 however is fine
- elif session_type == "x11":
+ elif session_type == "x11" or os.environ.get("DISPLAY"):
try:
import Xlib
@@ -107,7 +119,7 @@ init python in mas_windowutils:
store.mas_windowreacts.can_do_windowreacts = False
store.mas_utils.mas_log.warning(
- "Xlib failed to be imported, disabling notifications: {}".format(e)
+ f"Xlib failed to be imported, disabling notifications: {e}"
)
else:
@@ -120,24 +132,13 @@ init python in mas_windowutils:
store.mas_windowreacts.can_do_windowreacts = False
- class MASWindowFoundException(Exception):
- """
- Custom exception class to flag a window found during a window enum
-
- Has the hwnd as a property
- """
- def __init__(self, hwnd):
- self.hwnd = hwnd
-
- def __str__(self):
- return self.hwnd
-
#Fallback Const Defintion
DEF_MOUSE_POS_RETURN = (0, 0)
+
##Now, we start defining OS specific functions which we can set to a var for proper cross platform on a single func
#Firstly, the internal helper functions
- def __getActiveWindowObj_Linux():
+ def __getActiveWindow_Linux():
"""
Gets the active window object
@@ -164,7 +165,7 @@ init python in mas_windowutils:
mas_utils.mas_log.error("Failed to get active window object: {}".format(e))
return None
- def __getMASWindowLinux():
+ def __getMASWindow_Linux():
"""
Funtion to get the MAS window on Linux systems
@@ -199,36 +200,30 @@ init python in mas_windowutils:
mas_utils.mas_log.error("Failed to get MAS window object: {}".format(e))
return None
- def __getMASWindowHWND():
+ def __getMASWindowHWND_Windows() -> int|None:
"""
Gets the hWnd of the MAS window
- NOTE: Windows ONLY
-
OUT:
int - represents the hWnd of the MAS window
+ None - if we failed to get hwnd
"""
- #Verify we can actually do this before doing anything
- if not store.mas_windowreacts.can_do_windowreacts:
- return None
-
- def checkMASWindow(hwnd, lParam):
- """
- Internal function to identify the MAS window. Raises an exception when found to allow the main func to return
- """
- if store.mas_getWindowTitle() == win32gui.GetWindowText(hwnd):
- raise MASWindowFoundException(hwnd)
-
- try:
- win32gui.EnumWindows(checkMASWindow, None)
+ global HWND
- except MASWindowFoundException as e:
- return e.hwnd
+ #Verify we can actually do this before doing anything
+ if store.mas_windowreacts.can_do_windowreacts:
+ if HWND is None:
+ try:
+ HWND = winnie32api.get_hwnd_by_title(store.mas_getWindowTitle())
+ except Exception as e:
+ HWND = None
+ mas_utils.mas_log.error(f"Failed to get MAS window hwnd: {e}")
+ else:
+ HWND = None
- mas_utils.mas_log.error("Failed to get MAS window hwnd")
- return None
+ return HWND
- def __getAbsoluteGeometry(win):
+ def __getAbsoluteGeometry_Linux(win):
"""
Returns the (x, y, height, width) of a window relative to the top-left
of the screen.
@@ -242,7 +237,7 @@ init python in mas_windowutils:
#If win is None, then we should just return a None here
if win is None:
# This handles some odd issues with setting window on Linux
- win = _setMASWindow()
+ win = _setMASWindow_Linux()
if win is None:
return None
@@ -263,14 +258,14 @@ init python in mas_windowutils:
except Xlib.error.BadDrawable:
#In the case of a bad drawable, we'll try to re-get the MAS window to get a good one
- _setMASWindow()
+ _setMASWindow_Linux()
except XError as e:
- mas_utils.mas_log.error("Failed to get window geometry: {}".format(e))
+ mas_utils.mas_log.error(f"Failed to get window geometry: {e}")
return None
- def _setMASWindow():
+ def _setMASWindow_Linux():
"""
Sets the MAS_WINDOW global on Linux systems
@@ -280,7 +275,7 @@ init python in mas_windowutils:
global MAS_WINDOW
if renpy.linux:
- MAS_WINDOW = __getMASWindowLinux()
+ MAS_WINDOW = __getMASWindow_Linux()
else:
MAS_WINDOW = None
@@ -288,7 +283,7 @@ init python in mas_windowutils:
return MAS_WINDOW
#Next, the active window handle getters
- def _getActiveWindowHandle_Windows():
+ def _getActiveWindowHandle_Windows() -> str:
"""
Funtion to get the active window on Windows systems
@@ -297,9 +292,13 @@ init python in mas_windowutils:
ASSUMES: OS IS WINDOWS (renpy.windows)
"""
- return unicode(GetWindowText(GetForegroundWindow()))
+ try:
+ # winnie32api can return None
+ return winnie32api.get_active_window_title() or ""
+ except Exception:
+ return ""
- def _getActiveWindowHandle_Linux():
+ def _getActiveWindowHandle_Linux() -> str:
"""
Funtion to get the active window on Linux systems
@@ -309,7 +308,7 @@ init python in mas_windowutils:
ASSUMES: OS IS LINUX (renpy.linux)
"""
NET_WM_NAME = __display.intern_atom("_NET_WM_NAME")
- active_winobj = __getActiveWindowObj_Linux()
+ active_winobj = __getActiveWindow_Linux()
if active_winobj is None:
return ""
@@ -318,22 +317,17 @@ init python in mas_windowutils:
# Subsequent method calls might raise BadWindow exception if active_winid refers to nonexistent window.
active_winname_prop = active_winobj.get_full_property(NET_WM_NAME, 0)
+ # TODO: consider logging if this is None, also catch a more generic exception just in case
if active_winname_prop is not None:
active_winname = unicode(active_winname_prop.value, encoding = "utf-8")
return active_winname.replace("\n", "")
- else:
- return ""
-
- except BadWindow:
- return ""
-
- except XError as e:
- mas_utils.mas_log.error("Failed to get active window handle: {}".format(e))
+ except (XError, BadWindow) as e:
+ mas_utils.mas_log.error(f"Failed to get active window handle: {e}")
return ""
- def _getActiveWindowHandle_OSX():
+ def _getActiveWindowHandle_OSX() -> str:
"""
Gets the active window on macOS
@@ -342,6 +336,55 @@ init python in mas_windowutils:
"""
return ""
+ def _flashMASWindow_Windows():
+ """
+ Tries to flash MAS window
+ """
+ hwnd = __getMASWindowHWND_Windows()
+ if hwnd:
+ winnie32api.flash_window(
+ hwnd,
+ count=None,
+ caption=False,
+ tray=True
+ )
+
+ def _unflashMASWindow_Windows():
+ """
+ Tries to stop flashing MAS window
+ """
+ hwnd = __getMASWindowHWND_Windows()
+ if hwnd:
+ winnie32api.unflash_window(hwnd)
+
+ def _flashMASWindow_Linux():
+ """
+ Tries to flash MAS window
+ """
+
+ def _flashMASWindow_OSX():
+ """
+ Tries to flash MAS window
+ """
+
+ def _focusMASWindow_Windows():
+ """
+ Tries to set focus on MAS window
+ """
+ hwnd = __getMASWindowHWND_Windows()
+ if hwnd:
+ winnie32api.set_active_window(hwnd)
+
+ def _focusMASWindow_Linux():
+ """
+ Tries to set focus on MAS window
+ """
+
+ def _focusMASWindow_OSX():
+ """
+ Tries to set focus on MAS window
+ """
+
#Notif show internals
def _tryShowNotification_Windows(title, body):
"""
@@ -355,12 +398,10 @@ init python in mas_windowutils:
OUT:
bool. True if the notification was successfully sent, False otherwise
"""
- # The Windows way, notif_success is adjusted if need be
- notif_success = __tip.showWindow(title, body)
-
- #We need the IDs of the notifs to delete them from the tray
- store.destroy_list.append(__tip.hwnd)
- return notif_success
+ try:
+ return WIN_NOTIF_MANAGER.send(title, body)
+ except Exception:
+ return False
def _tryShowNotification_Linux(title, body):
"""
@@ -408,7 +449,8 @@ init python in mas_windowutils:
if store.mas_windowreacts.can_do_windowreacts:
#Try except here because we may not have permissions to do so
try:
- cur_pos = win32gui.GetCursorPos()
+ cur_pos = tuple(winnie32api.get_screen_mouse_pos())
+
except Exception:
cur_pos = DEF_MOUSE_POS_RETURN
@@ -432,19 +474,22 @@ init python in mas_windowutils:
OUT:
tuple representing window geometry or None if the window's hWnd could not be found
"""
- hwnd = __getMASWindowHWND()
+ hwnd = __getMASWindowHWND_Windows()
if hwnd is None:
return None
- rv = win32gui.GetWindowRect(hwnd)
+ try:
+ rect = winnie32api.get_window_rect(hwnd)
+ except Exception:
+ return None
- # win32gui may return incorrect geometry (-32k seems to be the limit),
+ # Windows may return incorrect geometry (-32k seems to be the limit),
# in this case we return None
- if rv[0] <= -32000 and rv[1] <= -32000:
+ if rect.top_left.x <= -32000 and rect.top_left.y <= -32000:
return None
- return rv
+ return (rect.top_left.x, rect.top_left.y, rect.bottom_right.x, rect.bottom_right.y)
def _getMASWindowPos_Linux():
"""
@@ -453,7 +498,7 @@ init python in mas_windowutils:
OUT:
tuple representing (left, top, right, bottom) of the window bounds, or None if not possible to get
"""
- geom = __getAbsoluteGeometry(MAS_WINDOW)
+ geom = __getAbsoluteGeometry_Linux(MAS_WINDOW)
if geom is not None:
return (
@@ -579,28 +624,33 @@ init python in mas_windowutils:
_tryShowNotif = _tryShowNotification_Windows
getMASWindowPos = _getMASWindowPos_Windows
getMousePos = _getAbsoluteMousePos_Windows
+ flashMASWindow = _flashMASWindow_Windows
+ focusMASWindow = _focusMASWindow_Windows
- else:
- if renpy.linux:
- _window_get = _getActiveWindowHandle_Linux
- _tryShowNotif = _tryShowNotification_Linux
- getMASWindowPos = _getMASWindowPos_Linux
- getMousePos = _getAbsoluteMousePos_Linux
-
- else:
- _window_get = _getActiveWindowHandle_OSX
- _tryShowNotif = _tryShowNotification_OSX
-
- #Because we have no method of testing on Mac, we'll use the dummy function for these
- getMASWindowPos = store.dummy
- getMousePos = store.dummy
+ elif renpy.linux:
+ _window_get = _getActiveWindowHandle_Linux
+ _tryShowNotif = _tryShowNotification_Linux
+ getMASWindowPos = _getMASWindowPos_Linux
+ getMousePos = _getAbsoluteMousePos_Linux
+ flashMASWindow = _flashMASWindow_Linux
+ focusMASWindow = _focusMASWindow_Linux
- #Now make sure we don't use these functions so long as we can't validate Mac
- isCursorAboveMASWindow = return_false
- isCursorBelowMASWindow = return_false
- isCursorLeftOfMASWindow = return_false
- isCursorRightOfMASWindow = return_false
- isCursorInMASWindow = return_true
+ else:
+ _window_get = _getActiveWindowHandle_OSX
+ _tryShowNotif = _tryShowNotification_OSX
+ flashMASWindow = _flashMASWindow_OSX
+ focusMASWindow = _focusMASWindow_OSX
+
+ #Because we have no method of testing on Mac, we'll use the dummy function for these
+ getMASWindowPos = store.dummy
+ getMousePos = store.dummy
+
+ #Now make sure we don't use these functions so long as we can't validate Mac
+ # isCursorAboveMASWindow = return_false
+ # isCursorBelowMASWindow = return_false
+ # isCursorLeftOfMASWindow = return_false
+ # isCursorRightOfMASWindow = return_false
+ # isCursorInMASWindow = return_true
init python:
#List of notif quips (used for topic alerts)
@@ -623,8 +673,6 @@ init python:
"Do you have a minute, [player]?",
]
- #List of hwnd IDs to destroy
- destroy_list = list()
#START: Utility methods
def mas_canCheckActiveWindow():
@@ -649,7 +697,13 @@ init python:
return store.mas_windowutils._window_get()
return ""
- def mas_display_notif(title, body, group=None, skip_checks=False):
+ def mas_display_notif(
+ title: str,
+ body: list[str],
+ group: str|None = None,
+ skip_checks: bool = False,
+ flash_window: bool = False
+ ) -> bool:
"""
Notification creation method
@@ -660,6 +714,8 @@ init python:
(Default: None)
skip_checks - Whether or not we skips checks
(Default: False)
+ flash_window - whether to flash the MAS window (tray icon) when the notification is shown
+ (Default: False)
+
OUT:
bool indicating status (notif shown or not (by check))
@@ -671,11 +727,12 @@ init python:
4. And if the notification group is enabled
OR if we skip checks. BUT this should only be used for introductory or testing purposes.
"""
-
#First we want to create this location in the dict, but don't add an extra location if we're skipping checks
if persistent._mas_windowreacts_notif_filters.get(group) is None and not skip_checks:
persistent._mas_windowreacts_notif_filters[group] = False
+ notif_success = False
+
if (
skip_checks
or (
@@ -689,17 +746,17 @@ init python:
renpy.substitute(title),
renpy.substitute(renpy.random.choice(body))
)
+ if notif_success:
+ # Flash the window if needed
+ if flash_window:
+ mas_windowutils.flashMASWindow()
- #Play the notif sound if we have that enabled and notif was successful
- if persistent._mas_notification_sounds and notif_success:
- renpy.sound.play("mod_assets/sounds/effects/notif.wav")
-
- #Now we return true if notif was successful, false otherwise
- return notif_success
- return False
+ #Play the notif sound if we have that enabled and notif was successful
+ if persistent._mas_notification_sounds:
+ renpy.sound.play("mod_assets/sounds/effects/notif.wav")
- #TODO: Remove this at some point | Alias for depreciation
- display_notif = mas_display_notif
+ #Now we return true if notif was successful, false otherwise
+ return notif_success
def mas_isFocused():
"""
@@ -735,10 +792,8 @@ init python:
"""
Clears all tray icons (also action center on win10)
"""
- if renpy.windows and store.mas_windowreacts.can_show_notifs:
- for index in range(len(destroy_list)-1,-1,-1):
- store.mas_windowutils.win32gui.DestroyWindow(destroy_list[index])
- destroy_list.pop(index)
+ if renpy.windows:
+ mas_windowutils.WIN_NOTIF_MANAGER.clear()
def mas_checkForWindowReacts():
"""
@@ -749,7 +804,7 @@ init python:
return
active_window_handle = mas_getActiveWindowHandle()
- for ev_label, ev in mas_windowreacts.windowreact_db.iteritems():
+ for ev_label, ev in mas_windowreacts.windowreact_db.items():
if (
Event._filterEvent(ev, unlocked=True, aff=store.mas_curr_affection)
and ev.checkConditional()
@@ -770,7 +825,7 @@ init python:
IN:
List of ev_labels to exclude from being unlocked
"""
- for ev_label, ev in mas_windowreacts.windowreact_db.iteritems():
+ for ev_label, ev in mas_windowreacts.windowreact_db.items():
if ev_label not in excluded:
ev.unlocked=True
@@ -829,4 +884,3 @@ init python:
ASSUMES: renpy.windows
"""
store.mas_clearNotifs()
- store.mas_windowutils.win32gui.UnregisterClass(__tip.classAtom, __tip.hinst)
diff --git a/Monika After Story/project.json b/Monika After Story/project.json
index 54dc1b197c..94ce277ad3 100644
--- a/Monika After Story/project.json
+++ b/Monika After Story/project.json
@@ -1 +1 @@
-{"build_update": true, "packages": ["DDLC Mod File", "DDLCMod", "", "source", "Mod"], "add_from": true, "force_recompile": true, "renamed_all": true}
\ No newline at end of file
+{"build_update": false, "packages": ["DDLC Mod File", "DDLCMod", "", "source", "market"], "add_from": false, "force_recompile": true, "renamed_all": true, "android_build": "Release", "renamed_steam": true}
\ No newline at end of file
diff --git a/Monika After Story/update/current.json b/Monika After Story/update/current.json
deleted file mode 100644
index be69754bc0..0000000000
--- a/Monika After Story/update/current.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
- "Mod": {
- "files": [
- "CustomIconMac.icns",
- "CustomIconWindows.ico",
- "README.html",
- "game/definitions.rpyc",
- "game/event-handler.rpyc",
- "game/import_ddlc.rpyc",
- "game/main_menu.rpyc",
- "game/mod_assets/Hit.wav",
- "game/mod_assets/Swipe.wav",
- "game/mod_assets/blue_sky.jpg",
- "game/mod_assets/chara9.png",
- "game/mod_assets/chara_exception.png",
- "game/mod_assets/hkb_disabled_background.png",
- "game/mod_assets/hkb_hover_background.png",
- "game/mod_assets/hkb_idle_background.png",
- "game/mod_assets/menu_new.png",
- "game/mod_assets/monika/1-night.png",
- "game/mod_assets/monika/1.png",
- "game/mod_assets/monika_day_bg_eq.png",
- "game/mod_assets/monika_day_room.png",
- "game/mod_assets/mus_zzz_c2.ogg",
- "game/mod_assets/music_menu.png",
- "game/mod_assets/pong.png",
- "game/mod_assets/pong_ball.png",
- "game/mod_assets/pong_beep.wav",
- "game/mod_assets/pong_boop.wav",
- "game/mod_assets/pong_field.png",
- "game/mod_assets/spr_slice_o_0.png",
- "game/mod_assets/spr_slice_o_1.png",
- "game/mod_assets/spr_slice_o_2.png",
- "game/mod_assets/spr_slice_o_3.png",
- "game/mod_assets/spr_slice_o_4.png",
- "game/mod_assets/spr_slice_o_5.png",
- "game/mod_assets/window_1.webm",
- "game/mod_assets/window_2.webm",
- "game/options.rpyc",
- "game/overrides.rpyc",
- "game/pong.rpyc",
- "game/python-packages/eliza.py",
- "game/python-packages/singleton.py",
- "game/screens.rpyc",
- "game/script-anniversary.rpyc",
- "game/script-ch30.rpyc",
- "game/script-greetings.rpyc",
- "game/script-introduction.rpyc",
- "game/script-story-events.rpyc",
- "game/script-topics.rpyc",
- "game/script.rpyc",
- "game/shake.rpyc",
- "game/splash.rpyc",
- "game/sprite-chart.rpyc",
- "game/updates.rpyc",
- "game/updates_topics.rpyc",
- "game/zz_hotkey_buttons.rpyc",
- "game/zz_music_selector.rpyc"
- ],
- "directories": [
- "game/mod_assets",
- "game/mod_assets/monika"
- ],
- "version": "34e3aae60979027e385277494ff55ea0caea5edb33fa425d310d9d24a78f9462",
- "xbit": []
- }
-}
diff --git a/testcases/Backgrounds/game/dev/test_bwswap.rpy b/testcases/Backgrounds/game/dev/test_bwswap.rpy
index e2d5ebf25e..3d418ed23a 100644
--- a/testcases/Backgrounds/game/dev/test_bwswap.rpy
+++ b/testcases/Backgrounds/game/dev/test_bwswap.rpy
@@ -33,7 +33,7 @@ label dev_bgsel_loop:
# build other backgrounds list
other_backgrounds = [
(mbg_obj.prompt, mbg_obj, False, False)
- for mbg_id, mbg_obj in mas_background.BACKGROUND_MAP.iteritems()
+ for mbg_id, mbg_obj in mas_background.BACKGROUND_MAP.items()
if mbg_id != "spaceroom"
]
@@ -112,4 +112,4 @@ image test_bgroom_snow = "mod_assets/location/test/test-snow.png"
image test_bgroom_def_night = "mod_assets/location/test/test-def-n.png"
image test_bgroom_overcast_night = "mod_assets/location/test/test-overcast-n.png"
image test_bgroom_rain_night = "mod_assets/location/test/test-rain-n.png"
-image test_bgroom_snow_night = "mod_assets/location/test/test-snow-n.png"
\ No newline at end of file
+image test_bgroom_snow_night = "mod_assets/location/test/test-snow-n.png"
diff --git a/tools/gamedir.py b/tools/gamedir.py
index e44479845b..bdf802d5f1 100644
--- a/tools/gamedir.py
+++ b/tools/gamedir.py
@@ -1,6 +1,6 @@
# module containing constants about game directory
-#
-# VER: py27
+#
+# VER: py39
REL_PATH_GAME = "../Monika After Story/game/"
REL_PATH_DEV = "../Monika After Story/game/dev/"
diff --git a/tools/ghactions.py b/tools/ghactions.py
index b43fd1e1eb..493388f711 100644
--- a/tools/ghactions.py
+++ b/tools/ghactions.py
@@ -1,9 +1,5 @@
## this is for travis to run
-# set to True if we are checking sprites for dynamically generated sprites
-# False will be standard sprite check behavior
-is_dynamic = True
-
import gamedir as GDIR
GDIR.REL_PATH_GAME = "Monika After Story/game/"
@@ -18,7 +14,7 @@
#spm.run_gss(sprite_db, sprite_db_keys, quiet=True)
# now check sprites
-bad_codes = spc.check_sprites(False, is_dynamic)
+bad_codes: list[spc.SpriteMismatch] = spc.check_sprites(False)
if len(bad_codes) > 0:
for bad_code in bad_codes:
@@ -30,12 +26,7 @@
)
)
- if is_dynamic:
- raise Exception(
- "Invalid sprites found. Run sprite checker manually "
- "to find invalid sprites."
- )
- else:
- raise Exception(
- "Invalid sprites found. **Did you forget to generate sprites?**"
- )
+ raise Exception(
+ "Invalid sprites found. Run sprite checker manually "
+ "to find invalid sprites."
+ )
diff --git a/tools/menutils.py b/tools/menutils.py
index 6e434d8186..9179a1ba23 100644
--- a/tools/menutils.py
+++ b/tools/menutils.py
@@ -15,6 +15,7 @@
import os
import platform
+from typing import Any
HEADER = """\n\n\
#=============================================================================#
@@ -44,7 +45,7 @@
__QUIT = "q"
-def menu(menu_opts, defindex=None):
+def menu(menu_opts: list[tuple[str, Any]], defindex=None) -> Any | None:
"""
Generates a menu and returns the desired menu action
@@ -76,7 +77,7 @@ def menu(menu_opts, defindex=None):
defval = None
footer = MENU_END.format("[0]")
- except:
+ except Exception:
# if we failed, None everything so we dont do foolish things later
defindex = None
defval = None
@@ -111,7 +112,7 @@ def menu(menu_opts, defindex=None):
print(footer + "\n")
# and then prompt!
- user_input = raw_input(prompt)
+ user_input = input(prompt)
# NOTE: if blank, we just return the default value
if len(user_input) <= 0:
@@ -125,11 +126,11 @@ def menu(menu_opts, defindex=None):
# and user input is valid, return the result
return menu_opts[user_input][1]
- elif user_input == 0:
+ if user_input == 0:
# user wants to go back
return None
- except:
+ except Exception:
# bad user input
pass
@@ -194,7 +195,7 @@ def restrict(page_value):
print(PAGE_ENTRY.format(str_func(item)))
# action string and user input
- user_input = raw_input(PAGE_BAR.join(action_bar)).lower()
+ user_input = input(PAGE_BAR.join(action_bar)).lower()
# process user input
if user_input == __QUIT:
@@ -209,11 +210,11 @@ def restrict(page_value):
page += 1
elif user_input == __GOTO:
- page_input = raw_input(PAGE_GOTO_PROMPT.format(last_page+1))
+ page_input = input(PAGE_GOTO_PROMPT.format(last_page+1))
try:
page = int(page_input)-1
- except:
+ except Exception:
# bad page input
pass
@@ -246,7 +247,7 @@ def ask(question, def_no=True):
yes = "Y"
no = "n"
- choice = raw_input("{0}? ({1}/{2}): ".format(question, yes, no)).lower()
+ choice = input("{0}? ({1}/{2}): ".format(question, yes, no)).lower()
# check default
if len(choice) <= 0:
@@ -270,7 +271,7 @@ def e_pause():
"""
Generic enter to continue
"""
- abc = raw_input("\n\n (Press Enter to continue)")
+ abc = input("\n\n (Press Enter to continue)")
def header(title):
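For reference, the contract menu() keeps through the py2-to-py3 conversion: entry 0 is the (header, prompt) pair, each later entry maps a displayed label to the value returned on selection, blank input returns the defindex option, and 0 (back) returns None. A small example, runnable from the tools directory:

import menutils

opts = [
    ("Example Menu", "Option: "),  # (header title, input prompt)
    ("Say hello", "hello"),        # (label shown, value returned)
    ("Say bye", "bye"),
]

choice = menutils.menu(opts, defindex=1)  # pressing Enter picks "Say hello"
if choice is None:
    print("user backed out")
else:
    print("picked:", choice)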
diff --git a/tools/renpy_lint_parser.py b/tools/renpy_lint_parser.py
index 4d235ec30a..c826bd2132 100644
--- a/tools/renpy_lint_parser.py
+++ b/tools/renpy_lint_parser.py
@@ -1,14 +1,15 @@
-# parses renpy_output and removes missing dynamic image lines.
+# parses renpy_output and removes missing dynamic image lines.
# (and other known things)
import re
-import os
+# import os
+import sys
# regex parsing
IMG_NOT_FOUND = re.compile(
- "\w+/(\w+/)*.+\.rpy:\d+ (Could not find image \(monika |The image named 'monika )(\d\w\w\w+|\d\w|1|5|4|g1|g2)"
+ r"\w+/(\w+/)*.+\.rpy:\d+ (Could not find image \(monika |The image named 'monika )(\d\w\w\w+|\d\w|1|5|4|g1|g2)"
)
# file load
@@ -17,21 +18,19 @@
OUT_FILENAME = "renpy_output_clean"
# load files
-INFILE = open(IN_FILENAME, "r")
-OUTFILE = open(OUT_FILENAME, "w")
-
-if not INFILE or not OUTFILE:
- print("file load failed")
- exit(1)
-
-# loop and clean
-for line in INFILE:
- if (
- len(line.strip()) > 0
- and not IMG_NOT_FOUND.match(line)
- ):
- OUTFILE.write(line)
-
-INFILE.close()
-OUTFILE.close()
-exit(0)
+try:
+ with open(IN_FILENAME, "r") as infile, open(OUT_FILENAME, "w") as outfile:
+ # loop and clean
+ for line in infile:
+ if (
+ len(line.strip()) > 0
+ and not IMG_NOT_FOUND.match(line)
+ ):
+ outfile.write(line)
+
+except Exception as e:
+ print(f"File load failed: {e}")
+ sys.exit(1)
+
+else:
+ sys.exit(0)
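The rewritten parser keeps the same filter: any lint line matching IMG_NOT_FOUND is treated as an expected dynamic-sprite miss and dropped from the clean output. A quick self-contained check of the kind of line that regex catches (the path and line number below are made up):

import re

IMG_NOT_FOUND = re.compile(
    r"\w+/(\w+/)*.+\.rpy:\d+ (Could not find image \(monika |The image named 'monika )(\d\w\w\w+|\d\w|1|5|4|g1|g2)"
)

sample = "game/script-topics.rpy:123 Could not find image (monika 1eua"
print(bool(IMG_NOT_FOUND.match(sample)))  # True -> the line is dropped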
diff --git a/tools/sprite.py b/tools/sprite.py
index 27051bd4be..a70b97bfbc 100644
--- a/tools/sprite.py
+++ b/tools/sprite.py
@@ -7,7 +7,7 @@
draw_function = DRAW_MONIKA_IM
-class StaticSprite(object):
+class StaticSprite:
"""
A static sprite is a sprite that knows its sprite codes and more
diff --git a/tools/spritechecker.py b/tools/spritechecker.py
index 7cec6d667f..5da9d3d081 100644
--- a/tools/spritechecker.py
+++ b/tools/spritechecker.py
@@ -3,16 +3,17 @@
# this will NOT catch issues with non-standard code usage
# TODO: add special functions for non-standard usages
#
-# VER: py27
+# VER: py3.x
import os
-import spritepuller as spp
import gamedir as GDIR
import menutils
from collections import namedtuple
+from dataclasses import dataclass
from sprite import StaticSprite
+from spritepuller import pull_sprite_list
# every line of applicable dialogue starts with m and a space
DLG_START = "m "
@@ -37,14 +38,14 @@
BAD_CODE_FN = "zzzz_badcodes.txt"
BAD_CODE_LN = "{1} - FILE:{2} [{0}]"
-## namedtuple used to represent sprite codes not found
-SpriteMismatch = namedtuple(
- "SpriteMismatch",
- "code line filename"
-)
+@dataclass
+class SpriteMismatch:
+ code: str
+ line: int
+ filename: str
-def check_sprites(inc_dev=False, dynamic=False):
+def check_sprites(inc_dev=False) -> list[SpriteMismatch]:
"""
Goes through every rpy file and checks dialogue and show lines.
@@ -53,77 +54,67 @@ def check_sprites(inc_dev=False, dynamic=False):
IN:
inc_dev - if True, we will check dev files as well
(Default: False)
- dynamic - True means some sprites are generated dynamically
- (Default: False)
RETURNS:
list of SpriteMismatch's
"""
# sprite dict so we can compare to this
# we want a dict for O(1) lookups
- sp_dict = spp.pull_sprite_list(as_dict=True)
+ sp_dict: dict[str, StaticSprite] = pull_sprite_list(as_dict=True)
# get all the rpys we want to adjust
rpys = get_rpy_paths(inc_dev=inc_dev)
# go through each rpy and get sprite mms
- bad_codes = list()
+ bad_codes = list[SpriteMismatch]()
for rpy in rpys:
- bad_codes.extend(check_file(rpy, sp_dict, dynamic))
+ bad_codes.extend(check_file(rpy, sp_dict))
return bad_codes
-def check_file(fpath, sp_dict, gen_if_missing):
+def check_file(fpath: str, sp_dict: dict[str, StaticSprite]) -> list[SpriteMismatch]:
"""
Checks the given file for sprite code correctness
IN:
fpath - filepath of the file to check
sp_dict - dict of currently available sprite codes
- gen_if_missing - True will attempt to generate the sprite if it is
- missing.
RETURNS:
list of SpriteMismatches, one for every sprite code that was bad
"""
- sp_mismatches = list()
+ sp_mismatches = list[SpriteMismatch]()
ln_count = 1
- #Loadd spritemap data
+ #Load spritemap data
StaticSprite._loadSpriteMapData()
- with open(fpath, "r") as rpy_file:
+ with open(fpath, "r", encoding='utf-8') as rpy_file:
for line in rpy_file:
_code = try_extract_code(line.strip())
if _code and _code not in sp_dict:
# we have a code but its not in the dict?!
- if gen_if_missing:
- # attempt to generate if possible
-
- gen_spr = StaticSprite(_code)
-
- if gen_spr.invalid:
- sp_mismatches.append(
- SpriteMismatch(_code, ln_count, fpath)
- )
+ #let's generate it since sprites are also dynamically generated
+ gen_spr = StaticSprite(_code)
- else:
- sp_dict[_code] = gen_spr
-
- else:
+ if gen_spr.invalid:
sp_mismatches.append(
SpriteMismatch(_code, ln_count, fpath)
)
+ else:
+ sp_dict[_code] = gen_spr
+
+
ln_count += 1
return sp_mismatches
-def extract_dlg_code(line):
+def extract_dlg_code(line: str) -> str:
"""
Extracts the sprite code from the given line
Assumes the line is a dlg line or extend line
@@ -137,7 +128,7 @@ def extract_dlg_code(line):
return line.split(" ")[1]
-def extract_code_if_dlg(line):
+def extract_code_if_dlg(line: str) -> str | None:
"""
Does both checking and extraction of a code from a potential dialogue
line
@@ -161,7 +152,7 @@ def extract_code_if_dlg(line):
return None
-def extract_shw_code(line):
+def extract_shw_code(line: str) -> str:
"""
Extracts the sprite code from the given line
Assumes the line is a show line
@@ -175,7 +166,7 @@ def extract_shw_code(line):
return line.split(" ")[2]
-def extract_code_if_shw(line):
+def extract_code_if_shw(line: str) -> str | None:
"""
Does both checking and extraction of a code from a potential show line
@@ -198,7 +189,7 @@ def extract_code_if_shw(line):
return None
-def extract_code_if_ext(line):
+def extract_code_if_ext(line: str) -> str | None:
"""
Does both checking and extraction of a code from a potential extend line
@@ -220,7 +211,7 @@ def extract_code_if_ext(line):
return None
-def get_rpy_paths(inc_dev=False):
+def get_rpy_paths(inc_dev=False) -> list[str]:
"""
Gets a list of all filepaths in the game dir that are rpy files.
Non-recursive
@@ -255,7 +246,7 @@ def get_rpy_paths(inc_dev=False):
return fp_list
-def try_extract_code(cl_line):
+def try_extract_code(cl_line: str) -> str | None:
"""
Attempts to extract a code from a line, using all known ways
@@ -279,7 +270,7 @@ def try_extract_code(cl_line):
return _code
-def write_bad_codes(bad_list):
+def write_bad_codes(bad_list: list[SpriteMismatch]) -> None:
"""
Writes out the bad codes to file
@@ -299,7 +290,7 @@ def write_bad_codes(bad_list):
############## special run methods ##################################
-def run():
+def run() -> None:
"""
Runs this module (menu-related)
"""
@@ -307,7 +298,7 @@ def run():
run_chk(False)
-def run_chk(quiet=False, inc_dev=False, use_dyn=False):
+def run_chk(quiet=False, inc_dev=False) -> None:
"""
Main sprite checker workflow
@@ -321,12 +312,7 @@ def run_chk(quiet=False, inc_dev=False, use_dyn=False):
if inc_dev is None:
return
- use_dyn = menutils.menu(menu_are_dyn, 1)
-
- if use_dyn is None:
- return
-
- bad_codes = check_sprites(inc_dev=inc_dev, dynamic=use_dyn)
+ bad_codes = check_sprites(inc_dev=inc_dev)
if len(bad_codes) == 0:
# no bad codes
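With the dynamic flag gone, every unknown code now takes the generate-then-validate path: build a StaticSprite for it, cache it if valid, otherwise record a SpriteMismatch. A self-contained stand-in of that flow (try_generate plays the role of StaticSprite construction plus the .invalid check):

from dataclasses import dataclass

@dataclass
class SpriteMismatch:
    code: str
    line: int
    filename: str

def check_codes(codes, known, try_generate, filename="example.rpy"):
    """Stand-in for check_file(): unknown codes are generated when possible,
    otherwise recorded as mismatches."""
    bad = []
    for line_no, code in enumerate(codes, start=1):
        if code and code not in known:
            if try_generate(code):
                known[code] = object()   # cache it, like sp_dict[_code] = gen_spr
            else:
                bad.append(SpriteMismatch(code, line_no, filename))
    return bad

# "zzz" cannot be generated in this toy run, so it is the only mismatch reported
print(check_codes(["1eua", "zzz"], {"1eua": object()}, lambda c: c != "zzz"))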
diff --git a/tools/spritemaker.py b/tools/spritemaker.py
deleted file mode 100644
index 5219df559b..0000000000
--- a/tools/spritemaker.py
+++ /dev/null
@@ -1,1117 +0,0 @@
-# makes sprites
-# can also load sprites
-
-from __future__ import print_function
-
-import os
-import gamedir as GDIR
-import menutils
-
-import spritepuller as spull
-
-import sprite as spr_module
-
-from sprite import StaticSprite
-
-# state vars
-
-_need_to_gen_sprites = False
-
-# classes
-
-class SortedKeySpriteDBIter(object):
- """
- Iterator over a sprite db. This iterates so that the StaticSprites are
- in key order (aka from the given list of keys)
- """
-
- def __init__(self, sprite_db, sprite_db_keys):
- """
- Constructor for this iterator
- """
- self.index = -1
- self.sprite_db = sprite_db
- self.sprite_db_keys = sprite_db_keys
-
- # create an empty filter sprite so we dont have crashes during
- # iteration
- self.__default_fs = FilterSprite()
- self.__default_fs.invalid = True
-
- def __iter__(self):
- return self
-
- def next(self):
- """
- returns next iteration item
- """
- if self.index < len(self.sprite_db_keys)-1:
- self.index += 1
- return self.sprite_db.get(
- self.sprite_db_keys[self.index],
- self.__default_fs
- )
-
- # otherwise dnoe iterationg
- raise StopIteration
-
-
-class FilterSprite(StaticSprite):
- """
- A Filter sprite is a version of static sprite used for filtering
- other static sprites.
-
- The primary difference is that any of the initial properties can be
- None
- """
- POS = "position"
- EYE = "eyes"
- EYB = "eyebrows"
- NSE = "nose"
- BLH = "blush"
- TRS = "tears"
- SWD = "sweat"
- EMO = "emote"
- MTH = "mouth"
-
- CLEAR = "CLEAR"
- OPTIONAL = "OPTIONAL"
-
- def __init__(self):
- """
- Constructor
- """
- self._init_props()
-
- # setup filter map
- self.__filter_set_map = {
- self.POS: self.__flt_set_pos,
- self.EYE: self.__flt_set_eye,
- self.EYB: self.__flt_set_eyb,
- self.NSE: self.__flt_set_nse,
- self.BLH: self.__flt_set_blh,
- self.TRS: self.__flt_set_trs,
- self.SWD: self.__flt_set_swd,
- self.EMO: self.__flt_set_emo,
- self.MTH: self.__flt_set_mth,
- }
-
- # setup eq map
- self.__filter_eq_map = {
- self.POS: self.__flt_eq_pos,
- self.EYE: self.__flt_eq_eye,
- self.EYB: self.__flt_eq_eyb,
- self.NSE: self.__flt_eq_nse,
- self.BLH: self.__flt_eq_blh,
- self.TRS: self.__flt_eq_trs,
- self.SWD: self.__flt_eq_swd,
- self.EMO: self.__flt_eq_emo,
- self.MTH: self.__flt_eq_mth,
- }
-
- self._flt_fmt = "{: <12}"
-
- self.menu_flt_set = [
- ("Set Filters", "Filter: "),
- (self.POS.title(), self.POS),
- (self.EYE.title(), self.EYE),
- (self.EYB.title(), self.EYB),
- (self.NSE.title(), self.NSE),
- (self.BLH.title(), self.BLH),
- (self.TRS.title(), self.TRS),
- (self.SWD.title(), self.SWD),
- (self.EMO.title(), self.EMO),
- (self.MTH.title(), self.MTH),
- ]
-
- def __str__(self):
- """
- The string representation of this is a neat thing showing the status
- of each filter
- """
- return self._status(True, "Filter Settings", True, True)
-
- def filter(self, otherStaticSprite):
- """
- Checks if the given static sprite passes the filter for this one
- :param otherStaticSprite: the Static sprite object to compare to
- :returns: True if this sprite passes the filter, False if not
- """
- if otherStaticSprite.invalid:
- return False
-
- for flt in self.__filter_eq_map.itervalues():
- if not flt(otherStaticSprite):
- return False
-
- return True
-
- def set_filter(self, category, code):
- """
- Sets a filter point
- :param category: the filter key to set
- :param code: the code to lookup
- """
- flt_setter = self.__filter_set_map.get(category, None)
- if flt_setter is not None:
- flt_setter(code)
-
- @staticmethod
- def build_menu(category):
- """
- Builds a menu based on the given category
- :param category: one of the class constants
- :returns: menu list usable by menutils. May return None if could not
- build list
- """
- menu = FilterSprite._build_menu(category)
- if menu is None:
- return None
-
- # add title
- menu.insert(0, (category.title() + " Codes", "Code: "))
-
- # append an option to clear the filter
- menu.append(("Clear Filter", FilterSprite.CLEAR))
-
- return menu
-
- @staticmethod
- def build_selection_menu(category, optional=False, headeradd=""):
- """
- Builds a seleciton menu based on the given cateogory
- :param category: one of the class constants
- :param optional: True will add an optional option, basically skips
- setting this.
- :param headeradd: add text here to be appended to the header
- :returns: menu list usable by menutils. May return None if could not
- build list
- """
- menu = FilterSprite._build_menu(category)
- if menu is None:
- return None
-
- # add title part
- menu.insert(
- 0,
- ("Select " + category.title() + headeradd, "Option: ")
- )
-
- # add optional
- if optional:
- menu.append(("No " + category.title(), FilterSprite.OPTIONAL))
-
- return menu
-
- @staticmethod
- def from_ss(static_spr):
- """
- Generates a FilterSprite object from a StaticSprite
-
- May return None if invalid static sprite
- """
- if static_spr.invalid:
- return None
-
- filter_spr = FilterSprite()
- filter_spr.position = static_spr.position
- filter_spr.eyes = static_spr.eyes
- filter_spr.eyebrows = static_spr.eyebrows
- filter_spr.nose = static_spr.nose
- filter_spr.blush = static_spr.blush
- filter_spr.tears = static_spr.tears
- filter_spr.sweatdrop = static_spr.sweatdrop
- filter_spr.emote = static_spr.emote
- filter_spr.mouth = static_spr.mouth
- filter_spr.is_lean = static_spr.is_lean
- filter_spr.sides = static_spr.sides
- filter_spr.single = static_spr.single
- filter_spr.head = static_spr.head
- filter_spr.spcode = static_spr.spcode
-
- return filter_spr
-
- @staticmethod
- def _build_menu(category):
- """
- Builds menu options for a category
-
- May return None if errors occured
- """
- is_positions = category == FilterSprite.POS
-
- selections = FilterSprite._sprite_map.get(category, None)
- if selections is None:
- return None
-
- sorted_keys = sorted(selections.keys())
-
- menu = []
-
- # now the items
- for code in sorted_keys:
- name = selections[code]
- if is_positions and type(name) is not str:
- menu.append((StaticSprite.lean_tostring(name), code))
-
- else:
- menu.append((name, code))
-
- return menu
-
- def _status(self,
- useheader,
- headerstring,
- shownose,
- showemote
- ):
- """
- Builds string representation of this Filter according to given
- status props
- :param useheader: True will use the block header from menutils,
- False will not
- :param headerstring: the string to use in the header
- :param shownose: True will show the nose part of the filter, False
- will not
- :param showemote: True will show the emote part of the filter, False
- will not
- """
- # setup initial strings
- if useheader:
- msg = [menutils.header(headerstring)]
- else:
- msg = [self._tab + headerstring]
-
- # lean and position check
- if self.position is None:
- position = None
- is_lean = None
- elif self.is_lean:
- position = StaticSprite.lean_tostring(self.position)
- is_lean = True
- else:
- position = self.position
- is_lean = self.is_lean
-
- # now add each filter piece
- self.__fmt_flt(msg, "Position:", position)
- self.__fmt_flt(msg, "Is Lean:", is_lean)
- self.__fmt_flt(msg, "Eyes:", self.eyes)
- self.__fmt_flt(msg, "Eyebrows:", self.eyebrows)
- if shownose:
- self.__fmt_flt(msg, "Nose:", self.nose)
- self.__fmt_flt(msg, "Blush:", self.blush)
- self.__fmt_flt(msg, "Tears:", self.tears)
- self.__fmt_flt(msg, "Sweatdrop:", self.sweatdrop)
- if showemote:
- self.__fmt_flt(msg, "Emote:", self.emote)
- self.__fmt_flt(msg, "Mouth:", self.mouth)
-
- return "".join(msg)
-
- def __flt_eq_pos(self, other):
- """
- Checks if this position is same as other
- :param other: the other static sprite
- :returns: False if not None and does not match, True otherwise
- """
- if self.position is None:
- return True
-
- return (
- self.position == other.position
- and self.is_lean == other.is_lean
- )
-
- def __flt_eq_eye(self, other):
- """
- Checks if this eqyes is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.eyes is None:
- return True
-
- return self.eyes == other.eyes
-
- def __flt_eq_eyb(self, other):
- """
- Checks if this eyebrows is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.eyebrows is None:
- return True
-
- return self.eyebrows == other.eyebrows
-
- def __flt_eq_nse(self, other):
- """
- Checks if this nose is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.nose is None:
- return True
-
- return self.nose == other.nose
-
- def __flt_eq_blh(self, other):
- """
- Checks if this blush is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.blush is None:
- return True
-
- return self.blush == other.blush
-
- def __flt_eq_trs(self, other):
- """
- Checks if this tears is same as other
- :param other: the StaticSprite to compare to
- "returns: False if not None and does not match, True otherwise
- """
- if self.tears is None:
- return True
-
- return self.tears == other.tears
-
- def __flt_eq_swd(self, other):
- """
- Checks if this sweatdrop is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.sweatdrop is None:
- return True
-
- return self.sweatdrop == other.sweatdrop
-
- def __flt_eq_emo(self, other):
- """
- Checks if this emote is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.emote is None:
- return True
-
- return self.emote == other.emote
-
- def __flt_eq_mth(self, other):
- """
- Checks if this mouth is same as other
- :param other: the StaticSprite to compare to
- :returns: False if not None and does not match, True otherwise
- """
- if self.mouth is None:
- return True
-
- return self.mouth == other.mouth
-
- def __flt_set_pos(self, code):
- """
- Sets position filter
- :param code: the code to lookup
- """
- self.position = self._get_smap(self.POS, code, None)
-
- if self.position is None:
- self.is_lean = None
- else:
- self.is_lean = self._get_smap("is_lean", code, False)
-
- def __flt_set_eye(self, code):
- """
- Sets eye filter
- :param code: the code to lookup
- """
- self.eyes = self._get_smap(self.EYE, code, None)
-
- def __flt_set_eyb(self, code):
- """
- Sets eyebrow filter
- :param code: the code to lookup
- """
- self.eyebrows = self._get_smap(self.EYB, code, None)
-
- def __flt_set_nse(self, code):
- """
- Sets nose filter
- :param code: the code to lookup
- """
- self.nose = self._get_smap(self.NSE, code, None)
-
- def __flt_set_blh(self, code):
- """
- Sets blush filter
- :param code: the code to lookup
- """
- self.blush = self._get_smap(self.BLH, code, None)
-
- def __flt_set_trs(self, code):
- """
- Sets tears filter
- :param code: the code to lookup
- """
- self.tears = self._get_smap(self.TRS, code, None)
-
- def __flt_set_swd(self, code):
- """
- Sets sweatdrop filter
- :param code: the code to lookup
- """
- self.sweatdrop = self._get_smap(self.SWD, code, None)
-
- def __flt_set_emo(self, code):
- """
- Sets emote filter
- :param code: the code to lookup
- """
- self.emote = self._get_smap(self.EMO, code, None)
-
- def __flt_set_mth(self, code):
- """
- Sets mouth filter
- :param code: the code to lookup
- """
- self.mouth = self._get_smap(self.MTH, code, None)
-
- def __fmt_flt(self, msg_arr, string, value):
- """
- adds appropraitely formatted filter string to msg arr
- """
- if value is None:
- value_str = ""
- else:
- value_str = str(value)
-
- msg_arr.extend([
- "\n",
- self._tab,
- self._flt_fmt.format(string),
- value_str
- ])
-
-
-def gen_sprite_files(
- sprites,
- file_prefix,
- file_template,
- file_header,
- spacing="\n\n",
- tostring=str,
- quiet=False,
- sp_per_file=500,
- skip_pause=True,
- skip_continue=True
-):
- """
- Generates sprite files.
-
- IN:
- sprites - the list of sprite objects to generate stuff for
- file_prefix - the prefix for each filename
- file_template - the template for each filename
- file_header - the header to write at the top of each file
- spacing - spacing between items
- (Default: \n\n)
- tostring - to string function to use (must take a sprite object)
- (Default: str)
- quiet - True will supress menus and stdout
- (Default: False)
- sp_per_file - max number of sprites allowed per file
- (Default: 500)
- skip_pause - True will skip pause at end. False will not
- (Default: True)
- skip_continue - True will skip the continue. False will not
-
- RETURNS: True if successful, False if abort
- """
- # first, check if we will go over the max file limit
- if ( int(len(sprites) / sp_per_file) + 1) > spull.MAX_FILE_LIMIT:
- # always show error messages
- print(MSG_OVER_FILE_LIMIT.format(
- len(sprites),
- spull.MAX_FILE_LIMIT
- ))
- return False
-
- # ask user to continue
- if not (quiet or skip_continue):
- print(MSG_OVERWRITE.format(file_prefix))
- if not menutils.ask_continue():
- return False
-
- # setup file counts
- file_num = 0
- sp_count = 0
-
- # and file data
- filename = file_template.format(file_num)
- filepath = GDIR.REL_PATH_GAME + filename
-
- # create thef irst file
- if not quiet:
- print(MSG_GEN_FILE.format(filename), end="")
- output_file = open(os.path.normcase(filepath), "w")
- output_file.write(file_header)
-
- # begin loop over sprites
- for sprite_obj in sprites:
-
- if sp_count >= sp_per_file:
- # over the sprites per file limit. we should make new file.
-
- # increment counts
- sp_count = 0
- file_num += 1
-
- # close file and say done
- output_file.close()
- if not quiet:
- print("done")
-
- # setup next file stuff
- filename = file_template.format(file_num)
- filepath = GDIR.REL_PATH_GAME + filename
-
- # open file
- if not quiet:
- print(MSG_GEN_FILE.format(filename), end="")
- output_file = open(os.path.normcase(filepath), "w")
- output_file.write(file_header)
-
- # add sprite object to file
- output_file.write(tostring(sprite_obj))
- output_file.write(spacing)
- sp_count += 1
-
- # finally, close the last file and say done
- output_file.close()
- if not quiet:
- print("done")
-
- if not skip_pause:
- menutils.e_pause()
-
- return True
-
-
-def make_sprite(sprite_db, sprite_db_keys):
- """
- Makes a sprite and adds it to the sprite database.
- NOTE: keys should be regenerated after this by the caller
-
- RETURNS: True if sprite creation successful, False if not
- """
- sprite_obj = FilterSprite()
- sprite_code = []
-
- # this is the order we ask for sprites as it is the order of the
- # sprite code
- sprite_parts = (
- (FilterSprite.POS, False),
- (FilterSprite.EYE, False),
- (FilterSprite.EYB, False),
- # NOTE: we skip nose because there is only 1
-# FilterSprite.NSE,
- (FilterSprite.BLH, True),
- (FilterSprite.TRS, True),
- (FilterSprite.SWD, True),
- # NOTE: emote skipped
-# FilterSprite.EMO,
- (FilterSprite.MTH, False),
- )
-
- for sp_cat, is_optional in sprite_parts:
- sel_not_chosen = True
-
- # loop until user selection
- while sel_not_chosen:
-
- # generate menu
- sel_menu = FilterSprite.build_selection_menu(
- sp_cat,
- optional=is_optional,
- headeradd=" - " + "".join(sprite_code)
- )
-
- # if optional, we set the default to optional, which is always
- # the last item
- if is_optional:
- defindex = len(sel_menu) - 1
- else:
- defindex = None
-
- # now run teh menu
- sel_code = menutils.menu(sel_menu, defindex)
-
- if sel_code is not None:
- # a selection was chosen, check if optinal
-
- if sel_code != FilterSprite.OPTIONAL:
- # actual code selected, update the filter sprite and
- # the sprite code list
- sprite_code.append(sel_code)
- sprite_obj.set_filter(sp_cat, sel_code)
-
- # mark as selected
- sel_not_chosen = False
-
- else:
- # Exit was reached, verify if we actually want to exit
- print("\nExiting will abort the creation of this sprite!\n")
- if menutils.ask("Discard this sprite"):
- return False
-
- # if we reached here, we should have a sprite now
- menutils.clear_screen()
-
- # lets double check if this is a duplicate
- sprite_code = "".join(sprite_code)
- if sprite_code in sprite_db:
- print("\n\nSprite code {0} already exists! Aborting...".format(
- sprite_code
- ))
- menutils.e_pause()
- return False
-
- # otherwise, no duplicate
- # lets show the user and then confirm
- print(sprite_obj._status(
- True,
- "Selected Sprite Settings - " + sprite_code,
- False,
- False
- ))
-
- # TODO: ask user if they would want to see a preview. Get libpng and
- # generate a composite image with the appropraite paths. This is
- # really a stretch since exp_previewer covers this already.
-
- # spacing
- print("\n\n")
-
- # ask to create the sprite
- if not menutils.ask("Create sprite"):
- print("\nSprite discarded.")
- menutils.e_pause()
- return False
-
- # user said yes!
- # create the sprite
- real_sprite = StaticSprite(sprite_code)
-
- # now determine if we need an atl variant
- atl_sprite = real_sprite.make_atl()
-
- # print and abort if errors occured
- if real_sprite.invalid or (atl_sprite is not None and atl_sprite.invalid):
- menutils.clear_screen()
- print("\n\nError making this sprite. Notify devs to fix.")
- menutils.e_pause()
- return False
-
- # otherwise we ok
- sprite_db[real_sprite.spcode] = real_sprite
-
- if atl_sprite is not None:
- sprite_db[atl_sprite.spcode] = atl_sprite
-
- return True
-
-
-def make_sprite_bc(sprite_db, sprite_db_keys):
- """
- Makes sprite using just a code and adds it to sprite database.
- NOTE: keys should be regenerated after this by the caller
-
- RETURNS: True if sprite creation successful, False if not
- """
- not_valid_code = True
- sprite_created = False
- while not_valid_code:
- menutils.clear_screen()
- print("\n\n")
- trycode = raw_input("Enter a sprite code: ")
-
- # build a static sprite with the code
- new_sprite = StaticSprite(trycode)
-
- # and atl version
- atl_sprite = new_sprite.make_atl()
-
- if new_sprite.invalid or (atl_sprite is not None and atl_sprite.invalid):
- # if invalid, ask user if they want to continue
- print("\nSprite code {0} is invalid.\n".format(trycode))
- if not menutils.ask("Try again", def_no=False):
- return sprite_created
-
- elif new_sprite.spcode in sprite_db:
- # check if already exists
- print("\nSprite code {0} already exists!\n".format(
- new_sprite.spcode
- ))
- if not menutils.ask("Try again", def_no=False):
- return sprite_created
-
- else:
- # valid sprite, means we should show it and ask for confirm
- filter_spr = FilterSprite.from_ss(new_sprite)
- print(filter_spr._status(
- True,
- "Selected Sprite Settings - " + new_sprite.spcode,
- True,
- True
- ))
-
- # spacing
- print("\n\n")
-
- # ask to create the sprite
- if not menutils.ask("Create sprite"):
- print("\nSprite discarded.\n")
-
- else:
- # user said yes!
- # add sprite to db and prompt for more
- sprite_db[new_sprite.spcode] = new_sprite
-
- if atl_sprite is not None:
- sprite_db[atl_sprite.spcode] = atl_sprite
-
- sprite_created = True
- print("\nSprite created.\n")
-
- if not menutils.ask("Create another sprite", def_no=False):
- return sprite_created
-
-
-### runners
-
-def run():
- """
- Runs this module (menu related)
- """
- # first load all sprites
- print("Loading sprites...", end="")
- sprite_db = _load_sprites()
-
- # abort if failed
- if sprite_db is None:
- print("\nERROR in loading sprites. Aborting...")
- return
-
- # now sort keys
- sprite_db_keys = sorted(sprite_db.keys())
-
- # otherwise success
- print("DONE")
-
- choice = True
- while choice is not None:
-
- # set apropriate title text
- if _need_to_gen_sprites:
- title_entry = ("Sprite Maker" + MSG_UNSAVED, "Option: ")
- else:
- title_entry = ("Sprite Maker", "Option: ")
-
- menu_main[0] = title_entry
-
- choice = menutils.menu(menu_main)
-
- if choice is not None:
- result = choice(sprite_db, sprite_db_keys)
-
- # only make sprite returns a value, which is the updated keys
- # list
- if result is not None:
- sprite_db_keys = result
-
- elif _need_to_gen_sprites:
- # user hit None, but we should make sure that they wanted to leave
- # without saving changes
- menutils.clear_screen()
- print("\n\n" + MSG_WARN_GEN)
- if not menutils.ask("Leave this menu"):
- choice = True
-
-
-def run_gss(sprite_db, sprite_db_keys, quiet=False, sp_per_file=500):
- """
- Generates static sprites, and alises
-
- IN:
- quiet - supresses menus and stdout
- sp_per_file - max number of sprites allowed per file
- """
- # ask for draw function to use
- if not quiet:
- df_choice = True
- while df_choice is not None:
- df_choice = menutils.menu(menu_sdf, defindex=1)
-
- # if no choice was made here (or we aborted), then quit
- if df_choice is None:
- return
-
- # otherwise set and quit loop
- spr_module.draw_function = df_choice
- df_choice = None
-
- # ask if okay to overwrite files
- if not quiet:
- print("\n" + MSG_OVERWRITE.format(
- ", ".join([
- spull.STATIC_PREFIX,
- spull.ALIAS_PREFIX,
- spull.ATL_PREFIX
- ])
- ))
- if not menutils.ask_continue():
- return
-
- # generate static sprites
- if not gen_sprite_files(
- list(SortedKeySpriteDBIter(sprite_db, sprite_db_keys)),
- spull.STATIC_PREFIX,
- spull.STATIC_TEMPLATE,
- __SP_STATIC_HEADER,
- quiet=quiet,
- sp_per_file=sp_per_file
- ):
- return
-
- # now for filter sprites
- if not gen_sprite_files(
- filter(
- StaticSprite.as_is_closed_eyes,
- SortedKeySpriteDBIter(sprite_db, sprite_db_keys)
- ),
- spull.ALIAS_PREFIX,
- spull.ALIAS_TEMPLATE,
- __SP_STATIC_HEADER,
- spacing="\n",
- tostring=StaticSprite.as_alias_static,
- quiet=quiet,
- sp_per_file=5000
- ):
- return
-
- # and finally atl sprites
- if not gen_sprite_files(
- filter(
- StaticSprite.as_is_not_closed_eyes,
- SortedKeySpriteDBIter(sprite_db, sprite_db_keys)
- ),
- spull.ATL_PREFIX,
- spull.ATL_TEMPLATE,
- __SP_STATIC_HEADER,
- tostring=StaticSprite.as_atlify,
- quiet=quiet,
- sp_per_file=sp_per_file
- ):
- return
-
- # done, print done
- if not quiet:
- menutils.e_pause()
-
- global _need_to_gen_sprites
- _need_to_gen_sprites = False
-
-
-def run_mkspr(sprite_db, sprite_db_keys):
- """
- Makes a sprite.
-
- Returns an updated sprite_db_keys, or None if no changes
- """
- if make_sprite(sprite_db, sprite_db_keys):
- # success we made a sprite
-
- # mark that we are dirty and need to regen
- global _need_to_gen_sprites
- _need_to_gen_sprites = True
-
- # return updated keys
- return sorted(sprite_db.keys())
-
- return None
-
-
-def run_mkspr_bc(sprite_db, sprite_db_keys):
- """
- Makes a sprite.
-
- Returns an updated sprite_db_keys, or None if no changes
- """
- if make_sprite_bc(sprite_db, sprite_db_keys):
- # we made some sprites
-
- # mark that we are dirty and need to regin
- global _need_to_gen_sprites
- _need_to_gen_sprites = True
-
- # return updated keys
- return sorted(sprite_db.keys())
-
- return None
-
-
-def run_lstc(sprite_db, sprite_db_keys):
- """
- List codes submenu
- """
- choice = True
- ss_filter = FilterSprite()
-
- while choice is not None:
- choice = menutils.menu(menu_lstc)
-
- if choice is not None:
- choice(sprite_db, sprite_db_keys, ss_filter)
-
-
-def run_lstc_show(sprite_db, sprite_db_keys, ss_filter):
- """
- Show sprites, based on filter
- """
- # filter valid sprites
- filtered = filter(
- ss_filter.filter,
- SortedKeySpriteDBIter(sprite_db, sprite_db_keys)
- )
-
- # show codes
- menutils.paginate(
- "Sprite Codes",
- filtered,
- str_func=StaticSprite.as_scstr_code
- )
-
-
-def run_lstc_showfilter(sprite_db, sprite_db_keys, ss_filter):
- """
- Show filter settings
- """
- menutils.clear_screen()
- print(str(ss_filter))
- menutils.e_pause()
-
-
-def run_lstc_setfilter(sprite_db, sprite_db_keys, ss_filter):
- """
- Set filter settings
- """
- choice = True
-
- while choice is not None:
- choice = menutils.menu(ss_filter.menu_flt_set)
-
- if choice is not None:
- # get a menu baesd on the category
- category_menu = FilterSprite.build_menu(choice)
- if category_menu is not None:
- code = menutils.menu(category_menu)
-
- # set if not none
- if code is not None:
- ss_filter.set_filter(choice, code)
-
-
-############### menus ############
-
-menu_main = [
- ("Sprite Maker", "Option: "),
- ("List Codes", run_lstc),
- ("Make Sprite (Interactive)", run_mkspr),
- ("Make Sprite (By Code)", run_mkspr_bc),
- ("Generate Sprites", run_gss),
-]
-
-menu_lstc = [
- ("Filter Options", "Option: "),
- ("Show List", run_lstc_show),
- ("Show Filters", run_lstc_showfilter),
- ("Set Filter", run_lstc_setfilter),
-]
-
-menu_sdf = [
- ("Set Draw Function", "Option: "),
- (
- "Image Manipulators (" + spr_module.DRAW_MONIKA_IM + ")",
- spr_module.DRAW_MONIKA_IM
- ),
- (
- "Sprite Strings (" + spr_module.DRAW_MONIKA + ")",
- spr_module.DRAW_MONIKA
- ),
-]
-
-# strings
-
-MSG_OVERWRITE = (
- "This will overwrite all sprite chart files that start with:\n {0}\n"
-)
-
-MSG_OVER_FILE_LIMIT = "\nCannot fit {0} sprites into {1} files. Aborting..."
-MSG_GEN_FILE = "Generating file '{0}'..."
-MSG_WARN_GEN = (
- "WARNING! You have created a sprite but have not regenerated the sprite "
- "charts.\nLeaving this menu will abort your changes.\n"
-)
-MSG_UNSAVED = " - **Run Generate Sprites to save changes**"
-
-__SP_STATIC_HEADER = """\
-############################ AUTO-GENERATED ###################################
-## DO NOT EDIT THIS FILE ##
-## ##
-## This was auto-generated by the the spritemaker tool ##
-###############################################################################
-
-"""
-
-# internal functions
-
-
-def _load_sprites():
- """
- Loads sprite code data so this module can use it.
- NOTE: if None is returnd, treat as failure
- :returns: dictionary of the following format:
- [0] - sprite code (without static)
- [1] - StaticSprite object
- """
- sprite_list = []
-
- # load all static sprites
- for sprite_filepath in spull.STATIC_CHARTS:
- with open(os.path.normcase(sprite_filepath), "r") as sprite_file:
- sprite_list.extend(spull.pull_sprite_list_from_file(
- sprite_file,
- True
- ))
-
- # generate dict of static sprites
- sprite_db = {}
- for sprite_code in sprite_list:
- sprite_obj = StaticSprite(sprite_code)
-
- # immediately quit if invalid
- if sprite_obj.invalid:
- return None
-
- # otherwise add
- sprite_db[sprite_code] = sprite_obj
-
- # make as atl if possible
- atl_sprite = sprite_obj.make_atl()
- if atl_sprite is not None:
- sprite_db[atl_sprite.spcode] = atl_sprite
-
- return sprite_db
diff --git a/tools/spritepuller.py b/tools/spritepuller.py
index 6f85fd392a..4dce99009b 100644
--- a/tools/spritepuller.py
+++ b/tools/spritepuller.py
@@ -1,10 +1,12 @@
## module with a function that pull sprites out of the sprite-chart
#
-# VER: Python 2.7
+# VER: Python 3.x
import os
import gamedir as GDIR
+from sprite import StaticSprite
import menutils
+from typing import IO
STATIC_PREFIX = "sprite-chart-0"
ALIAS_PREFIX = "sprite-chart-1"
@@ -35,7 +37,7 @@
MAX_FILE_LIMIT = 10
-def clean_sprite(code):
+def clean_sprite(code: str) -> str:
"""
Cleans the given sprite (removes excess whitespace, colons)
@@ -49,7 +51,7 @@ def clean_sprite(code):
return code.replace(":","")
-def is_dyn_line(line):
+def is_dyn_line(line: str) -> bool:
"""
Checks if the given line is a sprite line with dynamic displayable
:param line: line to check
@@ -58,12 +60,12 @@ def is_dyn_line(line):
return is_sprite_line(line) and DYN_DIS in line
-def is_sprite_line(line):
+def is_sprite_line(line: str) -> bool:
"""
Checks if the given line is a sprite line
NOTE: a sprite line is a line that starts with "image monika"
-
+
NOTE: does not strip the given line.
IN:
@@ -75,7 +77,7 @@ def is_sprite_line(line):
return line.startswith(IMG_START)
-def pull_dyn_sprite_code(line):
+def pull_dyn_sprite_code(line: str) -> str | None:
"""
Pulls sprite code from the given line.
Only ones that are monika + dynamic displayable are allowed
@@ -90,7 +92,7 @@ def pull_dyn_sprite_code(line):
return None
-def pull_sprite_code(line):
+def pull_sprite_code(line: str) -> str | None:
"""
Pulls the sprite code from the given line.
This checks if the given line is a sprite line before pulling the code.
@@ -106,7 +108,7 @@ def pull_sprite_code(line):
return None
-def pull_sprite_list(as_dict=False):
+def pull_sprite_list(as_dict=False) -> dict[str, StaticSprite] | list[StaticSprite]:
"""
Goes through the sprite chart and generates a list of all the known sprite
codes.
@@ -122,16 +124,16 @@ def pull_sprite_list(as_dict=False):
RETURNS:
list of known sprite codes, or dict if as_dict is True
"""
- sprite_list = list()
+ sprite_list: list[StaticSprite] = list()
for sprfile in SPRITE_PATH:
- with open(os.path.normcase(sprfile), "r") as sprite_file:
+ with open(os.path.normcase(sprfile), "r", encoding="utf-8") as sprite_file:
sprite_list.extend(pull_sprite_list_from_file(sprite_file))
if as_dict:
# do we want a dict instead?
- sprite_dict = dict()
-
+ sprite_dict: dict[str, StaticSprite] = dict()
+
for sprite in sprite_list:
sprite_dict[sprite] = 0
@@ -141,7 +143,7 @@ def pull_sprite_list(as_dict=False):
return sprite_list
-def pull_sprite_list_from_file(sprite_file, dyn_only=False):
+def pull_sprite_list_from_file(sprite_file: IO, dyn_only=False):
"""
Pulls a list of sprite from the given file
:param sprite_file: file object to read sprites from
@@ -165,7 +167,7 @@ def pull_sprite_list_from_file(sprite_file, dyn_only=False):
return sprite_list
-def write_spritecodes(sprites):
+def write_spritecodes(sprites: list[str]) -> None:
"""
Writes out a sprite file that just contains each sprite code out,
one sprite code per line
@@ -173,25 +175,25 @@ def write_spritecodes(sprites):
IN:
sprites - list of sprite codes to write out.
"""
- with open(os.path.normcase(SAVE_PATH), "w") as outfile:
+ with open(os.path.normcase(SAVE_PATH), "w", encoding="utf-8") as outfile:
for line in sprites:
outfile.write(line + "\n")
-def write_spritestats(sprites):
+def write_spritestats(sprites: dict[str, str]) -> None:
"""
- Writes out a sprite file that just contains each sprite code with its
+ Writes out a sprite file that just contains each sprite code with its
value, one sprite code per line
IN:
sprites - dict of sprite codes to write out
"""
- with open(os.path.normcase(SAVE_PATH_D), "w") as outfile:
+ with open(os.path.normcase(SAVE_PATH_D), "w", encoding="utf-8") as outfile:
for code in sprites:
outfile.write("{0}: {1}\n".format(code, sprites[code]))
-def write_zz_sprite_opt(sprites):
+def write_zz_sprite_opt(sprites: list[str]) -> None:
"""
Writes out a sprite file that can be used in renpy to optimize sprites
using image prediction.
@@ -199,7 +201,7 @@ def write_zz_sprite_opt(sprites):
IN:
sprites - list of sprite codes to write out.
"""
- with open(os.path.normcase(SAVE_PATH_IO), "w") as outfile:
+ with open(os.path.normcase(SAVE_PATH_IO), "w", encoding="utf-8") as outfile:
outfile.write(__ZZ_SP_OPT_HEADER)
open_list = False
@@ -222,14 +224,14 @@ def write_zz_sprite_opt(sprites):
# 1 last footer needed
if open_list:
outfile.write(__ZZ_SP_OPT_LINE_END)
-
+
outfile.write(__ZZ_SP_OPT_FOOTER)
####################### special run methods ##################################
-def run():
+def run() -> None:
"""
Runs this module (menu-related)
"""
@@ -242,7 +244,7 @@ def run():
choice()
-def run_spl(quiet=False):
+def run_spl(quiet=False) -> None:
"""
Generates the sprite code list and writes it to file
@@ -264,7 +266,7 @@ def run_spl(quiet=False):
menutils.e_pause()
-def run_rpy_all(quiet=False):
+def run_rpy_all(quiet=False) -> None:
"""
Generates optimized image rpy for ALL images
@@ -339,7 +341,7 @@ def _find_files(prefix):
]
-def _init():
+def _init() -> None:
"""
Startup
"""
diff --git a/tools/testsgenerator.py b/tools/testsgenerator.py
deleted file mode 100644
index 4295631b70..0000000000
--- a/tools/testsgenerator.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# module that generates expression test files
-
-import spritepuller as spp
-import gamedir as GDIR
-
-import menutils
-
-# bad code file name
-EXPR_TEST_FN = "zzzz_dev_exprtest.rpy"
-
-
-def generate_expr_test(quiet=False):
- """
- Uses the expressions retrieved from the spritepuller utility,
- then cleans them(remove duplicates, remove old expressions),
- sorts them and finally generates a file named zzzz_dev_exprtest.rpy
- which adds an event which can be accessed in-game to check every single
- expression added
-
- IN:
- quiet - True will suppress output
- """
- if not quiet:
- print("\nPulling Sprites ...")
- # pull the sprites and put them in a set to remove duplicates
- sp_list = set(spp.pull_sprite_list())
-
- # pick only the new ones
- sp_list = [x for x in sp_list if len(x) > 3]
-
- # sort them
- sp_list.sort()
-
- if not quiet:
- print("\nWriting to file ...")
-
- with open(GDIR.REL_PATH_DEV+ EXPR_TEST_FN, "w") as outfile:
-
- # write the header
- outfile.write(__ZZ_EXPR_TEST_HEADER)
-
- for sp in sp_list:
-
- # write each line format will be 'm expr_code "expr_code"'
- outfile.write(__ZZ_EXPR_TEST_LINE.format(sp) + "\n")
-
- # write the footer
- outfile.write(__ZZ_EXPR_TEST_FOOTER)
-
- if not quiet:
- print("\nDone")
- menutils.e_pause()
-
-def run():
- """
- Runs this module (menu-related)
- """
- # for now, we only have 1 workflow
- generate_expr_test()
-
-
-
-#### strings for formatting:
-__ZZ_EXPR_TEST_HEADER = """
-############################ AUTO-GENERATED ###################################
-## DO NOT EDIT THIS FILE ##
-## ##
-## This was auto-generated by the testsgenerator tool ##
-###############################################################################
-#
-# This is a module designed for running test on all the current expressions.
-# NOTE: This is going to be a click nightmare.
-# USE AT YOUR OWN RISK
-#
-
-init 5 python:
- addEvent(
- Event(
- persistent.event_database,
- eventlabel="dev_expr_testing",
- category=["dev"],
- prompt="TEST EXPRESSIONS",
- pool=True,
- unlocked=True
- )
- )
-
-label dev_expr_testing:
-"""
-__ZZ_EXPR_TEST_LINE = ' m {0} "{0}"'
-__ZZ_EXPR_TEST_FOOTER = " return"
-
diff --git a/tools/toolsmenu.py b/tools/toolsmenu.py
index 21b46ae1d2..071e1affd3 100644
--- a/tools/toolsmenu.py
+++ b/tools/toolsmenu.py
@@ -1,38 +1,30 @@
## TODO
## we need a neato menu for everything
#
-# VER: python 2.7
+# VER: python 3.x
import os
__clean_path = os.getcwd().replace("\\", "/")
if "MonikaModDev/tools" not in __clean_path:
os.chdir("tools")
-try:
- raw_input
-except NameError:
- print("run this using py2")
- exit(1)
-
-import spritepuller as spp
+# import spritepuller as spp
import spritechecker as spc
-import spritemaker as spm
-import testsgenerator as tg
+# import spritemaker as spm
+# import testsgenerator as tg
import menutils
menu_main = [
("MAS Dev Tools", "Utility: "),
- ("Sprite Puller", spp.run),
+ # ("Sprite Puller", spp.run),
("Check Sprites", spc.run),
- ("Make Sprites", spm.run),
- ("Generate Expressions Test", tg.run)
+ # ("Make Sprites", spm.run),
+ # ("Generate Expressions Test", tg.run)
]
choice = True
while choice is not None:
-
choice = menutils.menu(menu_main)
-
if choice is not None:
choice()