
Commit

Fix first line indent between pages (#1971)
vkbo authored Jul 8, 2024
2 parents 1c261ef + 69d97a3 commit ece225c
Showing 2 changed files with 88 additions and 5 deletions.
10 changes: 5 additions & 5 deletions novelwriter/core/tokenizer.py
@@ -198,6 +198,7 @@ def __init__(self, project: NWProject) -> None:
         # Instance Variables
         self._hFormatter = HeadingFormatter(self._project)
         self._noSep = True  # Flag to indicate that we don't want a scene separator
+        self._noIndent = False  # Flag to disable text indent on next paragraph
         self._showDialog = False  # Flag for dialogue highlighting

         # This File
@@ -873,19 +874,18 @@ def tokenizeText(self) -> None:
         pLines: list[T_Token] = []

         tCount = len(tokens)
-        pIndent = True
         for n, cToken in enumerate(tokens):

             if n > 0:
                 pToken = tokens[n-1]  # Look behind
             if n < tCount - 1:
                 nToken = tokens[n+1]  # Look ahead

-            if not self._indentFirst and cToken[0] in self.L_SKIP_INDENT:
+            if cToken[0] in self.L_SKIP_INDENT and not self._indentFirst:
                 # Unless the indentFirst flag is set, we set up the next
                 # paragraph to not be indented if we see a block of a
                 # specific type
-                pIndent = False
+                self._noIndent = True

             if cToken[0] == self.T_EMPTY:
                 # We don't need to keep the empty lines after this pass
@@ -910,7 +910,7 @@ def tokenizeText(self) -> None:
                 # Next token is not text, so we add the buffer to tokens
                 nLines = len(pLines)
                 cStyle = pLines[0][4]
-                if self._firstIndent and pIndent and not cStyle & self.M_ALIGNED:
+                if self._firstIndent and not (self._noIndent or cStyle & self.M_ALIGNED):
                     # If paragraph indentation is enabled, not temporarily
                     # turned off, and the block is not aligned, we add the
                     # text indentation flag
@@ -938,7 +938,7 @@ def tokenizeText(self) -> None:

                 # Reset buffer and make sure text indent is on for next pass
                 pLines = []
-                pIndent = True
+                self._noIndent = False

             else:
                 self._tokens.append(cToken)
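
The essence of the fix: the "skip the next indent" state moves from a local variable (pIndent) inside tokenizeText to the instance flag self._noIndent, so a heading or synopsis at the end of one page can still suppress the first-line indent of the first paragraph on the next page, which is tokenized in a separate call. Below is a minimal, self-contained sketch of that idea; it is illustrative only, and IndentTracker and its tokenize method are hypothetical, not part of novelWriter:

# Minimal sketch (hypothetical code, not the novelWriter API): the decision to
# skip the next indent must live on the instance, because each page or document
# is handled by a separate call and a local variable would not survive it.

class IndentTracker:
    """Hypothetical stand-in for the Tokenizer's first-line indent handling."""

    def __init__(self) -> None:
        self._noIndent = False  # Persists between calls, like Tokenizer._noIndent

    def tokenize(self, blocks: list[str]) -> list[tuple[str, bool]]:
        """Return (block, indented) pairs for one page of text blocks."""
        result = []
        for block in blocks:
            if block.startswith("#") or block.startswith("%"):
                # Headings and comment/synopsis blocks suppress the next indent
                self._noIndent = True
                result.append((block, False))
            else:
                result.append((block, not self._noIndent))
                self._noIndent = False  # Only the first paragraph is affected
        return result


tracker = IndentTracker()
tracker.tokenize(["### Scene Two", "%Synopsis: Stuff happens."])   # page two
print(tracker.tokenize(["First paragraph.", "Second paragraph."]))  # page three
# [('First paragraph.', False), ('Second paragraph.', True)]
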
83 changes: 83 additions & 0 deletions tests/test_core/test_core_tokenizer.py
@@ -1324,6 +1324,89 @@ def testCoreToken_SpecialFormat(mockGUI):
     ]


+@pytest.mark.core
+def testCoreToken_TextIndent(mockGUI):
+    """Test the handling of text indent in the Tokenizer class."""
+    project = NWProject()
+    tokens = BareTokenizer(project)
+
+    # No First Indent
+    tokens.setFirstLineIndent(True, 1.0, False)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is False
+
+    # Page One
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # Page Two
+    # New scene with only a synopsis
+    tokens._text = (
+        "### Scene Two\n\n"
+        "%Synopsis: Stuff happens.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD3, 1, "Scene Two", [], Tokenizer.A_NONE),
+        (Tokenizer.T_SYNOPSIS, 1, "Stuff happens.", [], Tokenizer.A_NONE),
+    ]
+    assert tokens._noIndent is True
+
+    # Page Three
+    # Two paragraphs for the scene on the previous page
+    tokens._text = (
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_TEXT, 0, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 0, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # First Indent
+    tokens.setFirstLineIndent(True, 1.0, True)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is True
+
+    # Page Four
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_IND_T),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+
 @pytest.mark.core
 def testCoreToken_ProcessHeaders(mockGUI):
     """Test the header and page parser of the Tokenizer class."""
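
The new test can be run on its own with: pytest tests/test_core/test_core_tokenizer.py -k testCoreToken_TextIndent (assuming the repository's usual pytest setup).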
