diff --git a/novelwriter/core/tokenizer.py b/novelwriter/core/tokenizer.py
index a2238b73e..6e420f5d5 100644
--- a/novelwriter/core/tokenizer.py
+++ b/novelwriter/core/tokenizer.py
@@ -198,6 +198,7 @@ def __init__(self, project: NWProject) -> None:
         # Instance Variables
         self._hFormatter = HeadingFormatter(self._project)
         self._noSep = True  # Flag to indicate that we don't want a scene separator
+        self._noIndent = False  # Flag to disable text indent on next paragraph
         self._showDialog = False  # Flag for dialogue highlighting
 
         # This File
@@ -873,7 +874,6 @@ def tokenizeText(self) -> None:
         pLines: list[T_Token] = []
 
         tCount = len(tokens)
-        pIndent = True
         for n, cToken in enumerate(tokens):
 
             if n > 0:
@@ -881,11 +881,11 @@ def tokenizeText(self) -> None:
             if n < tCount - 1:
                 nToken = tokens[n+1]  # Look ahead
 
-            if not self._indentFirst and cToken[0] in self.L_SKIP_INDENT:
+            if cToken[0] in self.L_SKIP_INDENT and not self._indentFirst:
                 # Unless the indentFirst flag is set, we set up the next
                 # paragraph to not be indented if we see a block of a
                 # specific type
-                pIndent = False
+                self._noIndent = True
 
             if cToken[0] == self.T_EMPTY:
                 # We don't need to keep the empty lines after this pass
@@ -910,7 +910,7 @@ def tokenizeText(self) -> None:
                     # Next token is not text, so we add the buffer to tokens
                     nLines = len(pLines)
                     cStyle = pLines[0][4]
-                    if self._firstIndent and pIndent and not cStyle & self.M_ALIGNED:
+                    if self._firstIndent and not (self._noIndent or cStyle & self.M_ALIGNED):
                         # If paragraph indentation is enabled, not temporarily
                         # turned off, and the block is not aligned, we add the
                         # text indentation flag
@@ -938,7 +938,7 @@ def tokenizeText(self) -> None:
 
                     # Reset buffer and make sure text indent is on for next pass
                     pLines = []
-                    pIndent = True
+                    self._noIndent = False
 
             else:
                 self._tokens.append(cToken)
diff --git a/tests/test_core/test_core_tokenizer.py b/tests/test_core/test_core_tokenizer.py
index aa23ad1d7..2932fe451 100644
--- a/tests/test_core/test_core_tokenizer.py
+++ b/tests/test_core/test_core_tokenizer.py
@@ -1324,6 +1324,89 @@ def testCoreToken_SpecialFormat(mockGUI):
     ]
 
+
+@pytest.mark.core
+def testCoreToken_TextIndent(mockGUI):
+    """Test the handling of text indent in the Tokenizer class."""
+    project = NWProject()
+    tokens = BareTokenizer(project)
+
+    # No First Indent
+    tokens.setFirstLineIndent(True, 1.0, False)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is False
+
+    # Page One
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # Page Two
+    # New scene with only a synopsis
+    tokens._text = (
+        "### Scene Two\n\n"
+        "%Synopsis: Stuff happens.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD3, 1, "Scene Two", [], Tokenizer.A_NONE),
+        (Tokenizer.T_SYNOPSIS, 1, "Stuff happens.", [], Tokenizer.A_NONE),
+    ]
+    assert tokens._noIndent is True
+
+    # Page Three
+    # Two paragraphs for the scene on the previous page
+    tokens._text = (
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_TEXT, 0, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 0, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # First Indent
+    tokens.setFirstLineIndent(True, 1.0, True)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is True
+
+    # Page Four
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_IND_T),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+
 @pytest.mark.core
 def testCoreToken_ProcessHeaders(mockGUI):
     """Test the header and page parser of the Tokenizer class."""