diff --git a/novelwriter/constants.py b/novelwriter/constants.py
index 2f409d9a3..9a871e4d8 100644
--- a/novelwriter/constants.py
+++ b/novelwriter/constants.py
@@ -99,6 +99,7 @@ class nwHeaders:
H_VALID = ("H0", "H1", "H2", "H3", "H4")
H_LEVEL = {"H0": 0, "H1": 1, "H2": 2, "H3": 3, "H4": 4}
+ H_SIZES = {0: 1.00, 1: 2.00, 2: 1.75, 3: 1.50, 4: 1.25}
class nwFiles:
diff --git a/novelwriter/core/docbuild.py b/novelwriter/core/docbuild.py
index 403137fb0..c65ac0be0 100644
--- a/novelwriter/core/docbuild.py
+++ b/novelwriter/core/docbuild.py
@@ -37,7 +37,7 @@
from novelwriter.core.project import NWProject
from novelwriter.core.tohtml import ToHtml
from novelwriter.core.tokenizer import Tokenizer
-from novelwriter.core.tomd import ToMarkdown
+from novelwriter.core.tomarkdown import ToMarkdown
from novelwriter.core.toodt import ToOdt
from novelwriter.enum import nwBuildFmt
from novelwriter.error import formatException, logException
diff --git a/novelwriter/core/tohtml.py b/novelwriter/core/tohtml.py
index 2f64bb56f..85a91f594 100644
--- a/novelwriter/core/tohtml.py
+++ b/novelwriter/core/tohtml.py
@@ -171,9 +171,7 @@ def doConvert(self) -> None:
h3 = "h3"
h4 = "h4"
- para = []
lines = []
- pStyle = None
tHandle = self._handle
for tType, nHead, tText, tFormat, tStyle in self._tokens:
@@ -240,37 +238,27 @@ def doConvert(self) -> None:
aNm = ""
# Process Text Type
- if tType == self.T_EMPTY:
- if pStyle is None:
- pStyle = ""
- if len(para) > 1 and self._cssStyles:
- pClass = " class='break'"
- else:
- pClass = ""
- if len(para) > 0:
- tTemp = "<br/>".join(para)
- lines.append(f"<p{pClass}{pStyle}>{tTemp.rstrip()}</p>\n")
- para = []
- pStyle = None
+ if tType == self.T_TEXT:
+ lines.append(f"<p{hStyle}>{self._formatText(tText, tFormat, hTags)}</p>\n")
elif tType == self.T_TITLE:
- tHead = tText.replace(nwHeadFmt.BR, "<br/>")
+ tHead = tText.replace(nwHeadFmt.BR, "<br>")
lines.append(f"<h1 class='title'{hStyle}>{aNm}{tHead}</h1>\n")
elif tType == self.T_HEAD1:
- tHead = tText.replace(nwHeadFmt.BR, "<br/>")
+ tHead = tText.replace(nwHeadFmt.BR, "<br>")
lines.append(f"<{h1}{h1Cl}{hStyle}>{aNm}{tHead}</{h1}>\n")
elif tType == self.T_HEAD2:
- tHead = tText.replace(nwHeadFmt.BR, "<br/>")
+ tHead = tText.replace(nwHeadFmt.BR, "<br>")
lines.append(f"<{h2}{hStyle}>{aNm}{tHead}</{h2}>\n")
elif tType == self.T_HEAD3:
- tHead = tText.replace(nwHeadFmt.BR, "<br/>")
+ tHead = tText.replace(nwHeadFmt.BR, "<br>")
lines.append(f"<{h3}{hStyle}>{aNm}{tHead}</{h3}>\n")
elif tType == self.T_HEAD4:
- tHead = tText.replace(nwHeadFmt.BR, "<br/>")
+ tHead = tText.replace(nwHeadFmt.BR, "<br>")
lines.append(f"<{h4}{hStyle}>{aNm}{tHead}</{h4}>\n")
elif tType == self.T_SEP:
@@ -279,11 +267,6 @@ def doConvert(self) -> None:
elif tType == self.T_SKIP:
lines.append(f"<p{hStyle}>&nbsp;</p>\n")
- elif tType == self.T_TEXT:
- if pStyle is None:
- pStyle = hStyle
- para.append(self._formatText(tText, tFormat, hTags).rstrip())
-
elif tType == self.T_SYNOPSIS and self._doSynopsis:
lines.append(self._formatSynopsis(self._formatText(tText, tFormat, hTags), True))
@@ -491,6 +474,7 @@ def _formatText(self, text: str, tFmt: T_Formats, tags: dict[int, str]) -> str:
else:
html = tags.get(fmt, "ERR")
temp = f"{temp[:pos]}{html}{temp[pos:]}"
+ temp = temp.replace("\n", "<br>")
return stripEscape(temp)
def _formatSynopsis(self, text: str, synopsis: bool) -> str:
diff --git a/novelwriter/core/tokenizer.py b/novelwriter/core/tokenizer.py
index 15cd80469..448cb2ed5 100644
--- a/novelwriter/core/tokenizer.py
+++ b/novelwriter/core/tokenizer.py
@@ -50,6 +50,7 @@
T_Formats = list[tuple[int, int, str]]
T_Comment = tuple[str, T_Formats]
+T_Token = tuple[int, int, str, T_Formats, int]
def stripEscape(text: str) -> str:
@@ -129,7 +130,7 @@ def __init__(self, project: NWProject) -> None:
self._keepMD = False # Whether to keep the markdown text
# Tokens and Meta Data (Per Document)
- self._tokens: list[tuple[int, int, str, T_Formats, int]] = []
+ self._tokens: list[T_Token] = []
self._footnotes: dict[str, T_Comment] = {}
# Tokens and Meta Data (Per Instance)
@@ -152,6 +153,7 @@ def __init__(self, project: NWProject) -> None:
self._doComments = False # Also process comments
self._doKeywords = False # Also process keywords like tags and references
self._skipKeywords = set() # Keywords to ignore
+ self._keepBreaks = True # Keep line breaks in paragraphs
# Margins
self._marginTitle = (1.000, 0.500)
@@ -409,6 +411,11 @@ def setIgnoredKeywords(self, keywords: str) -> None:
self._skipKeywords = set(x.lower().strip() for x in keywords.split(","))
return
+ def setKeepLineBreaks(self, state: bool) -> None:
+ """Keep line breaks in paragraphs."""
+ self._keepBreaks = state
+ return
+
def setKeepMarkdown(self, state: bool) -> None:
"""Keep original markdown during build."""
self._keepMD = state
@@ -490,7 +497,6 @@ def tokenizeText(self) -> None:
4: The internal formatting map of the text, self.FMT_*
5: The style of the block, self.A_*
"""
- self._tokens = []
if self._isNovel:
self._hFormatter.setHandle(self._handle)
@@ -498,12 +504,13 @@ def tokenizeText(self) -> None:
breakNext = False
tmpMarkdown = []
tHandle = self._handle or ""
+ tokens: list[T_Token] = []
for aLine in self._text.splitlines():
sLine = aLine.strip().lower()
# Check for blank lines
if len(sLine) == 0:
- self._tokens.append((
+ tokens.append((
self.T_EMPTY, nHead, "", [], self.A_NONE
))
if self._keepMD:
@@ -532,7 +539,7 @@ def tokenizeText(self) -> None:
continue
elif sLine == "[vspace]":
- self._tokens.append(
+ tokens.append(
(self.T_SKIP, nHead, "", [], sAlign)
)
continue
@@ -540,11 +547,11 @@ def tokenizeText(self) -> None:
elif sLine.startswith("[vspace:") and sLine.endswith("]"):
nSkip = checkInt(sLine[8:-1], 0)
if nSkip >= 1:
- self._tokens.append(
+ tokens.append(
(self.T_SKIP, nHead, "", [], sAlign)
)
if nSkip > 1:
- self._tokens += (nSkip - 1) * [
+ tokens += (nSkip - 1) * [
(self.T_SKIP, nHead, "", [], self.A_NONE)
]
continue
@@ -561,14 +568,14 @@ def tokenizeText(self) -> None:
cStyle, cKey, cText, _, _ = processComment(aLine)
if cStyle == nwComment.SYNOPSIS:
tLine, tFmt = self._extractFormats(cText)
- self._tokens.append((
+ tokens.append((
self.T_SYNOPSIS, nHead, tLine, tFmt, sAlign
))
if self._doSynopsis and self._keepMD:
tmpMarkdown.append(f"{aLine}\n")
elif cStyle == nwComment.SHORT:
tLine, tFmt = self._extractFormats(cText)
- self._tokens.append((
+ tokens.append((
self.T_SHORT, nHead, tLine, tFmt, sAlign
))
if self._doSynopsis and self._keepMD:
@@ -580,7 +587,7 @@ def tokenizeText(self) -> None:
tmpMarkdown.append(f"{aLine}\n")
else:
tLine, tFmt = self._extractFormats(cText)
- self._tokens.append((
+ tokens.append((
self.T_COMMENT, nHead, tLine, tFmt, sAlign
))
if self._doComments and self._keepMD:
@@ -594,7 +601,7 @@ def tokenizeText(self) -> None:
valid, bits, _ = self._project.index.scanThis(aLine)
if valid and bits and bits[0] not in self._skipKeywords:
- self._tokens.append((
+ tokens.append((
self.T_KEYWORD, nHead, aLine[1:].strip(), [], sAlign
))
if self._doKeywords and self._keepMD:
@@ -630,7 +637,7 @@ def tokenizeText(self) -> None:
self._hFormatter.resetAll()
self._noSep = True
- self._tokens.append((
+ tokens.append((
tType, nHead, tText, [], tStyle
))
if self._keepMD:
@@ -665,7 +672,7 @@ def tokenizeText(self) -> None:
self._hFormatter.resetScene()
self._noSep = True
- self._tokens.append((
+ tokens.append((
tType, nHead, tText, [], tStyle
))
if self._keepMD:
@@ -706,7 +713,7 @@ def tokenizeText(self) -> None:
tStyle = self.A_NONE if self._noSep else self.A_CENTRE
self._noSep = False
- self._tokens.append((
+ tokens.append((
tType, nHead, tText, [], tStyle
))
if self._keepMD:
@@ -736,7 +743,7 @@ def tokenizeText(self) -> None:
tType = self.T_SEP
tStyle = self.A_CENTRE
- self._tokens.append((
+ tokens.append((
tType, nHead, tText, [], tStyle
))
if self._keepMD:
@@ -784,26 +791,26 @@ def tokenizeText(self) -> None:
# Process formats
tLine, tFmt = self._extractFormats(aLine)
- self._tokens.append((
+ tokens.append((
self.T_TEXT, nHead, tLine, tFmt, sAlign
))
if self._keepMD:
tmpMarkdown.append(f"{aLine}\n")
# If we have content, turn off the first page flag
- if self._isFirst and self._tokens:
+ if self._isFirst and tokens:
self._isFirst = False # First document has been processed
# Make sure the token array doesn't start with a page break
# on the very first page, adding a blank first page.
- if self._tokens[0][4] & self.A_PBB:
- token = self._tokens[0]
- self._tokens[0] = (
- token[0], token[1], token[2], token[3], token[4] & ~self.A_PBB
+ if tokens[0][4] & self.A_PBB:
+ cToken = tokens[0]
+ tokens[0] = (
+ cToken[0], cToken[1], cToken[2], cToken[3], cToken[4] & ~self.A_PBB
)
# Always add an empty line at the end of the file
- self._tokens.append((
+ tokens.append((
self.T_EMPTY, nHead, "", [], self.A_NONE
))
if self._keepMD:
@@ -812,27 +819,62 @@ def tokenizeText(self) -> None:
# Second Pass
# ===========
- # Some items need a second pass
+ # This second pass strips away consecutive blank lines, and
+ # combines consecutive text lines into the same paragraph.
+ # It also ensures that there isn't paragraph spacing between
+ # meta data lines for formats that have spacing.
+
+ self._tokens = []
+ pToken: T_Token = (self.T_EMPTY, 0, "", [], self.A_NONE)
+ nToken: T_Token = (self.T_EMPTY, 0, "", [], self.A_NONE)
- pToken = (self.T_EMPTY, 0, "", [], self.A_NONE)
- nToken = (self.T_EMPTY, 0, "", [], self.A_NONE)
- tCount = len(self._tokens)
- for n, token in enumerate(self._tokens):
+ lineSep = "\n" if self._keepBreaks else " "
+ pLines: list[T_Token] = []
+
+ tCount = len(tokens)
+ for n, cToken in enumerate(tokens):
if n > 0:
- pToken = self._tokens[n-1]
+ pToken = tokens[n-1] # Look behind
if n < tCount - 1:
- nToken = self._tokens[n+1]
+ nToken = tokens[n+1] # Look ahead
+
+ if cToken[0] == self.T_EMPTY:
+ # We don't need to keep the empty lines after this pass
+ pass
- if token[0] == self.T_KEYWORD:
- aStyle = token[4]
+ elif cToken[0] == self.T_KEYWORD:
+ # Adjust margins for lines in a list of keyword lines
+ aStyle = cToken[4]
if pToken[0] == self.T_KEYWORD:
aStyle |= self.A_Z_TOPMRG
if nToken[0] == self.T_KEYWORD:
aStyle |= self.A_Z_BTMMRG
- self._tokens[n] = (
- token[0], token[1], token[2], token[3], aStyle
- )
+ self._tokens.append((
+ cToken[0], cToken[1], cToken[2], cToken[3], aStyle
+ ))
+
+ elif cToken[0] == self.T_TEXT:
+ # Combine lines from the same paragraph
+ pLines.append(cToken)
+ if nToken[0] != self.T_TEXT:
+ nLines = len(pLines)
+ if nLines == 1:
+ self._tokens.append(pLines[0])
+ elif nLines > 1:
+ tTxt = ""
+ tFmt: T_Formats = []
+ for aToken in pLines:
+ tLen = len(tTxt)
+ tTxt += f"{aToken[2]}{lineSep}"
+ tFmt.extend((p+tLen, fmt, key) for p, fmt, key in aToken[3])
+ self._tokens.append((
+ self.T_TEXT, pLines[0][1], tTxt[:-1], tFmt, pLines[0][4]
+ ))
+ pLines = []
+
+ else:
+ self._tokens.append(cToken)
return
@@ -875,7 +917,6 @@ def countStats(self) -> None:
textWordChars = self._counts.get("textWordChars", 0)
titleWordChars = self._counts.get("titleWordChars", 0)
- para = []
for tType, _, tText, _, _ in self._tokens:
tText = tText.replace(nwUnicode.U_ENDASH, " ")
tText = tText.replace(nwUnicode.U_EMDASH, " ")
@@ -885,22 +926,19 @@ def countStats(self) -> None:
nChars = len(tText)
nWChars = len("".join(tWords))
- if tType == self.T_EMPTY:
- if len(para) > 0:
- tTemp = "\n".join(para)
- tPWords = tTemp.split()
- nPWords = len(tPWords)
- nPChars = len(tTemp)
- nPWChars = len("".join(tPWords))
-
- paragraphCount += 1
- allWords += nPWords
- textWords += nPWords
- allChars += nPChars
- textChars += nPChars
- allWordChars += nPWChars
- textWordChars += nPWChars
- para = []
+ if tType == self.T_TEXT:
+ tPWords = tText.split()
+ nPWords = len(tPWords)
+ nPChars = len(tText)
+ nPWChars = len("".join(tPWords))
+
+ paragraphCount += 1
+ allWords += nPWords
+ textWords += nPWords
+ allChars += nPChars
+ textChars += nPChars
+ allWordChars += nPWChars
+ textWordChars += nPWChars
elif tType in self.L_HEADINGS:
titleCount += 1
@@ -916,9 +954,6 @@ def countStats(self) -> None:
allChars += nChars
allWordChars += nWChars
- elif tType == self.T_TEXT:
- para.append(tText.rstrip())
-
elif tType == self.T_SYNOPSIS and self._doSynopsis:
text = "{0}: {1}".format(self._localLookup("Synopsis"), tText)
words = text.split()
diff --git a/novelwriter/core/tomd.py b/novelwriter/core/tomarkdown.py
similarity index 95%
rename from novelwriter/core/tomd.py
rename to novelwriter/core/tomarkdown.py
index 371b6f7c5..440da0e06 100644
--- a/novelwriter/core/tomd.py
+++ b/novelwriter/core/tomarkdown.py
@@ -139,17 +139,12 @@ def doConvert(self) -> None:
mTags = EXT_MD
cSkip = nwUnicode.U_MMSP
- para = []
lines = []
- lineSep = "  \n" if self._preserveBreaks else " "
-
for tType, _, tText, tFormat, tStyle in self._tokens:
- if tType == self.T_EMPTY:
- if para:
- tTemp = (lineSep.join(para)).rstrip(" ")
- lines.append(f"{tTemp}\n\n")
- para = []
+ if tType == self.T_TEXT:
+ tTemp = self._formatText(tText, tFormat, mTags).replace("\n", "  \n")
+ lines.append(f"{tTemp}\n\n")
elif tType == self.T_TITLE:
tHead = tText.replace(nwHeadFmt.BR, "\n")
@@ -177,9 +172,6 @@ def doConvert(self) -> None:
elif tType == self.T_SKIP:
lines.append(f"{cSkip}\n\n")
- elif tType == self.T_TEXT:
- para.append(self._formatText(tText, tFormat, mTags).rstrip())
-
elif tType == self.T_SYNOPSIS and self._doSynopsis:
label = self._localLookup("Synopsis")
lines.append(f"**{label}:** {self._formatText(tText, tFormat, mTags)}\n\n")
diff --git a/novelwriter/core/toodt.py b/novelwriter/core/toodt.py
index b30bbc0d4..cd81d100e 100644
--- a/novelwriter/core/toodt.py
+++ b/novelwriter/core/toodt.py
@@ -96,6 +96,18 @@ def _mkTag(ns: str, tag: str) -> str:
M_SUP = ~X_SUP
M_SUB = ~X_SUB
+# ODT Styles
+S_TITLE = "Title"
+S_HEAD1 = "Heading_20_1"
+S_HEAD2 = "Heading_20_2"
+S_HEAD3 = "Heading_20_3"
+S_HEAD4 = "Heading_20_4"
+S_SEP = "Separator"
+S_FIND = "First_20_line_20_indent"
+S_TEXT = "Text_20_body"
+S_META = "Text_20_Meta"
+S_HNF = "Header_20_and_20_Footer"
+
class ToOdt(Tokenizer):
"""Core: Open Document Writer
@@ -406,11 +418,8 @@ def doConvert(self) -> None:
"""Convert the list of text tokens into XML elements."""
self._result = "" # Not used, but cleared just in case
- pFmt: list[T_Formats] = []
- pText = []
- pStyle = None
- pIndent = True
xText = self._xText
+ pIndent = True
for tType, _, tText, tFormat, tStyle in self._tokens:
# Styles
@@ -445,79 +454,55 @@ def doConvert(self) -> None:
pIndent = False
# Process Text Types
- if tType == self.T_EMPTY:
- if len(pText) > 1 and pStyle is not None:
- if self._doJustify:
- pStyle.setTextAlign("left")
-
- if len(pText) > 0 and pStyle is not None:
- tTxt = ""
- tFmt: T_Formats = []
- for nText, nFmt in zip(pText, pFmt):
- tLen = len(tTxt)
- tTxt += f"{nText}\n"
- tFmt.extend((p+tLen, fmt, key) for p, fmt, key in nFmt)
-
- # Don't indent a paragraph if it has alignment set
- tIndent = self._firstIndent and pIndent and pStyle.isUnaligned()
- self._addTextPar(
- xText, "First_20_line_20_indent" if tIndent else "Text_20_body",
- pStyle, tTxt.rstrip(), tFmt=tFmt
- )
- pIndent = True
-
- pFmt = []
- pText = []
- pStyle = None
+ if tType == self.T_TEXT:
+ if self._firstIndent and pIndent and oStyle.isUnaligned():
+ self._addTextPar(xText, S_FIND, oStyle, tText, tFmt=tFormat)
+ else:
+ self._addTextPar(xText, S_TEXT, oStyle, tText, tFmt=tFormat)
+ pIndent = True
elif tType == self.T_TITLE:
# Title must be text:p
tHead = tText.replace(nwHeadFmt.BR, "\n")
- self._addTextPar(xText, "Title", oStyle, tHead, isHead=False)
+ self._addTextPar(xText, S_TITLE, oStyle, tHead, isHead=False)
elif tType == self.T_HEAD1:
tHead = tText.replace(nwHeadFmt.BR, "\n")
- self._addTextPar(xText, "Heading_20_1", oStyle, tHead, isHead=True, oLevel="1")
+ self._addTextPar(xText, S_HEAD1, oStyle, tHead, isHead=True, oLevel="1")
elif tType == self.T_HEAD2:
tHead = tText.replace(nwHeadFmt.BR, "\n")
- self._addTextPar(xText, "Heading_20_2", oStyle, tHead, isHead=True, oLevel="2")
+ self._addTextPar(xText, S_HEAD2, oStyle, tHead, isHead=True, oLevel="2")
elif tType == self.T_HEAD3:
tHead = tText.replace(nwHeadFmt.BR, "\n")
- self._addTextPar(xText, "Heading_20_3", oStyle, tHead, isHead=True, oLevel="3")
+ self._addTextPar(xText, S_HEAD3, oStyle, tHead, isHead=True, oLevel="3")
elif tType == self.T_HEAD4:
tHead = tText.replace(nwHeadFmt.BR, "\n")
- self._addTextPar(xText, "Heading_20_4", oStyle, tHead, isHead=True, oLevel="4")
+ self._addTextPar(xText, S_HEAD4, oStyle, tHead, isHead=True, oLevel="4")
elif tType == self.T_SEP:
- self._addTextPar(xText, "Separator", oStyle, tText)
+ self._addTextPar(xText, S_SEP, oStyle, tText)
elif tType == self.T_SKIP:
- self._addTextPar(xText, "Separator", oStyle, "")
-
- elif tType == self.T_TEXT:
- if pStyle is None:
- pStyle = oStyle
- pText.append(tText)
- pFmt.append(tFormat)
+ self._addTextPar(xText, S_SEP, oStyle, "")
elif tType == self.T_SYNOPSIS and self._doSynopsis:
tTemp, tFmt = self._formatSynopsis(tText, tFormat, True)
- self._addTextPar(xText, "Text_20_Meta", oStyle, tTemp, tFmt=tFmt)
+ self._addTextPar(xText, S_META, oStyle, tTemp, tFmt=tFmt)
elif tType == self.T_SHORT and self._doSynopsis:
tTemp, tFmt = self._formatSynopsis(tText, tFormat, False)
- self._addTextPar(xText, "Text_20_Meta", oStyle, tTemp, tFmt=tFmt)
+ self._addTextPar(xText, S_META, oStyle, tTemp, tFmt=tFmt)
elif tType == self.T_COMMENT and self._doComments:
tTemp, tFmt = self._formatComments(tText, tFormat)
- self._addTextPar(xText, "Text_20_Meta", oStyle, tTemp, tFmt=tFmt)
+ self._addTextPar(xText, S_META, oStyle, tTemp, tFmt=tFmt)
elif tType == self.T_KEYWORD and self._doKeywords:
tTemp, tFmt = self._formatKeywords(tText)
- self._addTextPar(xText, "Text_20_Meta", oStyle, tTemp, tFmt=tFmt)
+ self._addTextPar(xText, S_META, oStyle, tTemp, tFmt=tFmt)
return
@@ -847,7 +832,7 @@ def _defaultStyles(self) -> None:
_mkTag("style", "name"): "Heading",
_mkTag("style", "family"): "paragraph",
_mkTag("style", "parent-style-name"): "Standard",
- _mkTag("style", "next-style-name"): "Text_20_body",
+ _mkTag("style", "next-style-name"): S_TEXT,
_mkTag("style", "class"): "text",
})
ET.SubElement(xStyl, _mkTag("style", "paragraph-properties"), attrib={
@@ -863,7 +848,7 @@ def _defaultStyles(self) -> None:
# Add Header and Footer Styles
ET.SubElement(self._xStyl, _mkTag("style", "style"), attrib={
- _mkTag("style", "name"): "Header_20_and_20_Footer",
+ _mkTag("style", "name"): S_HNF,
_mkTag("style", "display-name"): "Header and Footer",
_mkTag("style", "family"): "paragraph",
_mkTag("style", "parent-style-name"): "Standard",
@@ -875,7 +860,7 @@ def _defaultStyles(self) -> None:
def _useableStyles(self) -> None:
"""Set the usable styles."""
# Add Text Body Style
- style = ODTParagraphStyle("Text_20_body")
+ style = ODTParagraphStyle(S_TEXT)
style.setDisplayName("Text body")
style.setParentStyleName("Standard")
style.setClass("text")
@@ -890,16 +875,16 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add First Line Indent Style
- style = ODTParagraphStyle("First_20_line_20_indent")
+ style = ODTParagraphStyle(S_FIND)
style.setDisplayName("First line indent")
- style.setParentStyleName("Text_20_body")
+ style.setParentStyleName(S_TEXT)
style.setClass("text")
style.setTextIndent(self._fTextIndent)
style.packXML(self._xStyl)
self._mainPara[style.name] = style
# Add Text Meta Style
- style = ODTParagraphStyle("Text_20_Meta")
+ style = ODTParagraphStyle(S_META)
style.setDisplayName("Text Meta")
style.setParentStyleName("Standard")
style.setClass("text")
@@ -915,10 +900,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Title Style
- style = ODTParagraphStyle("Title")
+ style = ODTParagraphStyle(S_TITLE)
style.setDisplayName("Title")
style.setParentStyleName("Heading")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setClass("chapter")
style.setMarginTop(self._mTopTitle)
style.setMarginBottom(self._mBotTitle)
@@ -931,10 +916,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Separator Style
- style = ODTParagraphStyle("Separator")
+ style = ODTParagraphStyle(S_SEP)
style.setDisplayName("Separator")
style.setParentStyleName("Standard")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setClass("text")
style.setMarginTop(self._mTopText)
style.setMarginBottom(self._mBotText)
@@ -947,10 +932,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Heading 1 Style
- style = ODTParagraphStyle("Heading_20_1")
+ style = ODTParagraphStyle(S_HEAD1)
style.setDisplayName("Heading 1")
style.setParentStyleName("Heading")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setOutlineLevel("1")
style.setClass("text")
style.setMarginTop(self._mTopHead1)
@@ -965,10 +950,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Heading 2 Style
- style = ODTParagraphStyle("Heading_20_2")
+ style = ODTParagraphStyle(S_HEAD2)
style.setDisplayName("Heading 2")
style.setParentStyleName("Heading")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setOutlineLevel("2")
style.setClass("text")
style.setMarginTop(self._mTopHead2)
@@ -983,10 +968,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Heading 3 Style
- style = ODTParagraphStyle("Heading_20_3")
+ style = ODTParagraphStyle(S_HEAD3)
style.setDisplayName("Heading 3")
style.setParentStyleName("Heading")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setOutlineLevel("3")
style.setClass("text")
style.setMarginTop(self._mTopHead3)
@@ -1001,10 +986,10 @@ def _useableStyles(self) -> None:
self._mainPara[style.name] = style
# Add Heading 4 Style
- style = ODTParagraphStyle("Heading_20_4")
+ style = ODTParagraphStyle(S_HEAD4)
style.setDisplayName("Heading 4")
style.setParentStyleName("Heading")
- style.setNextStyleName("Text_20_body")
+ style.setNextStyleName(S_TEXT)
style.setOutlineLevel("4")
style.setClass("text")
style.setMarginTop(self._mTopHead4)
@@ -1021,7 +1006,7 @@ def _useableStyles(self) -> None:
# Add Header Style
style = ODTParagraphStyle("Header")
style.setDisplayName("Header")
- style.setParentStyleName("Header_20_and_20_Footer")
+ style.setParentStyleName(S_HNF)
style.setTextAlign("right")
style.packXML(self._xStyl)
self._mainPara[style.name] = style
diff --git a/novelwriter/gui/dochighlight.py b/novelwriter/gui/dochighlight.py
index 0e731032e..c00f7b440 100644
--- a/novelwriter/gui/dochighlight.py
+++ b/novelwriter/gui/dochighlight.py
@@ -36,7 +36,7 @@
from novelwriter import CONFIG, SHARED
from novelwriter.common import checkInt
-from novelwriter.constants import nwRegEx, nwUnicode
+from novelwriter.constants import nwHeaders, nwRegEx, nwUnicode
from novelwriter.core.index import processComment
from novelwriter.enum import nwComment
from novelwriter.types import QRegExUnicode
@@ -95,14 +95,14 @@ def initHighlighter(self) -> None:
# Create Character Formats
self._addCharFormat("text", SHARED.theme.colText)
- self._addCharFormat("header1", SHARED.theme.colHead, "b", 1.8)
- self._addCharFormat("header2", SHARED.theme.colHead, "b", 1.6)
- self._addCharFormat("header3", SHARED.theme.colHead, "b", 1.4)
- self._addCharFormat("header4", SHARED.theme.colHead, "b", 1.2)
- self._addCharFormat("head1h", SHARED.theme.colHeadH, "b", 1.8)
- self._addCharFormat("head2h", SHARED.theme.colHeadH, "b", 1.6)
- self._addCharFormat("head3h", SHARED.theme.colHeadH, "b", 1.4)
- self._addCharFormat("head4h", SHARED.theme.colHeadH, "b", 1.2)
+ self._addCharFormat("header1", SHARED.theme.colHead, "b", nwHeaders.H_SIZES[1])
+ self._addCharFormat("header2", SHARED.theme.colHead, "b", nwHeaders.H_SIZES[2])
+ self._addCharFormat("header3", SHARED.theme.colHead, "b", nwHeaders.H_SIZES[3])
+ self._addCharFormat("header4", SHARED.theme.colHead, "b", nwHeaders.H_SIZES[4])
+ self._addCharFormat("head1h", SHARED.theme.colHeadH, "b", nwHeaders.H_SIZES[1])
+ self._addCharFormat("head2h", SHARED.theme.colHeadH, "b", nwHeaders.H_SIZES[2])
+ self._addCharFormat("head3h", SHARED.theme.colHeadH, "b", nwHeaders.H_SIZES[3])
+ self._addCharFormat("head4h", SHARED.theme.colHeadH, "b", nwHeaders.H_SIZES[4])
self._addCharFormat("bold", colEmph, "b")
self._addCharFormat("italic", colEmph, "i")
self._addCharFormat("strike", SHARED.theme.colHidden, "s")
diff --git a/novelwriter/types.py b/novelwriter/types.py
index a9fc015db..e4051f8ef 100644
--- a/novelwriter/types.py
+++ b/novelwriter/types.py
@@ -24,7 +24,7 @@
from __future__ import annotations
from PyQt5.QtCore import QRegularExpression, Qt
-from PyQt5.QtGui import QColor, QPainter, QTextCursor
+from PyQt5.QtGui import QColor, QPainter, QTextCursor, QTextFormat
from PyQt5.QtWidgets import QDialogButtonBox, QSizePolicy, QStyle
# Qt Alignment Flags
@@ -44,6 +44,11 @@
QtAlignRightTop = Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignTop
QtAlignTop = Qt.AlignmentFlag.AlignTop
+# Qt Page Break
+
+QtPageBreakBefore = QTextFormat.PageBreakFlag.PageBreak_AlwaysBefore
+QtPageBreakAfter = QTextFormat.PageBreakFlag.PageBreak_AlwaysAfter
+
# Qt Painter Types
QtTransparent = QColor(0, 0, 0, 0)
diff --git a/tests/test_core/test_core_docbuild.py b/tests/test_core/test_core_docbuild.py
index 6054b2728..57c142513 100644
--- a/tests/test_core/test_core_docbuild.py
+++ b/tests/test_core/test_core_docbuild.py
@@ -31,7 +31,7 @@
from novelwriter.core.docbuild import NWBuildDocument
from novelwriter.core.project import NWProject
from novelwriter.core.tohtml import ToHtml
-from novelwriter.core.tomd import ToMarkdown
+from novelwriter.core.tomarkdown import ToMarkdown
from novelwriter.core.toodt import ToOdt
from novelwriter.enum import nwBuildFmt
diff --git a/tests/test_core/test_core_tohtml.py b/tests/test_core/test_core_tohtml.py
index a5df4696e..2cea33282 100644
--- a/tests/test_core/test_core_tohtml.py
+++ b/tests/test_core/test_core_tohtml.py
@@ -165,11 +165,11 @@ def testCoreToHtml_ConvertParagraphs(mockGUI):
)
# Text w/Hard Break
- html._text = "Line one \nLine two \nLine three\n"
+ html._text = "Line one\nLine two\nLine three\n"
html.tokenizeText()
html.doConvert()
assert html.result == (
- "<p>Line one<br/>Line two<br/>Line three</p>\n"
+ "<p>Line one<br>Line two<br>Line three</p>\n"
)
# Synopsis, Short
diff --git a/tests/test_core/test_core_tokenizer.py b/tests/test_core/test_core_tokenizer.py
index 0d70eab1f..388b8843a 100644
--- a/tests/test_core/test_core_tokenizer.py
+++ b/tests/test_core/test_core_tokenizer.py
@@ -27,7 +27,7 @@
from novelwriter.constants import nwHeadFmt
from novelwriter.core.project import NWProject
from novelwriter.core.tokenizer import HeadingFormatter, Tokenizer, stripEscape
-from novelwriter.core.tomd import ToMarkdown
+from novelwriter.core.tomarkdown import ToMarkdown
from tests.tools import C, buildTestProject, readFile
@@ -262,7 +262,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_TITLE, 1, "Novel Title", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#! Novel Title\n\n"
@@ -274,7 +273,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_TITLE, 1, "Note Title", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#! Note Title\n\n"
@@ -289,7 +287,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Novel Title", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "# Novel Title\n\n"
@@ -301,7 +298,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Note Title", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "# Note Title\n\n"
@@ -315,7 +311,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Chapter One", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "## Chapter One\n\n"
@@ -326,7 +321,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Heading 2", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "## Heading 2\n\n"
@@ -340,7 +334,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD3, 1, "Scene One", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "### Scene One\n\n"
@@ -351,7 +344,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD3, 1, "Heading 3", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "### Heading 3\n\n"
@@ -365,7 +357,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD4, 1, "A Section", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#### A Section\n\n"
@@ -376,7 +367,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD4, 1, "Heading 4", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#### Heading 4\n\n"
@@ -391,7 +381,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_TITLE, 1, "Title", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#! Title\n\n"
@@ -403,7 +392,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_TITLE, 1, "Title", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "#! Title\n\n"
@@ -417,7 +405,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Prologue", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "##! Prologue\n\n"
@@ -428,7 +415,6 @@ def testCoreToken_HeaderFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Prologue", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "##! Prologue\n\n"
@@ -706,7 +692,6 @@ def testCoreToken_MetaFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_COMMENT, 0, "A comment", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "\n"
@@ -717,9 +702,7 @@ def testCoreToken_MetaFormat(mockGUI):
# Ignore Text
tokens._text = "%~ Some text\n"
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
assert tokens.allMarkdown[-1] == "\n"
# Synopsis
@@ -727,13 +710,11 @@ def testCoreToken_MetaFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SYNOPSIS, 0, "The synopsis", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
tokens._text = "% synopsis: The synopsis\n"
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SYNOPSIS, 0, "The synopsis", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "\n"
@@ -747,7 +728,6 @@ def testCoreToken_MetaFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SHORT, 0, "A short description", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "\n"
@@ -760,7 +740,6 @@ def testCoreToken_MetaFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_KEYWORD, 0, "char: Bod", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "\n"
@@ -777,7 +756,6 @@ def testCoreToken_MetaFormat(mockGUI):
(Tokenizer.T_KEYWORD, 0, "pov: Bod", [], styTop),
(Tokenizer.T_KEYWORD, 0, "plot: Main", [], styMid),
(Tokenizer.T_KEYWORD, 0, "location: Europe", [], styBtm),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "@pov: Bod\n@plot: Main\n@location: Europe\n\n"
@@ -787,7 +765,6 @@ def testCoreToken_MetaFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_KEYWORD, 0, "pov: Bod", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
@@ -814,22 +791,13 @@ def testCoreToken_MarginFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_TEXT, 0, "Some regular text", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Some left-aligned text", [], Tokenizer.A_LEFT),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Some right-aligned text", [], Tokenizer.A_RIGHT),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Some centered text", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Left-indented block", [], Tokenizer.A_IND_L),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Right-indented block", [], Tokenizer.A_IND_R),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Double-indented block", [], dblIndent),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 0, "Right-indent, right-aligned", [], rIndAlign),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == (
"Some regular text\n\n"
@@ -950,6 +918,79 @@ def testCoreToken_ExtractFormats(mockGUI):
]
+@pytest.mark.core
+def testCoreToken_Paragraphs(mockGUI):
+ """Test the splitting of paragraphs."""
+ project = NWProject()
+ tokens = BareTokenizer(project)
+ tokens.setKeepMarkdown(True)
+
+ # Collapse empty lines
+ tokens._text = "First paragraph\n\n\nSecond paragraph\n\n\n"
+ tokens.tokenizeText()
+ assert tokens._tokens == [
+ (Tokenizer.T_TEXT, 0, "First paragraph", [], Tokenizer.A_NONE),
+ (Tokenizer.T_TEXT, 0, "Second paragraph", [], Tokenizer.A_NONE),
+ ]
+
+ # Combine multi-line paragraphs, keep breaks
+ tokens._text = "This is text\nspanning multiple\nlines"
+ tokens.setKeepLineBreaks(True)
+ tokens.tokenizeText()
+ assert tokens._tokens == [
+ (Tokenizer.T_TEXT, 0, "This is text\nspanning multiple\nlines", [], Tokenizer.A_NONE),
+ ]
+
+ # Combine multi-line paragraphs, remove breaks
+ tokens._text = "This is text\nspanning multiple\nlines"
+ tokens.setKeepLineBreaks(False)
+ tokens.tokenizeText()
+ assert tokens._tokens == [
+ (Tokenizer.T_TEXT, 0, "This is text spanning multiple lines", [], Tokenizer.A_NONE),
+ ]
+
+ # Combine multi-line paragraphs, remove breaks, with formatting
+ tokens._text = "This **is text**\nspanning _multiple_\nlines"
+ tokens.setKeepLineBreaks(False)
+ tokens.tokenizeText()
+ assert tokens._tokens == [
+ (
+ Tokenizer.T_TEXT, 0,
+ "This is text spanning multiple lines",
+ [
+ (5, Tokenizer.FMT_B_B, ""),
+ (12, Tokenizer.FMT_B_E, ""),
+ (22, Tokenizer.FMT_I_B, ""),
+ (30, Tokenizer.FMT_I_E, ""),
+ ],
+ Tokenizer.A_NONE
+ ),
+ ]
+
+ # Make sure titles break a paragraph
+ tokens._text = "# Title\nText _on_\ntwo lines.\n## Chapter\nMore **text**\n_here_.\n\n\n"
+ tokens.setKeepLineBreaks(False)
+ tokens.tokenizeText()
+ assert tokens._tokens == [
+ (Tokenizer.T_HEAD1, 1, "Title", [], Tokenizer.A_NONE),
+ (
+ Tokenizer.T_TEXT, 1, "Text on two lines.", [
+ (5, Tokenizer.FMT_I_B, ""),
+ (7, Tokenizer.FMT_I_E, ""),
+ ], Tokenizer.A_NONE
+ ),
+ (Tokenizer.T_HEAD2, 2, "Chapter", [], Tokenizer.A_NONE),
+ (
+ Tokenizer.T_TEXT, 2, "More text here.", [
+ (5, Tokenizer.FMT_B_B, ""),
+ (9, Tokenizer.FMT_B_E, ""),
+ (10, Tokenizer.FMT_I_B, ""),
+ (14, Tokenizer.FMT_I_E, ""),
+ ], Tokenizer.A_NONE
+ ),
+ ]
+
+
@pytest.mark.core
def testCoreToken_TextFormat(mockGUI):
"""Test the tokenization of text formats in the Tokenizer class."""
@@ -961,21 +1002,13 @@ def testCoreToken_TextFormat(mockGUI):
tokens._text = "Some plain text\non two lines\n\n\n"
tokens.tokenizeText()
assert tokens._tokens == [
- (Tokenizer.T_TEXT, 0, "Some plain text", [], Tokenizer.A_NONE),
- (Tokenizer.T_TEXT, 0, "on two lines", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
+ (Tokenizer.T_TEXT, 0, "Some plain text\non two lines", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "Some plain text\non two lines\n\n\n\n"
tokens.setBodyText(False)
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
assert tokens.allMarkdown[-1] == "\n\n\n"
tokens.setBodyText(True)
@@ -992,7 +1025,6 @@ def testCoreToken_TextFormat(mockGUI):
],
Tokenizer.A_NONE
),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "Some **bolded text** on this lines\n\n"
@@ -1008,7 +1040,6 @@ def testCoreToken_TextFormat(mockGUI):
],
Tokenizer.A_NONE
),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "Some _italic text_ on this lines\n\n"
@@ -1026,7 +1057,6 @@ def testCoreToken_TextFormat(mockGUI):
],
Tokenizer.A_NONE
),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "Some **_bold italic text_** on this lines\n\n"
@@ -1042,7 +1072,6 @@ def testCoreToken_TextFormat(mockGUI):
],
Tokenizer.A_NONE
),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == "Some ~~strikethrough text~~ on this lines\n\n"
@@ -1062,7 +1091,6 @@ def testCoreToken_TextFormat(mockGUI):
],
Tokenizer.A_NONE
),
- (Tokenizer.T_EMPTY, 0, "", [], Tokenizer.A_NONE),
]
assert tokens.allMarkdown[-1] == (
"Some **nested bold and _italic_ and ~~strikethrough~~ text** here\n\n"
@@ -1082,11 +1110,7 @@ def testCoreToken_SpecialFormat(mockGUI):
correctResp = [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_HEAD1, 2, "Title Two", [], Tokenizer.A_CENTRE | Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 2, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 2, "", [], Tokenizer.A_NONE),
]
# Command wo/Space
@@ -1130,12 +1154,8 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Multiple Empty Paragraphs
@@ -1150,12 +1170,8 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Three Skips
@@ -1167,14 +1183,10 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Malformed Command, Case 1
@@ -1186,11 +1198,7 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Malformed Command, Case 2
@@ -1202,11 +1210,7 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Malformed Command, Case 3
@@ -1218,11 +1222,7 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Empty Paragraph and Page Break
@@ -1238,13 +1238,8 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], 0),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Multiple Skip
@@ -1257,15 +1252,10 @@ def testCoreToken_SpecialFormat(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_PBB),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
(Tokenizer.T_TEXT, 1, "Some text to go here ...", [], 0),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
@@ -1293,7 +1283,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "T: Part One", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H1: Title, Not First Page
@@ -1303,7 +1292,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD1, 1, "T: Part One", [], Tokenizer.A_PBB | Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Chapters
@@ -1315,7 +1303,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "C: Chapter One", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H2: Unnumbered Chapter
@@ -1324,7 +1311,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "U: Prologue", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H2: Chapter Word Number
@@ -1334,7 +1320,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Chapter One", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H2: Chapter Roman Number Upper Case
@@ -1343,7 +1328,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Chapter II", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H2: Chapter Roman Number Lower Case
@@ -1352,7 +1336,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD2, 1, "Chapter iii", [], Tokenizer.A_PBB),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Scenes
@@ -1364,27 +1347,20 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD3, 1, "S: Scene One", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H3: Scene Hidden wo/Format
tokens._text = "### Scene One\n"
tokens.setSceneFormat("", True)
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
# H3: Scene wo/Format, first
tokens._text = "### Scene One\n"
tokens.setSceneFormat("", False)
tokens._noSep = True
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
# H3: Scene wo/Format, not first
tokens._text = "### Scene One\n"
@@ -1393,7 +1369,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H3: Scene Separator, first
@@ -1401,10 +1376,7 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.setSceneFormat("* * *", False)
tokens._noSep = True
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
# H3: Scene Separator, not first
tokens._text = "### Scene One\n"
@@ -1413,7 +1385,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SEP, 1, "* * *", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H3: Scene w/Absolute Number
@@ -1424,7 +1395,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD3, 1, "Scene 1", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H3: Scene w/Chapter Number
@@ -1435,7 +1405,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD3, 1, "Scene 3.2", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Sections
@@ -1445,10 +1414,7 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens._text = "#### A Section\n"
tokens.setSectionFormat("", True)
tokens.tokenizeText()
- assert tokens._tokens == [
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
- ]
+ assert tokens._tokens == []
# H4: Section Visible wo/Format
tokens._text = "#### A Section\n"
@@ -1456,7 +1422,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SKIP, 1, "", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H4: Section w/Format
@@ -1465,7 +1430,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_HEAD4, 1, "X: A Section", [], Tokenizer.A_NONE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# H4: Section Separator
@@ -1474,7 +1438,6 @@ def testCoreToken_ProcessHeaders(mockGUI):
tokens.tokenizeText()
assert tokens._tokens == [
(Tokenizer.T_SEP, 1, "* * *", [], Tokenizer.A_CENTRE),
- (Tokenizer.T_EMPTY, 1, "", [], Tokenizer.A_NONE),
]
# Check the first scene detector, plain text
@@ -1614,9 +1577,7 @@ def testCoreToken_CountStats(mockGUI, ipsumText):
tokens.setSceneFormat("* * *", False)
tokens.tokenizeText()
tokens.countStats()
- assert [t[2] for t in tokens._tokens] == [
- "Chapter", "", "", "", "Text", "", "* * *", "", "Text", ""
- ]
+ assert [t[2] for t in tokens._tokens] == ["Chapter", "Text", "* * *", "Text"]
assert tokens.textStats == {
"titleCount": 1, "paragraphCount": 2,
"allWords": 6, "textWords": 2, "titleWords": 1,
@@ -1633,9 +1594,7 @@ def testCoreToken_CountStats(mockGUI, ipsumText):
tokens.setSynopsis(True)
tokens.tokenizeText()
tokens.countStats()
- assert [t[2] for t in tokens._tokens] == [
- "Chapter", "", "", "", "Stuff", "", "Text", ""
- ]
+ assert [t[2] for t in tokens._tokens] == ["Chapter", "Stuff", "Text"]
assert tokens.textStats == {
"titleCount": 1, "paragraphCount": 1,
"allWords": 4, "textWords": 1, "titleWords": 1,
@@ -1652,9 +1611,7 @@ def testCoreToken_CountStats(mockGUI, ipsumText):
tokens.setSynopsis(True)
tokens.tokenizeText()
tokens.countStats()
- assert [t[2] for t in tokens._tokens] == [
- "Chapter", "", "", "", "Stuff", "", "Text", ""
- ]
+ assert [t[2] for t in tokens._tokens] == ["Chapter", "Stuff", "Text"]
assert tokens.textStats == {
"titleCount": 1, "paragraphCount": 1,
"allWords": 5, "textWords": 1, "titleWords": 1,
@@ -1671,9 +1628,7 @@ def testCoreToken_CountStats(mockGUI, ipsumText):
tokens.setComments(True)
tokens.tokenizeText()
tokens.countStats()
- assert [t[2] for t in tokens._tokens] == [
- "Chapter", "", "", "", "Stuff", "", "Text", ""
- ]
+ assert [t[2] for t in tokens._tokens] == ["Chapter", "Stuff", "Text"]
assert tokens.textStats == {
"titleCount": 1, "paragraphCount": 1,
"allWords": 4, "textWords": 1, "titleWords": 1,
@@ -1690,9 +1645,7 @@ def testCoreToken_CountStats(mockGUI, ipsumText):
tokens.setKeywords(True)
tokens.tokenizeText()
tokens.countStats()
- assert [t[2] for t in tokens._tokens] == [
- "Chapter", "", "", "", "pov: Jane", "", "Text", ""
- ]
+ assert [t[2] for t in tokens._tokens] == ["Chapter", "pov: Jane", "Text"]
assert tokens.textStats == {
"titleCount": 1, "paragraphCount": 1,
"allWords": 6, "textWords": 1, "titleWords": 1,
diff --git a/tests/test_core/test_core_tomd.py b/tests/test_core/test_core_tomarkdown.py
similarity index 97%
rename from tests/test_core/test_core_tomd.py
rename to tests/test_core/test_core_tomarkdown.py
index 7524502c7..8547f7781 100644
--- a/tests/test_core/test_core_tomd.py
+++ b/tests/test_core/test_core_tomarkdown.py
@@ -23,7 +23,7 @@
import pytest
from novelwriter.core.project import NWProject
-from novelwriter.core.tomd import ToMarkdown
+from novelwriter.core.tomarkdown import ToMarkdown
@pytest.mark.core
@@ -125,14 +125,14 @@ def testCoreToMarkdown_ConvertParagraphs(mockGUI):
)
# Text w/Hard Break
- toMD._text = "Line one \nLine two \nLine three\n"
+ toMD._text = "Line one\nLine two\nLine three\n"
toMD.tokenizeText()
toMD.doConvert()
assert toMD.result == "Line one  \nLine two  \nLine three\n\n"
# Text wo/Hard Break
- toMD._text = "Line one \nLine two \nLine three\n"
- toMD.setPreserveBreaks(False)
+ toMD._text = "Line one\nLine two\nLine three\n"
+ toMD.setKeepLineBreaks(False)
toMD.tokenizeText()
toMD.doConvert()
assert toMD.result == "Line one Line two Line three\n\n"
diff --git a/tests/test_core/test_core_toodt.py b/tests/test_core/test_core_toodt.py
index bc7bbf273..19f2fd20d 100644
--- a/tests/test_core/test_core_toodt.py
+++ b/tests/test_core/test_core_toodt.py
@@ -587,7 +587,7 @@ def getStyle(styleName):
''
'Scene'
'Regular paragraph'
- 'withbreak'
+ 'withbreak'
'Left Align'
''
)
diff --git a/tests/test_gui/test_gui_mainmenu.py b/tests/test_gui/test_gui_mainmenu.py
index 5b0ef5389..84b01b920 100644
--- a/tests/test_gui/test_gui_mainmenu.py
+++ b/tests/test_gui/test_gui_mainmenu.py
@@ -29,13 +29,12 @@
from novelwriter.constants import nwKeyWords, nwUnicode
from novelwriter.enum import nwDocAction, nwDocInsert
from novelwriter.gui.doceditor import GuiDocEditor
-from novelwriter.types import QtMouseLeft
from tests.tools import C, buildTestProject, writeFile
@pytest.mark.gui
-def testGuiMenu_EditFormat(qtbot, monkeypatch, nwGUI, prjLipsum):
+def testGuiMainMenu_EditFormat(qtbot, monkeypatch, nwGUI, prjLipsum):
"""Test the main menu Edit and Format entries."""
monkeypatch.setattr(GuiDocEditor, "hasFocus", lambda *a: True)
@@ -345,84 +344,7 @@ def testGuiMenu_EditFormat(qtbot, monkeypatch, nwGUI, prjLipsum):
@pytest.mark.gui
-def testGuiMenu_ContextMenus(qtbot, nwGUI, prjLipsum):
- """Test the context menus."""
- assert nwGUI.openProject(prjLipsum)
- assert nwGUI.openDocument("4c4f28287af27")
-
- # Editor Context Menu
- cursor = nwGUI.docEditor.textCursor()
- cursor.setPosition(127)
- nwGUI.docEditor.setTextCursor(cursor)
- rect = nwGUI.docEditor.cursorRect()
-
- nwGUI.docEditor._openContextMenu(rect.bottomRight())
- qtbot.mouseClick(nwGUI.docEditor, QtMouseLeft, pos=rect.topLeft())
-
- nwGUI.docEditor._makePosSelection(QTextCursor.WordUnderCursor, rect.center())
- cursor = nwGUI.docEditor.textCursor()
- assert cursor.selectedText() == "imperdiet"
-
- nwGUI.docEditor._makePosSelection(QTextCursor.BlockUnderCursor, rect.center())
- cursor = nwGUI.docEditor.textCursor()
- assert cursor.selectedText() == (
- "Pellentesque nec erat ut nulla posuere commodo. Curabitur nisi augue, imperdiet et porta "
- "imperdiet, efficitur id leo. Cras finibus arcu at nibh commodo congue. Proin suscipit "
- "placerat condimentum. Aenean ante enim, cursus id lorem a, blandit venenatis nibh. "
- "Maecenas suscipit porta elit, sit amet porta felis porttitor eu. Sed a dui nibh. "
- "Phasellus sed faucibus dui. Pellentesque felis nulla, ultrices non efficitur quis, "
- "rutrum id mi. Mauris tempus auctor nisl, in bibendum enim pellentesque sit amet. Proin "
- "nunc lacus, imperdiet nec posuere ac, interdum non lectus."
- )
-
- # Viewer Context Menu
- assert nwGUI.viewDocument("4c4f28287af27")
-
- cursor = nwGUI.docViewer.textCursor()
- cursor.setPosition(127)
- nwGUI.docViewer.setTextCursor(cursor)
- rect = nwGUI.docViewer.cursorRect()
-
- nwGUI.docViewer._openContextMenu(rect.bottomRight())
- qtbot.mouseClick(nwGUI.docViewer, QtMouseLeft, pos=rect.topLeft())
-
- nwGUI.docViewer._makePosSelection(QTextCursor.WordUnderCursor, rect.center())
- cursor = nwGUI.docViewer.textCursor()
- assert cursor.selectedText() == "imperdiet"
-
- nwGUI.docEditor._makePosSelection(QTextCursor.BlockUnderCursor, rect.center())
- cursor = nwGUI.docEditor.textCursor()
- assert cursor.selectedText() == (
- "Pellentesque nec erat ut nulla posuere commodo. Curabitur nisi augue, imperdiet et porta "
- "imperdiet, efficitur id leo. Cras finibus arcu at nibh commodo congue. Proin suscipit "
- "placerat condimentum. Aenean ante enim, cursus id lorem a, blandit venenatis nibh. "
- "Maecenas suscipit porta elit, sit amet porta felis porttitor eu. Sed a dui nibh. "
- "Phasellus sed faucibus dui. Pellentesque felis nulla, ultrices non efficitur quis, "
- "rutrum id mi. Mauris tempus auctor nisl, in bibendum enim pellentesque sit amet. Proin "
- "nunc lacus, imperdiet nec posuere ac, interdum non lectus."
- )
-
- # Navigation History
- assert nwGUI.viewDocument("04468803b92e1")
- assert nwGUI.docViewer.docHandle == "04468803b92e1"
- assert nwGUI.docViewer.docHeader.backButton.isEnabled()
- assert not nwGUI.docViewer.docHeader.forwardButton.isEnabled()
-
- qtbot.mouseClick(nwGUI.docViewer.docHeader.backButton, QtMouseLeft)
- assert nwGUI.docViewer.docHandle == "4c4f28287af27"
- assert not nwGUI.docViewer.docHeader.backButton.isEnabled()
- assert nwGUI.docViewer.docHeader.forwardButton.isEnabled()
-
- qtbot.mouseClick(nwGUI.docViewer.docHeader.forwardButton, QtMouseLeft)
- assert nwGUI.docViewer.docHandle == "04468803b92e1"
- assert nwGUI.docViewer.docHeader.backButton.isEnabled()
- assert not nwGUI.docViewer.docHeader.forwardButton.isEnabled()
-
- # qtbot.stop()
-
-
-@pytest.mark.gui
-def testGuiMenu_Insert(qtbot, monkeypatch, nwGUI, fncPath, projPath, mockRnd):
+def testGuiMainMenu_Insert(qtbot, monkeypatch, nwGUI, fncPath, projPath, mockRnd):
"""Test the Insert menu."""
buildTestProject(nwGUI, projPath)
diff --git a/tests/test_text/test_core_counting.py b/tests/test_text/test_text_counting.py
similarity index 100%
rename from tests/test_text/test_core_counting.py
rename to tests/test_text/test_text_counting.py