From 16bc007c96728aa96001db75c0bf9829de3e046a Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Fri, 5 Aug 2022 16:16:54 +0200 Subject: [PATCH 1/7] adds monarch highlighter file generation --- examples/statemachine/langium-config.json | 3 + .../syntaxes/statemachine.monarch.ts | 39 ++ packages/langium-cli/src/generate.ts | 26 +- .../highlighting/monarch-generator.ts | 476 ++++++++++++++++++ .../{ => highlighting}/textmate-generator.ts | 70 +-- packages/langium-cli/src/package.ts | 5 + 6 files changed, 580 insertions(+), 39 deletions(-) create mode 100644 examples/statemachine/syntaxes/statemachine.monarch.ts create mode 100644 packages/langium-cli/src/generator/highlighting/monarch-generator.ts rename packages/langium-cli/src/generator/{ => highlighting}/textmate-generator.ts (96%) diff --git a/examples/statemachine/langium-config.json b/examples/statemachine/langium-config.json index b75ddcb54..2d6407bb1 100644 --- a/examples/statemachine/langium-config.json +++ b/examples/statemachine/langium-config.json @@ -8,6 +8,9 @@ "fileExtensions": [".statemachine"], "textMate": { "out": "./syntaxes/statemachine.tmLanguage.json" + }, + "monarch": { + "out": "./syntaxes/statemachine.monarch.ts" } } ] diff --git a/examples/statemachine/syntaxes/statemachine.monarch.ts b/examples/statemachine/syntaxes/statemachine.monarch.ts new file mode 100644 index 000000000..f3567cbc8 --- /dev/null +++ b/examples/statemachine/syntaxes/statemachine.monarch.ts @@ -0,0 +1,39 @@ +// Monarch syntax highlighting for the statemachine language. 
+export const languagestatemachine = { + + keywords: [ + 'actions','commands','end','events','initialState','state','statemachine' + ], + operators: [ + '=>' + ], + symbols: /{|}|=>/, + + folding: { + markers: { + start: new RegExp('^\s*//\s*#?region\b'), + end: new RegExp('^\s*//\s*#?endregion\b') + } + }, + + tokenizer: { + initial: [ + { regex: /[_a-zA-Z][\w_]*/, action: { cases: { '@keywords': {"token":"keyword"}, '@default': {"token":"identifier"} }} }, + { regex: /[0-9]+/, action: {"token":"number"} }, + { regex: /"[^"]*"|'[^']*'/, action: {"token":"string"} }, + { include: '@whitespace' }, + { regex: /@symbols/, action: { cases: { '@operators': {"token":"operator"}, '@default': {"token":""} }} }, + ], + whitespace: [ + { regex: /\s+/, action: {"token":"white"} }, + { regex: /\/\*/, action: {"token":"comment","next":"@comment"} }, + { regex: /\/\/[^\n\r]*/, action: {"token":"comment"} }, + ], + comment: [ + { regex: /[^\/\*]+/, action: {"token":"comment"} }, + { regex: /\/\*/, action: {"token":"comment","next":"@push"} }, + { regex: /\*\//, action: {"token":"comment","next":"@pop"} }, + { regex: /[\/\*]/, action: {"token":"comment"} }, + ], + } +}; \ No newline at end of file diff --git a/packages/langium-cli/src/generate.ts b/packages/langium-cli/src/generate.ts index eb5c8c911..e827b61ac 100644 --- a/packages/langium-cli/src/generate.ts +++ b/packages/langium-cli/src/generate.ts @@ -15,7 +15,8 @@ import { URI } from 'vscode-uri'; import { generateAst } from './generator/ast-generator'; import { serializeGrammar } from './generator/grammar-serializer'; import { generateModule } from './generator/module-generator'; -import { generateTextMate } from './generator/textmate-generator'; +import { generateTextMate } from './generator/highlighting/textmate-generator'; +import { generateMonarch } from './generator/highlighting/monarch-generator'; import { getUserChoice, log } from './generator/util'; import { getFilePath, LangiumConfig, LangiumLanguageConfig, 
RelativePath } from './package'; import { validateParser } from './parser-validation'; @@ -190,15 +191,32 @@ export async function generate(config: LangiumConfig, options: GenerateOptions): const genTmGrammar = generateTextMate(grammar, languageConfig); const textMatePath = path.resolve(relPath, languageConfig.textMate.out); log('log', options, `Writing textmate grammar to ${chalk.white.bold(textMatePath)}`); - const parentDir = path.dirname(textMatePath).split(path.sep).pop(); - parentDir && await mkdirWithFail(parentDir, options); - await writeWithFail(textMatePath, genTmGrammar, options); + await writeHighlightGrammar(genTmGrammar, textMatePath, options); + } + + if(languageConfig?.monarch) { + const genMonarchGrammar = generateMonarch(grammar, languageConfig); + const monarchPath = path.resolve(relPath, languageConfig.monarch.out); + log('log', options, `Writing monarch grammar to ${chalk.white.bold(monarchPath)}`); + await writeHighlightGrammar(genMonarchGrammar, monarchPath, options); } } return 'success'; } +/** + * Writes contents of a grammar for syntax highlighting to a file, logging any errors and continuing without throwing + * @param grammar Grammar contents to write + * @param grammarPath Path to write, verifying the parent dir exists first + * @param options Generation options + */ +async function writeHighlightGrammar(grammar: string, grammarPath: string, options: GenerateOptions): Promise { + const parentDir = path.dirname(grammarPath).split(path.sep).pop(); + parentDir && await mkdirWithFail(parentDir, options); + await writeWithFail(grammarPath, grammar, options); +} + async function rmdirWithFail(dirPath: string, expectedFiles: string[], options: GenerateOptions): Promise { try { let deleteDir = true; diff --git a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts new file mode 100644 index 000000000..a9b787254 --- /dev/null +++ 
b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts @@ -0,0 +1,476 @@ +/****************************************************************************** + * Copyright 2022 TypeFox GmbH + * This program and the accompanying materials are made available under the + * terms of the MIT License, which is available in the project root. + ******************************************************************************/ + +import * as langium from 'langium'; +import { getTerminalParts, isCommentTerminal, isRegexToken, isTerminalRule, terminalRegex } from 'langium'; +import { LangiumLanguageConfig } from '../../package'; +import { collectKeywords } from '../util'; + +/** + * Monarch Language Definition, describes aspects & token categories of target language + */ +interface LanguageDefinition { + readonly name: string; + readonly keywords: string[]; + readonly operators: string[]; + readonly symbols: string[]; + readonly tokenPostfix: string; +} + +/** + * Monarch Tokenizer, consists of an object that defines states. + */ +interface Tokenizer { + states: State[] +} + +/** + * Name of a State + */ +type StateName = string; + +/** + * Each state is defined as an array of rules which are used to match the input + * Rules can be regular, or other States whose rules we should include in this state + */ +interface State { + name: StateName + rules: Array +} + +/** + * A rule that matches input. Can have either an action, or an array of cases. 
+ */ +interface Rule { + regex: RegExp | string; + action: Action | Case[]; +} + +/** + * A case that selects a specific action by matching a guard pattern + */ +interface Case { + guard: string; + action: Action; +} + +/** + * Determines whether a given object is a Rule instance + * @param obj Object to check + * @returns Whether this object is a Rule + */ +function isRule(obj: State | Rule): obj is Rule { + return (obj as Rule).regex !== undefined && (obj as Rule).action !== undefined; +} + +/** + * Name of a token type, such as 'string' + */ +type Token = string; + +/** + * Token class to be used for CSS rendering, such as 'keyword', 'component', or 'type.identifer' + */ +type TokenClass = string; + +/** + * Next state that proceeds from an action, can also be a pop or a push of the current state (like for nested block comments) + */ +type NextState = StateName | '@pop' | '@push'; + +/** + * An action performed when a rule (or a case) matches token. + * It can determine the token class, as well whether to push/pop a tokenizer state + */ +interface Action { + token?: Token + tokenClass?: TokenClass + next?: NextState + // other more advanced states omitted... 
+} + +/** + * Abstract representation of a Monarch grammar file + */ +interface MonarchGrammar { + readonly languageDefinition: LanguageDefinition; + readonly tokenizer: Tokenizer; +} + +/** + * Generates a Monarch highlighting grammar file's contents, based on the passed Langium grammar + * @param grammar Langium grammar to use in generating this Monarch syntax highlighting file content + * @param config Langium Config to also use during generation + * @returns Generated Monarch syntax highlighting file content + */ +export function generateMonarch(grammar: langium.Grammar, config: LangiumLanguageConfig): string { + + const symbols = getSymbols(grammar); + const regex = /[{}[\]()]/; + const operators = symbols.filter(s => !regex.test(s)); + + // build absract monarch grammar representation + const monarchGrammar: MonarchGrammar = { + languageDefinition: { + name: config.id, // identifier for generating the grammar export + keywords: getKeywords(grammar), + operators, + symbols, + tokenPostfix: '.' 
+ config.id, // category appended to all tokens + }, + tokenizer: { + states: getTokenizerStates(grammar) + } + }; + + // return concrete monarch grammar representation + return prettyPrint(monarchGrammar); +} + +/** + * Gets Monarch tokenizer states from a Langium grammar + * @param grammar Langium grammar to source tokenizer states from + * @returns Array of tokenizer states + */ +function getTokenizerStates(grammar: langium.Grammar): State[] { + + // initial state, name is arbitrary, just needs to come first + const initialState: State = { + name: 'initial', + rules: getTerminalRules(grammar) + }; + + const whitespaceState: State = { + name: 'whitespace', + rules: getWhitespaceRules(grammar) + }; + + const commentRules: State = { + name: 'comment', + rules: getCommentRules(grammar) + }; + + // order the following additional rules, to prevent + // comment sequences being classified as symbols + + // add include for the whitespace state + initialState.rules.push(whitespaceState); + + // add operator & symbol case handling + initialState.rules.push({ + regex: '@symbols', + action: [ + { + guard: '@operators', + action: { token: 'operator' } + }, + // by default, leave the symbol alone + { + guard: '@default', + action: { token: '' } + } + ] + }); + + return [ + initialState, + whitespaceState, + commentRules + ]; +} + +/** + * Pretty prints a monarch grammar into it's concrete form, suitable for writing to a file + * @param monarchGrammar Grammar to pretty print + * @returns Monarch grammar in concrete form, suitable for writing to a file + */ +function prettyPrint(monarchGrammar: MonarchGrammar): string { + const name = monarchGrammar.languageDefinition.name; + return ([ + `// Monarch syntax highlighting for the ${name} language.`, + `export const language${name} = {\n`, + + // add language definitions + prettyPrintLangDef(monarchGrammar.languageDefinition), + + // add folding + '\tfolding: {', + '\t\tmarkers: {', + '\t\t\tstart: new 
RegExp(\'^\\s*//\\s*#?region\\b\'),', + '\t\t\tend: new RegExp(\'^\\s*//\\s*#?endregion\\b\')', + '\t\t}', + '\t},\n', + + // add tokenizer parts, simple state machine groupings + prettyPrintTokenizer(monarchGrammar.tokenizer), + + '};' + ].join('\n')).replaceAll(/\t/g, ' '); +} + +/** + * Generates an entry for a language definition, given a name (token category) and values + * @param name Category of language definition to add + * @param values Values to add under the given category + * @param fmt Formatter to keep things indented + * @returns A string of this language def entry, for use in a monarch file + */ +function genLanguageDefEntry(name: string, values: string[], fmt: Formatter): string { + return [ + `${name}: [`, + '\t' + values.map(v => `'${v}'`).join(','), + '],' + ].map(fmt).join('\n'); +} + +/** + * Pretty prints the language definition portion of a Monarch grammar + * @param languageDef LanguageDefinition to pretty print + * @returns LanguageDefinition in concrete form + */ +function prettyPrintLangDef(languageDef: LanguageDefinition): string { + const content = [ + genLanguageDefEntry('keywords', languageDef.keywords, indent), + genLanguageDefEntry('operators', languageDef.operators, indent), + // special case, identify symbols via singular regex + indent('symbols: /' + languageDef.symbols.join('|') + '/,') + ]; + return content.join('\n') + '\n'; +} + +/** + * Pretty prints the tokenizer portion of a Monarch grammar file + * @param tokenizer Tokenizer portion to print out + * @returns Tokenizer in concrete form + */ +function prettyPrintTokenizer(tokenizer: Tokenizer): string { + return [ + '\ttokenizer: {', + tokenizer.states.map(state => prettyPrintState(state, (s => indent(indent(s))))).join('\n'), + '\t}' + ].join('\n'); +} + +/** + * Pretty prints a tokenizer state, composed of various rules + * @param state Tokenizer state to pretty print + * @param fmt Formatter to set indentation + * @returns Tokenizer state in concrete form + */ 
+function prettyPrintState(state: State, fmt: Formatter): string { + return [ + fmt(state.name + ': ['), + state.rules.map(rule => fmt(prettyPrintRule(rule, indent))).join('\n'), + fmt('],') + ].join('\n'); +} + +/** + * Pretty prints a Rule. + * This can either be a literal rule to match w/ an action, or a reference to a state to include here + * @param ruleOrState Rule to pretty print. If it's a state, we include that state's contents implicitly within this context. + * @param fmt Formatter to track indentation + * @returns Rule in concrete form + */ +function prettyPrintRule(ruleOrState: Rule | State, fmt: Formatter): string { + if(isRule(ruleOrState)) { + // extract rule pattern, either just a string or a regex w/ parts + const rulePatt = ruleOrState.regex instanceof RegExp ? getTerminalParts(ruleOrState.regex).join('') : `/${ruleOrState.regex}/`; + return fmt('{ regex: ' + rulePatt + ', action: ' + prettyPrintAction(ruleOrState.action) + ' },'); + } else { + // include another state by name, implicitly includes all of its contents + return fmt(`{ include: '@${ruleOrState.name}' },`); + } +} + +/** + * Pretty prints the action of a Rule + * @param action Action to print. Can have several keywords to control what the state machine should do next. 
+ * @returns Action in concrete form + */ +function prettyPrintAction(action: Action | Case[]): string { + if(!Array.isArray(action)) { + // plain action + return JSON.stringify(action); + } else { + // array of cases, each with an action + const prettyCases: string = action.map(c => `'${c.guard}': ` + prettyPrintAction(c.action)).join(', '); + return '{ cases: { ' + prettyCases + ' }}'; + } +} + +/** + * Convert a deafult Langium token names to a monarch one + * @param name Token name to convert + * @returns Returns the equivalent monarch name, or the original token name + */ +function getMonarchTokenName(name: string): string { + if(name === 'WS') { + return 'white'; + } else if (name === 'ML_COMMENT' || name === 'SL_COMMENT') { + return 'comment'; + } else if (name === 'STRING') { + return 'string'; + } else if (name === 'INT') { + return 'number'; + } else if (name === 'BIGINT') { + return 'number.float'; + } else if (name === 'ID') { + return 'identifier'; + } else { + // fallback to the original name + return name; + } +} + +/** + * Gets whitespace rules from the langium grammar. 
Includes starting comment sequence + * @param grammar Langium grammar to extract whitespace rules from + * @returns Array of Monarch whitespace rules + */ +function getWhitespaceRules(grammar: langium.Grammar): Rule[] { + const rules: Rule[] = []; + for(const rule of grammar.rules) { + if(isTerminalRule(rule) && (isCommentTerminal(rule) || rule.name === 'WS') && isRegexToken(rule.definition)) { + const tokenName = getMonarchTokenName(rule.name); + const part = getTerminalParts(terminalRegex(rule))[0]; + if(part.start !== '' && part.end !== '' && tokenName === 'comment') { + // state-based rule, only add push to jump into it + rules.push({ + regex: part.start.replace('/', '\\/'), + action: { token: tokenName, next: '@' + tokenName } + }); + + } else { + // single regex rule + rules.push({ + regex: rule.definition.regex, + action: {token: getMonarchTokenName(rule.name) } + }); + } + } + } + return rules; +} + +/** + * Gets comment state rules from the Langium grammar. Accounts for nested multi-line comments. 
+ * @param grammar Langium grammar to extract comment rules from + * @returns Array of Monarch comment rules + */ +function getCommentRules(grammar: langium.Grammar): Rule[] { + const rules: Rule[] = []; + for(const rule of grammar.rules) { + if(isTerminalRule(rule) && (isCommentTerminal(rule) || rule.name === 'WS') && isRegexToken(rule.definition)) { + const tokenName = getMonarchTokenName(rule.name); + const part = getTerminalParts(terminalRegex(rule))[0]; + if(part.start !== '' && part.end !== '' && tokenName === 'comment') { + // rules to manage comment nesting via push/pop + // rule order matters + + const start = part.start.replace('/', '\\/'); + const end = part.end.replace('/', '\\/'); + + // 1st, add anything that's not in the start sequence + rules.push({ + regex: `[^${start}]+`, + action: { token: tokenName } + }); + + // 2nd, otherwise if start seq, push this state again for nesting + rules.push({ + regex: start, + action: { token: tokenName, next: '@push' } + }); + + // 3rd, end of sequence, pop this state, keeping others on the stack + rules.push({ + regex: end, + action: { token: tokenName, next: '@pop' } + }); + + // 4th, otherwise, start sequence characters are OK in this state + rules.push({ + regex: `[${start}]`, + action: { token: tokenName } + }); + + } + } + } + return rules; +} + +/** + * Retrieves non-comment terminal rules, creating associated actions for them + * @param grammar Grammar to get non-comment terminals from + * @returns Array of Rules to add to a Monarch tokenizer state + */ +function getTerminalRules(grammar: langium.Grammar): Rule[] { + const rules: Rule[] = []; + for (const rule of grammar.rules) { + if (isTerminalRule(rule) && !isCommentTerminal(rule) && rule.name !== 'WS' && isRegexToken(rule.definition)) { + const tokenName = getMonarchTokenName(rule.name); + // default action... 
+ let action: Action | Case[] = { token: tokenName }; + + if(tokenName === 'identifier') { + // for identifiers, add case to handle keywords as well, + // so they aren't tagged incorrectly as IDs + action = [{ + guard: '@keywords', + action: { token: 'keyword' } + },{ + guard: '@default', + action // include default action from above + }]; + } + + rules.push({ + regex: rule.definition.regex, + action + }); + } + } + return rules; +} + +/** + * Keyword regex for matching keyword terminals, or for only collecting symbol terminals + */ +const KeywordRegex = /[A-Za-z]/; + +/** + * Retrieves keywords from the current grammar + * @param grammar Gramamr to get keywords from + * @returns Array of keywords + */ +function getKeywords(grammar: langium.Grammar): string[] { + return collectKeywords(grammar).filter(kw => KeywordRegex.test(kw)); +} + +/** + * Retrieve symbols from langium grammar + * @param grammar Grammar to get symbols from + * @returns Array of symbols, effective inverse of getKeywords + */ +function getSymbols(grammar: langium.Grammar): string[] { + return collectKeywords(grammar).filter(kw => !KeywordRegex.test(kw)); +} + +/** + * General formatter type to help with nested indentation + */ + type Formatter = (line: string) => string; + +/** + * Adds single indentation to string + * @param s String to print w/ an extra indent + * @returns Singly indented string + */ +function indent(s: string): string { + return '\t' + s; +} diff --git a/packages/langium-cli/src/generator/textmate-generator.ts b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts similarity index 96% rename from packages/langium-cli/src/generator/textmate-generator.ts rename to packages/langium-cli/src/generator/highlighting/textmate-generator.ts index 93685d964..810b74a75 100644 --- a/packages/langium-cli/src/generator/textmate-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts @@ -1,13 +1,13 @@ 
/****************************************************************************** - * Copyright 2021 TypeFox GmbH + * Copyright 2022 TypeFox GmbH * This program and the accompanying materials are made available under the * terms of the MIT License, which is available in the project root. ******************************************************************************/ import * as langium from 'langium'; import { escapeRegExp, getCaseInsensitivePattern, getTerminalParts, isCommentTerminal, isTerminalRule, terminalRegex } from 'langium'; -import { LangiumLanguageConfig } from '../package'; -import { collectKeywords } from './util'; +import { LangiumLanguageConfig } from '../../package'; +import { collectKeywords } from '../util'; export interface TextMateGrammar { repository: Repository; @@ -124,7 +124,38 @@ function getControlKeywords(grammar: langium.Grammar, pack: LangiumLanguageConfi }; } -function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined): string[] { +function getStringPatterns(grammar: langium.Grammar, pack: LangiumLanguageConfig): Pattern[] { + const terminals = langium.stream(grammar.rules).filter(langium.isTerminalRule); + const stringTerminal = terminals.find(e => e.name.toLowerCase() === 'string'); + const stringPatterns: Pattern[] = []; + if (stringTerminal) { + const parts = getTerminalParts(terminalRegex(stringTerminal)); + for (const part of parts) { + if (part.end) { + stringPatterns.push({ + 'name': `string.quoted.${delimiterName(part.start)}.${pack.id}`, + 'begin': part.start, + 'end': part.end + }); + } + } + } + return stringPatterns; +} + +function delimiterName(delimiter: string): string { + if (delimiter === "'") { + return 'single'; + } else if (delimiter === '"') { + return 'double'; + } else if (delimiter === '`') { + return 'backtick'; + } else { + return 'delimiter'; + } +} + +export function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined): string[] { const groups: { letter: string[], 
leftSpecial: string[], @@ -156,34 +187,3 @@ function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined) if (groups.special.length) res.push(`\\B(${groups.special.join('|')})\\B`); return res; } - -function getStringPatterns(grammar: langium.Grammar, pack: LangiumLanguageConfig): Pattern[] { - const terminals = langium.stream(grammar.rules).filter(langium.isTerminalRule); - const stringTerminal = terminals.find(e => e.name.toLowerCase() === 'string'); - const stringPatterns: Pattern[] = []; - if (stringTerminal) { - const parts = getTerminalParts(terminalRegex(stringTerminal)); - for (const part of parts) { - if (part.end) { - stringPatterns.push({ - 'name': `string.quoted.${delimiterName(part.start)}.${pack.id}`, - 'begin': part.start, - 'end': part.end - }); - } - } - } - return stringPatterns; -} - -function delimiterName(delimiter: string): string { - if (delimiter === "'") { - return 'single'; - } else if (delimiter === '"') { - return 'double'; - } else if (delimiter === '`') { - return 'backtick'; - } else { - return 'delimiter'; - } -} diff --git a/packages/langium-cli/src/package.ts b/packages/langium-cli/src/package.ts index 976def6d4..1c11adb7b 100644 --- a/packages/langium-cli/src/package.ts +++ b/packages/langium-cli/src/package.ts @@ -48,6 +48,11 @@ export interface LangiumLanguageConfig { /** Output path to syntax highlighting file */ out: string } + /** Enable generating a Monarch syntax highlighting file */ + monarch?: { + /** Output path to syntax highlighting file */ + out: string + } /** Configure the chevrotain parser for a single language */ chevrotainParserConfig?: IParserConfig } From a3ac5ed8f04d6f4ec11188aafb28f7c7fa99c2bb Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Fri, 5 Aug 2022 16:27:48 +0200 Subject: [PATCH 2/7] fix order of function --- .../highlighting/textmate-generator.ts | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git 
a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts index 810b74a75..59b767122 100644 --- a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts @@ -124,37 +124,6 @@ function getControlKeywords(grammar: langium.Grammar, pack: LangiumLanguageConfi }; } -function getStringPatterns(grammar: langium.Grammar, pack: LangiumLanguageConfig): Pattern[] { - const terminals = langium.stream(grammar.rules).filter(langium.isTerminalRule); - const stringTerminal = terminals.find(e => e.name.toLowerCase() === 'string'); - const stringPatterns: Pattern[] = []; - if (stringTerminal) { - const parts = getTerminalParts(terminalRegex(stringTerminal)); - for (const part of parts) { - if (part.end) { - stringPatterns.push({ - 'name': `string.quoted.${delimiterName(part.start)}.${pack.id}`, - 'begin': part.start, - 'end': part.end - }); - } - } - } - return stringPatterns; -} - -function delimiterName(delimiter: string): string { - if (delimiter === "'") { - return 'single'; - } else if (delimiter === '"') { - return 'double'; - } else if (delimiter === '`') { - return 'backtick'; - } else { - return 'delimiter'; - } -} - export function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined): string[] { const groups: { letter: string[], @@ -187,3 +156,34 @@ export function groupKeywords(keywords: string[], caseInsensitive: boolean | und if (groups.special.length) res.push(`\\B(${groups.special.join('|')})\\B`); return res; } + +function getStringPatterns(grammar: langium.Grammar, pack: LangiumLanguageConfig): Pattern[] { + const terminals = langium.stream(grammar.rules).filter(langium.isTerminalRule); + const stringTerminal = terminals.find(e => e.name.toLowerCase() === 'string'); + const stringPatterns: Pattern[] = []; + if (stringTerminal) { + const parts = 
getTerminalParts(terminalRegex(stringTerminal)); + for (const part of parts) { + if (part.end) { + stringPatterns.push({ + 'name': `string.quoted.${delimiterName(part.start)}.${pack.id}`, + 'begin': part.start, + 'end': part.end + }); + } + } + } + return stringPatterns; +} + +function delimiterName(delimiter: string): string { + if (delimiter === "'") { + return 'single'; + } else if (delimiter === '"') { + return 'double'; + } else if (delimiter === '`') { + return 'backtick'; + } else { + return 'delimiter'; + } +} From 578238c1b5a665136d8c31127560b6704abbfdb0 Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Fri, 5 Aug 2022 16:29:30 +0200 Subject: [PATCH 3/7] and remove export... --- .../src/generator/highlighting/textmate-generator.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts index 59b767122..10e7bd2b5 100644 --- a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts @@ -124,7 +124,7 @@ function getControlKeywords(grammar: langium.Grammar, pack: LangiumLanguageConfi }; } -export function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined): string[] { +function groupKeywords(keywords: string[], caseInsensitive: boolean | undefined): string[] { const groups: { letter: string[], leftSpecial: string[], From 7598beee49361cd64db3206a4c5be0545ff536df Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Fri, 5 Aug 2022 16:42:51 +0200 Subject: [PATCH 4/7] fix comment state name --- .../src/generator/highlighting/monarch-generator.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts index a9b787254..264a893c8 100644 
--- a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts @@ -148,7 +148,7 @@ function getTokenizerStates(grammar: langium.Grammar): State[] { rules: getWhitespaceRules(grammar) }; - const commentRules: State = { + const commentState: State = { name: 'comment', rules: getCommentRules(grammar) }; @@ -178,7 +178,7 @@ function getTokenizerStates(grammar: langium.Grammar): State[] { return [ initialState, whitespaceState, - commentRules + commentState ]; } From 2e345a82e4714028d10c78b17e5047586a026a9a Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Wed, 10 Aug 2022 11:23:36 +0200 Subject: [PATCH 5/7] use generator nodes, remove folding, improved token name id, ws/comment fixes --- .../syntaxes/statemachine.monarch.ts | 13 +- .../highlighting/monarch-generator.ts | 212 +++++++++--------- 2 files changed, 105 insertions(+), 120 deletions(-) diff --git a/examples/statemachine/syntaxes/statemachine.monarch.ts b/examples/statemachine/syntaxes/statemachine.monarch.ts index f3567cbc8..1f1a136e5 100644 --- a/examples/statemachine/syntaxes/statemachine.monarch.ts +++ b/examples/statemachine/syntaxes/statemachine.monarch.ts @@ -1,6 +1,5 @@ // Monarch syntax highlighting for the statemachine language. 
-export const languagestatemachine = { - +export default { keywords: [ 'actions','commands','end','events','initialState','state','statemachine' ], @@ -9,16 +8,9 @@ export const languagestatemachine = { ], symbols: /{|}|=>/, - folding: { - markers: { - start: new RegExp('^\s*//\s*#?region\b'), - end: new RegExp('^\s*//\s*#?endregion\b') - } - }, - tokenizer: { initial: [ - { regex: /[_a-zA-Z][\w_]*/, action: { cases: { '@keywords': {"token":"keyword"}, '@default': {"token":"identifier"} }} }, + { regex: /[_a-zA-Z][\w_]*/, action: { cases: { '@keywords': {"token":"keyword"}, '@default': {"token":"ID"} }} }, { regex: /[0-9]+/, action: {"token":"number"} }, { regex: /"[^"]*"|'[^']*'/, action: {"token":"string"} }, { include: '@whitespace' }, @@ -31,7 +23,6 @@ export const languagestatemachine = { ], comment: [ { regex: /[^\/\*]+/, action: {"token":"comment"} }, - { regex: /\/\*/, action: {"token":"comment","next":"@push"} }, { regex: /\*\//, action: {"token":"comment","next":"@pop"} }, { regex: /[\/\*]/, action: {"token":"comment"} }, ], diff --git a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts index 264a893c8..c13359f55 100644 --- a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts @@ -5,7 +5,7 @@ ******************************************************************************/ import * as langium from 'langium'; -import { getTerminalParts, isCommentTerminal, isRegexToken, isTerminalRule, terminalRegex } from 'langium'; +import { getTerminalParts, isCommentTerminal, isRegexToken, isTerminalRule, terminalRegex, CompositeGeneratorNode, NL, processGeneratorNode, TerminalRule } from 'langium'; import { LangiumLanguageConfig } from '../../package'; import { collectKeywords } from '../util'; @@ -183,106 +183,107 @@ function getTokenizerStates(grammar: langium.Grammar): State[] { } /** - * 
Pretty prints a monarch grammar into it's concrete form, suitable for writing to a file + * Pretty prints a monarch grammar into a concrete form, suitable for writing to a file * @param monarchGrammar Grammar to pretty print * @returns Monarch grammar in concrete form, suitable for writing to a file */ function prettyPrint(monarchGrammar: MonarchGrammar): string { const name = monarchGrammar.languageDefinition.name; - return ([ - `// Monarch syntax highlighting for the ${name} language.`, - `export const language${name} = {\n`, + const node = new CompositeGeneratorNode( + `// Monarch syntax highlighting for the ${name} language.`, NL, + 'export default {', NL + ); - // add language definitions - prettyPrintLangDef(monarchGrammar.languageDefinition), + node.indent(grammarDef => { - // add folding - '\tfolding: {', - '\t\tmarkers: {', - '\t\t\tstart: new RegExp(\'^\\s*//\\s*#?region\\b\'),', - '\t\t\tend: new RegExp(\'^\\s*//\\s*#?endregion\\b\')', - '\t\t}', - '\t},\n', + // add language definitions + prettyPrintLangDef(monarchGrammar.languageDefinition, grammarDef); + grammarDef.append(NL, NL); // add tokenizer parts, simple state machine groupings - prettyPrintTokenizer(monarchGrammar.tokenizer), + prettyPrintTokenizer(monarchGrammar.tokenizer, grammarDef); + grammarDef.append(NL); + + }); + node.append('};'); - '};' - ].join('\n')).replaceAll(/\t/g, ' '); + return processGeneratorNode(node); } /** * Generates an entry for a language definition, given a name (token category) and values * @param name Category of language definition to add * @param values Values to add under the given category - * @param fmt Formatter to keep things indented - * @returns A string of this language def entry, for use in a monarch file + * @returns GeneratorNode containing this printed language definition entry */ -function genLanguageDefEntry(name: string, values: string[], fmt: Formatter): string { - return [ - `${name}: [`, - '\t' + values.map(v => `'${v}'`).join(','), - '],' - 
].map(fmt).join('\n'); +function genLanguageDefEntry(name: string, values: string[]): CompositeGeneratorNode { + const node = new CompositeGeneratorNode(`${name}: [`, NL); + node.indent(langDefValues => { + langDefValues.append(values.map(v => `'${v}'`).join(',')); + }); + node.append(NL, '],'); + return node; } /** * Pretty prints the language definition portion of a Monarch grammar * @param languageDef LanguageDefinition to pretty print - * @returns LanguageDefinition in concrete form + * @param node Existing generator node to append printed language definition to */ -function prettyPrintLangDef(languageDef: LanguageDefinition): string { - const content = [ - genLanguageDefEntry('keywords', languageDef.keywords, indent), - genLanguageDefEntry('operators', languageDef.operators, indent), +function prettyPrintLangDef(languageDef: LanguageDefinition, node: CompositeGeneratorNode): void { + node.append( + genLanguageDefEntry('keywords', languageDef.keywords), NL, + genLanguageDefEntry('operators', languageDef.operators), NL, // special case, identify symbols via singular regex - indent('symbols: /' + languageDef.symbols.join('|') + '/,') - ]; - return content.join('\n') + '\n'; + 'symbols: /' + languageDef.symbols.join('|') + '/,' + ); } /** * Pretty prints the tokenizer portion of a Monarch grammar file * @param tokenizer Tokenizer portion to print out - * @returns Tokenizer in concrete form + * @param node Existing generator node to append printed tokenizer to */ -function prettyPrintTokenizer(tokenizer: Tokenizer): string { - return [ - '\ttokenizer: {', - tokenizer.states.map(state => prettyPrintState(state, (s => indent(indent(s))))).join('\n'), - '\t}' - ].join('\n'); +function prettyPrintTokenizer(tokenizer: Tokenizer, node: CompositeGeneratorNode): void { + node.append('tokenizer: {', NL); + node.indent(tokenizerStates => { + for (const state of tokenizer.states) { + prettyPrintState(state, tokenizerStates); + tokenizerStates.append(NL); + } + }); + 
node.append('}'); } /** * Pretty prints a tokenizer state, composed of various rules * @param state Tokenizer state to pretty print - * @param fmt Formatter to set indentation - * @returns Tokenizer state in concrete form + * @param node Existing generator node to append printed state to */ -function prettyPrintState(state: State, fmt: Formatter): string { - return [ - fmt(state.name + ': ['), - state.rules.map(rule => fmt(prettyPrintRule(rule, indent))).join('\n'), - fmt('],') - ].join('\n'); +function prettyPrintState(state: State, node: CompositeGeneratorNode): void { + node.append(state.name + ': [', NL); + node.indent(inode => { + for(const rule of state.rules) { + inode.append(prettyPrintRule(rule), NL); + } + }); + node.append('],'); } /** * Pretty prints a Rule. * This can either be a literal rule to match w/ an action, or a reference to a state to include here * @param ruleOrState Rule to pretty print. If it's a state, we include that state's contents implicitly within this context. - * @param fmt Formatter to track indentation - * @returns Rule in concrete form + * @returns Generator node containing this printed rule */ -function prettyPrintRule(ruleOrState: Rule | State, fmt: Formatter): string { +function prettyPrintRule(ruleOrState: Rule | State): CompositeGeneratorNode { if(isRule(ruleOrState)) { // extract rule pattern, either just a string or a regex w/ parts const rulePatt = ruleOrState.regex instanceof RegExp ? 
getTerminalParts(ruleOrState.regex).join('') : `/${ruleOrState.regex}/`; - return fmt('{ regex: ' + rulePatt + ', action: ' + prettyPrintAction(ruleOrState.action) + ' },'); + return new CompositeGeneratorNode('{ regex: ' + rulePatt + ', action: ' + prettyPrintAction(ruleOrState.action) + ' },'); } else { // include another state by name, implicitly includes all of its contents - return fmt(`{ include: '@${ruleOrState.name}' },`); + return new CompositeGeneratorNode(`{ include: '@${ruleOrState.name}' },`); } } @@ -303,52 +304,57 @@ function prettyPrintAction(action: Action | Case[]): string { } /** - * Convert a deafult Langium token names to a monarch one - * @param name Token name to convert - * @returns Returns the equivalent monarch name, or the original token name + * Extracts Monarch token name from a Langium terminal rule, using either name or type. + * @param rule Rule to convert to a Monarch token name + * @returns Returns the equivalent monarch token name, or the original rule name */ -function getMonarchTokenName(name: string): string { - if(name === 'WS') { - return 'white'; - } else if (name === 'ML_COMMENT' || name === 'SL_COMMENT') { - return 'comment'; - } else if (name === 'STRING') { +function getMonarchTokenName(rule: TerminalRule): string { + if(rule.name.toLowerCase() === 'string') { + // string is clarified as a terminal by name, but not necessarily by type return 'string'; - } else if (name === 'INT') { - return 'number'; - } else if (name === 'BIGINT') { - return 'number.float'; - } else if (name === 'ID') { - return 'identifier'; + } else if (rule.type) { + // use rule type + return rule.type.name; } else { // fallback to the original name - return name; + return rule.name; } } /** - * Gets whitespace rules from the langium grammar. Includes starting comment sequence + * Gets whitespace rules from the langium grammar. Includes starting comment sequence. 
* @param grammar Langium grammar to extract whitespace rules from * @returns Array of Monarch whitespace rules */ function getWhitespaceRules(grammar: langium.Grammar): Rule[] { const rules: Rule[] = []; for(const rule of grammar.rules) { - if(isTerminalRule(rule) && (isCommentTerminal(rule) || rule.name === 'WS') && isRegexToken(rule.definition)) { - const tokenName = getMonarchTokenName(rule.name); + if(isTerminalRule(rule) && isRegexToken(rule.definition)) { + const regex = new RegExp(terminalRegex(rule)); + + if(!isCommentTerminal(rule) && !regex.test(' ')) { + // skip rules that are not comments or whitespace + continue; + } + + // token name is either comment or whitespace + const tokenName = isCommentTerminal(rule) ? 'comment' : 'white'; + const part = getTerminalParts(terminalRegex(rule))[0]; - if(part.start !== '' && part.end !== '' && tokenName === 'comment') { - // state-based rule, only add push to jump into it + + // check if this is a comment terminal w/ a start & end sequence (multi-line) + if(part.start !== '' && part.end !== '' && isCommentTerminal(rule)) { + // state-based comment rule, only add push to jump into it rules.push({ regex: part.start.replace('/', '\\/'), action: { token: tokenName, next: '@' + tokenName } }); } else { - // single regex rule + // single regex rule, generally for whitespace rules.push({ regex: rule.definition.regex, - action: {token: getMonarchTokenName(rule.name) } + action: {token: tokenName } }); } } @@ -357,18 +363,19 @@ function getWhitespaceRules(grammar: langium.Grammar): Rule[] { } /** - * Gets comment state rules from the Langium grammar. Accounts for nested multi-line comments. + * Gets comment state rules from the Langium grammar. + * Accounts for multi-line comments, but without nesting. 
* @param grammar Langium grammar to extract comment rules from * @returns Array of Monarch comment rules */ function getCommentRules(grammar: langium.Grammar): Rule[] { const rules: Rule[] = []; for(const rule of grammar.rules) { - if(isTerminalRule(rule) && (isCommentTerminal(rule) || rule.name === 'WS') && isRegexToken(rule.definition)) { - const tokenName = getMonarchTokenName(rule.name); + if(isTerminalRule(rule) && isCommentTerminal(rule) && isRegexToken(rule.definition)) { + const tokenName = 'comment'; const part = getTerminalParts(terminalRegex(rule))[0]; - if(part.start !== '' && part.end !== '' && tokenName === 'comment') { - // rules to manage comment nesting via push/pop + if(part.start !== '' && part.end !== '') { + // rules to manage comment start/end // rule order matters const start = part.start.replace('/', '\\/'); @@ -380,19 +387,13 @@ function getCommentRules(grammar: langium.Grammar): Rule[] { action: { token: tokenName } }); - // 2nd, otherwise if start seq, push this state again for nesting - rules.push({ - regex: start, - action: { token: tokenName, next: '@push' } - }); - - // 3rd, end of sequence, pop this state, keeping others on the stack + // 2nd, end of sequence, pop this state, keeping others on the stack rules.push({ regex: end, action: { token: tokenName, next: '@pop' } }); - // 4th, otherwise, start sequence characters are OK in this state + // 3rd, otherwise, start sequence characters are OK in this state rules.push({ regex: `[${start}]`, action: { token: tokenName } @@ -412,14 +413,21 @@ function getCommentRules(grammar: langium.Grammar): Rule[] { function getTerminalRules(grammar: langium.Grammar): Rule[] { const rules: Rule[] = []; for (const rule of grammar.rules) { - if (isTerminalRule(rule) && !isCommentTerminal(rule) && rule.name !== 'WS' && isRegexToken(rule.definition)) { - const tokenName = getMonarchTokenName(rule.name); + if (isTerminalRule(rule) && !isCommentTerminal(rule) && isRegexToken(rule.definition)) { + const 
regex = new RegExp(terminalRegex(rule)); + + if (regex.test(' ')) { + // disallow terminal rules that match whitespace + continue; + } + + const tokenName = getMonarchTokenName(rule); // default action... let action: Action | Case[] = { token: tokenName }; - if(tokenName === 'identifier') { - // for identifiers, add case to handle keywords as well, - // so they aren't tagged incorrectly as IDs + if(getKeywords(grammar).some(keyword => regex.test(keyword))) { + // this rule overlaps with at least one keyword + // add case so keywords aren't tagged incorrectly as this token type action = [{ guard: '@keywords', action: { token: 'keyword' } @@ -445,7 +453,7 @@ const KeywordRegex = /[A-Za-z]/; /** * Retrieves keywords from the current grammar - * @param grammar Gramamr to get keywords from + * @param grammar Grammar to get keywords from * @returns Array of keywords */ function getKeywords(grammar: langium.Grammar): string[] { @@ -460,17 +468,3 @@ function getKeywords(grammar: langium.Grammar): string[] { function getSymbols(grammar: langium.Grammar): string[] { return collectKeywords(grammar).filter(kw => !KeywordRegex.test(kw)); } - -/** - * General formatter type to help with nested indentation - */ - type Formatter = (line: string) => string; - -/** - * Adds single indentation to string - * @param s String to print w/ an extra indent - * @returns Singly indented string - */ -function indent(s: string): string { - return '\t' + s; -} From dca35a3aba0a571d5ea6bb5611ce127aea2a4c7c Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Wed, 10 Aug 2022 11:29:30 +0200 Subject: [PATCH 6/7] small style changes --- .../src/generator/highlighting/monarch-generator.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts index c13359f55..c0e696db7 100644 --- 
a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts @@ -185,7 +185,7 @@ function getTokenizerStates(grammar: langium.Grammar): State[] { /** * Pretty prints a monarch grammar into a concrete form, suitable for writing to a file * @param monarchGrammar Grammar to pretty print - * @returns Monarch grammar in concrete form, suitable for writing to a file + * @returns Monarch grammar in concrete form */ function prettyPrint(monarchGrammar: MonarchGrammar): string { const name = monarchGrammar.languageDefinition.name; @@ -195,7 +195,6 @@ function prettyPrint(monarchGrammar: MonarchGrammar): string { ); node.indent(grammarDef => { - // add language definitions prettyPrintLangDef(monarchGrammar.languageDefinition, grammarDef); grammarDef.append(NL, NL); From b7014a5923aca1f2f78956bd56444e9e6a1f6e77 Mon Sep 17 00:00:00 2001 From: Benjamin Friedman Wilson Date: Wed, 31 Aug 2022 10:17:29 +0200 Subject: [PATCH 7/7] update copyright, escape regex, formatting fixes --- examples/statemachine/syntaxes/statemachine.monarch.ts | 4 ++-- packages/langium-cli/src/generate.ts | 2 +- .../src/generator/highlighting/monarch-generator.ts | 6 +++--- .../src/generator/highlighting/textmate-generator.ts | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/statemachine/syntaxes/statemachine.monarch.ts b/examples/statemachine/syntaxes/statemachine.monarch.ts index 1f1a136e5..e29f1806a 100644 --- a/examples/statemachine/syntaxes/statemachine.monarch.ts +++ b/examples/statemachine/syntaxes/statemachine.monarch.ts @@ -6,7 +6,7 @@ export default { operators: [ '=>' ], - symbols: /{|}|=>/, + symbols: /\{|\}|=>/, tokenizer: { initial: [ @@ -27,4 +27,4 @@ export default { { regex: /[\/\*]/, action: {"token":"comment"} }, ], } -}; \ No newline at end of file +}; diff --git a/packages/langium-cli/src/generate.ts b/packages/langium-cli/src/generate.ts index e827b61ac..8b2998143 100644 
--- a/packages/langium-cli/src/generate.ts +++ b/packages/langium-cli/src/generate.ts @@ -194,7 +194,7 @@ export async function generate(config: LangiumConfig, options: GenerateOptions): await writeHighlightGrammar(genTmGrammar, textMatePath, options); } - if(languageConfig?.monarch) { + if (languageConfig?.monarch) { const genMonarchGrammar = generateMonarch(grammar, languageConfig); const monarchPath = path.resolve(relPath, languageConfig.monarch.out); log('log', options, `Writing monarch grammar to ${chalk.white.bold(monarchPath)}`); diff --git a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts index c0e696db7..902575853 100644 --- a/packages/langium-cli/src/generator/highlighting/monarch-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/monarch-generator.ts @@ -5,7 +5,7 @@ ******************************************************************************/ import * as langium from 'langium'; -import { getTerminalParts, isCommentTerminal, isRegexToken, isTerminalRule, terminalRegex, CompositeGeneratorNode, NL, processGeneratorNode, TerminalRule } from 'langium'; +import { getTerminalParts, isCommentTerminal, isRegexToken, isTerminalRule, terminalRegex, CompositeGeneratorNode, NL, processGeneratorNode, TerminalRule, escapeRegExp } from 'langium'; import { LangiumLanguageConfig } from '../../package'; import { collectKeywords } from '../util'; @@ -204,7 +204,7 @@ function prettyPrint(monarchGrammar: MonarchGrammar): string { grammarDef.append(NL); }); - node.append('};'); + node.append('};', NL); return processGeneratorNode(node); } @@ -234,7 +234,7 @@ function prettyPrintLangDef(languageDef: LanguageDefinition, node: CompositeGene genLanguageDefEntry('keywords', languageDef.keywords), NL, genLanguageDefEntry('operators', languageDef.operators), NL, // special case, identify symbols via singular regex - 'symbols: /' + languageDef.symbols.join('|') + '/,' + 
'symbols: /' + languageDef.symbols.map(escapeRegExp).join('|') + '/,' ); } diff --git a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts index 10e7bd2b5..6294e2284 100644 --- a/packages/langium-cli/src/generator/highlighting/textmate-generator.ts +++ b/packages/langium-cli/src/generator/highlighting/textmate-generator.ts @@ -1,5 +1,5 @@ /****************************************************************************** - * Copyright 2022 TypeFox GmbH + * Copyright 2021-2022 TypeFox GmbH * This program and the accompanying materials are made available under the * terms of the MIT License, which is available in the project root. ******************************************************************************/