up follow livre

This commit is contained in:
Tykayn 2025-08-30 18:14:14 +02:00 committed by tykayn
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions

View file

@ -0,0 +1,5 @@
"""fontTools.voltLib -- a package for dealing with Visual OpenType Layout Tool
(VOLT) files."""
# See
# http://www.microsoft.com/typography/VOLT.mspx

View file

@ -0,0 +1,206 @@
import argparse
import logging
import sys
from io import StringIO
from pathlib import Path
from fontTools import configLogger
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lexer import Lexer
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.ttLib import TTFont, TTLibError
from fontTools.voltLib.parser import Parser
from fontTools.voltLib.voltToFea import TABLES, VoltToFea
# Reuse the feaLib logger name so verbosity settings also affect the
# feature-compilation messages emitted by feaLib.builder.
log = logging.getLogger("fontTools.feaLib")

# Tables the CLI can build: the converter's TABLES plus "cmap".
SUPPORTED_TABLES = TABLES + ["cmap"]
def invalid_fea_glyph_name(name):
    """Return True if *name* is not a valid glyph name under FEA syntax.

    The first character must be a valid FEA name-start character and every
    following character a valid continuation character.
    """
    head, tail = name[0], name[1:]
    if head not in Lexer.CHAR_NAME_START_:
        return True
    return not all(ch in Lexer.CHAR_NAME_CONTINUATION_ for ch in tail)
def sanitize_glyph_name(name):
    """Sanitize the glyph name to ensure it is valid according to FEA syntax.

    Characters that are not valid FEA name-continuation characters are
    replaced with "_".  If the first character is not a valid name-start
    character, the name is prefixed with "a"; in that case the original first
    character is kept only when it is at least a valid continuation character
    (e.g. a digit), otherwise it is replaced with "_" as well — previously a
    name like "-foo" was "sanitized" to "a-foo", which is still invalid.
    """
    sanitized = ""
    for i, c in enumerate(name):
        if i == 0 and c not in Lexer.CHAR_NAME_START_:
            # Prefix with a valid start character; only keep the original
            # character if it is valid in non-initial position.
            sanitized += "a" + (c if c in Lexer.CHAR_NAME_CONTINUATION_ else "_")
        elif c not in Lexer.CHAR_NAME_CONTINUATION_:
            sanitized += "_"
        else:
            sanitized += c
    return sanitized
def main(args=None):
    """Build tables from a MS VOLT project into an OTF font"""
    parser = argparse.ArgumentParser(
        description="Use fontTools to compile MS VOLT projects."
    )
    parser.add_argument(
        "input",
        metavar="INPUT",
        help="Path to the input font/VTP file to process",
        type=Path,
    )
    parser.add_argument(
        "-f",
        "--font",
        metavar="INPUT_FONT",
        help="Path to the input font (if INPUT is a VTP file)",
        type=Path,
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output",
        metavar="OUTPUT",
        help="Path to the output font.",
        type=Path,
    )
    parser.add_argument(
        "-t",
        "--tables",
        metavar="TABLE_TAG",
        choices=SUPPORTED_TABLES,
        nargs="+",
        help="Specify the table(s) to be built.",
    )
    parser.add_argument(
        "-F",
        "--debug-feature-file",
        help="Write the generated feature file to disk.",
        action="store_true",
    )
    parser.add_argument(
        "--ship",
        help="Remove source VOLT tables from output font.",
        action="store_true",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Increase the logger verbosity. Multiple -v options are allowed.",
        action="count",
        default=0,
    )
    parser.add_argument(
        "-T",
        "--traceback",
        help="show traceback for exceptions.",
        action="store_true",
    )
    options = parser.parse_args(args)
    # Map 0/1/2+ occurrences of -v to WARNING/INFO/DEBUG.
    levels = ["WARNING", "INFO", "DEBUG"]
    configLogger(level=levels[min(len(levels) - 1, options.verbose)])
    output_font = options.output or Path(
        makeOutputFileName(options.font or options.input)
    )
    log.info(f"Compiling MS VOLT to '{output_font}'")
    file_or_path = options.input
    font = None
    # If the input is a font file, extract the VOLT data from the "TSIV" table
    try:
        font = TTFont(file_or_path)
        if "TSIV" in font:
            file_or_path = StringIO(font["TSIV"].data.decode("utf-8"))
        else:
            log.error('"TSIV" table is missing')
            return 1
    except TTLibError:
        # INPUT is not a font file; treat it as VTP text below.
        pass
    # If input is not a font file, the font must be provided
    if font is None:
        if not options.font:
            log.error("Please provide an input font")
            return 1
        font = TTFont(options.font)
    # FEA syntax does not allow some glyph names that VOLT accepts, so if we
    # found such glyph name we will temporarily rename such glyphs.
    glyphOrder = font.getGlyphOrder()
    tempGlyphOrder = None
    if any(invalid_fea_glyph_name(n) for n in glyphOrder):
        tempGlyphOrder = []
        for n in glyphOrder:
            if invalid_fea_glyph_name(n):
                n = sanitize_glyph_name(n)
                # Avoid collisions with both the original names and names
                # already produced by sanitization.
                existing = set(tempGlyphOrder) | set(glyphOrder)
                while n in existing:
                    n = "a" + n
            tempGlyphOrder.append(n)
        font.setGlyphOrder(tempGlyphOrder)
    doc = Parser(file_or_path).parse()
    log.info("Converting VTP data to FEA")
    converter = VoltToFea(doc, font)
    try:
        fea = converter.convert(options.tables, ignore_unsupported_settings=True)
    except NotImplementedError as e:
        if options.traceback:
            raise
        # The offending AST node (if any) rides along as the exception arg.
        location = getattr(e.args[0], "location", None)
        message = f'"{e}" is not supported'
        if location:
            path, line, column = location
            log.error(f"{path}:{line}:{column}: {message}")
        else:
            log.error(message)
        return 1
    fea_filename = options.input
    if options.debug_feature_file:
        fea_filename = output_font.with_suffix(".fea")
        log.info(f"Writing FEA to '{fea_filename}'")
        with open(fea_filename, "w") as fp:
            fp.write(fea)
    log.info("Compiling FEA to OpenType tables")
    try:
        addOpenTypeFeaturesFromString(
            font,
            fea,
            filename=fea_filename,
            tables=options.tables,
        )
    except FeatureLibError as e:
        if options.traceback:
            raise
        log.error(e)
        return 1
    if options.ship:
        # Strip VOLT's private source tables from the shipped font.
        for tag in ["TSIV", "TSIS", "TSIP", "TSID"]:
            if tag in font:
                del font[tag]
    # Restore original glyph names.
    if tempGlyphOrder:
        import io

        # Round-trip through a saved blob so the compiled tables are frozen
        # with the temporary names before the original order is restored.
        f = io.BytesIO()
        font.save(f)
        font = TTFont(f)
        font.setGlyphOrder(glyphOrder)
        font["post"].extraNames = []
    font.save(output_font)
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())

View file

@ -0,0 +1,452 @@
from fontTools.voltLib.error import VoltLibError
from typing import NamedTuple
class Pos(NamedTuple):
    """A POS record: advance/x/y values plus per-ppem-size ADJUST_BY deltas."""

    adv: int
    dx: int
    dy: int
    adv_adjust_by: dict
    dx_adjust_by: dict
    dy_adjust_by: dict

    def __str__(self):
        parts = [" POS"]
        for field in ("adv", "dx", "dy"):
            amount = getattr(self, field)
            if amount is not None:
                parts.append(f" {field.upper()} {amount}")
            # Per-size adjustments follow the value they modify.
            for size, delta in getattr(self, f"{field}_adjust_by", {}).items():
                parts.append(f" ADJUST_BY {delta} AT {size}")
        parts.append(" END_POS")
        return "".join(parts)
class Element(object):
    """Base class of all VOLT AST nodes; records an optional source location."""

    def __init__(self, location=None):
        # (path, line, column) tuple, or None when unknown.
        self.location = location

    def build(self, builder):
        # Default no-op; concrete statements may emit themselves into builder.
        pass

    def __str__(self):
        # Every concrete node must render its VOLT textual form.
        raise NotImplementedError
class Statement(Element):
    """Base class for top-level VOLT statements."""

    pass
class Expression(Element):
    """Base class for nested expressions (glyphs, groups, ranges, enums)."""

    pass
class VoltFile(Statement):
    """Root node: an entire VOLT project file, i.e. a sequence of statements."""

    def __init__(self):
        Statement.__init__(self, location=None)
        self.statements = []

    def build(self, builder):
        # Build each child statement in file order.
        for statement in self.statements:
            statement.build(builder)

    def __str__(self):
        body = "\n".join(str(statement) for statement in self.statements)
        return "\n" + body + " END\n"
class GlyphDefinition(Statement):
    """DEF_GLYPH: one glyph with its id, unicode value(s), type and components."""

    def __init__(self, name, gid, gunicode, gtype, components, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.id = gid
        self.unicode = gunicode
        self.type = gtype
        self.components = components

    def __str__(self):
        pieces = [f'DEF_GLYPH "{self.name}" ID {self.id}']
        if self.unicode is not None:
            if len(self.unicode) > 1:
                values = ",".join(f"U+{u:04X}" for u in self.unicode)
                pieces.append(f' UNICODEVALUES "{values}"')
            else:
                # Single code points are written as a plain decimal number.
                pieces.append(f" UNICODE {self.unicode[0]}")
        if self.type is not None:
            pieces.append(f" TYPE {self.type}")
        if self.components is not None:
            pieces.append(f" COMPONENTS {self.components}")
        pieces.append(" END_GLYPH")
        return "".join(pieces)
class GroupDefinition(Statement):
    """DEF_GROUP: a named glyph group defined by an enum."""

    def __init__(self, name, enum, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.enum = enum
        # Lazily computed, cached tuple of glyph names (see glyphSet()).
        self.glyphs_ = None

    def glyphSet(self, groups=None):
        """Return the tuple of glyph names, expanding nested group references.

        *groups* is the set of group names currently being expanded; it is
        used to detect self-referential (cyclic) group definitions.
        """
        if groups is not None and self.name in groups:
            raise VoltLibError(
                'Group "%s" contains itself.' % (self.name), self.location
            )
        if self.glyphs_ is None:
            if groups is None:
                groups = set({self.name})
            else:
                groups.add(self.name)
            self.glyphs_ = self.enum.glyphSet(groups)
        return self.glyphs_

    def __str__(self):
        # An empty/None enum renders as an empty line between the delimiters.
        enum = self.enum and str(self.enum) or ""
        return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP'
class GlyphName(Expression):
    """A single glyph name, such as cedilla."""

    def __init__(self, glyph, location=None):
        Expression.__init__(self, location)
        self.glyph = glyph

    def glyphSet(self):
        # One-element tuple keeps the interface uniform with groups/ranges.
        return (self.glyph,)

    def __str__(self):
        return ' GLYPH "%s"' % self.glyph
class Enum(Expression):
    """An ENUM: an ordered sequence of glyphs, groups, ranges or nested enums."""

    def __init__(self, enum, location=None):
        Expression.__init__(self, location)
        self.enum = enum

    def __iter__(self):
        # Iterating an enum yields its flattened glyph names.
        yield from self.glyphSet()

    def glyphSet(self, groups=None):
        """Return a flat tuple of glyph names; *groups* tracks group expansion."""
        names = []
        for item in self.enum:
            if isinstance(item, (GroupName, Enum)):
                # Only groups/enums participate in cycle detection.
                names.extend(item.glyphSet(groups))
            else:
                names.extend(item.glyphSet())
        return tuple(names)

    def __str__(self):
        inner = "".join(str(item) for item in self.enum)
        return f" ENUM{inner} END_ENUM"
class GroupName(Expression):
    """A reference to a named glyph group."""

    def __init__(self, group, parser, location=None):
        Expression.__init__(self, location)
        self.group = group
        # Back-reference to the parser: groups may be referenced before they
        # are defined, so resolution is deferred to glyphSet() time.
        self.parser_ = parser

    def glyphSet(self, groups=None):
        """Resolve the referenced group and return its glyph tuple.

        Raises VoltLibError if the group was never defined.
        """
        group = self.parser_.resolve_group(self.group)
        if group is not None:
            # NOTE(review): glyphs_ is assigned only here (never in __init__)
            # and is not consulted as a cache guard — confirm intentional.
            self.glyphs_ = group.glyphSet(groups)
            return self.glyphs_
        else:
            raise VoltLibError(
                'Group "%s" is used but undefined.' % (self.group), self.location
            )

    def __str__(self):
        return f' GROUP "{self.group}"'
class Range(Expression):
    """A glyph range, expanded through the parser's glyph order."""

    def __init__(self, start, end, parser, location=None):
        Expression.__init__(self, location)
        self.start = start
        self.end = end
        self.parser = parser

    def glyphSet(self):
        # The parser knows the font's glyph ordering, so it expands the range.
        names = self.parser.glyph_range(self.start, self.end)
        return tuple(names)

    def __str__(self):
        return ' RANGE "%s" TO "%s"' % (self.start, self.end)
class ScriptDefinition(Statement):
    """DEF_SCRIPT: a script with its language-system definitions."""

    def __init__(self, name, tag, langs, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.tag = tag
        self.langs = langs

    def __str__(self):
        parts = ["DEF_SCRIPT"]
        if self.name is not None:
            parts.append(f' NAME "{self.name}"')
        parts.append(f' TAG "{self.tag}"\n\n')
        parts.extend(str(lang) for lang in self.langs)
        parts.append("END_SCRIPT")
        return "".join(parts)
class LangSysDefinition(Statement):
    """DEF_LANGSYS: a language system with its feature definitions."""

    def __init__(self, name, tag, features, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.tag = tag
        self.features = features

    def __str__(self):
        parts = ["DEF_LANGSYS"]
        if self.name is not None:
            parts.append(f' NAME "{self.name}"')
        parts.append(f' TAG "{self.tag}"\n\n')
        parts.extend(str(feature) for feature in self.features)
        parts.append("END_LANGSYS\n")
        return "".join(parts)
class FeatureDefinition(Statement):
    """DEF_FEATURE: a feature listing the lookups it applies."""

    def __init__(self, name, tag, lookups, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.tag = tag
        self.lookups = lookups

    def __str__(self):
        lookups = " ".join(f'LOOKUP "{lookup}"' for lookup in self.lookups)
        return (
            f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n'
            f" {lookups}\n"
            "END_FEATURE\n"
        )
class LookupDefinition(Statement):
    """DEF_LOOKUP: a lookup with its flags, context and sub/pos payload."""

    def __init__(
        self,
        name,
        process_base,
        process_marks,
        mark_glyph_set,
        direction,
        reversal,
        comments,
        context,
        sub,
        pos,
        location=None,
    ):
        Statement.__init__(self, location)
        self.name = name
        self.process_base = process_base
        # True (all marks), False (skip marks) or a glyph-group name string.
        self.process_marks = process_marks
        self.mark_glyph_set = mark_glyph_set
        self.direction = direction
        self.reversal = reversal
        self.comments = comments
        self.context = context
        # Exactly one of sub/pos is set, matching AS_SUBSTITUTION/AS_POSITION.
        self.sub = sub
        self.pos = pos

    def __str__(self):
        res = f'DEF_LOOKUP "{self.name}"'
        res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}'
        if self.process_marks:
            res += " PROCESS_MARKS "
            if self.mark_glyph_set:
                res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"'
            elif isinstance(self.process_marks, str):
                # A specific glyph group restricts which marks are processed.
                res += f'"{self.process_marks}"'
            else:
                res += "ALL"
        else:
            res += " SKIP_MARKS"
        if self.direction is not None:
            res += f" DIRECTION {self.direction}"
        if self.reversal:
            res += " REVERSAL"
        if self.comments is not None:
            # Comments are stored with literal newlines; re-escape for VOLT.
            comments = self.comments.replace("\n", r"\n")
            res += f'\nCOMMENTS "{comments}"'
        if self.context:
            res += "\n" + "\n".join(str(c) for c in self.context)
        else:
            # VOLT always writes an (empty) context block.
            res += "\nIN_CONTEXT\nEND_CONTEXT"
        if self.sub:
            res += f"\n{self.sub}"
        if self.pos:
            res += f"\n{self.pos}"
        return res
class SubstitutionDefinition(Statement):
    """AS_SUBSTITUTION: maps source glyph sequences to replacement sequences."""

    def __init__(self, mapping, location=None):
        Statement.__init__(self, location)
        self.mapping = mapping

    def __str__(self):
        lines = ["AS_SUBSTITUTION\n"]
        for source, target in self.mapping.items():
            sub_part = "".join(str(item) for item in source)
            with_part = "".join(str(item) for item in target)
            lines.append(f"SUB{sub_part}\nWITH{with_part}\nEND_SUB\n")
        lines.append("END_SUBSTITUTION")
        return "".join(lines)
class SubstitutionSingleDefinition(SubstitutionDefinition):
    """One-to-one glyph substitution."""

    pass


class SubstitutionMultipleDefinition(SubstitutionDefinition):
    """One glyph substituted by a sequence of glyphs."""

    pass


class SubstitutionLigatureDefinition(SubstitutionDefinition):
    """A glyph sequence substituted by a single (ligature) glyph."""

    pass


class SubstitutionAlternateDefinition(SubstitutionDefinition):
    """One glyph with several alternate replacements (duplicate SUB entries)."""

    pass


class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition):
    """Single substitution applied with REVERSAL (reverse chaining)."""

    pass
class PositionAttachDefinition(Statement):
    """ATTACH positioning: attach coverages to a base coverage at named anchors."""

    def __init__(self, coverage, coverage_to, location=None):
        Statement.__init__(self, location)
        self.coverage = coverage
        # List of (coverage, anchor_name) pairs following the TO keyword.
        self.coverage_to = coverage_to

    def __str__(self):
        base = "".join(str(item) for item in self.coverage)
        parts = [f"AS_POSITION\nATTACH{base}\nTO"]
        for cov, anchor in self.coverage_to:
            rendered = "".join(str(item) for item in cov)
            parts.append(f'{rendered} AT ANCHOR "{anchor}"')
        parts.append("\nEND_ATTACH\nEND_POSITION")
        return "".join(parts)
class PositionAttachCursiveDefinition(Statement):
    """ATTACH_CURSIVE positioning: EXIT and ENTER anchor coverages."""

    def __init__(self, coverages_exit, coverages_enter, location=None):
        Statement.__init__(self, location)
        self.coverages_exit = coverages_exit
        self.coverages_enter = coverages_enter

    def __str__(self):
        parts = ["AS_POSITION\nATTACH_CURSIVE"]
        for cov in self.coverages_exit:
            parts.append("\nEXIT " + "".join(str(item) for item in cov))
        for cov in self.coverages_enter:
            parts.append("\nENTER " + "".join(str(item) for item in cov))
        parts.append("\nEND_ATTACH\nEND_POSITION")
        return "".join(parts)
class PositionAdjustPairDefinition(Statement):
    """ADJUST_PAIR (kerning) positioning: FIRST/SECOND coverages + adjustments."""

    def __init__(self, coverages_1, coverages_2, adjust_pair, location=None):
        Statement.__init__(self, location)
        self.coverages_1 = coverages_1
        self.coverages_2 = coverages_2
        # Maps (first_index, second_index) -> (Pos, Pos).  Indices refer to
        # the FIRST/SECOND coverage lists — presumably 1-based; TODO confirm.
        self.adjust_pair = adjust_pair

    def __str__(self):
        res = "AS_POSITION\nADJUST_PAIR\n"
        for coverage in self.coverages_1:
            # Note: FIRST/SECOND coverages are joined with spaces, unlike the
            # other position statements which use no separator.
            coverage = " ".join(str(c) for c in coverage)
            res += f" FIRST {coverage}"
        res += "\n"
        for coverage in self.coverages_2:
            coverage = " ".join(str(c) for c in coverage)
            res += f" SECOND {coverage}"
        res += "\n"
        for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items():
            res += f" {id_1} {id_2} BY{pos_1}{pos_2}\n"
        res += "\nEND_ADJUST\nEND_POSITION"
        return res
class PositionAdjustSingleDefinition(Statement):
    """ADJUST_SINGLE positioning: a list of (coverage, pos) adjustments."""

    def __init__(self, adjust_single, location=None):
        Statement.__init__(self, location)
        self.adjust_single = adjust_single

    def __str__(self):
        parts = ["AS_POSITION\nADJUST_SINGLE"]
        for cov, pos in self.adjust_single:
            rendered = "".join(str(item) for item in cov)
            parts.append(f"{rendered} BY{pos}")
        parts.append("\nEND_ADJUST\nEND_POSITION")
        return "".join(parts)
class ContextDefinition(Statement):
    """IN_CONTEXT/EXCEPT_CONTEXT block with LEFT and RIGHT coverage sequences."""

    def __init__(self, ex_or_in, left=None, right=None, location=None):
        Statement.__init__(self, location)
        # Either "IN_CONTEXT" or "EXCEPT_CONTEXT".
        self.ex_or_in = ex_or_in
        self.left = left if left is not None else []
        self.right = right if right is not None else []

    def __str__(self):
        lines = [self.ex_or_in + "\n"]
        for cov in self.left:
            lines.append(" LEFT" + "".join(str(item) for item in cov) + "\n")
        for cov in self.right:
            lines.append(" RIGHT" + "".join(str(item) for item in cov) + "\n")
        lines.append("END_CONTEXT")
        return "".join(lines)
class AnchorDefinition(Statement):
    """DEF_ANCHOR: an anchor point on a glyph (by name, gid and component)."""

    def __init__(self, name, gid, glyph_name, component, locked, pos, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.gid = gid
        self.glyph_name = glyph_name
        self.component = component
        self.locked = locked
        self.pos = pos

    def __str__(self):
        locked = " LOCKED" if self.locked else ""
        return (
            f'DEF_ANCHOR "{self.name}"'
            f" ON {self.gid}"
            f" GLYPH {self.glyph_name}"
            f" COMPONENT {self.component}"
            f"{locked}"
            f" AT {self.pos} END_ANCHOR"
        )
class SettingDefinition(Statement):
    """A key/value grid or compiler setting line (e.g. GRID_PPEM 20)."""

    def __init__(self, name, value, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.value = value

    def __str__(self):
        if self.value is True:
            # Flag-style settings render as the bare setting name.
            return f"{self.name}"
        if isinstance(self.value, (tuple, list)):
            rendered = " ".join(str(item) for item in self.value)
            return f"{self.name} {rendered}"
        return f"{self.name} {self.value}"

View file

@ -0,0 +1,12 @@
class VoltLibError(Exception):
    """Error raised while lexing or parsing VOLT data, carrying a location."""

    def __init__(self, message, location):
        Exception.__init__(self, message)
        # (path, line, column) tuple, or a falsy value when unknown.
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        if not self.location:
            return message
        path, line, column = self.location
        return "%s:%d:%d: %s" % (path, line, column, message)

View file

@ -0,0 +1,99 @@
from fontTools.voltLib.error import VoltLibError
class Lexer(object):
    """Tokenizer for MS VOLT project (.vtp) text.

    Iterating yields (token_type, value, location) triples where location is
    a (filename, line, column) tuple.  NEWLINE tokens are produced internally
    but filtered out by the iterator protocol.
    """

    NUMBER = "NUMBER"
    STRING = "STRING"
    NAME = "NAME"
    NEWLINE = "NEWLINE"
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_DIGIT_ = "0123456789"
    CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz"
    CHAR_UNDERSCORE_ = "_"
    CHAR_PERIOD_ = "."
    CHAR_NAME_START_ = (
        CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + CHAR_UNDERSCORE_
    )
    CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_

    def __init__(self, text, filename):
        self.filename_ = filename
        self.line_ = 1
        self.pos_ = 0
        self.line_start_ = 0
        self.text_ = text
        self.text_length_ = len(text)

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens; callers only see substantive tokens.
        while True:
            token_type, token, location = self.next_()
            if token_type not in {Lexer.NEWLINE}:
                return (token_type, token, location)

    def location_(self):
        """Return the (filename, line, column) of the current position."""
        column = self.pos_ - self.line_start_ + 1
        return (self.filename_ or "<volt>", self.line_, column)

    def next_(self):
        """Scan and return the next raw token, including NEWLINE tokens."""
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None
        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # Treat "\r\n" as one newline.
            self.pos_ += 2 if next_char == "\n" else 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == '"':
            self.pos_ += 1
            self.scan_until_('"\r\n')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                return (Lexer.STRING, text[start + 1 : self.pos_ - 1], location)
            else:
                raise VoltLibError("Expected '\"' to terminate string", location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start : self.pos_]
            return (Lexer.NAME, token, location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
        # Negative number.  Guard next_char against None: when "-" is the very
        # last character, `None in str` would raise TypeError instead of the
        # intended VoltLibError below.
        if cur_char == "-" and next_char is not None and next_char in Lexer.CHAR_DIGIT_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
        raise VoltLibError("Unexpected character: '%s'" % cur_char, location)

    def scan_over_(self, valid):
        """Advance pos_ past a run of characters drawn from *valid*."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        """Advance pos_ up to (not including) the first character in *stop_at*."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

View file

@ -0,0 +1,664 @@
import fontTools.voltLib.ast as ast
from fontTools.voltLib.lexer import Lexer
from fontTools.voltLib.error import VoltLibError
from io import open
# Dispatch table: top-level VTP keywords mapped to the name of the Parser
# method that parses the corresponding statement.
PARSE_FUNCS = {
    "DEF_GLYPH": "parse_def_glyph_",
    "DEF_GROUP": "parse_def_group_",
    "DEF_SCRIPT": "parse_def_script_",
    "DEF_LOOKUP": "parse_def_lookup_",
    "DEF_ANCHOR": "parse_def_anchor_",
    "GRID_PPEM": "parse_ppem_",
    "PRESENTATION_PPEM": "parse_ppem_",
    # NOTE(review): "PPOSITIONING_PPEM" (double P) appears to be the literal
    # keyword emitted by VOLT — confirm against real VTP files.
    "PPOSITIONING_PPEM": "parse_ppem_",
    "COMPILER_USEEXTENSIONLOOKUPS": "parse_noarg_option_",
    "COMPILER_USEPAIRPOSFORMAT2": "parse_noarg_option_",
    "CMAP_FORMAT": "parse_cmap_format",
    "DO_NOT_TOUCH_CMAP": "parse_noarg_option_",
}
class Parser(object):
    def __init__(self, path):
        """Create a parser for *path* (a filename or a readable file object)."""
        self.doc_ = ast.VoltFile()
        # Symbol tables for the various namespaces; glyph order matters, so
        # glyphs use an ordered table.
        self.glyphs_ = OrderedSymbolTable()
        self.groups_ = SymbolTable()
        self.anchors_ = {}  # dictionary of SymbolTable() keyed by glyph
        self.scripts_ = SymbolTable()
        self.langs_ = SymbolTable()
        self.lookups_ = SymbolTable()
        # One-token lookahead: cur_* holds the current token, next_* the
        # pending one (advance_lexer_ shifts next into cur).
        self.next_token_type_, self.next_token_ = (None, None)
        self.next_token_location_ = None
        self.make_lexer_(path)
        self.advance_lexer_()
def make_lexer_(self, file_or_path):
if hasattr(file_or_path, "read"):
filename = getattr(file_or_path, "name", None)
data = file_or_path.read()
else:
filename = file_or_path
with open(file_or_path, "r") as f:
data = f.read()
self.lexer_ = Lexer(data, filename)
    def parse(self):
        """Parse the whole input and return the ast.VoltFile document."""
        statements = self.doc_.statements
        while self.next_token_type_ is not None:
            self.advance_lexer_()
            if self.cur_token_ in PARSE_FUNCS.keys():
                # Dispatch to the statement-specific parse method.
                func = getattr(self, PARSE_FUNCS[self.cur_token_])
                statements.append(func())
            elif self.is_cur_keyword_("END"):
                # "END" terminates the file.
                break
            else:
                raise VoltLibError(
                    "Expected " + ", ".join(sorted(PARSE_FUNCS.keys())),
                    self.cur_token_location_,
                )
        return self.doc_
    def parse_def_glyph_(self):
        """Parse a DEF_GLYPH ... END_GLYPH statement."""
        assert self.is_cur_keyword_("DEF_GLYPH")
        location = self.cur_token_location_
        name = self.expect_string_()
        self.expect_keyword_("ID")
        gid = self.expect_number_()
        if gid < 0:
            raise VoltLibError("Invalid glyph ID", self.cur_token_location_)
        gunicode = None
        if self.next_token_ == "UNICODE":
            # Single code point, given as a decimal number.
            self.expect_keyword_("UNICODE")
            gunicode = [self.expect_number_()]
            if gunicode[0] < 0:
                raise VoltLibError("Invalid glyph UNICODE", self.cur_token_location_)
        elif self.next_token_ == "UNICODEVALUES":
            # Comma-separated "U+XXXX" list in a quoted string.
            self.expect_keyword_("UNICODEVALUES")
            gunicode = self.parse_unicode_values_()
        gtype = None
        if self.next_token_ == "TYPE":
            self.expect_keyword_("TYPE")
            gtype = self.expect_name_()
            assert gtype in ("BASE", "LIGATURE", "MARK", "COMPONENT")
        components = None
        if self.next_token_ == "COMPONENTS":
            self.expect_keyword_("COMPONENTS")
            components = self.expect_number_()
        self.expect_keyword_("END_GLYPH")
        if self.glyphs_.resolve(name) is not None:
            raise VoltLibError(
                'Glyph "%s" (gid %i) already defined' % (name, gid), location
            )
        def_glyph = ast.GlyphDefinition(
            name, gid, gunicode, gtype, components, location=location
        )
        self.glyphs_.define(name, def_glyph)
        return def_glyph
    def parse_def_group_(self):
        """Parse a DEF_GROUP ... END_GROUP statement."""
        assert self.is_cur_keyword_("DEF_GROUP")
        location = self.cur_token_location_
        name = self.expect_string_()
        enum = None
        if self.next_token_ == "ENUM":
            enum = self.parse_enum_()
        self.expect_keyword_("END_GROUP")
        if self.groups_.resolve(name) is not None:
            raise VoltLibError(
                'Glyph group "%s" already defined, '
                "group names are case insensitive" % name,
                location,
            )
        def_group = ast.GroupDefinition(name, enum, location=location)
        self.groups_.define(name, def_group)
        return def_group
    def parse_def_script_(self):
        """Parse DEF_SCRIPT ... END_SCRIPT, including its DEF_LANGSYS children."""
        assert self.is_cur_keyword_("DEF_SCRIPT")
        location = self.cur_token_location_
        name = None
        if self.next_token_ == "NAME":
            self.expect_keyword_("NAME")
            name = self.expect_string_()
        self.expect_keyword_("TAG")
        tag = self.expect_string_()
        if self.scripts_.resolve(tag) is not None:
            raise VoltLibError(
                'Script "%s" already defined, '
                "script tags are case insensitive" % tag,
                location,
            )
        # Language tags are unique only within their enclosing script.
        self.langs_.enter_scope()
        langs = []
        while self.next_token_ != "END_SCRIPT":
            self.advance_lexer_()
            lang = self.parse_langsys_()
            self.expect_keyword_("END_LANGSYS")
            if self.langs_.resolve(lang.tag) is not None:
                raise VoltLibError(
                    'Language "%s" already defined in script "%s", '
                    "language tags are case insensitive" % (lang.tag, tag),
                    location,
                )
            self.langs_.define(lang.tag, lang)
            langs.append(lang)
        self.expect_keyword_("END_SCRIPT")
        self.langs_.exit_scope()
        def_script = ast.ScriptDefinition(name, tag, langs, location=location)
        self.scripts_.define(tag, def_script)
        return def_script
    def parse_langsys_(self):
        """Parse DEF_LANGSYS up to (but not consuming) END_LANGSYS."""
        assert self.is_cur_keyword_("DEF_LANGSYS")
        location = self.cur_token_location_
        name = None
        if self.next_token_ == "NAME":
            self.expect_keyword_("NAME")
            name = self.expect_string_()
        self.expect_keyword_("TAG")
        tag = self.expect_string_()
        features = []
        while self.next_token_ != "END_LANGSYS":
            self.advance_lexer_()
            feature = self.parse_feature_()
            # END_FEATURE is consumed here, not inside parse_feature_().
            self.expect_keyword_("END_FEATURE")
            features.append(feature)
        def_langsys = ast.LangSysDefinition(name, tag, features, location=location)
        return def_langsys
    def parse_feature_(self):
        """Parse DEF_FEATURE up to (but not consuming) END_FEATURE."""
        assert self.is_cur_keyword_("DEF_FEATURE")
        location = self.cur_token_location_
        self.expect_keyword_("NAME")
        name = self.expect_string_()
        self.expect_keyword_("TAG")
        tag = self.expect_string_()
        lookups = []
        while self.next_token_ != "END_FEATURE":
            # expect_keyword_ below already advances past each LOOKUP token.
            # self.advance_lexer_()
            self.expect_keyword_("LOOKUP")
            lookup = self.expect_string_()
            lookups.append(lookup)
        feature = ast.FeatureDefinition(name, tag, lookups, location=location)
        return feature
    def parse_def_lookup_(self):
        """Parse DEF_LOOKUP: flags, optional context, then the sub/pos body."""
        assert self.is_cur_keyword_("DEF_LOOKUP")
        location = self.cur_token_location_
        name = self.expect_string_()
        if not name[0].isalpha():
            raise VoltLibError(
                'Lookup name "%s" must start with a letter' % name, location
            )
        if self.lookups_.resolve(name) is not None:
            raise VoltLibError(
                'Lookup "%s" already defined, '
                "lookup names are case insensitive" % name,
                location,
            )
        # Base processing defaults to on.
        process_base = True
        if self.next_token_ == "PROCESS_BASE":
            self.advance_lexer_()
        elif self.next_token_ == "SKIP_BASE":
            self.advance_lexer_()
            process_base = False
        # process_marks: True (all marks), False (skip) or a group-name string.
        process_marks = True
        mark_glyph_set = None
        if self.next_token_ == "PROCESS_MARKS":
            self.advance_lexer_()
            if self.next_token_ == "MARK_GLYPH_SET":
                self.advance_lexer_()
                mark_glyph_set = self.expect_string_()
            elif self.next_token_ == "ALL":
                self.advance_lexer_()
            elif self.next_token_ == "NONE":
                self.advance_lexer_()
                process_marks = False
            elif self.next_token_type_ == Lexer.STRING:
                # A quoted glyph-group name restricting mark processing.
                process_marks = self.expect_string_()
            else:
                raise VoltLibError(
                    "Expected ALL, NONE, MARK_GLYPH_SET or an ID. "
                    "Got %s" % (self.next_token_type_),
                    location,
                )
        elif self.next_token_ == "SKIP_MARKS":
            self.advance_lexer_()
            process_marks = False
        direction = None
        if self.next_token_ == "DIRECTION":
            self.expect_keyword_("DIRECTION")
            direction = self.expect_name_()
            assert direction in ("LTR", "RTL")
        reversal = None
        if self.next_token_ == "REVERSAL":
            self.expect_keyword_("REVERSAL")
            reversal = True
        comments = None
        if self.next_token_ == "COMMENTS":
            self.expect_keyword_("COMMENTS")
            comments = self.expect_string_().replace(r"\n", "\n")
        context = []
        # parse_context_ consumes all consecutive context blocks, so this loop
        # body effectively runs at most once.
        while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"):
            context = self.parse_context_()
        as_pos_or_sub = self.expect_name_()
        sub = None
        pos = None
        if as_pos_or_sub == "AS_SUBSTITUTION":
            sub = self.parse_substitution_(reversal)
        elif as_pos_or_sub == "AS_POSITION":
            pos = self.parse_position_()
        else:
            raise VoltLibError(
                "Expected AS_SUBSTITUTION or AS_POSITION. " "Got %s" % (as_pos_or_sub),
                location,
            )
        def_lookup = ast.LookupDefinition(
            name,
            process_base,
            process_marks,
            mark_glyph_set,
            direction,
            reversal,
            comments,
            context,
            sub,
            pos,
            location=location,
        )
        self.lookups_.define(name, def_lookup)
        return def_lookup
    def parse_context_(self):
        """Parse consecutive IN_CONTEXT/EXCEPT_CONTEXT blocks into a list."""
        location = self.cur_token_location_
        contexts = []
        while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"):
            # NOTE(review): these two are immediately reassigned in the inner
            # loop; the initializers look vestigial.
            side = None
            coverage = None
            ex_or_in = self.expect_name_()
            # side_contexts = [] # XXX
            if self.next_token_ != "END_CONTEXT":
                left = []
                right = []
                while self.next_token_ in ("LEFT", "RIGHT"):
                    side = self.expect_name_()
                    coverage = self.parse_coverage_()
                    if side == "LEFT":
                        left.append(coverage)
                    else:
                        right.append(coverage)
                self.expect_keyword_("END_CONTEXT")
                context = ast.ContextDefinition(
                    ex_or_in, left, right, location=location
                )
                contexts.append(context)
            else:
                # Empty context block: consume END_CONTEXT, record nothing.
                self.expect_keyword_("END_CONTEXT")
        return contexts
    def parse_substitution_(self, reversal):
        """Parse AS_SUBSTITUTION and classify it into the proper GSUB subtype."""
        assert self.is_cur_keyword_("AS_SUBSTITUTION")
        location = self.cur_token_location_
        src = []
        dest = []
        if self.next_token_ != "SUB":
            raise VoltLibError("Expected SUB", location)
        while self.next_token_ == "SUB":
            self.expect_keyword_("SUB")
            src.append(self.parse_coverage_())
            self.expect_keyword_("WITH")
            dest.append(self.parse_coverage_())
            self.expect_keyword_("END_SUB")
        self.expect_keyword_("END_SUBSTITUTION")
        max_src = max([len(cov) for cov in src])
        max_dest = max([len(cov) for cov in dest])
        # many to many or mixed is invalid
        if max_src > 1 and max_dest > 1:
            raise VoltLibError("Invalid substitution type", location)
        mapping = dict(zip(tuple(src), tuple(dest)))
        if max_src == 1 and max_dest == 1:
            # Alternate substitutions are represented by adding multiple
            # substitutions for the same glyph, so we detect that here
            glyphs = [x.glyphSet() for cov in src for x in cov]  # flatten src
            if len(set(glyphs)) != len(glyphs):  # src has duplicates
                sub = ast.SubstitutionAlternateDefinition(mapping, location=location)
            else:
                if reversal:
                    # Reversal is valid only for single glyph substitutions
                    # and VOLT ignores it otherwise.
                    sub = ast.SubstitutionReverseChainingSingleDefinition(
                        mapping, location=location
                    )
                else:
                    sub = ast.SubstitutionSingleDefinition(mapping, location=location)
        elif max_src == 1 and max_dest > 1:
            sub = ast.SubstitutionMultipleDefinition(mapping, location=location)
        elif max_src > 1 and max_dest == 1:
            sub = ast.SubstitutionLigatureDefinition(mapping, location=location)
        # NOTE(review): if max_dest were 0 no branch assigns `sub` and the
        # return would raise UnboundLocalError — confirm an empty WITH
        # coverage cannot occur.
        return sub
    def parse_position_(self):
        """Parse AS_POSITION and dispatch on the positioning subtype keyword."""
        assert self.is_cur_keyword_("AS_POSITION")
        location = self.cur_token_location_
        pos_type = self.expect_name_()
        if pos_type not in ("ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"):
            raise VoltLibError(
                "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", location
            )
        if pos_type == "ATTACH":
            position = self.parse_attach_()
        elif pos_type == "ATTACH_CURSIVE":
            position = self.parse_attach_cursive_()
        elif pos_type == "ADJUST_PAIR":
            position = self.parse_adjust_pair_()
        elif pos_type == "ADJUST_SINGLE":
            position = self.parse_adjust_single_()
        self.expect_keyword_("END_POSITION")
        return position
    def parse_attach_(self):
        """Parse ATTACH <coverage> TO (<coverage> AT ANCHOR "name")* END_ATTACH."""
        assert self.is_cur_keyword_("ATTACH")
        location = self.cur_token_location_
        coverage = self.parse_coverage_()
        coverage_to = []
        self.expect_keyword_("TO")
        while self.next_token_ != "END_ATTACH":
            cov = self.parse_coverage_()
            self.expect_keyword_("AT")
            self.expect_keyword_("ANCHOR")
            anchor_name = self.expect_string_()
            coverage_to.append((cov, anchor_name))
        self.expect_keyword_("END_ATTACH")
        position = ast.PositionAttachDefinition(
            coverage, coverage_to, location=location
        )
        return position
    def parse_attach_cursive_(self):
        """Parse ATTACH_CURSIVE EXIT* ENTER* END_ATTACH."""
        assert self.is_cur_keyword_("ATTACH_CURSIVE")
        location = self.cur_token_location_
        coverages_exit = []
        coverages_enter = []
        # All EXIT coverages precede the ENTER coverages.
        while self.next_token_ != "ENTER":
            self.expect_keyword_("EXIT")
            coverages_exit.append(self.parse_coverage_())
        while self.next_token_ != "END_ATTACH":
            self.expect_keyword_("ENTER")
            coverages_enter.append(self.parse_coverage_())
        self.expect_keyword_("END_ATTACH")
        position = ast.PositionAttachCursiveDefinition(
            coverages_exit, coverages_enter, location=location
        )
        return position
    def parse_adjust_pair_(self):
        """Parse ADJUST_PAIR: FIRST/SECOND coverages plus indexed BY records."""
        assert self.is_cur_keyword_("ADJUST_PAIR")
        location = self.cur_token_location_
        coverages_1 = []
        coverages_2 = []
        adjust_pair = {}
        while self.next_token_ == "FIRST":
            self.advance_lexer_()
            coverage_1 = self.parse_coverage_()
            coverages_1.append(coverage_1)
        while self.next_token_ == "SECOND":
            self.advance_lexer_()
            coverage_2 = self.parse_coverage_()
            coverages_2.append(coverage_2)
        while self.next_token_ != "END_ADJUST":
            # Each record: <first index> <second index> BY <pos> <pos>.
            id_1 = self.expect_number_()
            id_2 = self.expect_number_()
            self.expect_keyword_("BY")
            pos_1 = self.parse_pos_()
            pos_2 = self.parse_pos_()
            adjust_pair[(id_1, id_2)] = (pos_1, pos_2)
        self.expect_keyword_("END_ADJUST")
        position = ast.PositionAdjustPairDefinition(
            coverages_1, coverages_2, adjust_pair, location=location
        )
        return position
def parse_adjust_single_(self):
assert self.is_cur_keyword_("ADJUST_SINGLE")
location = self.cur_token_location_
adjust_single = []
while self.next_token_ != "END_ADJUST":
coverages = self.parse_coverage_()
self.expect_keyword_("BY")
pos = self.parse_pos_()
adjust_single.append((coverages, pos))
self.expect_keyword_("END_ADJUST")
position = ast.PositionAdjustSingleDefinition(adjust_single, location=location)
return position
    def parse_def_anchor_(self):
        """Parse a DEF_ANCHOR ... END_ANCHOR statement."""
        assert self.is_cur_keyword_("DEF_ANCHOR")
        location = self.cur_token_location_
        name = self.expect_string_()
        self.expect_keyword_("ON")
        gid = self.expect_number_()
        self.expect_keyword_("GLYPH")
        glyph_name = self.expect_name_()
        self.expect_keyword_("COMPONENT")
        component = self.expect_number_()
        # check for duplicate anchor names on this glyph
        if glyph_name in self.anchors_:
            anchor = self.anchors_[glyph_name].resolve(name)
            # Same name on a different component is allowed (ligatures).
            if anchor is not None and anchor.component == component:
                raise VoltLibError(
                    'Anchor "%s" already defined, '
                    "anchor names are case insensitive" % name,
                    location,
                )
        if self.next_token_ == "LOCKED":
            locked = True
            self.advance_lexer_()
        else:
            locked = False
        self.expect_keyword_("AT")
        pos = self.parse_pos_()
        self.expect_keyword_("END_ANCHOR")
        anchor = ast.AnchorDefinition(
            name, gid, glyph_name, component, locked, pos, location=location
        )
        if glyph_name not in self.anchors_:
            self.anchors_[glyph_name] = SymbolTable()
        self.anchors_[glyph_name].define(name, anchor)
        return anchor
def parse_adjust_by_(self):
    """Parse one ``ADJUST_BY <delta> AT <ppem>`` device adjustment.

    Returns the ``(delta, ppem)`` pair.
    """
    self.advance_lexer_()
    assert self.is_cur_keyword_("ADJUST_BY")
    delta = self.expect_number_()
    self.expect_keyword_("AT")
    ppem = self.expect_number_()
    return delta, ppem
def parse_pos_(self):
    """Parse a ``POS ... END_POS`` block into an ``ast.Pos``.

    ADV, DX and DY components are each optional and may carry any
    number of ADJUST_BY device adjustments keyed by ppem size.
    """
    # VOLT syntax doesn't seem to take device Y advance
    self.advance_lexer_()
    location = self.cur_token_location_
    assert self.is_cur_keyword_("POS"), location
    adv = None
    dx = None
    dy = None
    # ppem size -> adjustment value, one dict per component.
    adv_adjust_by = {}
    dx_adjust_by = {}
    dy_adjust_by = {}
    if self.next_token_ == "ADV":
        self.advance_lexer_()
        adv = self.expect_number_()
        while self.next_token_ == "ADJUST_BY":
            adjustment, size = self.parse_adjust_by_()
            adv_adjust_by[size] = adjustment
    if self.next_token_ == "DX":
        self.advance_lexer_()
        dx = self.expect_number_()
        while self.next_token_ == "ADJUST_BY":
            adjustment, size = self.parse_adjust_by_()
            dx_adjust_by[size] = adjustment
    if self.next_token_ == "DY":
        self.advance_lexer_()
        dy = self.expect_number_()
        while self.next_token_ == "ADJUST_BY":
            adjustment, size = self.parse_adjust_by_()
            dy_adjust_by[size] = adjustment
    self.expect_keyword_("END_POS")
    return ast.Pos(adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by)
def parse_unicode_values_(self):
    """Parse a comma-separated list of ``U+XXXX`` code points.

    Returns a list of ints, or None when the string holds no values.
    Raises VoltLibError if a value cannot be parsed as hex.
    """
    location = self.cur_token_location_
    try:
        unicode_values = self.expect_string_().split(",")
        # Strip the two-character "U+" prefix and parse the hex digits.
        unicode_values = [int(uni[2:], 16) for uni in unicode_values if uni != ""]
    except ValueError as err:
        # Chain the original parse failure so the hex error is visible
        # in the traceback instead of the implicit "during handling" form.
        raise VoltLibError(str(err), location) from err
    return unicode_values if unicode_values != [] else None
def parse_enum_(self):
    """Parse an ``ENUM ... END_ENUM`` coverage group into an ``ast.Enum``."""
    self.expect_keyword_("ENUM")
    location = self.cur_token_location_
    members = self.parse_coverage_()
    result = ast.Enum(members, location=location)
    self.expect_keyword_("END_ENUM")
    return result
def parse_coverage_(self):
    """Parse a sequence of coverage items (GLYPH/GROUP/RANGE/ENUM).

    Returns a tuple of the corresponding AST nodes; stops at the first
    token that is none of the four keywords.
    """
    coverage = []
    # NOTE(review): one location is captured here and shared by every
    # item in the sequence — per-item locations are not recorded.
    location = self.cur_token_location_
    while self.next_token_ in ("GLYPH", "GROUP", "RANGE", "ENUM"):
        if self.next_token_ == "ENUM":
            enum = self.parse_enum_()
            coverage.append(enum)
        elif self.next_token_ == "GLYPH":
            self.expect_keyword_("GLYPH")
            name = self.expect_string_()
            coverage.append(ast.GlyphName(name, location=location))
        elif self.next_token_ == "GROUP":
            self.expect_keyword_("GROUP")
            name = self.expect_string_()
            coverage.append(ast.GroupName(name, self, location=location))
        elif self.next_token_ == "RANGE":
            self.expect_keyword_("RANGE")
            start = self.expect_string_()
            self.expect_keyword_("TO")
            end = self.expect_string_()
            coverage.append(ast.Range(start, end, self, location=location))
    return tuple(coverage)
def resolve_group(self, group_name):
    """Look up a named glyph group in the group symbol table (None if absent)."""
    return self.groups_.resolve(group_name)
def glyph_range(self, start, end):
    """Return glyph names from *start* to *end* in definition order (None if unknown)."""
    return self.glyphs_.range(start, end)
def parse_ppem_(self):
    """Wrap a ppem-valued option (current token) in a SettingDefinition."""
    location = self.cur_token_location_
    # Read the option name before expect_number_() advances the lexer.
    option_name = self.cur_token_
    return ast.SettingDefinition(option_name, self.expect_number_(), location=location)
def parse_noarg_option_(self):
    """Wrap a flag-style option (no argument) in a SettingDefinition set to True."""
    location = self.cur_token_location_
    return ast.SettingDefinition(self.cur_token_, True, location=location)
def parse_cmap_format(self):
    """Parse a CMAP entry option carrying three numbers.

    The triple presumably encodes (platform id, encoding id, format) —
    TODO confirm against VOLT documentation.
    """
    location = self.cur_token_location_
    name = self.cur_token_
    # Tuple elements evaluate left to right, preserving token order.
    value = (self.expect_number_(), self.expect_number_(), self.expect_number_())
    setting = ast.SettingDefinition(name, value, location=location)
    return setting
def is_cur_keyword_(self, k):
    """Return True when the current token is the NAME keyword *k*."""
    if self.cur_token_type_ is not Lexer.NAME:
        return False
    return self.cur_token_ == k
def expect_string_(self):
    """Advance and return the next token, which must be a STRING."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.STRING:
        return self.cur_token_
    raise VoltLibError("Expected a string", self.cur_token_location_)
def expect_keyword_(self, keyword):
    """Advance and return the next token, which must be the NAME *keyword*."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME or self.cur_token_ != keyword:
        raise VoltLibError('Expected "%s"' % keyword, self.cur_token_location_)
    return self.cur_token_
def expect_name_(self):
    """Advance and return the next token, which must be a NAME."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise VoltLibError("Expected a name", self.cur_token_location_)
    return self.cur_token_
def expect_number_(self):
    """Advance and return the next token, which must be a NUMBER."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.NUMBER:
        return self.cur_token_
    raise VoltLibError("Expected a number", self.cur_token_location_)
def advance_lexer_(self):
    """Shift the one-token lookahead window.

    The previous lookahead becomes the current token, and a new
    lookahead is pulled from the lexer. At end of input — or once the
    "END" keyword is reached — the lookahead becomes (None, None).
    """
    self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
        self.next_token_type_,
        self.next_token_,
        self.next_token_location_,
    )
    try:
        # "END" terminates parsing even if more input follows.
        if self.is_cur_keyword_("END"):
            raise StopIteration
        (
            self.next_token_type_,
            self.next_token_,
            self.next_token_location_,
        ) = self.lexer_.next()
    except StopIteration:
        self.next_token_type_, self.next_token_ = (None, None)
class SymbolTable(object):
    """A scoped name -> item mapping with case-insensitive fallback lookup."""

    def __init__(self):
        # Innermost scope is the last element of the list.
        self.scopes_ = [{}]

    def enter_scope(self):
        """Push a new innermost scope."""
        self.scopes_.append({})

    def exit_scope(self):
        """Pop the innermost scope."""
        self.scopes_.pop()

    def define(self, name, item):
        """Bind *name* to *item* in the innermost scope."""
        self.scopes_[-1][name] = item

    def resolve(self, name, case_insensitive=True):
        """Look *name* up, innermost scope first.

        When *case_insensitive* is true, falls back to a case-folded
        scan of each scope. Returns None if the name is not defined.
        """
        for scope in reversed(self.scopes_):
            # Presence check rather than truthiness, so falsy items
            # (e.g. 0 or "") are still found on the exact-match path.
            if name in scope:
                return scope[name]
            if case_insensitive:
                for key in scope:
                    if key.lower() == name.lower():
                        return scope[key]
        return None
class OrderedSymbolTable(SymbolTable):
    """Symbol table whose scopes preserve definition order.

    Order preservation enables ``range()`` queries over glyph
    definitions; lookups are case-sensitive by default.
    """

    def __init__(self):
        super().__init__()

    def enter_scope(self):
        """Push a new innermost scope."""
        self.scopes_.append({})

    def resolve(self, name, case_insensitive=False):
        """Case-sensitive lookup by default.

        Bug fix: the looked-up value was previously discarded (missing
        ``return``), so this method always returned None.
        """
        return SymbolTable.resolve(self, name, case_insensitive=case_insensitive)

    def range(self, start, end):
        """Return names from *start* to *end* inclusive, in definition
        order, or None when either endpoint is undefined."""
        for scope in reversed(self.scopes_):
            if start in scope and end in scope:
                start_idx = list(scope.keys()).index(start)
                end_idx = list(scope.keys()).index(end)
                return list(scope.keys())[start_idx : end_idx + 1]
        return None

View file

@ -0,0 +1,911 @@
"""\
MS VOLT ``.vtp`` to AFDKO ``.fea`` OpenType Layout converter.
Usage
-----
To convert a VTP project file:
.. code-block:: sh
$ fonttools voltLib.voltToFea input.vtp output.fea
It is also possible to convert font files with a `TSIV` table (as saved from VOLT),
in this case the glyph names used in the Volt project will be mapped to the
actual glyph names in the font files when written to the feature file:
.. code-block:: sh
$ fonttools voltLib.voltToFea input.ttf output.fea
The ``--quiet`` option can be used to suppress warnings.
The ``--traceback`` can be used to get Python traceback in case of exceptions,
instead of suppressing the traceback.
Limitations
-----------
* Not all VOLT features are supported; the script will error if it
encounters something it does not understand. Please report an issue if this
happens.
* AFDKO feature file syntax for mark positioning is awkward and does not allow
setting the mark coverage. It also defines mark anchors globally, as a result
some mark positioning lookups might cover more marks than were in the VOLT
file. This should not be an issue in practice, but if it is then the only way
is to modify the VOLT file or the generated feature file manually to use unique
mark anchors for each lookup.
* VOLT allows subtable breaks in any lookup type, but AFDKO feature file
implementations vary in their support; currently AFDKO's makeOTF supports
subtable breaks in pair positioning lookups only, while FontTools feaLib
support it for most substitution lookups and only some positioning lookups.
"""
import logging
import re
from io import StringIO
from graphlib import TopologicalSorter
from fontTools.feaLib import ast
from fontTools.ttLib import TTFont, TTLibError
from fontTools.voltLib import ast as VAst
from fontTools.voltLib.parser import Parser as VoltParser
log = logging.getLogger("fontTools.voltLib.voltToFea")

# OpenType layout tables this converter can produce statements for.
TABLES = ["GDEF", "GSUB", "GPOS"]
def _flatten_group(group):
    """Recursively expand *group* into a flat list of leaf items.

    Tuples/lists are flattened element-wise; any object exposing an
    ``enum`` attribute is expanded through that attribute; anything
    else is treated as a leaf.
    """
    if isinstance(group, (tuple, list)):
        flat = []
        for member in group:
            flat.extend(_flatten_group(member))
        return flat
    if hasattr(group, "enum"):
        return _flatten_group(group.enum)
    return [group]
# Topological sort of group definitions to ensure that all groups are defined
# before they are referenced. This is necessary because FEA requires it but
# VOLT does not, see below.
def sort_groups(groups):
    """Return *groups* reordered so that referenced groups come first."""
    # Group names are case-insensitive in VOLT, hence the lower-casing.
    group_map = {group.name.lower(): group for group in groups}
    # name -> names of the groups it references.
    graph = {
        group.name.lower(): [
            x.group.lower()
            for x in _flatten_group(group)
            if isinstance(x, VAst.GroupName)
        ]
        for group in groups
    }
    sorter = TopologicalSorter(graph)
    return [group_map[name] for name in sorter.static_order()]
class Lookup(ast.LookupBlock):
    """A feaLib LookupBlock that also tracks its chained-context companions."""

    def __init__(self, name, use_extension=False, location=None):
        super().__init__(name, use_extension, location)
        # Companion lookup blocks emitted before this one when the VOLT
        # lookup has contexts.
        self.chained = []
class VoltToFea:
_NOT_LOOKUP_NAME_RE = re.compile(r"[^A-Za-z_0-9.]")
_NOT_CLASS_NAME_RE = re.compile(r"[^A-Za-z_0-9.\-]")
def __init__(self, file_or_path, font=None):
    """Initialize the converter.

    *file_or_path* is either an already-parsed ``VAst.VoltFile`` or a
    path/file object to parse lazily in ``convert()``. *font* is an
    optional TTFont used to map VOLT glyph ids to font glyph names.
    """
    if isinstance(file_or_path, VAst.VoltFile):
        self._doc, self._file_or_path = file_or_path, None
    else:
        self._doc, self._file_or_path = None, file_or_path
    self._font = font
    self._glyph_map = {}  # VOLT glyph name -> font glyph name
    self._glyph_order = None  # glyph order from the font, if any
    self._gdef = {}  # GDEF class name -> ast.GlyphClass
    self._glyphclasses = {}  # lower-cased group name -> GlyphClassDefinition
    self._features = {}  # feature tag -> script -> language -> lookup names
    self._lookups = {}  # lower-cased lookup name -> Lookup
    self._marks = set()  # names of glyphs defined as MARK
    self._ligatures = {}  # ligature glyph name -> component count
    self._markclasses = {}  # (glyph, class name) -> MarkClassDefinition
    self._anchors = {}  # glyph -> anchor name -> component -> ast.Anchor
    self._settings = {}  # COMPILER_* settings
    self._lookup_names = {}  # memoized sanitized lookup names
    self._class_names = {}  # memoized sanitized class names
def _lookupName(self, name):
if name not in self._lookup_names:
res = self._NOT_LOOKUP_NAME_RE.sub("_", name)
while res in self._lookup_names.values():
res += "_"
self._lookup_names[name] = res
return self._lookup_names[name]
def _className(self, name):
if name not in self._class_names:
res = self._NOT_CLASS_NAME_RE.sub("_", name)
while res in self._class_names.values():
res += "_"
self._class_names[name] = res
return self._class_names[name]
def _collectStatements(self, doc, tables, ignore_unsupported_settings=False):
    """Walk the VOLT document and dispatch statements to their collectors.

    Ordering matters: glyph definitions first (needed for name mapping),
    then groups (topologically sorted), then everything else, and
    lookups last since they reference classes defined after them.
    """
    # Collect glyph definitions first, as we need them to map VOLT glyph
    # names to font glyph names.
    for statement in doc.statements:
        if isinstance(statement, VAst.GlyphDefinition):
            self._glyphDefinition(statement)
    # Collect and sort group definitions first, to make sure a group
    # definition that references other groups comes after them since VOLT
    # does not enforce such ordering, and feature files require it.
    groups = [s for s in doc.statements if isinstance(s, VAst.GroupDefinition)]
    for group in sort_groups(groups):
        self._groupDefinition(group)
    for statement in doc.statements:
        if isinstance(statement, VAst.AnchorDefinition):
            if "GPOS" in tables:
                self._anchorDefinition(statement)
        elif isinstance(statement, VAst.SettingDefinition):
            self._settingDefinition(statement, ignore_unsupported_settings)
        elif isinstance(statement, (VAst.GlyphDefinition, VAst.GroupDefinition)):
            pass  # Handled above
        elif isinstance(statement, VAst.ScriptDefinition):
            self._scriptDefinition(statement)
        elif not isinstance(statement, VAst.LookupDefinition):
            raise NotImplementedError(statement)
    # Lookup definitions need to be handled last as they reference glyph
    # and mark classes that might be defined after them.
    for statement in doc.statements:
        if isinstance(statement, VAst.LookupDefinition):
            # Skip lookups for tables the caller did not request.
            if statement.pos and "GPOS" not in tables:
                continue
            if statement.sub and "GSUB" not in tables:
                continue
            self._lookupDefinition(statement)
def _buildFeatureFile(self, tables):
    """Assemble the collected classes, lookups and features into a
    feaLib ``FeatureFile`` AST."""
    doc = ast.FeatureFile()
    statements = doc.statements
    if self._glyphclasses:
        statements.append(ast.Comment("# Glyph classes"))
        statements.extend(self._glyphclasses.values())
    if self._markclasses:
        statements.append(ast.Comment("\n# Mark classes"))
        statements.extend(c[1] for c in sorted(self._markclasses.items()))
    if self._lookups:
        statements.append(ast.Comment("\n# Lookups"))
        for lookup in self._lookups.values():
            # Chained companion lookups must precede the lookup that
            # references them.
            statements.extend(lookup.chained)
            statements.append(lookup)
    # Prune features: drop references to lookups that were not emitted,
    # then drop languages/scripts/features left empty.
    features = self._features.copy()
    for feature_tag in features:
        scripts = features[feature_tag]
        for script_tag in scripts:
            langs = scripts[script_tag]
            for language_tag in langs:
                langs[language_tag] = [
                    l for l in langs[language_tag] if l.lower() in self._lookups
                ]
            scripts[script_tag] = {t: l for t, l in langs.items() if l}
        features[feature_tag] = {t: s for t, s in scripts.items() if s}
    features = {t: f for t, f in features.items() if f}
    if features:
        statements.append(ast.Comment("# Features"))
        for feature_tag, scripts in features.items():
            feature = ast.FeatureBlock(feature_tag)
            # DFLT script must come first in FEA.
            script_tags = sorted(scripts, key=lambda k: 0 if k == "DFLT" else 1)
            if feature_tag == "aalt" and len(script_tags) > 1:
                log.warning(
                    "FEA syntax does not allow script statements in 'aalt' feature, "
                    "so only lookups from the first script will be included."
                )
                script_tags = script_tags[:1]
            for script_tag in script_tags:
                if feature_tag != "aalt":
                    feature.statements.append(ast.ScriptStatement(script_tag))
                # dflt language must come first in FEA.
                language_tags = sorted(
                    scripts[script_tag],
                    key=lambda k: 0 if k == "dflt" else 1,
                )
                if feature_tag == "aalt" and len(language_tags) > 1:
                    log.warning(
                        "FEA syntax does not allow language statements in 'aalt' feature, "
                        "so only lookups from the first language will be included."
                    )
                    language_tags = language_tags[:1]
                for language_tag in language_tags:
                    if feature_tag != "aalt":
                        include_default = True if language_tag == "dflt" else False
                        feature.statements.append(
                            ast.LanguageStatement(
                                language_tag.ljust(4),
                                include_default=include_default,
                            )
                        )
                    for name in scripts[script_tag][language_tag]:
                        lookup = self._lookups[name.lower()]
                        lookupref = ast.LookupReferenceStatement(lookup)
                        feature.statements.append(lookupref)
            statements.append(feature)
    if self._gdef and "GDEF" in tables:
        classes = []
        # GlyphClassDefStatement expects the classes in this fixed order.
        for name in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
            if name in self._gdef:
                classname = "GDEF_" + name.lower()
                glyphclass = ast.GlyphClassDefinition(classname, self._gdef[name])
                statements.append(glyphclass)
                classes.append(ast.GlyphClassName(glyphclass))
            else:
                classes.append(None)
        gdef = ast.TableBlock("GDEF")
        gdef.statements.append(ast.GlyphClassDefStatement(*classes))
        statements.append(gdef)
    return doc
def convert(self, tables=None, ignore_unsupported_settings=False):
    """Convert the VOLT project and return the feature-file text.

    *tables* limits output to a subset of ``TABLES`` (default: all).
    """
    # Parse lazily if a path/file was given instead of a parsed document.
    if self._doc is None:
        self._doc = VoltParser(self._file_or_path).parse()
    doc = self._doc
    if tables is None:
        tables = TABLES
    if self._font is not None:
        self._glyph_order = self._font.getGlyphOrder()
    self._collectStatements(doc, tables, ignore_unsupported_settings)
    fea = self._buildFeatureFile(tables)
    return fea.asFea()
def _glyphName(self, glyph):
    """Map a VOLT glyph node (or bare name) to a feaLib GlyphName,
    using the font glyph-name mapping when one exists."""
    name = getattr(glyph, "glyph", glyph)
    return ast.GlyphName(self._glyph_map.get(name, name))
def _groupName(self, group):
    """Map a VOLT group node (or bare name) to a feaLib GlyphClassName."""
    name = getattr(group, "group", group)
    # Group names are case-insensitive in VOLT.
    return ast.GlyphClassName(self._glyphclasses[name.lower()])
def _glyphSet(self, item):
    """Expand *item*'s glyph set, converting glyph names to feaLib nodes."""
    out = []
    for member in item.glyphSet():
        if isinstance(member, (str, VAst.GlyphName)):
            out.append(self._glyphName(member))
        else:
            out.append(member)
    return out
def _coverage(self, coverage, flatten=False):
    """Convert VOLT coverage items into feaLib glyph-bearing nodes.

    With ``flatten=True``, enums and ranges are expanded into the flat
    result list instead of being wrapped in an ``ast.GlyphClass``.
    """
    items = []
    for item in coverage:
        if isinstance(item, VAst.GlyphName):
            items.append(self._glyphName(item))
        elif isinstance(item, VAst.GroupName):
            items.append(self._groupName(item))
        elif isinstance(item, VAst.Enum):
            item = self._coverage(item.enum, flatten=True)
            if flatten:
                items.extend(item)
            else:
                items.append(ast.GlyphClass(item))
        elif isinstance(item, VAst.Range):
            item = self._glyphSet(item)
            if flatten:
                items.extend(item)
            else:
                items.append(ast.GlyphClass(item))
        else:
            raise NotImplementedError(item)
    return items
def _context(self, context):
    """Convert a VOLT context sequence into feaLib context items,
    collapsing single-glyph positions to a bare node."""
    out = []
    for item in context:
        flattened = self._coverage(item, flatten=True)
        out.append(
            flattened[0] if len(flattened) == 1 else ast.GlyphClass(flattened)
        )
    return out
def _groupDefinition(self, group):
    """Turn a VOLT group into a named feaLib glyph class definition."""
    name = self._className(group.name)
    glyphs = self._coverage(group.enum.enum, flatten=True)
    glyphclass = ast.GlyphClass(glyphs)
    classdef = ast.GlyphClassDefinition(name, glyphclass)
    # Keyed lower-cased: group names are case-insensitive in VOLT.
    self._glyphclasses[group.name.lower()] = classdef
def _glyphDefinition(self, glyph):
    """Record a VOLT glyph definition.

    Maps the VOLT name to the font glyph name (when a font is
    available), files the glyph into its GDEF class, and tracks marks
    and ligature component counts for later mark positioning.
    """
    try:
        self._glyph_map[glyph.name] = self._glyph_order[glyph.id]
    except TypeError:
        # No font glyph order available; keep VOLT names as-is.
        pass
    if glyph.type in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
        if glyph.type not in self._gdef:
            self._gdef[glyph.type] = ast.GlyphClass()
        self._gdef[glyph.type].glyphs.append(self._glyphName(glyph.name))
    if glyph.type == "MARK":
        self._marks.add(glyph.name)
    elif glyph.type == "LIGATURE":
        self._ligatures[glyph.name] = glyph.components
def _scriptDefinition(self, script):
    """Record the feature -> script -> language -> lookup-name mapping."""
    stag = script.tag
    for lang in script.langs:
        ltag = lang.tag
        for feature in lang.features:
            # Dict keys de-duplicate while preserving order; the part
            # before "\" is the base lookup name (subtables share it).
            lookups = {l.split("\\")[0]: True for l in feature.lookups}
            ftag = feature.tag
            if ftag not in self._features:
                self._features[ftag] = {}
            if stag not in self._features[ftag]:
                self._features[ftag][stag] = {}
            assert ltag not in self._features[ftag][stag]
            self._features[ftag][stag][ltag] = lookups.keys()
def _settingDefinition(self, setting, ignore_unsupported=False):
if setting.name.startswith("COMPILER_"):
self._settings[setting.name] = setting.value
elif not ignore_unsupported:
log.warning(f"Unsupported setting ignored: {setting.name}")
def _adjustment(self, adjustment):
    """Convert a VOLT POS tuple into a feaLib ValueRecord.

    ADJUST_BY dicts become device tables; empty ones become None.
    """
    adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
    return ast.ValueRecord(
        xPlacement=dx,
        yPlacement=dy,
        xAdvance=adv,
        xPlaDevice=dx_adjust_by.items() if dx_adjust_by else None,
        yPlaDevice=dy_adjust_by.items() if dy_adjust_by else None,
        xAdvDevice=adv_adjust_by.items() if adv_adjust_by else None,
    )
def _anchor(self, adjustment):
    """Convert a VOLT POS tuple into a feaLib Anchor.

    Anchors carry no advance, so any advance adjustment is rejected.
    """
    adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
    assert not adv_adjust_by
    return ast.Anchor(
        dx or 0,
        dy or 0,
        xDeviceTable=dx_adjust_by.items() if dx_adjust_by else None,
        yDeviceTable=dy_adjust_by.items() if dy_adjust_by else None,
    )
def _anchorDefinition(self, anchordef):
    """Store an anchor keyed by glyph, normalized anchor name, and
    ligature component index."""
    anchorname = anchordef.name
    glyphname = anchordef.glyph_name
    anchor = self._anchor(anchordef.pos)
    if glyphname not in self._anchors:
        self._anchors[glyphname] = {}
    # Anchor names are case-insensitive in VOLT; keep the "MARK_"
    # prefix intact so mark anchors remain distinguishable.
    if anchorname.startswith("MARK_"):
        anchorname = anchorname[:5] + anchorname[5:].lower()
    else:
        anchorname = anchorname.lower()
    if anchorname not in self._anchors[glyphname]:
        self._anchors[glyphname][anchorname] = {}
    self._anchors[glyphname][anchorname][anchordef.component] = anchor
def _gposLookup(self, lookup, fealookup):
    """Translate a non-contextual VOLT positioning lookup into feaLib
    statements appended to *fealookup*.

    Handles pair adjustment, single adjustment, mark attachment
    (base/mark/ligature) and cursive attachment.
    """
    statements = fealookup.statements
    pos = lookup.pos
    if isinstance(pos, VAst.PositionAdjustPairDefinition):
        for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
            # VOLT pair indices are 1-based.
            coverage_1 = pos.coverages_1[idx1 - 1]
            coverage_2 = pos.coverages_2[idx2 - 1]
            # If not both are groups, use “enum pos” otherwise makeotf will
            # fail.
            enumerated = False
            for item in coverage_1 + coverage_2:
                if not isinstance(item, VAst.GroupName):
                    enumerated = True
            glyphs1 = self._coverage(coverage_1)
            glyphs2 = self._coverage(coverage_2)
            record1 = self._adjustment(pos1)
            record2 = self._adjustment(pos2)
            assert len(glyphs1) == 1
            assert len(glyphs2) == 1
            statements.append(
                ast.PairPosStatement(
                    glyphs1[0], record1, glyphs2[0], record2, enumerated=enumerated
                )
            )
    elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
        for a, b in pos.adjust_single:
            glyphs = self._coverage(a)
            record = self._adjustment(b)
            assert len(glyphs) == 1
            statements.append(
                ast.SinglePosStatement([(glyphs[0], record)], [], [], False)
            )
    elif isinstance(pos, VAst.PositionAttachDefinition):
        anchors = {}
        allmarks = set()
        for coverage, anchorname in pos.coverage_to:
            # In feature files mark classes are global, but in VOLT they
            # are defined per-lookup. If we output mark class definitions
            # for all marks that use a given anchor, we might end up with a
            # mark used in two different classes in the same lookup, which
            # causes a feature file compilation error.
            # At the expense of uglier feature code, we make the mark class
            # name by appending the current lookup name not the anchor
            # name, and output mark class definitions only for marks used
            # in this lookup.
            classname = self._className(f"{anchorname}.{lookup.name}")
            markclass = ast.MarkClass(classname)
            # Anchor names are case-insensitive in VOLT
            anchorname = anchorname.lower()
            # We might still end in marks used in two different anchor
            # classes, so we filter out already used marks.
            marks = set()
            for mark in coverage:
                marks.update(mark.glyphSet())
            if not marks.isdisjoint(allmarks):
                marks.difference_update(allmarks)
                if not marks:
                    continue
            allmarks.update(marks)
            for glyphname in marks:
                glyph = self._glyphName(glyphname)
                anchor = self._anchors[glyphname][f"MARK_{anchorname}"][1]
                markdef = ast.MarkClassDefinition(markclass, anchor, glyph)
                self._markclasses[(glyphname, classname)] = markdef
            for base in pos.coverage:
                for name in base.glyphSet():
                    if name not in anchors:
                        anchors[name] = []
                    if (anchorname, classname) not in anchors[name]:
                        anchors[name].append((anchorname, classname))
        # The statement type depends on what the covered base glyphs are.
        is_ligature = all(n in self._ligatures for n in anchors)
        is_mark = all(n in self._marks for n in anchors)
        for name in anchors:
            components = 1
            if is_ligature:
                components = self._ligatures[name]
            # One anchor list per ligature component.
            marks = [[] for _ in range(components)]
            for mark, classname in anchors[name]:
                markclass = ast.MarkClass(classname)
                for component in range(1, components + 1):
                    if component in self._anchors[name][mark]:
                        anchor = self._anchors[name][mark][component]
                        marks[component - 1].append((anchor, markclass))
            base = self._glyphName(name)
            if is_mark:
                mark = ast.MarkMarkPosStatement(base, marks[0])
            elif is_ligature:
                mark = ast.MarkLigPosStatement(base, marks)
            else:
                mark = ast.MarkBasePosStatement(base, marks[0])
            statements.append(mark)
    elif isinstance(pos, VAst.PositionAttachCursiveDefinition):
        # Collect enter and exit glyphs
        enter_coverage = []
        for coverage in pos.coverages_enter:
            for base in coverage:
                for name in base.glyphSet():
                    enter_coverage.append(name)
        exit_coverage = []
        for coverage in pos.coverages_exit:
            for base in coverage:
                for name in base.glyphSet():
                    exit_coverage.append(name)
        # Write enter anchors, also check if the glyph has exit anchor and
        # write it, too.
        for name in enter_coverage:
            glyph = self._glyphName(name)
            entry = self._anchors[name]["entry"][1]
            exit = None
            if name in exit_coverage:
                exit = self._anchors[name]["exit"][1]
                exit_coverage.pop(exit_coverage.index(name))
            statements.append(ast.CursivePosStatement(glyph, entry, exit))
        # Write any remaining exit anchors.
        for name in exit_coverage:
            glyph = self._glyphName(name)
            exit = self._anchors[name]["exit"][1]
            statements.append(ast.CursivePosStatement(glyph, None, exit))
    else:
        raise NotImplementedError(pos)
def _gposContextLookup(self, lookup, prefix, suffix, ignore, fealookup, chained):
    """Emit contextual GPOS chain/ignore statements.

    The actual adjustments live in *chained*; this emits the context
    statements (or ignore rules) that reference it.
    """
    statements = fealookup.statements
    pos = lookup.pos
    if isinstance(pos, VAst.PositionAdjustPairDefinition):
        for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
            glyphs1 = self._coverage(pos.coverages_1[idx1 - 1])
            glyphs2 = self._coverage(pos.coverages_2[idx2 - 1])
            assert len(glyphs1) == 1
            assert len(glyphs2) == 1
            glyphs = (glyphs1[0], glyphs2[0])
            if ignore:
                statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
            else:
                # Both input positions reference the same chained lookup.
                statement = ast.ChainContextPosStatement(
                    prefix, glyphs, suffix, [chained, chained]
                )
            statements.append(statement)
    elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
        # Union all covered glyphs into one input class.
        glyphs = [ast.GlyphClass()]
        for a, _ in pos.adjust_single:
            glyphs[0].extend(self._coverage(a, flatten=True))
        if ignore:
            statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
        else:
            statement = ast.ChainContextPosStatement(
                prefix, glyphs, suffix, [chained]
            )
        statements.append(statement)
    elif isinstance(pos, VAst.PositionAttachDefinition):
        # Union all covered glyphs into one input class.
        glyphs = [ast.GlyphClass()]
        for coverage, _ in pos.coverage_to:
            glyphs[0].extend(self._coverage(coverage, flatten=True))
        if ignore:
            statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
        else:
            statement = ast.ChainContextPosStatement(
                prefix, glyphs, suffix, [chained]
            )
        statements.append(statement)
    else:
        raise NotImplementedError(pos)
def _gsubLookup(self, lookup, fealookup):
    """Translate a non-contextual VOLT substitution lookup into feaLib
    statements appended to *fealookup*.

    Handles single, multiple, alternate and ligature substitutions;
    reverse chaining is handled in ``_gsubContextLookup``.
    """
    statements = fealookup.statements
    sub = lookup.sub
    # Alternate substitutions are represented by adding multiple
    # substitutions for the same glyph, so we need to collect them into one
    # to many mapping.
    if isinstance(sub, VAst.SubstitutionAlternateDefinition):
        alternates = {}
        for key, val in sub.mapping.items():
            if not key or not val:
                path, line, column = sub.location
                log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
                continue
            glyphs = self._coverage(key)
            replacements = self._coverage(val)
            assert len(glyphs) == 1
            for src_glyph, repl_glyph in zip(
                glyphs[0].glyphSet(), replacements[0].glyphSet()
            ):
                alternates.setdefault(str(self._glyphName(src_glyph)), []).append(
                    str(self._glyphName(repl_glyph))
                )
        for glyph, replacements in alternates.items():
            statement = ast.AlternateSubstStatement(
                [], glyph, [], ast.GlyphClass(replacements)
            )
            statements.append(statement)
        return
    for key, val in sub.mapping.items():
        if not key or not val:
            path, line, column = sub.location
            log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
            continue
        glyphs = self._coverage(key)
        replacements = self._coverage(val)
        if isinstance(sub, VAst.SubstitutionSingleDefinition):
            assert len(glyphs) == 1
            assert len(replacements) == 1
            statements.append(
                ast.SingleSubstStatement(glyphs, replacements, [], [], False)
            )
        elif isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition):
            # This is handled in gsubContextLookup()
            pass
        elif isinstance(sub, VAst.SubstitutionMultipleDefinition):
            assert len(glyphs) == 1
            statements.append(
                ast.MultipleSubstStatement([], glyphs[0], [], replacements)
            )
        elif isinstance(sub, VAst.SubstitutionLigatureDefinition):
            assert len(replacements) == 1
            statement = ast.LigatureSubstStatement(
                [], glyphs, [], replacements[0], False
            )
            # If any of the input glyphs is a group, we need to
            # explode the substitution into multiple ligature substitutions
            # since feature file syntax does not support classes in
            # ligature substitutions.
            n = max(len(x.glyphSet()) for x in glyphs)
            if n > 1:
                # All input should either be groups of the same length or single glyphs
                assert all(len(x.glyphSet()) in (n, 1) for x in glyphs)
                glyphs = [x.glyphSet() for x in glyphs]
                # Repeat single glyphs to match the group length.
                glyphs = [([x[0]] * n if len(x) == 1 else x) for x in glyphs]
                # In this case ligature replacements must be a group of the same length
                # as the input groups, or a single glyph. VOLT
                # allows the replacement glyphs to be longer and truncates them.
                # So we'll allow that and zip() below will do the truncation
                # for us.
                replacement = replacements[0].glyphSet()
                if len(replacement) == 1:
                    replacement = [replacement[0]] * n
                assert len(replacement) >= n
                # Add the unexploded statement commented out for reference.
                statements.append(ast.Comment(f"# {statement}"))
                for zipped in zip(*glyphs, replacement):
                    zipped = [self._glyphName(x) for x in zipped]
                    statements.append(
                        ast.LigatureSubstStatement(
                            [], zipped[:-1], [], zipped[-1], False
                        )
                    )
            else:
                statements.append(statement)
        else:
            raise NotImplementedError(sub)
def _gsubContextLookup(self, lookup, prefix, suffix, ignore, fealookup, chained):
    """Emit contextual GSUB chain/ignore statements.

    Reverse chaining substitutions are emitted directly; everything
    else becomes a chain (or ignore) rule referencing *chained*.
    """
    statements = fealookup.statements
    sub = lookup.sub
    if isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition):
        # Reverse substitution is a special case; it can't use chained lookups.
        for key, val in sub.mapping.items():
            if not key or not val:
                path, line, column = sub.location
                log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
                continue
            glyphs = self._coverage(key)
            replacements = self._coverage(val)
            statements.append(
                ast.ReverseChainSingleSubstStatement(
                    prefix, suffix, glyphs, replacements
                )
            )
        # The chained lookup is not needed in this case.
        fealookup.chained = []
        return
    if not isinstance(
        sub,
        (
            VAst.SubstitutionSingleDefinition,
            VAst.SubstitutionMultipleDefinition,
            VAst.SubstitutionLigatureDefinition,
            VAst.SubstitutionAlternateDefinition,
        ),
    ):
        raise NotImplementedError(type(sub))
    # Union all input glyphs into a single context position.
    glyphs = []
    for key, val in sub.mapping.items():
        if not key or not val:
            path, line, column = sub.location
            log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
            continue
        glyphs.extend(self._coverage(key, flatten=True))
    if len(glyphs) > 1:
        glyphs = [ast.GlyphClass(glyphs)]
    if ignore:
        statements.append(ast.IgnoreSubstStatement([(prefix, glyphs, suffix)]))
    else:
        statements.append(
            ast.ChainContextSubstStatement(prefix, glyphs, suffix, [chained])
        )
def _lookupDefinition(self, lookup):
    """Convert one VOLT lookup into one or more feaLib lookup blocks.

    Computes lookup flags, merges "base\\sub"-named lookups into
    subtables of one block, and routes contextual lookups through a
    separate chained lookup block.
    """
    mark_attachement = None
    mark_filtering = None
    # Build the OpenType lookup-flag bitfield.
    flags = 0
    if lookup.direction == "RTL":
        flags |= 1
    if not lookup.process_base:
        flags |= 2
    # FIXME: Does VOLT support this?
    # if not lookup.process_ligatures:
    #     flags |= 4
    if not lookup.process_marks:
        flags |= 8
    elif isinstance(lookup.process_marks, str):
        mark_attachement = self._groupName(lookup.process_marks)
    elif lookup.mark_glyph_set is not None:
        mark_filtering = self._groupName(lookup.mark_glyph_set)
    lookupflags = None
    if flags or mark_attachement is not None or mark_filtering is not None:
        lookupflags = ast.LookupFlagStatement(
            flags, mark_attachement, mark_filtering
        )
    use_extension = False
    if self._settings.get("COMPILER_USEEXTENSIONLOOKUPS"):
        use_extension = True
    if "\\" in lookup.name:
        # Merge sub lookups as subtables (lookups named “base\sub”),
        # makeotf/feaLib will issue a warning and ignore the subtable
        # statement if it is not a pairpos lookup, though.
        name = lookup.name.split("\\")[0]
        if name.lower() not in self._lookups:
            fealookup = Lookup(
                self._lookupName(name),
                use_extension=use_extension,
            )
            if lookupflags is not None:
                fealookup.statements.append(lookupflags)
            fealookup.statements.append(ast.Comment("# " + lookup.name))
        else:
            fealookup = self._lookups[name.lower()]
            fealookup.statements.append(ast.SubtableStatement())
            fealookup.statements.append(ast.Comment("# " + lookup.name))
        self._lookups[name.lower()] = fealookup
    else:
        fealookup = Lookup(
            self._lookupName(lookup.name),
            use_extension=use_extension,
        )
        if lookupflags is not None:
            fealookup.statements.append(lookupflags)
        self._lookups[lookup.name.lower()] = fealookup
    if lookup.comments is not None:
        fealookup.statements.append(ast.Comment("# " + lookup.comments))
    contexts = []
    for context in lookup.context:
        prefix = self._context(context.left)
        suffix = self._context(context.right)
        ignore = context.ex_or_in == "EXCEPT_CONTEXT"
        contexts.append([prefix, suffix, ignore])
        # It seems that VOLT will create contextual substitution using
        # only the input if there is no other contexts in this lookup.
        if ignore and len(lookup.context) == 1:
            contexts.append([[], [], False])
    if contexts:
        # The real rules go into a separate "chained" lookup; the main
        # lookup only holds the context statements referencing it.
        chained = ast.LookupBlock(
            self._lookupName(lookup.name + " chained"),
            use_extension=use_extension,
        )
        fealookup.chained.append(chained)
        if lookup.sub is not None:
            self._gsubLookup(lookup, chained)
        elif lookup.pos is not None:
            self._gposLookup(lookup, chained)
        for prefix, suffix, ignore in contexts:
            if lookup.sub is not None:
                self._gsubContextLookup(
                    lookup, prefix, suffix, ignore, fealookup, chained
                )
            elif lookup.pos is not None:
                self._gposContextLookup(
                    lookup, prefix, suffix, ignore, fealookup, chained
                )
    else:
        if lookup.sub is not None:
            self._gsubLookup(lookup, fealookup)
        elif lookup.pos is not None:
            self._gposLookup(lookup, fealookup)
def main(args=None):
    """Convert MS VOLT to AFDKO feature files."""
    import argparse
    from pathlib import Path

    from fontTools import configLogger

    parser = argparse.ArgumentParser(
        "fonttools voltLib.voltToFea", description=main.__doc__
    )
    parser.add_argument(
        "input", metavar="INPUT", type=Path, help="input font/VTP file to process"
    )
    parser.add_argument(
        "featurefile", metavar="OUTPUT", type=Path, help="output feature file"
    )
    parser.add_argument(
        "-t",
        "--table",
        action="append",
        choices=TABLES,
        dest="tables",
        help="List of tables to write, by default all tables are written",
    )
    parser.add_argument(
        "-q", "--quiet", action="store_true", help="Suppress non-error messages"
    )
    parser.add_argument(
        "--traceback", action="store_true", help="Dont catch exceptions"
    )
    options = parser.parse_args(args)

    configLogger(level=("ERROR" if options.quiet else "INFO"))

    file_or_path = options.input
    font = None
    try:
        # If the input is a font, extract the VOLT project from its TSIV table.
        font = TTFont(file_or_path)
        if "TSIV" in font:
            file_or_path = StringIO(font["TSIV"].data.decode("utf-8"))
        else:
            log.error('"TSIV" table is missing, font was not saved from VOLT?')
            return 1
    except TTLibError:
        # Not a font file; treat the input as a VTP project file.
        pass

    converter = VoltToFea(file_or_path, font)
    try:
        fea = converter.convert(options.tables)
    except NotImplementedError as e:
        if options.traceback:
            raise
        location = getattr(e.args[0], "location", None)
        message = f'"{e}" is not supported'
        if location:
            path, line, column = location
            log.error(f"{path}:{line}:{column}: {message}")
        else:
            log.error(message)
        return 1
    # Write UTF-8 explicitly: the platform default codec may not be able
    # to represent all glyph names in the generated feature text.
    with open(options.featurefile, "w", encoding="utf-8") as feafile:
        feafile.write(fea)
# Allow running this module directly as a script.
if __name__ == "__main__":
    import sys
    sys.exit(main())