follow-up: book

Tykayn 2025-08-30 18:14:14 +02:00 committed by tykayn
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions


@@ -0,0 +1,8 @@
import logging
from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
version = __version__ = "4.59.2"
__all__ = ["version", "log", "configLogger"]
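# Usage sketch (illustrative, not part of the module): configure the
# library-wide logger that this package exposes, e.g.
#
#     from fontTools import configLogger
#     configLogger(level="INFO")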


@@ -0,0 +1,35 @@
import sys
def main(args=None):
if args is None:
args = sys.argv[1:]
# TODO Handle library-wide options. Eg.:
# --unicodedata
# --verbose / other logging stuff
# TODO Allow a way to run arbitrary modules? Useful for setting
# library-wide options and calling another library. Eg.:
#
# $ fonttools --unicodedata=... fontmake ...
#
# This allows for a git-like command where thirdparty commands
# can be added. Should we just try importing the fonttools
# module first and try without if it fails?
if len(sys.argv) < 2:
sys.argv.append("help")
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
sys.argv[1] = "help"
mod = "fontTools." + sys.argv[1]
sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
del sys.argv[0]
import runpy
runpy.run_module(mod, run_name="__main__")
if __name__ == "__main__":
sys.exit(main())
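# Worked example of the dispatch above (illustrative): invoking `fonttools ttx -h`
# leaves sys.argv[1] == "ttx", so mod becomes "fontTools.ttx", sys.argv is
# rewritten to ["fonttools ttx", "-h"], and runpy runs fontTools.ttx as a script.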


@@ -0,0 +1,439 @@
"""Module for reading and writing AFM (Adobe Font Metrics) files.
Note that this has been designed to read in AFM files generated by Fontographer
and has not been tested on many other files. In particular, it does not
implement the whole Adobe AFM specification [#f1]_, but it should read most
"common" AFM files.
Here is an example of using `afmLib` to read, modify and write an AFM file:
>>> from fontTools.afmLib import AFM
>>> f = AFM("Tests/afmLib/data/TestAFM.afm")
>>>
>>> # Accessing a pair gets you the kern value
>>> f[("V","A")]
-60
>>>
>>> # Accessing a glyph name gets you metrics
>>> f["A"]
(65, 668, (8, -25, 660, 666))
>>> # (charnum, width, bounding box)
>>>
>>> # Accessing an attribute gets you metadata
>>> f.FontName
'TestFont-Regular'
>>> f.FamilyName
'TestFont'
>>> f.Weight
'Regular'
>>> f.XHeight
500
>>> f.Ascender
750
>>>
>>> # Attributes and items can also be set
>>> f[("A","V")] = -150 # Tighten kerning
>>> f.FontName = "TestFont Squished"
>>>
>>> # And the font written out again (remove the # in front)
>>> #f.write("testfont-squished.afm")
.. rubric:: Footnotes
.. [#f1] `Adobe Technote 5004 <https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5004.AFM_Spec.pdf>`_,
Adobe Font Metrics File Format Specification.
"""
import re
# every single line starts with a "word"
identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
r"(-?\d+)" # charnum
r"\s*;\s*WX\s+" # ; WX
r"(-?\d+)" # width
r"\s*;\s*N\s+" # ; N
r"([.A-Za-z0-9_]+)" # charname
r"\s*;\s*B\s+" # ; B
r"(-?\d+)" # left
r"\s+"
r"(-?\d+)" # bottom
r"\s+"
r"(-?\d+)" # right
r"\s+"
r"(-?\d+)" # top
r"\s*;\s*" # ;
)
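# e.g. once read() has stripped the leading "C" keyword, charRE parses the
# remainder of a char metrics line such as:
#   65 ; WX 668 ; N A ; B 8 -25 660 666 ;
# (values taken from the module docstring example above)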
# regular expression to parse kerning lines
kernRE = re.compile(
r"([.A-Za-z0-9_]+)" # leftchar
r"\s+"
r"([.A-Za-z0-9_]+)" # rightchar
r"\s+"
r"(-?\d+)" # value
r"\s*"
)
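# e.g. for the docstring's kern pair f[("V","A")] == -60, kernRE parses
# "V A -60", the remainder of the line "KPX V A -60" after read() strips
# the "KPX" keyword.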
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
r"([.A-Za-z0-9_]+)" # char name
r"\s+"
r"(\d+)" # number of parts
r"\s*;\s*"
)
componentRE = re.compile(
r"PCC\s+" # PPC
r"([.A-Za-z0-9_]+)" # base char name
r"\s+"
r"(-?\d+)" # x offset
r"\s+"
r"(-?\d+)" # y offset
r"\s*;\s*"
)
preferredAttributeOrder = [
"FontName",
"FullName",
"FamilyName",
"Weight",
"ItalicAngle",
"IsFixedPitch",
"FontBBox",
"UnderlinePosition",
"UnderlineThickness",
"Version",
"Notice",
"EncodingScheme",
"CapHeight",
"XHeight",
"Ascender",
"Descender",
]
class error(Exception):
pass
class AFM(object):
_attrs = None
_keywords = [
"StartFontMetrics",
"EndFontMetrics",
"StartCharMetrics",
"EndCharMetrics",
"StartKernData",
"StartKernPairs",
"EndKernPairs",
"EndKernData",
"StartComposites",
"EndComposites",
]
def __init__(self, path=None):
"""AFM file reader.
Instantiating an object with a path name will cause the file to be opened,
read, and parsed. Alternatively the path can be left unspecified, and a
file can be parsed later with the :meth:`read` method."""
self._attrs = {}
self._chars = {}
self._kerning = {}
self._index = {}
self._comments = []
self._composites = {}
if path is not None:
self.read(path)
def read(self, path):
"""Opens, reads and parses a file."""
lines = readlines(path)
for line in lines:
if not line.strip():
continue
m = identifierRE.match(line)
if m is None:
raise error("syntax error in AFM file: " + repr(line))
pos = m.regs[1][1]
word = line[:pos]
rest = line[pos:].strip()
if word in self._keywords:
continue
if word == "C":
self.parsechar(rest)
elif word == "KPX":
self.parsekernpair(rest)
elif word == "CC":
self.parsecomposite(rest)
else:
self.parseattr(word, rest)
def parsechar(self, rest):
m = charRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
charname = things[2]
del things[2]
charnum, width, l, b, r, t = (int(thing) for thing in things)
self._chars[charname] = charnum, width, (l, b, r, t)
def parsekernpair(self, rest):
m = kernRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
leftchar, rightchar, value = things
value = int(value)
self._kerning[(leftchar, rightchar)] = value
def parseattr(self, word, rest):
if word == "FontBBox":
l, b, r, t = [int(thing) for thing in rest.split()]
self._attrs[word] = l, b, r, t
elif word == "Comment":
self._comments.append(rest)
else:
try:
value = int(rest)
except (ValueError, OverflowError):
self._attrs[word] = rest
else:
self._attrs[word] = value
def parsecomposite(self, rest):
m = compositeRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
charname = m.group(1)
ncomponents = int(m.group(2))
rest = rest[m.regs[0][1] :]
components = []
while True:
m = componentRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
basechar = m.group(1)
xoffset = int(m.group(2))
yoffset = int(m.group(3))
components.append((basechar, xoffset, yoffset))
rest = rest[m.regs[0][1] :]
if not rest:
break
assert len(components) == ncomponents
self._composites[charname] = components
def write(self, path, sep="\r"):
"""Writes out an AFM font to the given path."""
import time
lines = [
"StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s"
% (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
]
# write comments, assuming (possibly wrongly!) they should
# all appear at the top
for comment in self._comments:
lines.append("Comment " + comment)
# write attributes, first the ones we know about, in
# a preferred order
attrs = self._attrs
for attr in preferredAttributeOrder:
if attr in attrs:
value = attrs[attr]
if attr == "FontBBox":
value = "%s %s %s %s" % value
lines.append(attr + " " + str(value))
# then write the attributes we don't know about,
# in alphabetical order
items = sorted(attrs.items())
for attr, value in items:
if attr in preferredAttributeOrder:
continue
lines.append(attr + " " + str(value))
# write char metrics
lines.append("StartCharMetrics " + repr(len(self._chars)))
items = [
(charnum, (charname, width, box))
for charname, (charnum, width, box) in self._chars.items()
]
def myKey(a):
"""Custom key function to make sure unencoded chars (-1)
end up at the end of the list after sorting."""
if a[0] == -1:
a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number
return a
items.sort(key=myKey)
for charnum, (charname, width, (l, b, r, t)) in items:
lines.append(
"C %d ; WX %d ; N %s ; B %d %d %d %d ;"
% (charnum, width, charname, l, b, r, t)
)
lines.append("EndCharMetrics")
# write kerning info
lines.append("StartKernData")
lines.append("StartKernPairs " + repr(len(self._kerning)))
items = sorted(self._kerning.items())
for (leftchar, rightchar), value in items:
lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
lines.append("EndKernPairs")
lines.append("EndKernData")
if self._composites:
composites = sorted(self._composites.items())
lines.append("StartComposites %s" % len(self._composites))
for charname, components in composites:
line = "CC %s %s ;" % (charname, len(components))
for basechar, xoffset, yoffset in components:
line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
lines.append(line)
lines.append("EndComposites")
lines.append("EndFontMetrics")
writelines(path, lines, sep)
def has_kernpair(self, pair):
"""Returns `True` if the given glyph pair (specified as a tuple) exists
in the kerning dictionary."""
return pair in self._kerning
def kernpairs(self):
"""Returns a list of all kern pairs in the kerning dictionary."""
return list(self._kerning.keys())
def has_char(self, char):
"""Returns `True` if the given glyph exists in the font."""
return char in self._chars
def chars(self):
"""Returns a list of all glyph names in the font."""
return list(self._chars.keys())
def comments(self):
"""Returns all comments from the file."""
return self._comments
def addComment(self, comment):
"""Adds a new comment to the file."""
self._comments.append(comment)
def addComposite(self, glyphName, components):
"""Specifies that the glyph `glyphName` is made up of the given components.
The components list should be of the following form::
[
(glyphname, xOffset, yOffset),
...
]
"""
self._composites[glyphName] = components
def __getattr__(self, attr):
if attr in self._attrs:
return self._attrs[attr]
else:
raise AttributeError(attr)
def __setattr__(self, attr, value):
        # all attrs *not* starting with "_" are considered to be AFM keywords
if attr[:1] == "_":
self.__dict__[attr] = value
else:
self._attrs[attr] = value
def __delattr__(self, attr):
        # all attrs *not* starting with "_" are considered to be AFM keywords
if attr[:1] == "_":
try:
del self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
else:
try:
del self._attrs[attr]
except KeyError:
raise AttributeError(attr)
def __getitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, return the kernpair
return self._kerning[key]
else:
# return the metrics instead
return self._chars[key]
def __setitem__(self, key, value):
if isinstance(key, tuple):
# key is a tuple, set kernpair
self._kerning[key] = value
else:
# set char metrics
self._chars[key] = value
def __delitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, del kernpair
del self._kerning[key]
else:
# del char metrics
del self._chars[key]
def __repr__(self):
if hasattr(self, "FullName"):
return "<AFM object for %s>" % self.FullName
else:
return "<AFM object at %x>" % id(self)
def readlines(path):
with open(path, "r", encoding="ascii") as f:
data = f.read()
return data.splitlines()
def writelines(path, lines, sep="\r"):
with open(path, "w", encoding="ascii", newline=sep) as f:
f.write("\n".join(lines) + "\n")
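# AFM files written by classic Mac OS tools such as Fontographer use "\r"
# line endings, which is presumably why writelines() defaults to sep="\r".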
if __name__ == "__main__":
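    # Note: EasyDialogs is a legacy classic-Mac-OS module that is not
    # available in modern Python; this interactive demo block is historical.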
import EasyDialogs
path = EasyDialogs.AskFileForOpen()
if path:
afm = AFM(path)
char = "A"
if afm.has_char(char):
print(afm[char]) # print charnum, width and boundingbox
pair = ("A", "V")
if afm.has_kernpair(pair):
print(afm[pair]) # print kerning value for pair
print(afm.Version) # various other afm entries have become attributes
print(afm.Weight)
# afm.comments() returns a list of all Comment lines found in the AFM
print(afm.comments())
# print afm.chars()
# print afm.kernpairs()
print(afm)
afm.write(path + ".muck")

File diff suppressed because it is too large


@@ -0,0 +1,233 @@
"""CFF2 to CFF converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.misc.psCharStrings import T2StackUseExtractor
from fontTools.cffLib import (
TopDictIndex,
buildOrder,
buildDefaults,
topDictOperators,
privateDictOperators,
FDSelect,
)
from .transforms import desubroutinizeCharString
from .specializer import specializeProgram
from .width import optimizeWidths
from collections import defaultdict
import logging
__all__ = ["convertCFF2ToCFF", "main"]
log = logging.getLogger("fontTools.cffLib")
def _convertCFF2ToCFF(cff, otFont):
"""Converts this object from CFF2 format to CFF format. This conversion
is done 'in-place'. The conversion cannot be reversed.
The CFF2 font cannot be variable. (TODO Accept those and convert to the
default instance?)
This assumes a decompiled CFF2 table. (i.e. that the object has been
filled via :meth:`decompile` and e.g. not loaded from XML.)"""
cff.major = 1
topDictData = TopDictIndex(None)
for item in cff.topDictIndex:
        # Iterate over all items so that each one gets decompiled
item.cff2GetGlyphOrder = None
topDictData.append(item)
cff.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, "VarStore"):
raise ValueError("Variable CFF2 font cannot be converted to CFF format.")
opOrder = buildOrder(topDictOperators)
topDict.order = opOrder
    for key in list(topDict.rawDict.keys()):
if key not in opOrder:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
charStrings = topDict.CharStrings
fdArray = topDict.FDArray
if not hasattr(topDict, "FDSelect"):
# FDSelect is optional in CFF2, but required in CFF.
fdSelect = topDict.FDSelect = FDSelect()
fdSelect.gidArray = [0] * len(charStrings.charStrings)
defaults = buildDefaults(privateDictOperators)
order = buildOrder(privateDictOperators)
for fd in fdArray:
fd.setCFF2(False)
privateDict = fd.Private
privateDict.order = order
for key in order:
if key not in privateDict.rawDict and key in defaults:
privateDict.rawDict[key] = defaults[key]
        for key in list(privateDict.rawDict.keys()):
if key not in order:
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# Add ending operators
for cs in charStrings.values():
cs.decompile()
cs.program.append("endchar")
for subrSets in [cff.GlobalSubrs] + [
getattr(fd.Private, "Subrs", []) for fd in fdArray
]:
for cs in subrSets:
cs.program.append("return")
# Add (optimal) width to CharStrings that need it.
widths = defaultdict(list)
metrics = otFont["hmtx"].metrics
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
fdIndex = 0
widths[fdIndex].append(metrics[glyphName][0])
for fdIndex, widthList in widths.items():
bestDefault, bestNominal = optimizeWidths(widthList)
private = fdArray[fdIndex].Private
private.defaultWidthX = bestDefault
private.nominalWidthX = bestNominal
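    # Per the CFF spec, a charstring omits its width entirely when the width
    # equals the Private dict's defaultWidthX; otherwise it encodes
    # (width - nominalWidthX) as its first operand. The loop below inserts
    # that operand where needed.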
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
fdIndex = 0
private = fdArray[fdIndex].Private
width = metrics[glyphName][0]
if width != private.defaultWidthX:
cs.program.insert(0, width - private.nominalWidthX)
# Handle stack use since stack-depth is lower in CFF than in CFF2.
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
if fdIndex is None:
fdIndex = 0
private = fdArray[fdIndex].Private
extractor = T2StackUseExtractor(
getattr(private, "Subrs", []), cff.GlobalSubrs, private=private
)
stackUse = extractor.execute(cs)
if stackUse > 48: # CFF stack depth is 48
desubroutinizeCharString(cs)
cs.program = specializeProgram(cs.program)
    # Unused subroutines are still in CFF2 form (ie. lacking the 'return' operator)
# because they were not decompiled when we added the 'return'.
# Moreover, some used subroutines may have become unused after the
# stack-use fixup. So we remove all unused subroutines now.
cff.remove_unused_subroutines()
mapping = {
name: ("cid" + str(n).zfill(5) if n else ".notdef")
for n, name in enumerate(topDict.charset)
}
topDict.charset = [
"cid" + str(n).zfill(5) if n else ".notdef" for n in range(len(topDict.charset))
]
charStrings.charStrings = {
mapping[name]: v for name, v in charStrings.charStrings.items()
}
topDict.ROS = ("Adobe", "Identity", 0)
def convertCFF2ToCFF(font, *, updatePostTable=True):
if "CFF2" not in font:
raise ValueError("Input font does not contain a CFF2 table.")
cff = font["CFF2"].cff
_convertCFF2ToCFF(cff, font)
del font["CFF2"]
table = font["CFF "] = newTable("CFF ")
table.cff = cff
if updatePostTable and "post" in font:
        # The only 'post' table version supported for fonts with a CFF table
        # is 3.0 (0x00030000), not 2.0 (0x00020000).
post = font["post"]
if post.formatType == 2.0:
post.formatType = 3.0
def main(args=None):
"""Convert CFF2 OTF font to CFF OTF font"""
if args is None:
import sys
args = sys.argv[1:]
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.CFF2ToCFF",
description="Convert a non-variable CFF2 font to CFF.",
)
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance OTF file (default: INPUT-CFF2.ttf).",
)
parser.add_argument(
"--no-recalc-timestamp",
dest="recalc_timestamp",
action="store_false",
help="Don't set the output font's timestamp to the current time.",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
from fontTools import configLogger
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
import os
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
outfile = (
makeOutputFileName(infile, overWrite=True, suffix="-CFF")
if not options.output
else options.output
)
font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)
convertCFF2ToCFF(font)
log.info(
"Saving %s",
outfile,
)
font.save(outfile)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
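# Usage sketch (hypothetical file names):
#   $ fonttools cffLib.CFF2ToCFF MyFont.otf -o MyFont-CFF.otf
# converts a non-variable CFF2 font back to CFF.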


@@ -0,0 +1,305 @@
"""CFF to CFF2 converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.cffLib import (
TopDictIndex,
FDArrayIndex,
FontDict,
buildOrder,
topDictOperators,
privateDictOperators,
topDictOperators2,
privateDictOperators2,
)
from io import BytesIO
import logging
__all__ = ["convertCFFToCFF2", "main"]
log = logging.getLogger("fontTools.cffLib")
class _NominalWidthUsedError(Exception):
def __add__(self, other):
raise self
def __radd__(self, other):
raise self
def _convertCFFToCFF2(cff, otFont):
"""Converts this object from CFF format to CFF2 format. This conversion
is done 'in-place'. The conversion cannot be reversed.
This assumes a decompiled CFF table. (i.e. that the object has been
filled via :meth:`decompile` and e.g. not loaded from XML.)"""
# Clean up T2CharStrings
topDict = cff.topDictIndex[0]
fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None
charStrings = topDict.CharStrings
globalSubrs = cff.GlobalSubrs
localSubrs = (
[getattr(fd.Private, "Subrs", []) for fd in fdArray]
if fdArray
else (
[topDict.Private.Subrs]
if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs")
else []
)
)
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
cs.decompile()
# Clean up subroutines first
for subrs in [globalSubrs] + localSubrs:
for subr in subrs:
program = subr.program
i = j = len(program)
try:
i = program.index("return")
except ValueError:
pass
try:
j = program.index("endchar")
except ValueError:
pass
program[min(i, j) :] = []
# Clean up glyph charstrings
removeUnusedSubrs = False
nominalWidthXError = _NominalWidthUsedError()
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
program = cs.program
thisLocalSubrs = (
localSubrs[fdIndex]
if fdIndex is not None
else (
getattr(topDict.Private, "Subrs", [])
if hasattr(topDict, "Private")
else []
)
)
        # Intentionally use a custom type for nominalWidthX, such that any
        # CharString that has an explicit width encoded will raise an error
        # back to us.
extractor = T2WidthExtractor(
thisLocalSubrs,
globalSubrs,
nominalWidthXError,
0,
)
try:
extractor.execute(cs)
except _NominalWidthUsedError:
# Program has explicit width. We want to drop it, but can't
# just pop the first number since it may be a subroutine call.
# Instead, when seeing that, we embed the subroutine and recurse.
# If this ever happened, we later prune unused subroutines.
while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]:
removeUnusedSubrs = True
subrNumber = program.pop(0)
assert isinstance(subrNumber, int), subrNumber
op = program.pop(0)
bias = extractor.localBias if op == "callsubr" else extractor.globalBias
subrNumber += bias
subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs
subrProgram = subrSet[subrNumber].program
program[:0] = subrProgram
# Now pop the actual width
assert len(program) >= 1, program
program.pop(0)
if program and program[-1] == "endchar":
program.pop()
if removeUnusedSubrs:
cff.remove_unused_subroutines()
# Upconvert TopDict
cff.major = 2
cff2GetGlyphOrder = cff.otFont.getGlyphOrder
topDictData = TopDictIndex(None, cff2GetGlyphOrder)
for item in cff.topDictIndex:
        # Iterate over all items so that each one gets decompiled
topDictData.append(item)
cff.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, "Private"):
privateDict = topDict.Private
else:
privateDict = None
opOrder = buildOrder(topDictOperators2)
topDict.order = opOrder
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
if not hasattr(topDict, "FDArray"):
fdArray = topDict.FDArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = topDict.GlobalSubrs
topDict.GlobalSubrs.fdArray = fdArray
charStrings = topDict.CharStrings
if charStrings.charStringsAreIndexed:
charStrings.charStringsIndex.fdArray = fdArray
else:
charStrings.fdArray = fdArray
fontDict = FontDict()
fontDict.setCFF2(True)
fdArray.append(fontDict)
fontDict.Private = privateDict
privateOpOrder = buildOrder(privateDictOperators2)
if privateDict is not None:
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
else:
# clean up the PrivateDicts in the fdArray
fdArray = topDict.FDArray
privateOpOrder = buildOrder(privateDictOperators2)
for fontDict in fdArray:
fontDict.setCFF2(True)
for key in list(fontDict.rawDict.keys()):
if key not in fontDict.order:
del fontDict.rawDict[key]
if hasattr(fontDict, key):
delattr(fontDict, key)
privateDict = fontDict.Private
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in list(privateDict.rawDict.keys()):
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
    # Now delete the deprecated topDict operators from CFF 1.0
for entry in topDictOperators:
key = entry[1]
# We seem to need to keep the charset operator for now,
# or we fail to compile with some fonts, like AdditionFont.otf.
# I don't know which kind of CFF font those are. But keeping
# charset seems to work. It will be removed when we save and
# read the font again.
#
# AdditionFont.otf has <Encoding name="StandardEncoding"/>.
if key == "charset":
continue
if key not in opOrder:
if key in topDict.rawDict:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
# TODO(behdad): What does the following comment even mean? Both CFF and CFF2
# use the same T2Charstring class. I *think* what it means is that the CharStrings
# were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc
# on them. At least that's what I understand. It's probably safe to remove this
# and just set vstore where needed.
#
# See comment above about charset as well.
# At this point, the Subrs and Charstrings are all still T2Charstring class
# easiest to fix this by compiling, then decompiling again
file = BytesIO()
cff.compile(file, otFont, isCFF2=True)
file.seek(0)
cff.decompile(file, otFont, isCFF2=True)
def convertCFFToCFF2(font):
cff = font["CFF "].cff
del font["CFF "]
_convertCFFToCFF2(cff, font)
table = font["CFF2"] = newTable("CFF2")
table.cff = cff
def main(args=None):
"""Convert CFF OTF font to CFF2 OTF font"""
if args is None:
import sys
args = sys.argv[1:]
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.CFFToCFF2",
description="Upgrade a CFF font to CFF2.",
)
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance OTF file (default: INPUT-CFF2.ttf).",
)
parser.add_argument(
"--no-recalc-timestamp",
dest="recalc_timestamp",
action="store_false",
help="Don't set the output font's timestamp to the current time.",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
from fontTools import configLogger
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
import os
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
outfile = (
makeOutputFileName(infile, overWrite=True, suffix="-CFF2")
if not options.output
else options.output
)
font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)
convertCFFToCFF2(font)
log.info(
"Saving %s",
outfile,
)
font.save(outfile)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
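# Usage sketch (hypothetical file names):
#   $ fonttools cffLib.CFFToCFF2 MyFont.otf -o MyFont-CFF2.otf
# upgrades a CFF font to CFF2.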

File diff suppressed because it is too large


@@ -0,0 +1,927 @@
# -*- coding: utf-8 -*-
"""T2CharString operator specializer and generalizer.
PostScript glyph drawing operations can be expressed in multiple different
ways. For example, as well as the ``lineto`` operator, there is also a
``hlineto`` operator which draws a horizontal line, removing the need to
specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
vertical line, removing the need to specify a ``dy`` coordinate. As well
as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
into lists of operations, this module allows for conversion between general
and specific forms of the operation.
"""
from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
if isinstance(string, str):
string = string.split()
program = []
for token in string:
try:
token = int(token)
except ValueError:
try:
token = float(token)
except ValueError:
pass
program.append(token)
return program
def programToString(program):
return " ".join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
"""Takes a T2CharString program list and returns list of commands.
    Each command is a two-tuple of commandname, arg-list. The commandname may
    be the empty string if no command name should be emitted (used for the
    glyph width, the hintmask/cntrmask argument, as well as stray arguments
    at the end of the program 🤷).
    'getNumRegions' may be None, or a callable object. It takes a single
    argument, vsindex, and returns the number of regions for that vsindex.
The Charstring may or may not start with a width value. If the first
non-blend operator has an odd number of arguments, then the first argument is
a width, and is popped off. This is complicated with blend operators, as
there may be more than one before the first hint or moveto operator, and each
one reduces several arguments to just one list argument. We have to sum the
number of arguments that are not part of the blend arguments, and all the
'numBlends' values. We could instead have said that by definition, if there
is a blend operator, there is no width value, since CFF2 Charstrings don't
have width values. I discussed this with Behdad, and we are allowing for an
initial width value in this case because developers may assemble a CFF2
charstring from CFF Charstrings, which could have width values.
"""
seenWidthOp = False
vsIndex = 0
lenBlendStack = 0
lastBlendIndex = 0
commands = []
stack = []
it = iter(program)
for token in it:
if not isinstance(token, str):
stack.append(token)
continue
if token == "blend":
assert getNumRegions is not None
numSourceFonts = 1 + getNumRegions(vsIndex)
# replace the blend op args on the stack with a single list
# containing all the blend op args.
numBlends = stack[-1]
numBlendArgs = numBlends * numSourceFonts + 1
# replace first blend op by a list of the blend ops.
stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
lenStack = len(stack)
lenBlendStack += numBlends + lenStack - 1
lastBlendIndex = lenStack
# if a blend op exists, this is or will be a CFF2 charstring.
continue
elif token == "vsindex":
vsIndex = stack[-1]
assert type(vsIndex) is int
elif (not seenWidthOp) and token in {
"hstem",
"hstemhm",
"vstem",
"vstemhm",
"cntrmask",
"hintmask",
"hmoveto",
"vmoveto",
"rmoveto",
"endchar",
}:
seenWidthOp = True
parity = token in {"hmoveto", "vmoveto"}
if lenBlendStack:
# lenBlendStack has the number of args represented by the last blend
# arg and all the preceding args. We need to now add the number of
# args following the last blend arg.
numArgs = lenBlendStack + len(stack[lastBlendIndex:])
else:
numArgs = len(stack)
if numArgs and (numArgs % 2) ^ parity:
width = stack.pop(0)
commands.append(("", [width]))
if token in {"hintmask", "cntrmask"}:
if stack:
commands.append(("", stack))
commands.append((token, []))
commands.append(("", [next(it)]))
else:
commands.append((token, stack))
stack = []
if stack:
commands.append(("", stack))
return commands
def _flattenBlendArgs(args):
token_list = []
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append("blend")
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
program = []
for op, args in commands:
if any(isinstance(arg, list) for arg in args):
args = _flattenBlendArgs(args)
program.extend(args)
if op:
program.append(op)
return program
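# Round-trip sketch with illustrative values:
#   >>> programToCommands([10, 20, "rmoveto", 100, "hlineto"])
#   [('rmoveto', [10, 20]), ('hlineto', [100])]
#   >>> commandsToProgram([("rmoveto", [10, 20]), ("hlineto", [100])])
#   [10, 20, 'rmoveto', 100, 'hlineto']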
def _everyN(el, n):
"""Group the list el into groups of size n"""
l = len(el)
if l % n != 0:
raise ValueError(el)
for i in range(0, l, n):
yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2:
raise ValueError(args)
yield ("rmoveto", args)
@staticmethod
def hmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [0, args[0]])
@staticmethod
def rlineto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 2):
yield ("rlineto", args)
@staticmethod
def hlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [next(it), 0])
yield ("rlineto", [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [0, next(it)])
yield ("rlineto", [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 6):
yield ("rrcurveto", args)
@staticmethod
def hhcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
l = len(args)
if l < 8 or l % 6 != 2:
raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ("rrcurveto", args)
yield ("rlineto", last_args)
@staticmethod
def rlinecurve(args):
l = len(args)
if l < 8 or l % 2 != 0:
raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ("rlineto", args)
yield ("rrcurveto", last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
# recursive blend op calls, some of these args may also
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [
i
for e in blendList
for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
]
else:
args = blendList
# We now know that blendList contains a blend op argument list, even if
# some of the args are lists that each contain a blend op argument list.
# Convert from:
# [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
# to:
# [ [x0] + [delta tuple for x0],
# ...,
# [xn] + [delta tuple for xn] ]
numBlends = args[-1]
# Can't use args.pop() when the args are being used in a nested list
# comprehension. See calling context
args = args[:-1]
l = len(args)
numRegions = l // numBlends - 1
if not (numBlends * (numRegions + 1) == l):
raise ValueError(blendList)
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [
deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
]
blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
return blend_args
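# e.g. (illustrative) for two blended values over two regions:
#   _convertBlendOpToArgs([100, 200, -10, 10, -20, 20, 2])
# returns [[100, -10, 10, 1], [200, -20, 20, 1]], i.e. one
# [default, delta..., 1] list per blended value.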
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
for op, args in commands:
# First, generalize any blend args in the arg list.
if any([isinstance(arg, list) for arg in args]):
try:
args = [
n
for arg in args
for n in (
_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
)
]
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
func = getattr(mapping, op, None)
if func is None:
result.append((op, args))
continue
try:
for command in func(args):
result.append(command)
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
)
def _categorizeVector(v):
"""
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
it returns a single zero still.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return "0", v[:1]
else:
return "v", v[1:]
else:
if not v[1]:
return "h", v[:1]
else:
return "r", v
def _mergeCategories(a, b):
if a == "0":
return b
if b == "0":
return a
if a == b:
return a
return None
def _negateCategory(a):
if a == "h":
return "v"
if a == "v":
return "h"
assert a in "0r"
return a
def _convertToBlendCmds(args):
# return a list of blend commands, and
# the remaining non-blended args, if any.
num_args = len(args)
stack_use = 0
new_args = []
i = 0
while i < num_args:
arg = args[i]
i += 1
if not isinstance(arg, list):
new_args.append(arg)
stack_use += 1
else:
prev_stack_use = stack_use
# The arg is a tuple of blend values.
# These are each (master 0,delta 1..delta n, 1)
# Combine as many successive tuples as we can,
# up to the max stack limit.
num_sources = len(arg) - 1
blendlist = [arg]
stack_use += 1 + num_sources # 1 for the num_blends arg
# if we are here, max stack is the CFF2 max stack.
# I use the CFF2 max stack limit here rather than
# the 'maxstack' chosen by the client, as the default
# maxstack may have been used unintentionally. For all
# the other operators, this just produces a little less
# optimization, but here it puts a hard (and low) limit
# on the number of source fonts that can be used.
#
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
while (
(i < num_args)
and isinstance(args[i], list)
and stack_use + num_sources < maxStackLimit
):
blendlist.append(args[i])
i += 1
stack_use += num_sources
            # blendlist now contains as many single blend tuples as can be
# combined without exceeding the CFF2 stack limit.
num_blends = len(blendlist)
# append the 'num_blends' default font values
blend_args = []
for arg in blendlist:
blend_args.append(arg[0])
for arg in blendlist:
assert arg[-1] == 1
blend_args.extend(arg[1:-1])
blend_args.append(num_blends)
new_args.append(blend_args)
stack_use = prev_stack_use + num_blends
return new_args
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
if len(a) != len(b) or a[-1] != b[-1]:
raise ValueError()
return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
else:
a, b = b, a
if isinstance(a, list):
assert a[-1] == 1
return [_addArgs(a[0], b)] + a[1:]
return a + b
def _argsStackUse(args):
stackLen = 0
maxLen = 0
for arg in args:
if type(arg) is list:
# Blended arg
maxLen = max(maxLen, stackLen + _argsStackUse(arg))
stackLen += arg[-1]
else:
stackLen += 1
return max(stackLen, maxLen)
def specializeCommands(
commands,
ignoreErrors=False,
generalizeFirst=True,
preserveTopology=False,
maxstack=48,
):
# We perform several rounds of optimizations. They are carefully ordered and are:
#
# 0. Generalize commands.
# This ensures that they are in our expected simple form, with each line/curve only
# having arguments for one segment, and using the generic form (rlineto/rrcurveto).
# If caller is sure the input is in this form, they can turn off generalization to
# save time.
#
# 1. Combine successive rmoveto operations.
#
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
# We specialize into some, made-up, variants as well, which simplifies following
# passes.
#
# 3. Merge or delete redundant operations, to the extent requested.
# OpenType spec declares point numbers in CFF undefined. As such, we happily
# change topology. If client relies on point numbers (in GPOS anchors, or for
    # hinting purposes (what?)) they can turn this off.
#
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
#
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
#
# 6. Resolve any remaining made-up operators into real operators.
#
    # I have convinced myself that this produces optimal bytecode (except,
    # possibly, for one byte each time the maxstack size prohibits combining).
    # YMMV, but you'd be wrong. :-)
# A dynamic-programming approach can do the same but would be significantly slower.
#
# 7. For any args which are blend lists, convert them to a blend command.
# 0. Generalize commands.
if generalizeFirst:
commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
else:
commands = list(commands) # Make copy since we modify in-place later.
# 1. Combine successive rmoveto operations.
for i in range(len(commands) - 1, 0, -1):
if "rmoveto" == commands[i][0] == commands[i - 1][0]:
v1, v2 = commands[i - 1][1], commands[i][1]
commands[i - 1] = (
"rmoveto",
[_addArgs(v1[0], v2[0]), _addArgs(v1[1], v2[1])],
)
del commands[i]
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
#
# We, in fact, specialize into more, made-up, variants that special-case when both
# X and Y components are zero. This simplifies the following optimization passes.
# This case is rare, but OCD does not let me skip it.
#
# After this round, we will have four variants that use the following mnemonics:
#
    # - 'r' for relative, ie. non-zero X and non-zero Y,
    # - 'h' for horizontal, ie. non-zero X and zero Y,
    # - 'v' for vertical, ie. zero X and non-zero Y,
    # - '0' for zeros, ie. zero X and zero Y.
#
# The '0' pseudo-operators are not part of the spec, but help simplify the following
# optimization rounds. We resolve them at the end. So, after this, we will have four
# moveto and four lineto variants:
#
# - 0moveto, 0lineto
# - hmoveto, hlineto
# - vmoveto, vlineto
# - rmoveto, rlineto
#
# and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dy0, and dy3 are zero but not dx3.
# An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
#
# There are nine different variants of curves without the '0'. Those nine map exactly
# to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
# vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
# arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
# arguments) is in fact an rhcurveto. The operators in the spec are designed such that
# all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
#
# Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
# of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
# thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
# encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
# the '0' with either 'h' or 'v' and it works.
#
    # When we get to curve splines, however, things become more complicated... XXX finish this.
# There's one more complexity with splines. If one side of the spline is not horizontal or
# vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
# Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
# only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
# This limits our merge opportunities later.
#
for i in range(len(commands)):
op, args = commands[i]
if op in {"rmoveto", "rlineto"}:
c, args = _categorizeVector(args)
commands[i] = c + op[1:], args
continue
if op == "rrcurveto":
c1, args1 = _categorizeVector(args[:2])
c2, args2 = _categorizeVector(args[-2:])
commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
continue
# 3. Merge or delete redundant operations, to the extent requested.
#
# TODO
    # A 0moveto that comes before all other path operations can be removed,
    # though I find conflicting evidence for this.
#
# TODO
# "If hstem and vstem hints are both declared at the beginning of a
# CharString, and this sequence is followed directly by the hintmask or
# cntrmask operators, then the vstem hint operator (or, if applicable,
# the vstemhm operator) need not be included."
#
# "The sequence and form of a CFF2 CharString program may be represented as:
# {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
#
# https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
#
# For Type2 CharStrings the sequence is:
# w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
# Some other redundancies change topology (point numbers).
if not preserveTopology:
for i in range(len(commands) - 1, -1, -1):
op, args = commands[i]
# A 00curveto is demoted to a (specialized) lineto.
if op == "00curveto":
assert len(args) == 4
c, args = _categorizeVector(args[1:3])
op = c + "lineto"
commands[i] = op, args
# and then...
# A 0lineto can be deleted.
if op == "0lineto":
del commands[i]
continue
# Merge adjacent hlineto's and vlineto's.
# In CFF2 charstrings from variable fonts, each
# arg item may be a list of blendable values, one from
# each source font.
if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
_, other_args = commands[i - 1]
assert len(args) == 1 and len(other_args) == 1
try:
new_args = [_addArgs(args[0], other_args[0])]
except ValueError:
continue
commands[i - 1] = (op, new_args)
del commands[i]
continue
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
for i in range(1, len(commands) - 1):
op, args = commands[i]
prv, nxt = commands[i - 1][0], commands[i + 1][0]
if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
assert len(args) == 1
args = [0, args[0]] if op[0] == "v" else [args[0], 0]
commands[i] = ("rlineto", args)
continue
if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
assert (op[0] == "r") ^ (op[1] == "r")
if op[0] == "v":
pos = 0
elif op[0] != "r":
pos = 1
elif op[1] == "v":
pos = 4
else:
pos = 5
# Insert, while maintaining the type of args (can be tuple or list).
args = args[:pos] + type(args)((0,)) + args[pos:]
commands[i] = ("rrcurveto", args)
continue
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
stackUse = _argsStackUse(commands[-1][1]) if commands else 0
for i in range(len(commands) - 1, 0, -1):
op1, args1 = commands[i - 1]
op2, args2 = commands[i]
new_op = None
# Merge logic...
if {op1, op2} <= {"rlineto", "rrcurveto"}:
if op1 == op2:
new_op = op1
else:
l = len(args2)
if op2 == "rrcurveto" and l == 6:
new_op = "rlinecurve"
elif l == 2:
new_op = "rcurveline"
elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
new_op = op2
elif {op1, op2} == {"vlineto", "hlineto"}:
new_op = op1
elif "curveto" == op1[2:] == op2[2:]:
d0, d1 = op1[:2]
d2, d3 = op2[:2]
if d1 == "r" or d2 == "r" or d0 == d3 == "r":
continue
d = _mergeCategories(d1, d2)
if d is None:
continue
if d0 == "r":
d = _mergeCategories(d, d3)
if d is None:
continue
new_op = "r" + d + "curveto"
elif d3 == "r":
d0 = _mergeCategories(d0, _negateCategory(d))
if d0 is None:
continue
new_op = d0 + "r" + "curveto"
else:
d0 = _mergeCategories(d0, d3)
if d0 is None:
continue
new_op = d0 + d + "curveto"
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
args1StackUse = _argsStackUse(args1)
combinedStackUse = max(args1StackUse, len(args1) + stackUse)
if new_op and combinedStackUse < maxstack:
commands[i - 1] = (new_op, args1 + args2)
del commands[i]
stackUse = combinedStackUse
else:
stackUse = args1StackUse
# 6. Resolve any remaining made-up operators into real operators.
for i in range(len(commands)):
op, args = commands[i]
if op in {"0moveto", "0lineto"}:
commands[i] = "h" + op[1:], args
continue
if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
l = len(args)
op0, op1 = op[:2]
if (op0 == "r") ^ (op1 == "r"):
assert l % 2 == 1
if op0 == "0":
op0 = "h"
if op1 == "0":
op1 = "h"
if op0 == "r":
op0 = op1
if op1 == "r":
op1 = _negateCategory(op0)
assert {op0, op1} <= {"h", "v"}, (op0, op1)
if l % 2:
if op0 != op1: # vhcurveto / hvcurveto
if (op0 == "h") ^ (l % 8 == 1):
# Swap last two args order
args = args[:-2] + args[-1:] + args[-2:-1]
else: # hhcurveto / vvcurveto
if op0 == "h": # hhcurveto
# Swap first two args order
args = args[1:2] + args[:1] + args[2:]
commands[i] = op0 + op1 + "curveto", args
continue
# 7. For any series of args which are blend lists, convert the series to a single blend arg.
for i in range(len(commands)):
op, args = commands[i]
if any(isinstance(arg, list) for arg in args):
commands[i] = op, _convertToBlendCmds(args)
return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
specializeCommands(programToCommands(program, getNumRegions), **kwargs)
)
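# Illustrative sketch: specializing rewrites an axis-aligned generic line into
# its specialized operator:
#   >>> specializeProgram([10, 20, "rmoveto", 100, 0, "rlineto"])
#   [10, 20, 'rmoveto', 100, 'hlineto']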
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.specializer",
description="CFF CharString generalizer/specializer",
)
parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
parser.add_argument(
"--num-regions",
metavar="NumRegions",
nargs="*",
default=None,
help="Number of variable-font regions for blend opertaions.",
)
parser.add_argument(
"--font",
metavar="FONTFILE",
default=None,
help="CFF2 font to specialize.",
)
parser.add_argument(
"-o",
"--output-file",
type=str,
help="Output font file name.",
)
options = parser.parse_args(sys.argv[1:])
if options.program:
getNumRegions = (
None
if options.num_regions is None
else lambda vsIndex: int(
options.num_regions[0 if vsIndex is None else vsIndex]
)
)
program = stringToProgram(options.program)
print("Program:")
print(programToString(program))
commands = programToCommands(program, getNumRegions)
print("Commands:")
print(commands)
program2 = commandsToProgram(commands)
print("Program from commands:")
print(programToString(program2))
assert program == program2
print("Generalized program:")
print(programToString(generalizeProgram(program, getNumRegions)))
print("Specialized program:")
print(programToString(specializeProgram(program, getNumRegions)))
if options.font:
from fontTools.ttLib import TTFont
font = TTFont(options.font)
cff2 = font["CFF2"].cff.topDictIndex[0]
charstrings = cff2.CharStrings
for glyphName in charstrings.keys():
charstring = charstrings[glyphName]
charstring.decompile()
getNumRegions = charstring.private.getNumRegions
charstring.program = specializeProgram(
charstring.program, getNumRegions, maxstack=maxStackLimit
)
if options.output_file is None:
from fontTools.misc.cliTools import makeOutputFileName
outfile = makeOutputFileName(
options.font, overWrite=True, suffix=".specialized"
)
else:
outfile = options.output_file
if outfile:
print("Saving", outfile)
font.save(outfile)
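# CLI sketch: `fonttools cffLib.specializer 10 20 rmoveto 100 0 rlineto`
# prints the parsed program, its command list, the round-tripped program,
# and the generalized and specialized forms.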


@@ -0,0 +1,495 @@
from fontTools.misc.psCharStrings import (
SimpleT2Decompiler,
T2WidthExtractor,
calcSubrBias,
)
def _uniq_sort(l):
return sorted(set(l))
class StopHintCountEvent(Exception):
pass
class _DesubroutinizingT2Decompiler(SimpleT2Decompiler):
stop_hintcount_ops = (
"op_hintmask",
"op_cntrmask",
"op_rmoveto",
"op_hmoveto",
"op_vmoveto",
)
def __init__(self, localSubrs, globalSubrs, private=None):
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
def execute(self, charString):
self.need_hintcount = True # until proven otherwise
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, self.stop_hint_count)
if hasattr(charString, "_desubroutinized"):
# If a charstring has already been desubroutinized, we will still
# need to execute it if we need to count hints in order to
# compute the byte length for mask arguments, and haven't finished
            # counting hint pairs.
if self.need_hintcount and self.callingStack:
try:
SimpleT2Decompiler.execute(self, charString)
except StopHintCountEvent:
del self.callingStack[-1]
return
charString._patches = []
SimpleT2Decompiler.execute(self, charString)
desubroutinized = charString.program[:]
for idx, expansion in reversed(charString._patches):
assert idx >= 2
assert desubroutinized[idx - 1] in [
"callsubr",
"callgsubr",
], desubroutinized[idx - 1]
assert type(desubroutinized[idx - 2]) == int
if expansion[-1] == "return":
expansion = expansion[:-1]
desubroutinized[idx - 2 : idx] = expansion
if not self.private.in_cff2:
if "endchar" in desubroutinized:
# Cut off after first endchar
desubroutinized = desubroutinized[
: desubroutinized.index("endchar") + 1
]
charString._desubroutinized = desubroutinized
del charString._patches
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
SimpleT2Decompiler.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
SimpleT2Decompiler.op_callgsubr(self, index)
self.processSubr(index, subr)
def stop_hint_count(self, *args):
self.need_hintcount = False
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, None)
cs = self.callingStack[-1]
if hasattr(cs, "_desubroutinized"):
raise StopHintCountEvent()
def op_hintmask(self, index):
SimpleT2Decompiler.op_hintmask(self, index)
if self.need_hintcount:
self.stop_hint_count()
def processSubr(self, index, subr):
cs = self.callingStack[-1]
if not hasattr(cs, "_desubroutinized"):
cs._patches.append((index, subr._desubroutinized))
def desubroutinizeCharString(cs):
"""Desubroutinize a charstring in-place."""
cs.decompile()
subrs = getattr(cs.private, "Subrs", [])
decompiler = _DesubroutinizingT2Decompiler(subrs, cs.globalSubrs, cs.private)
decompiler.execute(cs)
cs.program = cs._desubroutinized
del cs._desubroutinized
def desubroutinize(cff):
for fontName in cff.fontNames:
font = cff[fontName]
cs = font.CharStrings
for c in cs.values():
desubroutinizeCharString(c)
# Delete all the local subrs
if hasattr(font, "FDArray"):
for fd in font.FDArray:
pd = fd.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
else:
pd = font.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
# as well as the global subrs
cff.GlobalSubrs.clear()
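# Usage sketch (hypothetical file name; assumes an OpenType font with a
# CFF table):
#   from fontTools.ttLib import TTFont
#   font = TTFont("MyFont.otf")
#   desubroutinize(font["CFF "].cff)  # inline all subroutine calls in-place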
class _MarkingT2Decompiler(SimpleT2Decompiler):
def __init__(self, localSubrs, globalSubrs, private):
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
for subrs in [localSubrs, globalSubrs]:
if subrs and not hasattr(subrs, "_used"):
subrs._used = set()
def op_callsubr(self, index):
self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
SimpleT2Decompiler.op_callsubr(self, index)
def op_callgsubr(self, index):
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(T2WidthExtractor):
class Hints(object):
def __init__(self):
# Whether calling this charstring produces any hint stems
# Note that if a charstring starts with hintmask, it will
# have has_hint set to True, because it *might* produce an
# implicit vstem if called under certain conditions.
self.has_hint = False
# Index to start at to drop all hints
self.last_hint = 0
# Index up to which we know more hints are possible.
# Only relevant if status is 0 or 1.
self.last_checked = 0
# The status means:
# 0: after dropping hints, this charstring is empty
# 1: after dropping hints, there may be more hints
# continuing after this, or there might be
# other things. Not clear yet.
# 2: no more hints possible after this charstring
self.status = 0
# Has hintmask instructions; not recursive
self.has_hintmask = False
# List of indices of calls to empty subroutines to remove.
self.deletions = []
def __init__(
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
):
self._css = css
T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
)
self.private = private
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, "_hints") else None
charString._hints = self.Hints()
T2WidthExtractor.execute(self, charString)
hints = charString._hints
if hints.has_hint or hints.has_hintmask:
self._css.add(charString)
if hints.status != 2:
# Check from last_check, make sure we didn't have any operators.
for i in range(hints.last_checked, len(charString.program) - 1):
if isinstance(charString.program[i], str):
hints.status = 2
break
else:
hints.status = 1 # There's *something* here
hints.last_checked = len(charString.program)
if old_hints:
assert hints.__dict__ == old_hints.__dict__
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
T2WidthExtractor.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
T2WidthExtractor.op_callgsubr(self, index)
self.processSubr(index, subr)
def op_hstem(self, index):
T2WidthExtractor.op_hstem(self, index)
self.processHint(index)
def op_vstem(self, index):
T2WidthExtractor.op_vstem(self, index)
self.processHint(index)
def op_hstemhm(self, index):
T2WidthExtractor.op_hstemhm(self, index)
self.processHint(index)
def op_vstemhm(self, index):
T2WidthExtractor.op_vstemhm(self, index)
self.processHint(index)
def op_hintmask(self, index):
rv = T2WidthExtractor.op_hintmask(self, index)
self.processHintmask(index)
return rv
def op_cntrmask(self, index):
rv = T2WidthExtractor.op_cntrmask(self, index)
self.processHintmask(index)
return rv
def processHintmask(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hintmask = True
if hints.status != 2:
# Check from last_check, see if we may be an implicit vstem
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
else:
# We are an implicit vstem
hints.has_hint = True
hints.last_hint = index + 1
hints.status = 0
hints.last_checked = index + 1
def processHint(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hint = True
hints.last_hint = index
hints.last_checked = index
def processSubr(self, index, subr):
cs = self.callingStack[-1]
hints = cs._hints
subr_hints = subr._hints
# Check from last_check, make sure we didn't have
# any operators.
if hints.status != 2:
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
hints.last_checked = index
if hints.status != 2:
if subr_hints.has_hint:
hints.has_hint = True
# Decide where to chop off from
if subr_hints.status == 0:
hints.last_hint = index
else:
hints.last_hint = index - 2 # Leave the subr call in
elif subr_hints.status == 0:
hints.deletions.append(index)
hints.status = max(hints.status, subr_hints.status)
def _cs_subset_subroutines(charstring, subrs, gsubrs):
p = charstring.program
for i in range(1, len(p)):
if p[i] == "callsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
elif p[i] == "callgsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = (
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
)
def _cs_drop_hints(charstring):
hints = charstring._hints
if hints.deletions:
p = charstring.program
for idx in reversed(hints.deletions):
del p[idx - 2 : idx]
if hints.has_hint:
assert not hints.deletions or hints.last_hint <= hints.deletions[0]
charstring.program = charstring.program[hints.last_hint :]
if not charstring.program:
# TODO CFF2 no need for endchar.
charstring.program.append("endchar")
if hasattr(charstring, "width"):
# Insert width back if needed
if charstring.width != charstring.private.defaultWidthX:
# For CFF2 charstrings, this should never happen
assert (
charstring.private.defaultWidthX is not None
), "CFF2 CharStrings must not have an initial width value"
charstring.program.insert(
0, charstring.width - charstring.private.nominalWidthX
)
if hints.has_hintmask:
i = 0
p = charstring.program
while i < len(p):
if p[i] in ["hintmask", "cntrmask"]:
assert i + 1 <= len(p)
del p[i : i + 2]
continue
i += 1
assert len(charstring.program)
del charstring._hints
def remove_hints(cff, *, removeUnusedSubrs: bool = True):
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# This can be tricky, but it doesn't have to be. What we do is:
#
# - Run all used glyph charstrings and recurse into subroutines,
# - For each charstring (including subroutines), if it has any
# of the hint stem operators, we mark it as such.
# Upon returning, for each charstring we note all the
# subroutine calls it makes that (recursively) contain a stem,
# - Dropping hinting then consists of the following two ops:
# * Drop the piece of the program in each charstring before the
# last call to a stem op or a stem-calling subroutine,
# * Drop all hintmask operations.
# - It's trickier... A hintmask right after hints and a few numbers
# will act as an implicit vstemhm. As such, we track whether
# we have seen any non-hint operators so far and do the right
# thing, recursively... Good luck understanding that :(
css = set()
for c in cs.values():
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DehintingT2Decompiler(
css,
subrs,
c.globalSubrs,
c.private.nominalWidthX,
c.private.defaultWidthX,
c.private,
)
decompiler.execute(c)
c.width = decompiler.width
for charstring in css:
_cs_drop_hints(charstring)
del css
# Drop font-wide hinting values
all_privs = []
if hasattr(font, "FDArray"):
all_privs.extend(fd.Private for fd in font.FDArray)
else:
all_privs.append(font.Private)
for priv in all_privs:
for k in [
"BlueValues",
"OtherBlues",
"FamilyBlues",
"FamilyOtherBlues",
"BlueScale",
"BlueShift",
"BlueFuzz",
"StemSnapH",
"StemSnapV",
"StdHW",
"StdVW",
"ForceBold",
"LanguageGroup",
"ExpansionFactor",
]:
if hasattr(priv, k):
setattr(priv, k, None)
if removeUnusedSubrs:
remove_unused_subroutines(cff)
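# Illustrative usage (a minimal sketch; "MyFont.otf" is a hypothetical path):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("MyFont.otf")
#     cff = font["CFF "].cff
#     desubroutinize(cff)  # inline all local/global subroutine calls
#     remove_hints(cff)  # drop stem hints and hintmask operators
#     font.save("MyFont.dehinted.otf")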
def _pd_delete_empty_subrs(private_dict):
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
if "Subrs" in private_dict.rawDict:
del private_dict.rawDict["Subrs"]
del private_dict.Subrs
def remove_unused_subroutines(cff):
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# Renumber subroutines to remove unused ones
# Mark all used subroutines
for c in cs.values():
subrs = getattr(c.private, "Subrs", [])
decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
all_subrs = [font.GlobalSubrs]
if hasattr(font, "FDArray"):
all_subrs.extend(
fd.Private.Subrs
for fd in font.FDArray
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
)
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
all_subrs.append(font.Private.Subrs)
subrs = set(subrs) # Remove duplicates
# Prepare
for subrs in all_subrs:
if not hasattr(subrs, "_used"):
subrs._used = set()
subrs._used = _uniq_sort(subrs._used)
subrs._old_bias = calcSubrBias(subrs)
subrs._new_bias = calcSubrBias(subrs._used)
# Renumber glyph charstrings
for c in cs.values():
subrs = getattr(c.private, "Subrs", None)
_cs_subset_subroutines(c, subrs, font.GlobalSubrs)
# Renumber subroutines themselves
for subrs in all_subrs:
if subrs == font.GlobalSubrs:
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
local_subrs = font.Private.Subrs
elif (
hasattr(font, "FDArray")
and len(font.FDArray) == 1
and hasattr(font.FDArray[0].Private, "Subrs")
):
# Technically we shouldn't do this. But I've run into fonts that do it.
local_subrs = font.FDArray[0].Private.Subrs
else:
local_subrs = None
else:
local_subrs = subrs
subrs.items = [subrs.items[i] for i in subrs._used]
if hasattr(subrs, "file"):
del subrs.file
if hasattr(subrs, "offsets"):
del subrs.offsets
for subr in subrs.items:
_cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs)
# Delete local SubrsIndex if empty
if hasattr(font, "FDArray"):
for fd in font.FDArray:
_pd_delete_empty_subrs(fd.Private)
else:
_pd_delete_empty_subrs(font.Private)
# Cleanup
for subrs in all_subrs:
del subrs._used, subrs._old_bias, subrs._new_bias


@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
"""T2CharString glyph width optimizer.
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""
from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
__all__ = ["optimizeWidths", "main"]
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
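# Cumulative sum (or running max, etc., depending on `op`) of a sparse
# {int: value} histogram over the integer domain between its min and max keys;
# lookups outside the domain fall back to `start` on one side and to the
# grand total on the other.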
def cumSum(f, op=add, start=0, decreasing=False):
keys = sorted(f.keys())
minx, maxx = keys[0], keys[-1]
total = reduce(op, f.values(), start)
if decreasing:
missing = lambda x: start if x > maxx else total
domain = range(maxx, minx - 1, -1)
else:
missing = lambda x: start if x < minx else total
domain = range(minx, maxx + 1)
out = missingdict(missing)
v = start
for x in domain:
v = op(v, f[x])
out[x] = v
return out
def byteCost(widths, default, nominal):
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
cost = 0
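# The thresholds mirror the Type 2 charstring number encoding: operands in
# [-107, 107] fit in one byte and +/-[108, 1131] in two; anything larger
# is costed here at the five-byte fixed-point form.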
for w, freq in widths.items():
if w == default:
continue
diff = abs(w - nominal)
if diff <= 107:
cost += freq
elif diff <= 1131:
cost += freq * 2
else:
cost += freq * 5
return cost
def optimizeWidthsBruteforce(widths):
"""Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
d = defaultdict(int)
for w in widths:
d[w] += 1
# Maximum number of bytes using default can possibly save
maxDefaultAdvantage = 5 * max(d.values())
minw, maxw = min(widths), max(widths)
domain = list(range(minw, maxw + 1))
bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
bestCost = len(widths) * 5 + 1
for nominal in domain:
if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
continue
for default in domain:
cost = byteCost(widths, default, nominal)
if cost < bestCost:
bestCost = cost
bestDefault = default
bestNominal = nominal
return bestDefault, bestNominal
def optimizeWidths(widths):
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
glyphs having that width, returns a tuple of the best CFF default and nominal glyph widths.
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
keys = sorted(widths.keys())
minw, maxw = keys[0], keys[-1]
domain = list(range(minw, maxw + 1))
# Cumulative sum/max forward/backward.
cumFrqU = cumSum(widths, op=add)
cumMaxU = cumSum(widths, op=max)
cumFrqD = cumSum(widths, op=add, decreasing=True)
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
)
nomnCostD = missingdict(
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
)
dfltCostD = missingdict(
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
)
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
# Best nominal.
nominal = min(domain, key=lambda x: bestCost[x])
# Work back the best default.
bestC = bestCost[nominal]
dfltC = nomnCost[nominal] - bestCost[nominal]
ends = []
if dfltC == dfltCostU[nominal]:
starts = [nominal, nominal - 108, nominal - 1132]
for start in starts:
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
start -= 1
ends.append(start)
else:
starts = [nominal, nominal + 108, nominal + 1132]
for start in starts:
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
start += 1
ends.append(start)
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal
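# Illustrative usage (a minimal sketch; the widths below are made-up):
#
#     default, nominal = optimizeWidths([500, 500, 500, 600, 700])
#     # glyphs whose width == default omit the width operand entirely; all
#     # other widths are encoded as (width - nominalWidthX) in the charstring.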
def main(args=None):
"""Calculate optimum defaultWidthX/nominalWidthX values"""
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.width",
description=main.__doc__,
)
parser.add_argument(
"inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
)
parser.add_argument(
"-b",
"--brute-force",
dest="brute",
action="store_true",
help="Use brute-force approach (VERY slow)",
)
args = parser.parse_args(args)
for fontfile in args.inputs:
font = TTFont(fontfile)
hmtx = font["hmtx"]
widths = [m[0] for m in hmtx.metrics.values()]
if args.brute:
default, nominal = optimizeWidthsBruteforce(widths)
else:
default, nominal = optimizeWidths(widths)
print(
"glyphs=%d default=%d nominal=%d byteCost=%d"
% (len(widths), default, nominal, byteCost(widths, default, nominal))
)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
main()


@ -0,0 +1,664 @@
"""
colorLib.builder: Build COLR/CPAL tables from scratch
"""
import collections
import copy
import enum
from functools import partial
from math import ceil, log
from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from fontTools.misc.arrayTools import intRect
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.treeTools import build_n_ary_tree
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
from .errors import ColorLibError
from .geometry import round_start_circle_stable_containment
from .table_builder import BuildCallback, TableBuilder
# TODO move type aliases to colorLib.types?
T = TypeVar("T")
_Kwargs = Mapping[str, Any]
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
_ClipBoxInput = Union[
Tuple[int, int, int, int, int],  # format 2, variable
Tuple[int, int, int, int],  # format 1, non-variable
ot.ClipBox,
]
MAX_PAINT_COLR_LAYER_COUNT = 255
_DEFAULT_ALPHA = 1.0
_MAX_REUSE_LEN = 32
def _beforeBuildPaintRadialGradient(paint, source):
x0 = source["x0"]
y0 = source["y0"]
r0 = source["r0"]
x1 = source["x1"]
y1 = source["y1"]
r1 = source["r1"]
# TODO apparently no builder_test confirms this works (?)
# avoid abrupt change after rounding when c0 is near c1's perimeter
c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1)
x0, y0 = c.centre
r0 = c.radius
# update source to ensure paint is built with corrected values
source["x0"] = x0
source["y0"] = y0
source["r0"] = r0
source["x1"] = x1
source["y1"] = y1
source["r1"] = r1
return paint, source
def _defaultColorStop():
colorStop = ot.ColorStop()
colorStop.Alpha = _DEFAULT_ALPHA
return colorStop
def _defaultVarColorStop():
colorStop = ot.VarColorStop()
colorStop.Alpha = _DEFAULT_ALPHA
return colorStop
def _defaultColorLine():
colorLine = ot.ColorLine()
colorLine.Extend = ExtendMode.PAD
return colorLine
def _defaultVarColorLine():
colorLine = ot.VarColorLine()
colorLine.Extend = ExtendMode.PAD
return colorLine
def _defaultPaintSolid():
paint = ot.Paint()
paint.Alpha = _DEFAULT_ALPHA
return paint
def _buildPaintCallbacks():
return {
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintRadialGradient,
): _beforeBuildPaintRadialGradient,
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintVarRadialGradient,
): _beforeBuildPaintRadialGradient,
(BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop,
(BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop,
(BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine,
(BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine,
(
BuildCallback.CREATE_DEFAULT,
ot.Paint,
ot.PaintFormat.PaintSolid,
): _defaultPaintSolid,
(
BuildCallback.CREATE_DEFAULT,
ot.Paint,
ot.PaintFormat.PaintVarSolid,
): _defaultPaintSolid,
}
def populateCOLRv0(
table: ot.COLR,
colorGlyphsV0: _ColorGlyphsV0Dict,
glyphMap: Optional[Mapping[str, int]] = None,
):
"""Build v0 color layers and add to existing COLR table.
Args:
table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
color palette index) tuples. Can be empty.
glyphMap: a map from glyph names to glyph indices, as returned from
``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
"""
if glyphMap is not None:
colorGlyphItems = sorted(
colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]]
)
else:
colorGlyphItems = colorGlyphsV0.items()
baseGlyphRecords = []
layerRecords = []
for baseGlyph, layers in colorGlyphItems:
baseRec = ot.BaseGlyphRecord()
baseRec.BaseGlyph = baseGlyph
baseRec.FirstLayerIndex = len(layerRecords)
baseRec.NumLayers = len(layers)
baseGlyphRecords.append(baseRec)
for layerGlyph, paletteIndex in layers:
layerRec = ot.LayerRecord()
layerRec.LayerGlyph = layerGlyph
layerRec.PaletteIndex = paletteIndex
layerRecords.append(layerRec)
table.BaseGlyphRecordArray = table.LayerRecordArray = None
if baseGlyphRecords:
table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
if layerRecords:
table.LayerRecordArray = ot.LayerRecordArray()
table.LayerRecordArray.LayerRecord = layerRecords
table.BaseGlyphRecordCount = len(baseGlyphRecords)
table.LayerRecordCount = len(layerRecords)
def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: Optional[int] = None,
*,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
allowLayerReuse: bool = True,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: map of base glyph name to, either list of (layer glyph name,
color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
list of ``Paint`` for COLRv1.
version: the version of COLR table. If None, the version is determined
by the presence of COLRv1 paints or variation data (varStore), which
require version 1; otherwise, if all base glyphs use only simple color
layers, version 0 is used.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVariationStore for deltas associated with v1 layers.
varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layers.
clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
(xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).
Returns:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
if version in (None, 0) and not varStore:
# split color glyphs into v0 and v1 and encode separately
colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
if version == 0 and colorGlyphsV1:
raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
else:
# unless v1 is explicitly requested or we have variations, in which case
# we encode all color glyphs as v1
colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs
colr = ot.COLR()
populateCOLRv0(colr, colorGlyphsV0, glyphMap)
colr.LayerList, colr.BaseGlyphList = buildColrV1(
colorGlyphsV1,
glyphMap,
allowLayerReuse=allowLayerReuse,
)
if version is None:
version = 1 if (varStore or colorGlyphsV1) else 0
elif version not in (0, 1):
raise NotImplementedError(version)
self.version = colr.Version = version
if version == 0:
self.ColorLayers = self._decompileColorLayersV0(colr)
else:
colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
colr.VarIndexMap = varIndexMap
colr.VarStore = varStore
self.table = colr
return self
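# Illustrative usage (a minimal sketch; glyph names and palette indices are
# made-up):
#
#     colr = buildCOLR(
#         {
#             # COLRv0-style layers: (layer glyph, palette index) tuples
#             "A": [("A.color0", 0), ("A.color1", 1)],
#             # COLRv1-style Paint graph
#             "B": {
#                 "Format": ot.PaintFormat.PaintGlyph,
#                 "Glyph": "B.color0",
#                 "Paint": {
#                     "Format": ot.PaintFormat.PaintSolid,
#                     "PaletteIndex": 0,
#                     "Alpha": 1.0,
#                 },
#             },
#         }
#     )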
def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
clipList = ot.ClipList()
clipList.Format = 1
clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()}
return clipList
def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
if isinstance(clipBox, ot.ClipBox):
return clipBox
n = len(clipBox)
clip = ot.ClipBox()
if n not in (4, 5):
raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}")
clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4])
clip.Format = int(n == 5) + 1
if n == 5:
clip.VarIndexBase = int(clipBox[4])
return clip
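# Illustrative usage (a minimal sketch; glyph name and box values are made-up):
#
#     clipList = buildClipList({"A": (0, -200, 600, 800)})  # Format 1 boxes
#     varClipBox = buildClipBox((0, -200, 600, 800, 7))  # Format 2, VarIndexBase=7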
class ColorPaletteType(enum.IntFlag):
USABLE_WITH_LIGHT_BACKGROUND = 0x0001
USABLE_WITH_DARK_BACKGROUND = 0x0002
@classmethod
def _missing_(cls, value):
# enforce reserved bits
if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0):
raise ValueError(f"{value} is not a valid {cls.__name__}")
return super()._missing_(value)
# None, 'abc' or {'en': 'abc', 'de': 'xyz'}
_OptionalLocalizedString = Union[None, str, Dict[str, str]]
def buildPaletteLabels(
labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
return [
(
nameTable.addMultilingualName(l, mac=False)
if isinstance(l, dict)
else (
C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
if l is None
else nameTable.addMultilingualName({"en": l}, mac=False)
)
)
for l in labels
]
def buildCPAL(
palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
"""Build CPAL table from list of color palettes.
Args:
palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
in the range [0..1].
paletteTypes: optional list of ColorPaletteType, one for each palette.
paletteLabels: optional list of palette labels. Each label can be either:
None (no label), a string (for default English labels), or a
localized string (as a dict keyed with BCP47 language codes).
paletteEntryLabels: optional list of palette entry labels, one for each
palette entry (see paletteLabels).
nameTable: optional name table where to store palette and palette entry
labels. Required if either paletteLabels or paletteEntryLabels is set.
Return:
A new CPAL v0 table, or a v1 table if custom palette types or labels are specified.
"""
if len({len(p) for p in palettes}) != 1:
raise ColorLibError("color palettes have different lengths")
if (paletteLabels or paletteEntryLabels) and not nameTable:
raise TypeError(
"nameTable is required if palette or palette entries have labels"
)
cpal = C_P_A_L_.table_C_P_A_L_()
cpal.numPaletteEntries = len(palettes[0])
cpal.palettes = []
for i, palette in enumerate(palettes):
colors = []
for j, color in enumerate(palette):
if not isinstance(color, tuple) or len(color) != 4:
raise ColorLibError(
f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
)
if any(v > 1 or v < 0 for v in color):
raise ColorLibError(
f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
)
# input colors are RGBA, CPAL encodes them as BGRA
red, green, blue, alpha = color
colors.append(
C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
)
cpal.palettes.append(colors)
if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
cpal.version = 1
if paletteTypes is not None:
if len(paletteTypes) != len(palettes):
raise ColorLibError(
f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
)
cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
else:
cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
palettes
)
if paletteLabels is not None:
if len(paletteLabels) != len(palettes):
raise ColorLibError(
f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
)
cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
else:
cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)
if paletteEntryLabels is not None:
if len(paletteEntryLabels) != cpal.numPaletteEntries:
raise ColorLibError(
f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
f"got {len(paletteEntryLabels)}"
)
cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
else:
cpal.paletteEntryLabels = [
C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
] * cpal.numPaletteEntries
else:
cpal.version = 0
return cpal
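# Illustrative usage (a minimal sketch; the RGBA values are made-up):
#
#     cpal = buildCPAL(
#         [
#             [(1.0, 0.0, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0)],  # palette 0
#             [(0.5, 0.5, 0.5, 1.0), (0.1, 0.1, 0.1, 0.8)],  # palette 1
#         ]
#     )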
# COLR v1 tables
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
def _is_colrv0_layer(layer: Any) -> bool:
# Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which
# the first element is a str (the layerGlyph) and the second element is an int
# (CPAL paletteIndex).
# https://github.com/googlefonts/ufo2ft/issues/426
try:
layerGlyph, paletteIndex = layer
except (TypeError, ValueError):
return False
else:
return isinstance(layerGlyph, str) and isinstance(paletteIndex, int)
def _split_color_glyphs_by_version(
colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
colorGlyphsV0 = {}
colorGlyphsV1 = {}
for baseGlyph, layers in colorGlyphs.items():
if all(_is_colrv0_layer(l) for l in layers):
colorGlyphsV0[baseGlyph] = layers
else:
colorGlyphsV1[baseGlyph] = layers
# sanity check
assert set(colorGlyphs) == (set(colorGlyphsV0) | set(colorGlyphsV1))
return colorGlyphsV0, colorGlyphsV1
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
# TODO feels like something itertools might have already
for lbound in range(num_layers):
# Reuse of very large #s of layers is relatively unlikely
# +2: we want sequences of at least 2
# otData handles single-record duplication
for ubound in range(
lbound + 2, min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN)
):
yield (lbound, ubound)
class LayerReuseCache:
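# Remembers slices of the flat LayerList that have already been emitted, so
# later PaintColrLayers can point back at them via FirstLayerIndex instead
# of duplicating identical runs of layers.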
reusePool: Mapping[Tuple[Any, ...], int]
tuples: Mapping[int, Tuple[Any, ...]]
keepAlive: List[ot.Paint] # we need id to remain valid
def __init__(self):
self.reusePool = {}
self.tuples = {}
self.keepAlive = []
def _paint_tuple(self, paint: ot.Paint):
# start simple, who even cares about cyclic graphs or interesting field types
def _tuple_safe(value):
if isinstance(value, enum.Enum):
return value
elif hasattr(value, "__dict__"):
return tuple(
(k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items())
)
elif isinstance(value, collections.abc.MutableSequence):
return tuple(_tuple_safe(e) for e in value)
return value
# Cache the tuples for individual Paint instead of the whole sequence
# because the seq could be a transient slice
result = self.tuples.get(id(paint), None)
if result is None:
result = _tuple_safe(paint)
self.tuples[id(paint)] = result
self.keepAlive.append(paint)
return result
def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]:
return tuple(self._paint_tuple(p) for p in paints)
def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]:
found_reuse = True
while found_reuse:
found_reuse = False
ranges = sorted(
_reuse_ranges(len(layers)),
key=lambda t: (t[1] - t[0], t[1], t[0]),
reverse=True,
)
for lbound, ubound in ranges:
reuse_lbound = self.reusePool.get(
self._as_tuple(layers[lbound:ubound]), -1
)
if reuse_lbound == -1:
continue
new_slice = ot.Paint()
new_slice.Format = int(ot.PaintFormat.PaintColrLayers)
new_slice.NumLayers = ubound - lbound
new_slice.FirstLayerIndex = reuse_lbound
layers = layers[:lbound] + [new_slice] + layers[ubound:]
found_reuse = True
break
return layers
def add(self, layers: List[ot.Paint], first_layer_index: int):
for lbound, ubound in _reuse_ranges(len(layers)):
self.reusePool[self._as_tuple(layers[lbound:ubound])] = (
lbound + first_layer_index
)
class LayerListBuilder:
layers: List[ot.Paint]
cache: LayerReuseCache
allowLayerReuse: bool
def __init__(self, *, allowLayerReuse=True):
self.layers = []
if allowLayerReuse:
self.cache = LayerReuseCache()
else:
self.cache = None
# We need to intercept construction of PaintColrLayers
callbacks = _buildPaintCallbacks()
callbacks[
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintColrLayers,
)
] = self._beforeBuildPaintColrLayers
self.tableBuilder = TableBuilder(callbacks)
# COLR layers is unusual in that it modifies shared state
# so we need a callback into an object
def _beforeBuildPaintColrLayers(self, dest, source):
# Sketchy gymnastics: a sequence input will have dropped its layers
# into NumLayers; get it back
if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
layers = source["NumLayers"]
else:
layers = source["Layers"]
# Convert maps seqs or whatever into typed objects
layers = [self.buildPaint(l) for l in layers]
# No reason to have a colr layers with just one entry
if len(layers) == 1:
return layers[0], {}
if self.cache is not None:
# Look for reuse, with preference to longer sequences
# This may make the layer list smaller
layers = self.cache.try_reuse(layers)
# The layer list is now final; if it's too big we need to tree it
is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT
layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT)
# We now have a tree of sequences with Paint leaves.
# Convert the sequences into PaintColrLayers.
def listToColrLayers(layer):
if isinstance(layer, collections.abc.Sequence):
return self.buildPaint(
{
"Format": ot.PaintFormat.PaintColrLayers,
"Layers": [listToColrLayers(l) for l in layer],
}
)
return layer
layers = [listToColrLayers(l) for l in layers]
# No reason to have a colr layers with just one entry
if len(layers) == 1:
return layers[0], {}
paint = ot.Paint()
paint.Format = int(ot.PaintFormat.PaintColrLayers)
paint.NumLayers = len(layers)
paint.FirstLayerIndex = len(self.layers)
self.layers.extend(layers)
# Register our parts for reuse provided we aren't a tree
# If we are a tree, the leaves were already registered for reuse and that will suffice
if self.cache is not None and not is_tree:
self.cache.add(layers, paint.FirstLayerIndex)
# we've fully built dest; empty source prevents generalized build from kicking in
return paint, {}
def buildPaint(self, paint: _PaintInput) -> ot.Paint:
return self.tableBuilder.build(ot.Paint, paint)
def build(self) -> Optional[ot.LayerList]:
if not self.layers:
return None
layers = ot.LayerList()
layers.LayerCount = len(self.layers)
layers.Paint = self.layers
return layers
def buildBaseGlyphPaintRecord(
baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
) -> ot.BaseGlyphList:
self = ot.BaseGlyphPaintRecord()
self.BaseGlyph = baseGlyph
self.Paint = layerBuilder.buildPaint(paint)
return self
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
lines = []
for baseGlyph, error in sorted(errors.items()):
lines.append(f" {baseGlyph} => {type(error).__name__}: {error}")
return "\n".join(lines)
def buildColrV1(
colorGlyphs: _ColorGlyphsDict,
glyphMap: Optional[Mapping[str, int]] = None,
*,
allowLayerReuse: bool = True,
) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
if glyphMap is not None:
colorGlyphItems = sorted(
colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
)
else:
colorGlyphItems = colorGlyphs.items()
errors = {}
baseGlyphs = []
layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse)
for baseGlyph, paint in colorGlyphItems:
try:
baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))
except (ColorLibError, OverflowError, ValueError, TypeError) as e:
errors[baseGlyph] = e
if errors:
failed_glyphs = _format_glyph_errors(errors)
exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}")
exc.errors = errors
raise exc from next(iter(errors.values()))
layers = layerBuilder.build()
glyphs = ot.BaseGlyphList()
glyphs.BaseGlyphCount = len(baseGlyphs)
glyphs.BaseGlyphPaintRecord = baseGlyphs
return (layers, glyphs)


@ -0,0 +1,2 @@
class ColorLibError(Exception):
pass


@ -0,0 +1,143 @@
"""Helpers for manipulating 2D points and vectors in COLR table."""
from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
def _vector_between(origin, target):
return (target[0] - origin[0], target[1] - origin[1])
def _round_point(pt):
return (otRound(pt[0]), otRound(pt[1]))
def _unit_vector(vec):
length = hypot(*vec)
if length == 0:
return None
return (vec[0] / length, vec[1] / length)
_CIRCLE_INSIDE_TOLERANCE = 1e-4
# The unit vector's X and Y components are respectively
# U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984
def _rounding_offset(direction):
# Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector.
# We divide the unit circle in 8 equal slices oriented towards the cardinal
# (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we
# map one of the possible cases: -1, 0, +1 for either X and Y coordinate.
# E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or
# (-1.0, 0.0) if it's pointing West, etc.
uv = _unit_vector(direction)
if not uv:
return (0, 0)
result = []
for uv_component in uv:
if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD:
# unit vector component near 0: direction almost orthogonal to the
# direction of the current axis, thus keep coordinate unchanged
result.append(0)
else:
# nudge coord by +/- 1.0 in direction of unit vector
result.append(copysign(1.0, uv_component))
return tuple(result)
class Circle:
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def __repr__(self):
return f"Circle(centre={self.centre}, radius={self.radius})"
def round(self):
return Circle(_round_point(self.centre), otRound(self.radius))
def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
return (
isclose(outer_circle.radius, dist, rel_tol=tolerance)
or outer_circle.radius > dist
)
def concentric(self, other):
return self.centre == other.centre
def move(self, dx, dy):
self.centre = (self.centre[0] + dx, self.centre[1] + dy)
def round_start_circle_stable_containment(c0, r0, c1, r1):
"""Round start circle so that it stays inside/outside end circle after rounding.
The rounding of circle coordinates to integers may cause an abrupt change
if the start circle c0 is so close to the end circle c1's perimeter that
it ends up falling outside (or inside) as a result of the rounding.
To keep the gradient unchanged, we nudge it in the right direction.
See:
https://github.com/googlefonts/colr-gradients-spec/issues/204
https://github.com/googlefonts/picosvg/issues/158
"""
start, end = Circle(c0, r0), Circle(c1, r1)
inside_before_round = start.inside(end)
round_start = start.round()
round_end = end.round()
inside_after_round = round_start.inside(round_end)
if inside_before_round == inside_after_round:
return round_start
elif inside_after_round:
# start was outside before rounding: we need to push start away from end
direction = _vector_between(round_end.centre, round_start.centre)
radius_delta = +1.0
else:
# start was inside before rounding: we need to push start towards end
direction = _vector_between(round_start.centre, round_end.centre)
radius_delta = -1.0
dx, dy = _rounding_offset(direction)
# At most 2 iterations ought to be enough to converge. Before the loop, we
# know the start circle didn't keep containment after normal rounding; thus
# we continue adjusting by -/+ 1.0 until containment is restored.
# Normal rounding can at most move each coordinate by -/+0.5; in the worst case
# both the start and end circle's centres and radii will be rounded in opposite
# directions, e.g. when they move along a 45 degree diagonal:
# c0 = (1.5, 1.5) ===> (2.0, 2.0)
# r0 = 0.5 ===> 1.0
# c1 = (0.499, 0.499) ===> (0.0, 0.0)
# r1 = 2.499 ===> 2.0
# In this example, the relative distance between the circles, calculated
# as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
# -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
# x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
# moves cover twice that distance, which is enough to restore containment.
max_attempts = 2
for _ in range(max_attempts):
if round_start.concentric(round_end):
# can't move c0 towards c1 (they are the same), so we change the radius
round_start.radius += radius_delta
assert round_start.radius >= 0
else:
round_start.move(dx, dy)
if inside_before_round == round_start.inside(round_end):
break
else: # likely a bug
raise AssertionError(
f"Rounding circle {start} "
f"{'inside' if inside_before_round else 'outside'} "
f"{end} failed after {max_attempts} attempts!"
)
return round_start
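# Illustrative usage (a minimal sketch, reusing the worked example from the
# comments above):
#
#     c = round_start_circle_stable_containment((1.5, 1.5), 0.5, (0.499, 0.499), 2.499)
#     # c is a Circle with integer centre/radius, nudged so that it is still
#     # inside the (rounded) end circle.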


@ -0,0 +1,223 @@
"""
colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such.
"""
import collections
import enum
from fontTools.ttLib.tables.otBase import (
BaseTable,
FormatSwitchingBaseTable,
UInt8FormatSwitchingBaseTable,
)
from fontTools.ttLib.tables.otConverters import (
ComputedInt,
SimpleValue,
Struct,
Short,
UInt8,
UShort,
IntValue,
FloatValue,
OptionalValue,
)
from fontTools.misc.roundTools import otRound
class BuildCallback(enum.Enum):
"""Keyed on (BEFORE_BUILD, class[, Format if available]).
Receives (dest, source).
Should return (dest, source), which can be new objects.
"""
BEFORE_BUILD = enum.auto()
"""Keyed on (AFTER_BUILD, class[, Format if available]).
Receives (dest).
Should return dest, which can be a new object.
"""
AFTER_BUILD = enum.auto()
"""Keyed on (CREATE_DEFAULT, class[, Format if available]).
Receives no arguments.
Should return a new instance of class.
"""
CREATE_DEFAULT = enum.auto()
def _assignable(convertersByName):
return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)}
def _isNonStrSequence(value):
return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
def _split_format(cls, source):
if _isNonStrSequence(source):
assert len(source) > 0, f"{cls} needs at least format from {source}"
fmt, remainder = source[0], source[1:]
elif isinstance(source, collections.abc.Mapping):
assert "Format" in source, f"{cls} needs at least Format from {source}"
remainder = source.copy()
fmt = remainder.pop("Format")
else:
raise ValueError(f"Not sure how to populate {cls} from {source}")
assert isinstance(
fmt, collections.abc.Hashable
), f"{cls} Format is not hashable: {fmt!r}"
assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
return fmt, remainder
class TableBuilder:
"""
Helps to populate things derived from BaseTable from maps, tuples, etc.
A table of lifecycle callbacks may be provided to add logic beyond what is possible
based on otData info for the target class. See BuildCallbacks.
"""
def __init__(self, callbackTable=None):
if callbackTable is None:
callbackTable = {}
self._callbackTable = callbackTable
def _convert(self, dest, field, converter, value):
enumClass = getattr(converter, "enumClass", None)
if enumClass:
if isinstance(value, enumClass):
pass
elif isinstance(value, str):
try:
value = getattr(enumClass, value.upper())
except AttributeError:
raise ValueError(f"{value} is not a valid {enumClass}")
else:
value = enumClass(value)
elif isinstance(converter, IntValue):
value = otRound(value)
elif isinstance(converter, FloatValue):
value = float(value)
elif isinstance(converter, Struct):
if converter.repeat:
if _isNonStrSequence(value):
value = [self.build(converter.tableClass, v) for v in value]
else:
value = [self.build(converter.tableClass, value)]
setattr(dest, converter.repeat, len(value))
else:
value = self.build(converter.tableClass, value)
elif callable(converter):
value = converter(value)
setattr(dest, field, value)
def build(self, cls, source):
assert issubclass(cls, BaseTable)
if isinstance(source, cls):
return source
callbackKey = (cls,)
fmt = None
if issubclass(cls, FormatSwitchingBaseTable):
fmt, source = _split_format(cls, source)
callbackKey = (cls, fmt)
dest = self._callbackTable.get(
(BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls()
)()
assert isinstance(dest, cls)
convByName = _assignable(cls.convertersByName)
skippedFields = set()
# For format switchers we need to resolve converters based on format
if issubclass(cls, FormatSwitchingBaseTable):
dest.Format = fmt
convByName = _assignable(convByName[dest.Format])
skippedFields.add("Format")
# Convert sequence => mapping so before thunk only has to handle one format
if _isNonStrSequence(source):
# Sequence (typically list or tuple) assumed to match fields in declaration order
assert len(source) <= len(
convByName
), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values"
source = dict(zip(convByName.keys(), source))
dest, source = self._callbackTable.get(
(BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s)
)(dest, source)
if isinstance(source, collections.abc.Mapping):
for field, value in source.items():
if field in skippedFields:
continue
converter = convByName.get(field, None)
if not converter:
raise ValueError(
f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}"
)
self._convert(dest, field, converter, value)
else:
# let's try as a 1-tuple
dest = self.build(cls, (source,))
for field, conv in convByName.items():
if not hasattr(dest, field) and isinstance(conv, OptionalValue):
setattr(dest, field, conv.DEFAULT)
dest = self._callbackTable.get(
(BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d
)(dest)
return dest
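# Illustrative usage (a minimal sketch; the field values are made-up):
#
#     from fontTools.ttLib.tables import otTables as ot
#     paint = TableBuilder().build(
#         ot.Paint,
#         {"Format": ot.PaintFormat.PaintSolid, "PaletteIndex": 0, "Alpha": 0.5},
#     )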
class TableUnbuilder:
def __init__(self, callbackTable=None):
if callbackTable is None:
callbackTable = {}
self._callbackTable = callbackTable
def unbuild(self, table):
assert isinstance(table, BaseTable)
source = {}
callbackKey = (type(table),)
if isinstance(table, FormatSwitchingBaseTable):
source["Format"] = int(table.Format)
callbackKey += (table.Format,)
for converter in table.getConverters():
if isinstance(converter, ComputedInt):
continue
value = getattr(table, converter.name)
enumClass = getattr(converter, "enumClass", None)
if enumClass:
source[converter.name] = value.name.lower()
elif isinstance(converter, Struct):
if converter.repeat:
source[converter.name] = [self.unbuild(v) for v in value]
else:
source[converter.name] = self.unbuild(value)
elif isinstance(converter, SimpleValue):
# "simple" values (e.g. int, float, str) need no further un-building
source[converter.name] = value
else:
raise NotImplementedError(
"Don't know how unbuild {value!r} with {converter!r}"
)
source = self._callbackTable.get(callbackKey, lambda s: s)(source)
return source


@ -0,0 +1,81 @@
from fontTools.ttLib.tables import otTables as ot
from .table_builder import TableUnbuilder
def unbuildColrV1(layerList, baseGlyphList):
layers = []
if layerList:
layers = layerList.Paint
unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
for rec in baseGlyphList.BaseGlyphPaintRecord
}
def _flatten_layers(lst):
for paint in lst:
if paint["Format"] == ot.PaintFormat.PaintColrLayers:
yield from _flatten_layers(paint["Layers"])
else:
yield paint
class LayerListUnbuilder:
def __init__(self, layers):
self.layers = layers
callbacks = {
(
ot.Paint,
ot.PaintFormat.PaintColrLayers,
): self._unbuildPaintColrLayers,
}
self.tableUnbuilder = TableUnbuilder(callbacks)
def unbuildPaint(self, paint):
assert isinstance(paint, ot.Paint)
return self.tableUnbuilder.unbuild(paint)
def _unbuildPaintColrLayers(self, source):
assert source["Format"] == ot.PaintFormat.PaintColrLayers
layers = list(
_flatten_layers(
[
self.unbuildPaint(childPaint)
for childPaint in self.layers[
source["FirstLayerIndex"] : source["FirstLayerIndex"]
+ source["NumLayers"]
]
]
)
)
if len(layers) == 1:
return layers[0]
return {"Format": source["Format"], "Layers": layers}
if __name__ == "__main__":
from pprint import pprint
import sys
from fontTools.ttLib import TTFont
try:
fontfile = sys.argv[1]
except IndexError:
sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
font = TTFont(fontfile)
colr = font["COLR"]
if colr.version < 1:
sys.exit(f"error: No COLR table version=1 found in {fontfile}")
colorGlyphs = unbuildColrV1(
colr.table.LayerList,
colr.table.BaseGlyphList,
)
pprint(colorGlyphs)


@ -0,0 +1,90 @@
"""
Define all configuration options that can affect the working of fontTools
modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level,
etc. If this file gets too big, split it into smaller files per-module.
An instance of the Config class can be attached to a TTFont object, so that
the various modules can access their configuration options from it.
"""
from textwrap import dedent
from fontTools.misc.configTools import *
class Config(AbstractConfig):
options = Options()
OPTIONS = Config.options
Config.register_option(
name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
help=dedent(
"""\
GPOS Lookup type 2 (PairPos) compression level:
0 = do not attempt to compact PairPos lookups;
1 to 8 = create at most 1 to 8 new subtables for each existing
subtable, provided that it would yield a 50%% file size saving;
9 = create as many new subtables as needed to yield a file size saving.
Default: 0.
This compaction aims to save file size, by splitting large class
kerning subtables (Format 2) that contain many zero values into
smaller and denser subtables. It's a trade-off between the overhead
of several subtables versus the sparseness of one big subtable.
See the pull request: https://github.com/fonttools/fonttools/pull/2326
"""
),
default=0,
parse=int,
validate=lambda v: v in range(10),
)
Config.register_option(
name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER",
help=dedent(
"""\
FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables
if the uharfbuzz python bindings are importable, otherwise falls back to its
slower, less efficient serializer. Set to False to always use the latter.
Set to True to explicitly request the HarfBuzz Repacker (will raise an
error if uharfbuzz cannot be imported).
"""
),
default=None,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
Config.register_option(
name="fontTools.otlLib.builder:WRITE_GPOS7",
help=dedent(
"""\
macOS before 13.2 didn't support GPOS LookupType 7 (non-chaining
ContextPos lookups), so fontTools.otlLib.builder disables a file size
optimization that would use LookupType 7 instead of 8 when there is no
chaining (no prefix or suffix). Set to True to enable the optimization.
"""
),
default=False,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
Config.register_option(
name="fontTools.ttLib:OPTIMIZE_FONT_SPEED",
help=dedent(
"""\
Enable optimizations that prioritize speed over file size. This
mainly affects how glyf table and gvar / VARC tables are compiled.
The produced fonts will be larger, but rendering performance will
be improved with HarfBuzz and other text layout engines.
"""
),
default=False,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
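# Illustrative usage (a minimal sketch; relies on TTFont's ``cfg`` argument):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont(cfg={"fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL": 5})
#     level = font.cfg["fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL"]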


@ -0,0 +1,15 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cu2qu import *


@ -0,0 +1,6 @@
import sys
from .cli import _main as main
if __name__ == "__main__":
sys.exit(main())


@ -0,0 +1,54 @@
"""Benchmark the cu2qu algorithm performance."""
from .cu2qu import *
import random
import timeit
MAX_ERR = 0.05
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)
]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
setup_func = "setup_" + function
if setup_suffix:
print("%s with %s:" % (function, setup_suffix), end="")
setup_func += "_" + setup_suffix
else:
print("%s:" % function, end="")
def wrapper(function, setup_func):
function = globals()[function]
setup_func = globals()[setup_func]
def wrapped():
return function(*setup_func())
return wrapped
results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
print("\t%5.1fus" % (min(results) * 1000000.0 / number))
def main():
run_benchmark("cu2qu", "curve_to_quadratic")
run_benchmark("cu2qu", "curves_to_quadratic")
if __name__ == "__main__":
random.seed(1)
main()


@ -0,0 +1,198 @@
import os
import argparse
import logging
import shutil
import multiprocessing as mp
from contextlib import closing
from functools import partial
import fontTools
from .ufo import font_to_quadratic, fonts_to_quadratic
ufo_module = None
try:
import ufoLib2 as ufo_module
except ImportError:
try:
import defcon as ufo_module
except ImportError:
pass
logger = logging.getLogger("fontTools.cu2qu")
def _cpu_count():
try:
return mp.cpu_count()
except NotImplementedError: # pragma: no cover
return 1
def open_ufo(path):
if hasattr(ufo_module.Font, "open"): # ufoLib2
return ufo_module.Font.open(path)
return ufo_module.Font(path) # defcon
def _font_to_quadratic(input_path, output_path=None, **kwargs):
ufo = open_ufo(input_path)
logger.info("Converting curves for %s", input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
if output_path:
ufo.save(output_path)
else:
ufo.save() # save in-place
elif output_path:
_copytree(input_path, output_path)
def _samepath(path1, path2):
# TODO on python3+, there's os.path.samefile
path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
return path1 == path2
def _copytree(input_path, output_path):
if _samepath(input_path, output_path):
logger.debug("input and output paths are the same file; skipped copy")
return
if os.path.exists(output_path):
shutil.rmtree(output_path)
shutil.copytree(input_path, output_path)
def _main(args=None):
"""Convert a UFO font from cubic to quadratic curves"""
parser = argparse.ArgumentParser(prog="cu2qu")
parser.add_argument("--version", action="version", version=fontTools.__version__)
parser.add_argument(
"infiles",
nargs="+",
metavar="INPUT",
help="one or more input UFO source file(s).",
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-e",
"--conversion-error",
type=float,
metavar="ERROR",
default=None,
help="maxiumum approximation error measured in EM (default: 0.001)",
)
parser.add_argument(
"-m",
"--mixed",
default=False,
action="store_true",
help="whether to used mixed quadratic and cubic curves",
)
parser.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
help="do not reverse the contour direction",
)
mode_parser = parser.add_mutually_exclusive_group()
mode_parser.add_argument(
"-i",
"--interpolatable",
action="store_true",
help="whether curve conversion should keep interpolation compatibility",
)
mode_parser.add_argument(
"-j",
"--jobs",
type=int,
nargs="?",
default=1,
const=_cpu_count(),
metavar="N",
help="Convert using N multiple processes (default: %(default)s)",
)
output_parser = parser.add_mutually_exclusive_group()
output_parser.add_argument(
"-o",
"--output-file",
default=None,
metavar="OUTPUT",
help=(
"output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."
),
)
output_parser.add_argument(
"-d",
"--output-dir",
default=None,
metavar="DIRECTORY",
help="output directory where to save converted UFOs",
)
options = parser.parse_args(args)
if ufo_module is None:
parser.error("Either ufoLib2 or defcon are required to run this script.")
if not options.verbose:
level = "WARNING"
elif options.verbose == 1:
level = "INFO"
else:
level = "DEBUG"
logging.basicConfig(level=level)
if len(options.infiles) > 1 and options.output_file:
parser.error("-o/--output-file can't be used with multile inputs")
if options.output_dir:
output_dir = options.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
elif not os.path.isdir(output_dir):
parser.error("'%s' is not a directory" % output_dir)
output_paths = [
os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
]
elif options.output_file:
output_paths = [options.output_file]
else:
# save in-place
output_paths = [None] * len(options.infiles)
kwargs = dict(
dump_stats=options.verbose > 0,
max_err_em=options.conversion_error,
reverse_direction=options.reverse_direction,
all_quadratic=False if options.mixed else True,
)
if options.interpolatable:
logger.info("Converting curves compatibly")
ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
logger.info("Saving %s", output_path)
if output_path:
ufo.save(output_path)
else:
ufo.save()
else:
for input_path, output_path in zip(options.infiles, output_paths):
if output_path:
_copytree(input_path, output_path)
else:
jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
if jobs > 1:
func = partial(_font_to_quadratic, **kwargs)
logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths))
else:
for input_path, output_path in zip(options.infiles, output_paths):
_font_to_quadratic(input_path, output_path, **kwargs)
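# Illustrative command-line usage (a minimal sketch; paths are hypothetical):
#
#     fonttools cu2qu -e 0.001 --mixed -d build/ MyFont.ufo MyOtherFont.ufo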

File diff suppressed because it is too large


@ -0,0 +1,546 @@
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cython
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = cython.compiled
import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
NAN = float("NaN")
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex, result=cython.double)
def dot(v1, v2):
"""Return the dot product of two vectors.
Args:
v1 (complex): First vector.
v2 (complex): Second vector.
Returns:
double: Dot product.
"""
result = (v1 * v2.conjugate()).real
# When vectors are perpendicular (i.e. dot product is 0), the above expression may
# yield slightly different results when running in pure Python vs C/Cython,
# both of which are correct within IEEE-754 floating-point precision.
# It's probably due to the different order of operations and roundings in each
# implementation. Because we are using the result in a denominator and catching
# ZeroDivisionError (see `calc_intersect`), it's best to normalize the result here.
if abs(result) < 1e-15:
result = 0.0
return result
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
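# Convert cubic polynomial coefficients back to Bezier control points:
# given B(t) = a*t**3 + b*t**2 + c*t + d, return (p0, p1, p2, p3).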
def calc_cubic_points(a, b, c, d):
_1 = d
_2 = (c / 3.0) + d
_3 = (b + c) / 3.0 + _2
_4 = a + d + c + b
return _1, _2, _3, _4
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
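# Inverse of calc_cubic_points: turn Bezier control points (p0, p1, p2, p3)
# into the polynomial coefficients of B(t) = a*t**3 + b*t**2 + c*t + d.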
def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0
b = (p2 - p1) * 3.0 - c
d = p0
a = p3 - d - c - b
return a, b, c, d
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts.
Splits the curve into `n` equal parts by curve time.
(t=0..1/n, t=1/n..2/n, ...)
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
An iterator yielding the control points (four complex values) of the
subcurves.
"""
# Hand-coded special-cases
if n == 2:
return iter(split_cubic_into_two(p0, p1, p2, p3))
if n == 3:
return iter(split_cubic_into_three(p0, p1, p2, p3))
if n == 4:
a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(
split_cubic_into_two(a[0], a[1], a[2], a[3])
+ split_cubic_into_two(b[0], b[1], b[2], b[3])
)
if n == 6:
a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(
split_cubic_into_three(a[0], a[1], a[2], a[3])
+ split_cubic_into_three(b[0], b[1], b[2], b[3])
)
return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n
delta_2 = dt * dt
delta_3 = dt * delta_2
for i in range(n):
t1 = i * dt
t1_2 = t1 * t1
# calc new a, b, c and d
a1 = a * delta_3
b1 = (3 * a * t1 + b) * delta_2
c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
yield calc_cubic_points(a1, b1, c1, d1)
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts.
Splits the curve into two equal parts at t = 0.5
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
tuple: Two cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return (
(p0, (p0 + p1) * 0.5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
)
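# Sanity-check sketch: splitting the degenerate straight-line "cubic"
# 0 -> 1 -> 2 -> 3 (as complex numbers) yields the two halves
# (0, 0.5, 1, 1.5) and (1.5, 2, 2.5, 3), since mid = (0 + 3*(1+2) + 3)/8 = 1.5.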
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(
mid1=cython.complex,
deriv1=cython.complex,
mid2=cython.complex,
deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3):
"""Split a cubic Bezier into three equal parts.
Splits the curve into three equal parts at t = 1/3 and t = 2/3
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
tuple: Three cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
return (
(p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
)
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(
t=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one.
Args:
t (double): Position of control point.
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
complex: Location of candidate control point on quadratic curve.
"""
_p1 = p0 + (p1 - p0) * 1.5
_p2 = p3 + (p2 - p3) * 1.5
return _p1 + (_p2 - _p1) * t
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
def calc_intersect(a, b, c, d):
"""Calculate the intersection of two lines.
Args:
a (complex): Start point of first line.
b (complex): End point of first line.
c (complex): Start point of second line.
d (complex): End point of second line.
Returns:
complex: Location of intersection if one present, ``complex(NaN,NaN)``
if no intersection was found.
"""
ab = b - a
cd = d - c
p = ab * 1j
try:
h = dot(p, a - c) / dot(p, cd)
except ZeroDivisionError:
# if 3 or 4 points are equal, we do have an intersection despite the zero-div:
# return one of the off-curves so that the algorithm can attempt a one-curve
# solution if it's within tolerance:
# https://github.com/linebender/kurbo/pull/484
if b == c and (a == b or c == d):
return b
return complex(NAN, NAN)
return c + cd * h
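# Illustrative sketch: the horizontal segment 0 -> 2 and the vertical segment
# (1-1j) -> (1+1j) cross at 1+0j:
# >>> calc_intersect(0j, 2 + 0j, 1 - 1j, 1 + 1j)
# (1+0j)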
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
tolerance=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin.
"Origin" means *the* origin (0,0), not the start of the curve. Note that no
checks are made on the start and end positions of the curve; this function
only checks the inside of the curve.
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
tolerance (double): Distance from origin.
Returns:
bool: True if the cubic Bezier defined by ``p0``..``p3`` entirely lies
within a distance ``tolerance`` of the origin, False otherwise.
"""
# First check p2 then p1, as p2 has higher error early on.
if abs(p2) <= tolerance and abs(p1) <= tolerance:
return True
# Split.
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance:
return False
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return cubic_farthest_fit_inside(
p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
@cython.inline
@cython.locals(tolerance=cython.double)
@cython.locals(
q1=cython.complex,
c0=cython.complex,
c1=cython.complex,
c2=cython.complex,
c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance.
Args:
cubic (sequence): Four complex numbers representing control points of
the cubic Bezier curve.
tolerance (double): Permitted deviation from the original curve.
Returns:
Three complex numbers representing control points of the quadratic
curve if it fits within the given tolerance, or ``None`` if no suitable
curve could be calculated.
"""
q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
if math.isnan(q1.imag):
return None
c0 = cubic[0]
c3 = cubic[3]
c1 = c0 + (q1 - c0) * (2 / 3)
c2 = c3 + (q1 - c3) * (2 / 3)
if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
return None
return c0, q1, c3
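# Illustrative sketch: a cubic that is an exactly degree-elevated quadratic
# (off-curve point 1+2j, with cubic handles placed 2/3 of the way towards it)
# is recovered, up to floating-point noise, as (0j, 1+2j, 2+0j) for any
# positive tolerance:
#   cubic_approx_quadratic(
#       (0j, (1 + 2j) * (2 / 3), 2 + (-1 + 2j) * (2 / 3), 2 + 0j), 0.1
#   )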
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
@cython.locals(all_quadratic=cython.int)
@cython.locals(
c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
q0=cython.complex,
q1=cython.complex,
next_q1=cython.complex,
q2=cython.complex,
d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
cubic (sequence): Four complex numbers representing control points of
the cubic Bezier curve.
n (int): Number of quadratic Bezier curves in the spline.
tolerance (double): Permitted deviation from the original curve.
Returns:
A list of ``n+2`` complex numbers, representing control points of the
quadratic spline if it fits within the given tolerance, or ``None`` if
no suitable spline could be calculated.
"""
if n == 1:
return cubic_approx_quadratic(cubic, tolerance)
if n == 2 and not all_quadratic:
return cubic
cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)
# calculate the spline of quadratics and check errors at the same time.
next_cubic = next(cubics)
next_q1 = cubic_approx_control(
0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
)
q2 = cubic[0]
d1 = 0j
spline = [cubic[0], next_q1]
for i in range(1, n + 1):
# Current cubic to convert
c0, c1, c2, c3 = next_cubic
# Current quadratic approximation of current cubic
q0 = q2
q1 = next_q1
if i < n:
next_cubic = next(cubics)
next_q1 = cubic_approx_control(
i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
)
spline.append(next_q1)
q2 = (q1 + next_q1) * 0.5
else:
q2 = c3
# End-point deltas
d0 = d1
d1 = q2 - c3
if abs(d1) > tolerance or not cubic_farthest_fit_inside(
d0,
q0 + (q1 - q0) * (2 / 3) - c1,
q2 + (q1 - q2) * (2 / 3) - c2,
d1,
tolerance,
):
return None
spline.append(cubic[3])
return spline
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
@cython.locals(all_quadratic=cython.int)
def curve_to_quadratic(curve, max_err, all_quadratic=True):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
curve (sequence): Four 2D tuples representing control points of
the cubic Bezier curve.
max_err (double): Permitted deviation from the original curve.
all_quadratic (bool): If True (default) returned value is a
quadratic spline. If False, it's either a single quadratic
curve or a single cubic curve.
Returns:
If all_quadratic is True: A list of 2D tuples, representing
control points of the quadratic spline if it fits within the
given tolerance, or ``None`` if no suitable spline could be
calculated.
If all_quadratic is False: Either a quadratic curve (if length
of output is 3), or a cubic curve (if length of output is 4).
"""
curve = [complex(*p) for p in curve]
for n in range(1, MAX_N + 1):
spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
if spline is not None:
# done. go home
return [(s.real, s.imag) for s in spline]
raise ApproxNotFoundError(curve)
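# Usage sketch, mirroring the curves_to_quadratic doctest below for one curve:
# >>> curve_to_quadratic([(50, 50), (100, 100), (150, 100), (200, 50)], 1.0)
# [(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)]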
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
@cython.locals(all_quadratic=cython.int)
def curves_to_quadratic(curves, max_errors, all_quadratic=True):
"""Return quadratic Bezier splines approximating the input cubic Beziers.
Args:
curves: A sequence of *n* curves, each curve being a sequence of four
2D tuples.
max_errors: A sequence of *n* floats representing the maximum permissible
deviation from each of the cubic Bezier curves.
all_quadratic (bool): If True (default) returned values are a
quadratic spline. If False, they are either a single quadratic
curve or a single cubic curve.
Example::
>>> curves_to_quadratic( [
... [ (50,50), (100,100), (150,100), (200,50) ],
... [ (75,50), (120,100), (150,75), (200,60) ]
... ], [1,1] )
[[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]
The returned splines have "implied oncurve points" suitable for use in
TrueType ``glif`` outlines - i.e. in the first spline returned above,
the first quadratic segment runs from (50,50) to
( (75 + 125)/2 , (75 + 91.666..)/2 ) = (100, 83.333...).
Returns:
If all_quadratic is True, a list of splines, each spline being a list
of 2D tuples.
If all_quadratic is False, a list of curves, each curve being a quadratic
(length 3), or cubic (length 4).
Raises:
fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
can be found for all curves with the given parameters.
"""
curves = [[complex(*p) for p in curve] for curve in curves]
assert len(max_errors) == len(curves)
l = len(curves)
splines = [None] * l
last_i = i = 0
n = 1
while True:
spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
if spline is None:
if n == MAX_N:
break
n += 1
last_i = i
continue
splines[i] = spline
i = (i + 1) % l
if i == last_i:
# done. go home
return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves)

@@ -0,0 +1,77 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""
class ApproxNotFoundError(Error):
def __init__(self, curve):
message = "no approximation found: %s" % curve
super().__init__(message)
self.curve = curve
class UnequalZipLengthsError(Error):
pass
class IncompatibleGlyphsError(Error):
def __init__(self, glyphs):
assert len(glyphs) > 1
self.glyphs = glyphs
names = set(repr(g.name) for g in glyphs)
if len(names) > 1:
self.combined_name = "{%s}" % ", ".join(sorted(names))
else:
self.combined_name = names.pop()
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.combined_name)
class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
def __str__(self):
return "Glyphs named %s have different number of segments" % (
self.combined_name
)
class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
def __init__(self, glyphs, segments):
IncompatibleGlyphsError.__init__(self, glyphs)
self.segments = segments
def __str__(self):
lines = []
ndigits = len(str(max(self.segments)))
for i, tags in sorted(self.segments.items()):
lines.append(
"%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
)
return "Glyphs named %s have incompatible segment types:\n %s" % (
self.combined_name,
"\n ".join(lines),
)
class IncompatibleFontsError(Error):
def __init__(self, glyph_errors):
self.glyph_errors = glyph_errors
def __str__(self):
return "fonts contains incompatible glyphs: %s" % (
", ".join(repr(g) for g in sorted(self.glyph_errors.keys()))
)

@@ -0,0 +1,349 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts cubic bezier curves to quadratic splines.
Conversion is performed such that the quadratic splines keep the same end-curve
tangents as the original cubics. The approach is iterative, increasing the
number of segments for a spline until the error gets below a bound.
Respective curves from multiple fonts will be converted at once to ensure that
the resulting splines are interpolation-compatible.
"""
import logging
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
UnequalZipLengthsError,
IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
# (see fonts_to_quadratic).
DEFAULT_MAX_ERR = 0.001
CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type"
logger = logging.getLogger(__name__)
_zip = zip
def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility.
"""
if len(set(len(a) for a in args)) != 1:
raise UnequalZipLengthsError(*args)
return list(_zip(*args))
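# Doctest-style sketch of the stricter zip() defined above:
# >>> zip([1, 2], ["a", "b"])
# [(1, 'a'), (2, 'b')]
# whereas zip([1, 2], ["a"]) raises UnequalZipLengthsError instead of silently
# truncating like the builtin.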
class GetSegmentsPen(AbstractPen):
"""Pen to collect segments into lists of points for conversion.
Curves always include their initial on-curve point, so some points are
duplicated between segments.
"""
def __init__(self):
self._last_pt = None
self.segments = []
def _add_segment(self, tag, *args):
if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1]
self.segments.append((tag, args))
def moveTo(self, pt):
self._add_segment("move", pt)
def lineTo(self, pt):
self._add_segment("line", pt)
def qCurveTo(self, *points):
self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points):
self._add_segment("curve", self._last_pt, *points)
def closePath(self):
self._add_segment("close")
def endPath(self):
self._add_segment("end")
def addComponent(self, glyphName, transformation):
pass
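# Minimal sketch of the segment representation this pen produces:
# >>> pen = GetSegmentsPen()
# >>> pen.moveTo((0, 0)); pen.lineTo((10, 0)); pen.closePath()
# >>> pen.segments
# [('move', ((0, 0),)), ('line', ((10, 0),)), ('close', ())]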
def _get_segments(glyph):
"""Get a glyph's segments as extracted by GetSegmentsPen."""
pen = GetSegmentsPen()
# glyph.draw(pen)
# We can't simply draw the glyph with the pen, but we must initialize the
# PointToSegmentPen explicitly with outputImpliedClosingLine=True.
# By default PointToSegmentPen does not outputImpliedClosingLine -- unless
# last and first point on closed contour are duplicated. Because we are
# converting multiple glyphs at the same time, we want to make sure
# this function returns the same number of segments, whether or not
# the last and first point overlap.
# https://github.com/googlefonts/fontmake/issues/572
# https://github.com/fonttools/fonttools/pull/1720
pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True)
glyph.drawPoints(pointPen)
return pen.segments
def _set_segments(glyph, segments, reverse_direction):
"""Draw segments as extracted by GetSegmentsPen back to a glyph."""
glyph.clearContours()
pen = glyph.getPen()
if reverse_direction:
pen = ReverseContourPen(pen)
for tag, args in segments:
if tag == "move":
pen.moveTo(*args)
elif tag == "line":
pen.lineTo(*args)
elif tag == "curve":
pen.curveTo(*args[1:])
elif tag == "qcurve":
pen.qCurveTo(*args[1:])
elif tag == "close":
pen.closePath()
elif tag == "end":
pen.endPath()
else:
raise AssertionError('Unhandled segment type "%s"' % tag)
def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
"""Return quadratic approximations of cubic segments."""
assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"
new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
n = len(new_points[0])
assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"
spline_length = str(n - 2)
stats[spline_length] = stats.get(spline_length, 0) + 1
if all_quadratic or n == 3:
return [("qcurve", p) for p in new_points]
else:
return [("curve", p) for p in new_points]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
"""Do the actual conversion of a set of compatible glyphs, after arguments
have been set up.
Return True if the glyphs were modified, else return False.
"""
try:
segments_by_location = zip(*[_get_segments(g) for g in glyphs])
except UnequalZipLengthsError:
raise IncompatibleSegmentNumberError(glyphs)
if not any(segments_by_location):
return False
# always modify input glyphs if reverse_direction is True
glyphs_modified = reverse_direction
new_segments_by_location = []
incompatible = {}
for i, segments in enumerate(segments_by_location):
tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments]
elif tag == "curve":
new_segments = _segments_to_quadratic(
segments, max_err, stats, all_quadratic
)
if all_quadratic or new_segments != segments:
glyphs_modified = True
segments = new_segments
new_segments_by_location.append(segments)
if glyphs_modified:
new_segments_by_glyph = zip(*new_segments_by_location)
for glyph, new_segments in zip(glyphs, new_segments_by_glyph):
_set_segments(glyph, new_segments, reverse_direction)
if incompatible:
raise IncompatibleSegmentTypesError(glyphs, segments=incompatible)
return glyphs_modified
def glyphs_to_quadratic(
glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
"""Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
compatibility. If this is not required, calling glyphs_to_quadratic with one
glyph at a time may yield slightly more optimized results.
Return True if glyphs were modified, else return False.
Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines.
"""
if stats is None:
stats = {}
if not max_err:
# assume 1000 is the default UPEM
max_err = DEFAULT_MAX_ERR * 1000
if isinstance(max_err, (list, tuple)):
max_errors = max_err
else:
max_errors = [max_err] * len(glyphs)
assert len(max_errors) == len(glyphs)
return _glyphs_to_quadratic(
glyphs, max_errors, reverse_direction, stats, all_quadratic
)
def fonts_to_quadratic(
fonts,
max_err_em=None,
max_err=None,
reverse_direction=False,
stats=None,
dump_stats=False,
remember_curve_type=True,
all_quadratic=True,
):
"""Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
compatibility. If this is not required, calling fonts_to_quadratic with one
font at a time may yield slightly more optimized results.
Return the set of modified glyph names if any, else return an empty set.
By default, cu2qu stores the curve type in the fonts' lib, under a private
key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert
them again if the curve type is already set to "quadratic".
Setting 'remember_curve_type' to False disables this optimization.
Raises IncompatibleFontsError if same-named glyphs from different fonts
have non-interpolatable outlines.
"""
if remember_curve_type:
curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
if len(curve_types) == 1:
curve_type = next(iter(curve_types))
if curve_type in ("quadratic", "mixed"):
logger.info("Curves already converted to quadratic")
return set()
elif curve_type == "cubic":
pass # keep converting
else:
raise NotImplementedError(curve_type)
elif len(curve_types) > 1:
# going to crash later if they do differ
logger.warning("fonts may contain different curve types")
if stats is None:
stats = {}
if max_err_em and max_err:
raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR
if isinstance(max_err, (list, tuple)):
assert len(max_err) == len(fonts)
max_errors = max_err
elif max_err:
max_errors = [max_err] * len(fonts)
if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em)
max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
modified = set()
glyph_errors = {}
for name in set().union(*(f.keys() for f in fonts)):
glyphs = []
cur_max_errors = []
for font, error in zip(fonts, max_errors):
if name in font:
glyphs.append(font[name])
cur_max_errors.append(error)
try:
if _glyphs_to_quadratic(
glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
):
modified.add(name)
except IncompatibleGlyphsError as exc:
logger.error(exc)
glyph_errors[name] = exc
if glyph_errors:
raise IncompatibleFontsError(glyph_errors)
if modified and dump_stats:
spline_lengths = sorted(stats.keys())
logger.info(
"New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
)
if remember_curve_type:
for font in fonts:
curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
new_curve_type = "quadratic" if all_quadratic else "mixed"
if curve_type != new_curve_type:
font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
return modified
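# Hedged usage sketch: assumes UFO-like font objects (e.g. defcon or ufoLib2
# fonts, not imported here) exposing `lib`, `info.unitsPerEm` and glyph access;
# "MyFont.ufo" is a hypothetical path:
#   font = defcon.Font("MyFont.ufo")
#   modified = fonts_to_quadratic([font], max_err_em=0.001, reverse_direction=True)
#   print("converted %d glyphs" % len(modified))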
def glyph_to_quadratic(glyph, **kwargs):
"""Convenience wrapper around glyphs_to_quadratic, for just one glyph.
Return True if the glyph was modified, else return False.
"""
return glyphs_to_quadratic([glyph], **kwargs)
def font_to_quadratic(font, **kwargs):
"""Convenience wrapper around fonts_to_quadratic, for just one font.
Return the set of modified glyph names if any, else return empty set.
"""
return fonts_to_quadratic([font], **kwargs)

File diff suppressed because it is too large.

@@ -0,0 +1,6 @@
import sys
from fontTools.designspaceLib import main
if __name__ == "__main__":
sys.exit(main())

@@ -0,0 +1,475 @@
"""Allows building all the variable fonts of a DesignSpace version 5 by
splitting the document into interpolable sub-space, then into each VF.
"""
from __future__ import annotations
import itertools
import logging
import math
from typing import Any, Callable, Dict, Iterator, List, Tuple, cast
from fontTools.designspaceLib import (
AxisDescriptor,
AxisMappingDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
InstanceDescriptor,
RuleDescriptor,
SimpleLocationDict,
SourceDescriptor,
VariableFontDescriptor,
)
from fontTools.designspaceLib.statNames import StatNames, getStatNames
from fontTools.designspaceLib.types import (
ConditionSet,
Range,
Region,
getVFUserRegion,
locationInRegion,
regionInRegion,
userRegionToDesignRegion,
)
LOGGER = logging.getLogger(__name__)
MakeInstanceFilenameCallable = Callable[
[DesignSpaceDocument, InstanceDescriptor, StatNames], str
]
def defaultMakeInstanceFilename(
doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames
) -> str:
"""Default callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
"""
familyName = instance.familyName or statNames.familyNames.get("en")
styleName = instance.styleName or statNames.styleNames.get("en")
return f"{familyName}-{styleName}.ttf"
def splitInterpolable(
doc: DesignSpaceDocument,
makeNames: bool = True,
expandLocations: bool = True,
makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]:
"""Split the given DS5 into several interpolable sub-designspaces.
There are as many interpolable sub-spaces as there are combinations of
discrete axis values.
E.g. with axes:
- italic (discrete) Upright or Italic
- style (discrete) Sans or Serif
- weight (continuous) 100 to 900
There are 4 sub-spaces in which the Weight axis should interpolate:
(Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif).
The sub-designspaces still include the full axis definitions and STAT data,
but the rules, sources, variable fonts, instances are trimmed down to only
keep what falls within the interpolable sub-space.
Args:
- ``makeNames``: Whether to compute the instance family and style
names using the STAT data.
- ``expandLocations``: Whether to turn all locations into "full"
locations, including implicit default axis values where missing.
- ``makeInstanceFilename``: Callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
.. versionadded:: 5.0
"""
discreteAxes = []
interpolableUserRegion: Region = {}
for axis in doc.axes:
if hasattr(axis, "values"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axis = cast(DiscreteAxisDescriptor, axis)
discreteAxes.append(axis)
else:
axis = cast(AxisDescriptor, axis)
interpolableUserRegion[axis.name] = Range(
axis.minimum,
axis.maximum,
axis.default,
)
valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
for values in valueCombinations:
discreteUserLocation = {
discreteAxis.name: value
for discreteAxis, value in zip(discreteAxes, values)
}
subDoc = _extractSubSpace(
doc,
{**interpolableUserRegion, **discreteUserLocation},
keepVFs=True,
makeNames=makeNames,
expandLocations=expandLocations,
makeInstanceFilename=makeInstanceFilename,
)
yield discreteUserLocation, subDoc
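# Hedged usage sketch ("MyFamily.designspace" is a hypothetical path):
#   doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#   for discreteLocation, subDoc in splitInterpolable(doc):
#       print(discreteLocation, [s.filename for s in subDoc.sources])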
def splitVariableFonts(
doc: DesignSpaceDocument,
makeNames: bool = False,
expandLocations: bool = False,
makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[str, DesignSpaceDocument]]:
"""Convert each variable font listed in this document into a standalone
designspace. This can be used to compile all the variable fonts from a
format 5 designspace using tools that can only deal with 1 VF at a time.
Args:
- ``makeNames``: Whether to compute the instance family and style
names using the STAT data.
- ``expandLocations``: Whether to turn all locations into "full"
locations, including implicit default axis values where missing.
- ``makeInstanceFilename``: Callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
.. versionadded:: 5.0
"""
# Make one DesignspaceDoc v5 for each variable font
for vf in doc.getVariableFonts():
vfUserRegion = getVFUserRegion(doc, vf)
vfDoc = _extractSubSpace(
doc,
vfUserRegion,
keepVFs=False,
makeNames=makeNames,
expandLocations=expandLocations,
makeInstanceFilename=makeInstanceFilename,
)
vfDoc.lib = {**vfDoc.lib, **vf.lib}
yield vf.name, vfDoc
def convert5to4(
doc: DesignSpaceDocument,
) -> Dict[str, DesignSpaceDocument]:
"""Convert each variable font listed in this document into a standalone
format 4 designspace. This can be used to compile all the variable fonts
from a format 5 designspace using tools that only know about format 4.
.. versionadded:: 5.0
"""
vfs = {}
for _location, subDoc in splitInterpolable(doc):
for vfName, vfDoc in splitVariableFonts(subDoc):
vfDoc.formatVersion = "4.1"
vfs[vfName] = vfDoc
return vfs
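# Hedged usage sketch: write out one format-4.1 document per variable font
# (deriving filenames from the VF names is an assumption, not prescribed here):
#   for vfName, vfDoc in convert5to4(doc).items():
#       vfDoc.write(f"{vfName}.designspace")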
def _extractSubSpace(
doc: DesignSpaceDocument,
userRegion: Region,
*,
keepVFs: bool,
makeNames: bool,
expandLocations: bool,
makeInstanceFilename: MakeInstanceFilenameCallable,
) -> DesignSpaceDocument:
subDoc = DesignSpaceDocument()
# Don't include STAT info
# FIXME: (Jany) let's think about it. Not include = OK because the point of
# the splitting is to build VFs and we'll use the STAT data of the full
# document to generate the STAT of the VFs, so "no need" to have STAT data
# in sub-docs. Counterpoint: what if someone wants to split this DS for
# other purposes? Maybe for that it would be useful to also subset the STAT
# data?
# subDoc.elidedFallbackName = doc.elidedFallbackName
def maybeExpandDesignLocation(object):
if expandLocations:
return object.getFullDesignLocation(doc)
else:
return object.designLocation
for axis in doc.axes:
range = userRegion[axis.name]
if isinstance(range, Range) and hasattr(axis, "minimum"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axis = cast(AxisDescriptor, axis)
subDoc.addAxis(
AxisDescriptor(
# Same info
tag=axis.tag,
name=axis.name,
labelNames=axis.labelNames,
hidden=axis.hidden,
# Subset range
minimum=max(range.minimum, axis.minimum),
default=range.default or axis.default,
maximum=min(range.maximum, axis.maximum),
map=[
(user, design)
for user, design in axis.map
if range.minimum <= user <= range.maximum
],
# Don't include STAT info
axisOrdering=None,
axisLabels=None,
)
)
subDoc.axisMappings = mappings = []
subDocAxes = {axis.name for axis in subDoc.axes}
for mapping in doc.axisMappings:
if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()):
continue
if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()):
LOGGER.error(
"In axis mapping from input %s, some output axes are not in the variable-font: %s",
mapping.inputLocation,
mapping.outputLocation,
)
continue
mappingAxes = set()
mappingAxes.update(mapping.inputLocation.keys())
mappingAxes.update(mapping.outputLocation.keys())
for axis in doc.axes:
if axis.name not in mappingAxes:
continue
range = userRegion[axis.name]
if (
range.minimum != axis.minimum
or (range.default is not None and range.default != axis.default)
or range.maximum != axis.maximum
):
LOGGER.error(
"Limiting axis ranges used in <mapping> elements not supported: %s",
axis.name,
)
continue
mappings.append(
AxisMappingDescriptor(
inputLocation=mapping.inputLocation,
outputLocation=mapping.outputLocation,
)
)
# Don't include STAT info
# subDoc.locationLabels = doc.locationLabels
# Rules: subset them based on conditions
designRegion = userRegionToDesignRegion(doc, userRegion)
subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion)
subDoc.rulesProcessingLast = doc.rulesProcessingLast
# Sources: keep only the ones that fall within the kept axis ranges
for source in doc.sources:
if not locationInRegion(doc.map_backward(source.designLocation), userRegion):
continue
subDoc.addSource(
SourceDescriptor(
filename=source.filename,
path=source.path,
font=source.font,
name=source.name,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(source)
),
layerName=source.layerName,
familyName=source.familyName,
styleName=source.styleName,
muteKerning=source.muteKerning,
muteInfo=source.muteInfo,
mutedGlyphNames=source.mutedGlyphNames,
)
)
# Copy family name translations from the old default source to the new default
vfDefault = subDoc.findDefault()
oldDefault = doc.findDefault()
if vfDefault is not None and oldDefault is not None:
vfDefault.localisedFamilyName = oldDefault.localisedFamilyName
# Variable fonts: keep only the ones that fall within the kept axis ranges
if keepVFs:
# Note: call getVariableFont() to make the implicit VFs explicit
for vf in doc.getVariableFonts():
vfUserRegion = getVFUserRegion(doc, vf)
if regionInRegion(vfUserRegion, userRegion):
subDoc.addVariableFont(
VariableFontDescriptor(
name=vf.name,
filename=vf.filename,
axisSubsets=[
axisSubset
for axisSubset in vf.axisSubsets
if isinstance(userRegion[axisSubset.name], Range)
],
lib=vf.lib,
)
)
# Instances: same as Sources + compute missing names
for instance in doc.instances:
if not locationInRegion(instance.getFullUserLocation(doc), userRegion):
continue
if makeNames:
statNames = getStatNames(doc, instance.getFullUserLocation(doc))
familyName = instance.familyName or statNames.familyNames.get("en")
styleName = instance.styleName or statNames.styleNames.get("en")
subDoc.addInstance(
InstanceDescriptor(
filename=instance.filename
or makeInstanceFilename(doc, instance, statNames),
path=instance.path,
font=instance.font,
name=instance.name or f"{familyName} {styleName}",
userLocation={} if expandLocations else instance.userLocation,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(instance)
),
familyName=familyName,
styleName=styleName,
postScriptFontName=instance.postScriptFontName
or statNames.postScriptFontName,
styleMapFamilyName=instance.styleMapFamilyName
or statNames.styleMapFamilyNames.get("en"),
styleMapStyleName=instance.styleMapStyleName
or statNames.styleMapStyleName,
localisedFamilyName=instance.localisedFamilyName
or statNames.familyNames,
localisedStyleName=instance.localisedStyleName
or statNames.styleNames,
localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName
or statNames.styleMapFamilyNames,
localisedStyleMapStyleName=instance.localisedStyleMapStyleName
or {},
lib=instance.lib,
)
)
else:
subDoc.addInstance(
InstanceDescriptor(
filename=instance.filename,
path=instance.path,
font=instance.font,
name=instance.name,
userLocation={} if expandLocations else instance.userLocation,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(instance)
),
familyName=instance.familyName,
styleName=instance.styleName,
postScriptFontName=instance.postScriptFontName,
styleMapFamilyName=instance.styleMapFamilyName,
styleMapStyleName=instance.styleMapStyleName,
localisedFamilyName=instance.localisedFamilyName,
localisedStyleName=instance.localisedStyleName,
localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName,
localisedStyleMapStyleName=instance.localisedStyleMapStyleName,
lib=instance.lib,
)
)
subDoc.lib = doc.lib
return subDoc
def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet:
c: Dict[str, Range] = {}
for condition in conditionSet:
minimum, maximum = condition.get("minimum"), condition.get("maximum")
c[condition["name"]] = Range(
minimum if minimum is not None else -math.inf,
maximum if maximum is not None else math.inf,
)
return c
def _subsetRulesBasedOnConditions(
rules: List[RuleDescriptor], designRegion: Region
) -> List[RuleDescriptor]:
# What rules to keep:
# - Keep the rule if any conditionset is relevant.
# - A conditionset is relevant if all conditions are relevant or it is empty.
# - A condition is relevant if
# - axis is point (C-AP),
# - and point in condition's range (C-AP-in)
# (in this case remove the condition because it's always true)
# - else (C-AP-out) whole conditionset can be discarded (condition false
# => conditionset false)
# - axis is range (C-AR),
# - (C-AR-all) and axis range fully contained in condition range: we can
# scrap the condition because it's always true
# - (C-AR-inter) and intersection(axis range, condition range) not empty:
# keep the condition with the smaller range (= intersection)
# - (C-AR-none) else, whole conditionset can be discarded
newRules: List[RuleDescriptor] = []
for rule in rules:
newRule: RuleDescriptor = RuleDescriptor(
name=rule.name, conditionSets=[], subs=rule.subs
)
for conditionset in rule.conditionSets:
cs = _conditionSetFrom(conditionset)
newConditionset: List[Dict[str, Any]] = []
discardConditionset = False
for selectionName, selectionValue in designRegion.items():
# TODO: Ensure that all(key in conditionset for key in region.keys())?
if selectionName not in cs:
# raise Exception("Selection has different axes than the rules")
continue
if isinstance(selectionValue, (float, int)): # is point
# Case C-AP-in
if selectionValue in cs[selectionName]:
pass # always matches, conditionset can stay empty for this one.
# Case C-AP-out
else:
discardConditionset = True
else: # is range
# Case C-AR-all
if selectionValue in cs[selectionName]:
pass # always matches, conditionset can stay empty for this one.
else:
intersection = cs[selectionName].intersection(selectionValue)
# Case C-AR-inter
if intersection is not None:
newConditionset.append(
{
"name": selectionName,
"minimum": intersection.minimum,
"maximum": intersection.maximum,
}
)
# Case C-AR-none
else:
discardConditionset = True
if not discardConditionset:
newRule.conditionSets.append(newConditionset)
if newRule.conditionSets:
newRules.append(newRule)
return newRules
def _filterLocation(
userRegion: Region,
location: Dict[str, float],
) -> Dict[str, float]:
return {
name: value
for name, value in location.items()
if name in userRegion and isinstance(userRegion[name], Range)
}

@@ -0,0 +1,260 @@
"""Compute name information for a given location in user-space coordinates
using STAT data. This can be used to fill-in automatically the names of an
instance:
.. code:: python
instance = doc.instances[0]
names = getStatNames(doc, instance.getFullUserLocation(doc))
print(names.styleNames)
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Literal, Optional, Tuple, Union
import logging
from fontTools.designspaceLib import (
AxisDescriptor,
AxisLabelDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
SimpleLocationDict,
SourceDescriptor,
)
LOGGER = logging.getLogger(__name__)
RibbiStyleName = Union[
Literal["regular"],
Literal["bold"],
Literal["italic"],
Literal["bold italic"],
]
BOLD_ITALIC_TO_RIBBI_STYLE = {
(False, False): "regular",
(False, True): "italic",
(True, False): "bold",
(True, True): "bold italic",
}
@dataclass
class StatNames:
"""Name data generated from the STAT table information."""
familyNames: Dict[str, str]
styleNames: Dict[str, str]
postScriptFontName: Optional[str]
styleMapFamilyNames: Dict[str, str]
styleMapStyleName: Optional[RibbiStyleName]
def getStatNames(
doc: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> StatNames:
"""Compute the family, style, PostScript names of the given ``userLocation``
using the document's STAT information.
Also computes localizations.
If not enough STAT data is available for a given name, either its dict of
localized names will be empty (family and style names), or the name will be
None (PostScript name).
Note: this method does not consider info attached to the instance, like
family name. The user needs to override all names on an instance that STAT
information would compute differently than desired.
.. versionadded:: 5.0
"""
familyNames: Dict[str, str] = {}
defaultSource: Optional[SourceDescriptor] = doc.findDefault()
if defaultSource is None:
LOGGER.warning("Cannot determine default source to look up family name.")
elif defaultSource.familyName is None:
LOGGER.warning(
"Cannot look up family name, assign the 'familyname' attribute to the default source."
)
else:
familyNames = {
"en": defaultSource.familyName,
**defaultSource.localisedFamilyName,
}
styleNames: Dict[str, str] = {}
# If a free-standing label matches the location, use it for name generation.
label = doc.labelForUserLocation(userLocation)
if label is not None:
styleNames = {"en": label.name, **label.labelNames}
# Otherwise, scour the axis labels for matches.
else:
# Gather all languages in which at least one translation is provided
# Then build names for all these languages, but fallback to English
# whenever a translation is missing.
labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
if labels:
languages = set(
language for label in labels for language in label.labelNames
)
languages.add("en")
for language in languages:
styleName = " ".join(
label.labelNames.get(language, label.defaultName)
for label in labels
if not label.elidable
)
if not styleName and doc.elidedFallbackName is not None:
styleName = doc.elidedFallbackName
styleNames[language] = styleName
if "en" not in familyNames or "en" not in styleNames:
# Not enough information to compute PS names or styleMap names
return StatNames(
familyNames=familyNames,
styleNames=styleNames,
postScriptFontName=None,
styleMapFamilyNames={},
styleMapStyleName=None,
)
postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")
styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)
styleNamesForStyleMap = styleNames
if regularUserLocation != userLocation:
regularStatNames = getStatNames(doc, regularUserLocation)
styleNamesForStyleMap = regularStatNames.styleNames
styleMapFamilyNames = {}
for language in set(familyNames).union(styleNames.keys()):
familyName = familyNames.get(language, familyNames["en"])
styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
styleMapFamilyNames[language] = (familyName + " " + styleName).strip()
return StatNames(
familyNames=familyNames,
styleNames=styleNames,
postScriptFontName=postScriptFontName,
styleMapFamilyNames=styleMapFamilyNames,
styleMapStyleName=styleMapStyleName,
)
def _getSortedAxisLabels(
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
) -> Dict[str, list[AxisLabelDescriptor]]:
"""Returns axis labels sorted by their ordering, with unordered ones appended as
they are listed."""
# First, get the axis labels with explicit ordering...
sortedAxes = sorted(
(axis for axis in axes if axis.axisOrdering is not None),
key=lambda a: a.axisOrdering,
)
sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
axis.name: axis.axisLabels for axis in sortedAxes
}
# ... then append the others in the order they appear.
# NOTE: This relies on Python 3.7+ dict's preserved insertion order.
for axis in axes:
if axis.axisOrdering is None:
sortedLabels[axis.name] = axis.axisLabels
return sortedLabels
def _getAxisLabelsForUserLocation(
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
userLocation: SimpleLocationDict,
) -> list[AxisLabelDescriptor]:
labels: list[AxisLabelDescriptor] = []
allAxisLabels = _getSortedAxisLabels(axes)
if allAxisLabels.keys() != userLocation.keys():
LOGGER.warning(
f"Mismatch between user location '{userLocation.keys()}' and available "
f"labels for '{allAxisLabels.keys()}'."
)
for axisName, axisLabels in allAxisLabels.items():
userValue = userLocation[axisName]
label: Optional[AxisLabelDescriptor] = next(
(
l
for l in axisLabels
if l.userValue == userValue
or (
l.userMinimum is not None
and l.userMaximum is not None
and l.userMinimum <= userValue <= l.userMaximum
)
),
None,
)
if label is None:
LOGGER.debug(
f"Document needs a label for axis '{axisName}', user value '{userValue}'."
)
else:
labels.append(label)
return labels
def _getRibbiStyle(
self: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> Tuple[RibbiStyleName, SimpleLocationDict]:
"""Compute the RIBBI style name of the given user location,
return the location of the matching Regular in the RIBBI group.
.. versionadded:: 5.0
"""
regularUserLocation = {}
axes_by_tag = {axis.tag: axis for axis in self.axes}
bold: bool = False
italic: bool = False
axis = axes_by_tag.get("wght")
if axis is not None:
for regular_label in axis.axisLabels:
if (
regular_label.linkedUserValue == userLocation[axis.name]
# In the "recursive" case where both the Regular has
# linkedUserValue pointing to the Bold, and the Bold has
# linkedUserValue pointing to the Regular, only consider the
# first case: Regular (e.g. 400) has linkedUserValue pointing to
# Bold (e.g. 700, higher than Regular)
and regular_label.userValue < regular_label.linkedUserValue
):
regularUserLocation[axis.name] = regular_label.userValue
bold = True
break
axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
if axis is not None:
for upright_label in axis.axisLabels:
if (
upright_label.linkedUserValue == userLocation[axis.name]
# In the "recursive" case where both the Upright has
# linkedUserValue pointing the Italic, and the Italic has
# linkedUserValue pointing to the Upright, only consider the
# first case: Upright (e.g. ital=0, slant=0) has
# linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
# slant=12 for backwards italics, in any case higher than
# Upright in absolute value, hence the abs() below.
and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
):
regularUserLocation[axis.name] = upright_label.userValue
italic = True
break
return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
**userLocation,
**regularUserLocation,
}

@@ -0,0 +1,147 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, List, Optional, Union, cast
from fontTools.designspaceLib import (
AxisDescriptor,
DesignSpaceDocument,
DesignSpaceDocumentError,
RangeAxisSubsetDescriptor,
SimpleLocationDict,
ValueAxisSubsetDescriptor,
VariableFontDescriptor,
)
def clamp(value, minimum, maximum):
return min(max(value, minimum), maximum)
@dataclass
class Range:
minimum: float
"""Inclusive minimum of the range."""
maximum: float
"""Inclusive maximum of the range."""
default: float = 0
"""Default value"""
def __post_init__(self):
self.minimum, self.maximum = sorted((self.minimum, self.maximum))
self.default = clamp(self.default, self.minimum, self.maximum)
def __contains__(self, value: Union[float, Range]) -> bool:
if isinstance(value, Range):
return self.minimum <= value.minimum and value.maximum <= self.maximum
return self.minimum <= value <= self.maximum
def intersection(self, other: Range) -> Optional[Range]:
if self.maximum < other.minimum or self.minimum > other.maximum:
return None
else:
return Range(
max(self.minimum, other.minimum),
min(self.maximum, other.maximum),
self.default, # We don't care about the default in this use-case
)
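# Doctest-style sketch of Range semantics; note that __post_init__ re-sorts
# the bounds and clamps the default, so the intersection below reports default=5:
# >>> 3 in Range(0, 10)
# True
# >>> Range(5, 20) in Range(0, 10)
# False
# >>> Range(0, 10).intersection(Range(5, 20))
# Range(minimum=5, maximum=10, default=5)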
# A region selection is either a range or a single value, as a Designspace v5
# axis-subset element only allows a single discrete value or a range for a
# variable-font element.
Region = Dict[str, Union[Range, float]]
# A conditionset is a set of named ranges.
ConditionSet = Dict[str, Range]
# A rule is a list of conditionsets where any has to be relevant for the whole rule to be relevant.
Rule = List[ConditionSet]
Rules = Dict[str, Rule]
def locationInRegion(location: SimpleLocationDict, region: Region) -> bool:
for name, value in location.items():
if name not in region:
return False
regionValue = region[name]
if isinstance(regionValue, (float, int)):
if value != regionValue:
return False
else:
if value not in regionValue:
return False
return True
def regionInRegion(region: Region, superRegion: Region) -> bool:
for name, value in region.items():
if name not in superRegion:
return False
superValue = superRegion[name]
if isinstance(superValue, (float, int)):
if value != superValue:
return False
else:
if value not in superValue:
return False
return True
def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region:
designRegion = {}
for name, value in userRegion.items():
axis = doc.getAxis(name)
if axis is None:
raise DesignSpaceDocumentError(
f"Cannot find axis named '{name}' for region."
)
if isinstance(value, (float, int)):
designRegion[name] = axis.map_forward(value)
else:
designRegion[name] = Range(
axis.map_forward(value.minimum),
axis.map_forward(value.maximum),
axis.map_forward(value.default),
)
return designRegion
def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region:
vfUserRegion: Region = {}
# For each axis, 2 cases:
# - it has a range = it's an axis in the VF DS
# - it's a single location = use it to know which rules should apply in the VF
for axisSubset in vf.axisSubsets:
axis = doc.getAxis(axisSubset.name)
if axis is None:
raise DesignSpaceDocumentError(
f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'."
)
if hasattr(axisSubset, "userMinimum"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset)
if not hasattr(axis, "minimum"):
raise DesignSpaceDocumentError(
f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' "
"because it's a discrete axis, use only 'userValue' instead."
)
axis = cast(AxisDescriptor, axis)
vfUserRegion[axis.name] = Range(
max(axisSubset.userMinimum, axis.minimum),
min(axisSubset.userMaximum, axis.maximum),
axisSubset.userDefault or axis.default,
)
else:
axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset)
vfUserRegion[axis.name] = axisSubset.userValue
# Any axis not mentioned explicitly has a single location = default value
for axis in doc.axes:
if axis.name not in vfUserRegion:
assert isinstance(
axis.default, (int, float)
), f"Axis '{axis.name}' has no valid default value."
vfUserRegion[axis.name] = axis.default
return vfUserRegion

@@ -0,0 +1,258 @@
MacRoman = [
"NUL",
"Eth",
"eth",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Yacute",
"yacute",
"HT",
"LF",
"Thorn",
"thorn",
"CR",
"Zcaron",
"zcaron",
"DLE",
"DC1",
"DC2",
"DC3",
"DC4",
"onehalf",
"onequarter",
"onesuperior",
"threequarters",
"threesuperior",
"twosuperior",
"brokenbar",
"minus",
"multiply",
"RS",
"US",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"DEL",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nbspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
]
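# Positions 0x20-0x7E mirror ASCII (e.g. MacRoman[0x41] == "A"); the control
# range and the upper half carry Mac-specific names, e.g. MacRoman[0xF0] == "apple".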

@@ -0,0 +1,258 @@
StandardEncoding = [
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quoteright",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"quoteleft",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"exclamdown",
"cent",
"sterling",
"fraction",
"yen",
"florin",
"section",
"currency",
"quotesingle",
"quotedblleft",
"guillemotleft",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
".notdef",
"endash",
"dagger",
"daggerdbl",
"periodcentered",
".notdef",
"paragraph",
"bullet",
"quotesinglbase",
"quotedblbase",
"quotedblright",
"guillemotright",
"ellipsis",
"perthousand",
".notdef",
"questiondown",
".notdef",
"grave",
"acute",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"dieresis",
".notdef",
"ring",
"cedilla",
".notdef",
"hungarumlaut",
"ogonek",
"caron",
"emdash",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"AE",
".notdef",
"ordfeminine",
".notdef",
".notdef",
".notdef",
".notdef",
"Lslash",
"Oslash",
"OE",
"ordmasculine",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"ae",
".notdef",
".notdef",
".notdef",
"dotlessi",
".notdef",
".notdef",
"lslash",
"oslash",
"oe",
"germandbls",
".notdef",
".notdef",
".notdef",
".notdef",
]

@@ -0,0 +1 @@
"""Empty __init__.py file to signal Python this directory is a package."""

@@ -0,0 +1,135 @@
"""Extend the Python codecs module with a few encodings that are used in OpenType (name table)
but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
import codecs
import encodings
class ExtendCodec(codecs.Codec):
def __init__(self, name, base_encoding, mapping):
self.name = name
self.base_encoding = base_encoding
self.mapping = mapping
self.reverse = {v: k for k, v in mapping.items()}
self.max_len = max(len(v) for v in mapping.values())
self.info = codecs.CodecInfo(
name=self.name, encode=self.encode, decode=self.decode
)
codecs.register_error(name, self.error)
def _map(self, mapper, output_type, exc_type, input, errors):
base_error_handler = codecs.lookup_error(errors)
length = len(input)
out = output_type()
while input:
# first try to use self.error as the error handler
try:
part = mapper(input, self.base_encoding, errors=self.name)
out += part
break # All converted
except exc_type as e:
# else convert the correct part, handle error as requested and continue
out += mapper(input[: e.start], self.base_encoding, self.name)
replacement, pos = base_error_handler(e)
out += replacement
input = input[pos:]
return out, length
def encode(self, input, errors="strict"):
return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
def decode(self, input, errors="strict"):
return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
def error(self, e):
if isinstance(e, UnicodeDecodeError):
for end in range(e.start + 1, e.end + 1):
s = e.object[e.start : end]
if s in self.mapping:
return self.mapping[s], end
elif isinstance(e, UnicodeEncodeError):
for end in range(e.start + 1, e.start + self.max_len + 1):
s = e.object[e.start : end]
if s in self.reverse:
return self.reverse[s], end
e.encoding = self.name
raise e
_extended_encodings = {
"x_mac_japanese_ttx": (
"shift_jis",
{
b"\xFC": chr(0x007C),
b"\x7E": chr(0x007E),
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_trad_chinese_ttx": (
"big5",
{
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_korean_ttx": (
"euc_kr",
{
b"\x80": chr(0x00A0),
b"\x81": chr(0x20A9),
b"\x82": chr(0x2014),
b"\x83": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_simp_chinese_ttx": (
"gb2312",
{
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
}
_cache = {}
def search_function(name):
name = encodings.normalize_encoding(name) # Rather undocumented...
if name in _extended_encodings:
if name not in _cache:
base_encoding, mapping = _extended_encodings[name]
assert name[-4:] == "_ttx"
# Python 2 didn't have any of the encodings that we are implementing
# in this file. Python 3 added aliases for the East Asian ones, mapping
# them "temporarily" to the same base encoding as us, with a comment
# suggesting that full implementation will appear some time later.
# As such, try the Python version of the x_mac_... first, if that is found,
# use *that* as our base encoding. This would make our encoding upgrade
# to the full encoding when and if Python finally implements that.
# http://bugs.python.org/issue24041
base_encodings = [name[:-4], base_encoding]
for base_encoding in base_encodings:
try:
codecs.lookup(base_encoding)
except LookupError:
continue
_cache[name] = ExtendCodec(name, base_encoding, mapping)
break
return _cache[name].info
return None
codecs.register(search_function)
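
A minimal usage sketch: importing the module is enough, since codecs.register() runs at import time; the byte values below come from the x_mac_japanese_ttx mapping above.

import fontTools.encodings.codecs  # noqa: F401 -- the import registers the codecs

# 0xFE is not valid shift_jis, so the fallback mapping decodes it to U+2122.
assert b"\xfe".decode("x_mac_japanese_ttx") == "\u2122"
assert "\u2122".encode("x_mac_japanese_ttx") == b"\xfe"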


@@ -0,0 +1,4 @@
"""fontTools.feaLib -- a package for dealing with OpenType feature files."""
# The structure of OpenType feature files is defined here:
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html


@@ -0,0 +1,78 @@
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
from fontTools.feaLib.error import FeatureLibError
from fontTools import configLogger
from fontTools.misc.cliTools import makeOutputFileName
import sys
import argparse
import logging
log = logging.getLogger("fontTools.feaLib")
def main(args=None):
"""Add features from a feature file (.fea) into an OTF font"""
parser = argparse.ArgumentParser(
description="Use fontTools to compile OpenType feature files (*.fea)."
)
parser.add_argument(
"input_fea", metavar="FEATURES", help="Path to the feature file"
)
parser.add_argument(
"input_font", metavar="INPUT_FONT", help="Path to the input font"
)
parser.add_argument(
"-o",
"--output",
dest="output_font",
metavar="OUTPUT_FONT",
help="Path to the output font.",
)
parser.add_argument(
"-t",
"--tables",
metavar="TABLE_TAG",
choices=Builder.supportedTables,
nargs="+",
help="Specify the table(s) to be built.",
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="Add source-level debugging information to font.",
)
parser.add_argument(
"-v",
"--verbose",
help="Increase the logger verbosity. Multiple -v " "options are allowed.",
action="count",
default=0,
)
parser.add_argument(
"--traceback", help="show traceback for exceptions.", action="store_true"
)
options = parser.parse_args(args)
levels = ["WARNING", "INFO", "DEBUG"]
configLogger(level=levels[min(len(levels) - 1, options.verbose)])
output_font = options.output_font or makeOutputFileName(options.input_font)
log.info("Compiling features to '%s'" % (output_font))
font = TTFont(options.input_font)
try:
addOpenTypeFeatures(
font, options.input_fea, tables=options.tables, debug=options.debug
)
except FeatureLibError as e:
if options.traceback:
raise
log.error(e)
sys.exit(1)
font.save(output_font)
if __name__ == "__main__":
sys.exit(main())
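
A sketch of an equivalent programmatic call (file names are hypothetical):

# Same as: fonttools feaLib features.fea MyFont.ttf -o MyFont-fea.ttf
from fontTools.feaLib.__main__ import main

main(["features.fea", "MyFont.ttf", "-o", "MyFont-fea.ttf"])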

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,22 @@
class FeatureLibError(Exception):
def __init__(self, message, location=None):
Exception.__init__(self, message)
self.location = location
def __str__(self):
message = Exception.__str__(self)
if self.location:
return f"{self.location}: {message}"
else:
return message
class IncludedFeaNotFound(FeatureLibError):
def __str__(self):
assert self.location is not None
message = (
"The following feature file should be included but cannot be found: "
f"{Exception.__str__(self)}"
)
return f"{self.location}: {message}"

File diff suppressed because it is too large


@@ -0,0 +1,287 @@
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os
try:
import cython
except ImportError:
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
class Lexer(object):
NUMBER = "NUMBER"
HEXADECIMAL = "HEXADECIMAL"
OCTAL = "OCTAL"
NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
FLOAT = "FLOAT"
STRING = "STRING"
NAME = "NAME"
FILENAME = "FILENAME"
GLYPHCLASS = "GLYPHCLASS"
CID = "CID"
SYMBOL = "SYMBOL"
COMMENT = "COMMENT"
NEWLINE = "NEWLINE"
ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"
CHAR_WHITESPACE_ = " \t"
CHAR_NEWLINE_ = "\r\n"
CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
CHAR_DIGIT_ = "0123456789"
CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"
RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")
MODE_NORMAL_ = "NORMAL"
MODE_FILENAME_ = "FILENAME"
def __init__(self, text, filename):
self.filename_ = filename
self.line_ = 1
self.pos_ = 0
self.line_start_ = 0
self.text_ = text
self.text_length_ = len(text)
self.mode_ = Lexer.MODE_NORMAL_
def __iter__(self):
return self
def next(self): # Python 2
return self.__next__()
def __next__(self): # Python 3
while True:
token_type, token, location = self.next_()
if token_type != Lexer.NEWLINE:
return (token_type, token, location)
def location_(self):
column = self.pos_ - self.line_start_ + 1
return FeatureLibLocation(self.filename_ or "<features>", self.line_, column)
def next_(self):
self.scan_over_(Lexer.CHAR_WHITESPACE_)
location = self.location_()
start = self.pos_
text = self.text_
limit = len(text)
if start >= limit:
raise StopIteration()
cur_char = text[start]
next_char = text[start + 1] if start + 1 < limit else None
if cur_char == "\n":
self.pos_ += 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "\r":
self.pos_ += 2 if next_char == "\n" else 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "#":
self.scan_until_(Lexer.CHAR_NEWLINE_)
return (Lexer.COMMENT, text[start : self.pos_], location)
if self.mode_ is Lexer.MODE_FILENAME_:
if cur_char != "(":
raise FeatureLibError("Expected '(' before file name", location)
self.scan_until_(")")
cur_char = text[self.pos_] if self.pos_ < limit else None
if cur_char != ")":
raise FeatureLibError("Expected ')' after file name", location)
self.pos_ += 1
self.mode_ = Lexer.MODE_NORMAL_
return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)
if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
if cur_char == "@":
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
glyphclass = text[start + 1 : self.pos_]
if len(glyphclass) < 1:
raise FeatureLibError("Expected glyph class name", location)
if not Lexer.RE_GLYPHCLASS.match(glyphclass):
raise FeatureLibError(
"Glyph class names must consist of letters, digits, "
"underscore, period or hyphen",
location,
)
return (Lexer.GLYPHCLASS, glyphclass, location)
if cur_char in Lexer.CHAR_NAME_START_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
token = text[start : self.pos_]
if token == "include":
self.mode_ = Lexer.MODE_FILENAME_
return (Lexer.NAME, token, location)
if cur_char == "0" and next_char in "xX":
self.pos_ += 2
self.scan_over_(Lexer.CHAR_HEXDIGIT_)
return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
if cur_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
if self.pos_ >= limit or text[self.pos_] != ".":
return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
self.scan_over_(".")
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.FLOAT, float(text[start : self.pos_]), location)
if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
if self.pos_ >= limit or text[self.pos_] != ".":
return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
self.scan_over_(".")
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.FLOAT, float(text[start : self.pos_]), location)
if cur_char in Lexer.CHAR_SYMBOL_:
self.pos_ += 1
return (Lexer.SYMBOL, cur_char, location)
if cur_char == '"':
self.pos_ += 1
self.scan_until_('"')
if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
self.pos_ += 1
# strip newlines embedded within a string
string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
return (Lexer.STRING, string, location)
else:
raise FeatureLibError("Expected '\"' to terminate string", location)
raise FeatureLibError("Unexpected character: %r" % cur_char, location)
def scan_over_(self, valid):
p = self.pos_
while p < self.text_length_ and self.text_[p] in valid:
p += 1
self.pos_ = p
def scan_until_(self, stop_at):
p = self.pos_
while p < self.text_length_ and self.text_[p] not in stop_at:
p += 1
self.pos_ = p
def scan_anonymous_block(self, tag):
location = self.location_()
tag = tag.strip()
self.scan_until_(Lexer.CHAR_NEWLINE_)
self.scan_over_(Lexer.CHAR_NEWLINE_)
regexp = r"}\s*" + tag + r"\s*;"
split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
if len(split) != 2:
raise FeatureLibError(
"Expected '} %s;' to terminate anonymous block" % tag, location
)
self.pos_ += len(split[0])
return (Lexer.ANONYMOUS_BLOCK, split[0], location)
class IncludingLexer(object):
"""A Lexer that follows include statements.
The OpenType feature file specification states that due to
historical reasons, relative imports should be resolved in this
order:
1. If the source font is UFO format, then relative to the UFO's
font directory
2. relative to the top-level include file
3. relative to the parent include file
We only support 1 (via includeDir) and 2.
"""
def __init__(self, featurefile, *, includeDir=None):
"""Initializes an IncludingLexer.
Behavior:
If includeDir is passed, it will be used to determine the top-level
include directory to use for all encountered include statements. If it is
not passed, ``os.path.dirname(featurefile)`` will be considered the
include directory.
"""
self.lexers_ = [self.make_lexer_(featurefile)]
self.featurefilepath = self.lexers_[0].filename_
self.includeDir = includeDir
def __iter__(self):
return self
def next(self): # Python 2
return self.__next__()
def __next__(self): # Python 3
while self.lexers_:
lexer = self.lexers_[-1]
try:
token_type, token, location = next(lexer)
except StopIteration:
self.lexers_.pop()
continue
if token_type is Lexer.NAME and token == "include":
fname_type, fname_token, fname_location = lexer.next()
if fname_type is not Lexer.FILENAME:
raise FeatureLibError("Expected file name", fname_location)
# semi_type, semi_token, semi_location = lexer.next()
# if semi_type is not Lexer.SYMBOL or semi_token != ";":
# raise FeatureLibError("Expected ';'", semi_location)
if os.path.isabs(fname_token):
path = fname_token
else:
if self.includeDir is not None:
curpath = self.includeDir
elif self.featurefilepath is not None:
curpath = os.path.dirname(self.featurefilepath)
else:
# if the IncludingLexer was initialized from an in-memory
# file-like stream, it doesn't have a 'name' pointing to
# its filesystem path, therefore we fall back to using the
# current working directory to resolve relative includes
curpath = os.getcwd()
path = os.path.join(curpath, fname_token)
if len(self.lexers_) >= 5:
raise FeatureLibError("Too many recursive includes", fname_location)
try:
self.lexers_.append(self.make_lexer_(path))
except FileNotFoundError as err:
raise IncludedFeaNotFound(fname_token, fname_location) from err
else:
return (token_type, token, location)
raise StopIteration()
@staticmethod
def make_lexer_(file_or_path):
if hasattr(file_or_path, "read"):
fileobj, closing = file_or_path, False
else:
filename, closing = file_or_path, True
fileobj = open(filename, "r", encoding="utf-8-sig")
data = fileobj.read()
filename = getattr(fileobj, "name", None)
if closing:
fileobj.close()
return Lexer(data, filename)
def scan_anonymous_block(self, tag):
return self.lexers_[-1].scan_anonymous_block(tag)
class NonIncludingLexer(IncludingLexer):
"""Lexer that does not follow `include` statements, emits them as-is."""
def __next__(self): # Python 3
return next(self.lexers_[0])
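
A short sketch of the lexer on a one-line rule; the second argument is the file name used in locations, here a placeholder label:

from fontTools.feaLib.lexer import Lexer

for token_type, token, location in Lexer("sub f i by f_i;", "<string>"):
    print(token_type, token, location)
# NAME sub <string>:1:1
# NAME f <string>:1:5
# NAME i <string>:1:7
# NAME by <string>:1:9
# NAME f_i <string>:1:12
# SYMBOL ; <string>:1:15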


@@ -0,0 +1,12 @@
from typing import NamedTuple
class FeatureLibLocation(NamedTuple):
"""A location in a feature file"""
file: str
line: int
column: int
def __str__(self):
return f"{self.file}:{self.line}:{self.column}"


@@ -0,0 +1,12 @@
from typing import NamedTuple
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
class LookupDebugInfo(NamedTuple):
"""Information about where a lookup came from, to be embedded in a font"""
location: str
name: str
feature: list
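
An illustrative construction; the field values here are assumed, not taken from feaLib itself:

from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo

info = LookupDebugInfo(
    location="features.fea:12:5",
    name="ligature_lookup_0",
    feature=["GSUB", "liga", "latn/dflt"],
)
print(info.location)  # features.fea:12:5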

File diff suppressed because it is too large


@@ -0,0 +1,118 @@
from fontTools.varLib.models import VariationModel, normalizeValue, piecewiseLinearMap
def Location(loc):
return tuple(sorted(loc.items()))
class VariableScalar:
"""A scalar with different values at different points in the designspace."""
    def __init__(self, location_value=None):
        self.values = {}
        self.axes = {}
        # avoid a shared mutable default argument; None means "no initial values"
        for location, value in (location_value or {}).items():
            self.add_value(location, value)
def __repr__(self):
items = []
for location, value in self.values.items():
loc = ",".join(
[
f"{ax}={int(coord) if float(coord).is_integer() else coord}"
for ax, coord in location
]
)
items.append("%s:%i" % (loc, value))
return "(" + (" ".join(items)) + ")"
@property
def does_vary(self):
values = list(self.values.values())
return any(v != values[0] for v in values[1:])
@property
def axes_dict(self):
if not self.axes:
raise ValueError(
".axes must be defined on variable scalar before interpolating"
)
return {ax.axisTag: ax for ax in self.axes}
def _normalized_location(self, location):
location = self.fix_location(location)
normalized_location = {}
for axtag in location.keys():
if axtag not in self.axes_dict:
raise ValueError("Unknown axis %s in %s" % (axtag, location))
axis = self.axes_dict[axtag]
normalized_location[axtag] = normalizeValue(
location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
)
return Location(normalized_location)
def fix_location(self, location):
location = dict(location)
for tag, axis in self.axes_dict.items():
if tag not in location:
location[tag] = axis.defaultValue
return location
def add_value(self, location, value):
if self.axes:
location = self.fix_location(location)
self.values[Location(location)] = value
def fix_all_locations(self):
self.values = {
Location(self.fix_location(l)): v for l, v in self.values.items()
}
@property
def default(self):
self.fix_all_locations()
key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
if key not in self.values:
raise ValueError("Default value could not be found")
# I *guess* we could interpolate one, but I don't know how.
return self.values[key]
def value_at_location(self, location, model_cache=None, avar=None):
loc = Location(location)
if loc in self.values.keys():
return self.values[loc]
values = list(self.values.values())
loc = dict(self._normalized_location(loc))
return self.model(model_cache, avar).interpolateFromMasters(loc, values)
def model(self, model_cache=None, avar=None):
if model_cache is not None:
key = tuple(self.values.keys())
if key in model_cache:
return model_cache[key]
locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
if avar is not None:
mapping = avar.segments
locations = [
{
k: piecewiseLinearMap(v, mapping[k]) if k in mapping else v
for k, v in location.items()
}
for location in locations
]
m = VariationModel(locations)
if model_cache is not None:
model_cache[key] = m
return m
def get_deltas_and_supports(self, model_cache=None, avar=None):
values = list(self.values.values())
return self.model(model_cache, avar).getDeltasAndSupports(values)
def add_to_variation_store(self, store_builder, model_cache=None, avar=None):
deltas, supports = self.get_deltas_and_supports(model_cache, avar)
store_builder.setSupports(supports)
index = store_builder.storeDeltas(deltas)
return int(self.default), index
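
A usage sketch, assuming fvar-style axis objects (anything exposing the axisTag, minValue, defaultValue and maxValue attributes read by axes_dict will do):

from fontTools.feaLib.variableScalar import VariableScalar
from fontTools.ttLib.tables._f_v_a_r import Axis

wght = Axis()
wght.axisTag = "wght"
wght.minValue, wght.defaultValue, wght.maxValue = 100, 400, 900

scalar = VariableScalar()
scalar.axes = [wght]
scalar.add_value({"wght": 400}, 0)
scalar.add_value({"wght": 900}, 100)

print(scalar.default)                           # 0
print(scalar.value_at_location({"wght": 650}))  # 50.0, linear between masters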

File diff suppressed because it is too large


@@ -0,0 +1,36 @@
import pkgutil
import sys
import fontTools
def main():
    """Show this help"""
    descriptions = {}
    for pkg in sorted(
        mod.name
        for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.")
    ):
        try:
            imports = __import__(pkg, globals(), locals(), ["main"])
        except ImportError:
            continue
try:
description = imports.main.__doc__
# Cython modules seem to return "main()" as the docstring
if description and description != "main()":
pkg = pkg.replace("fontTools.", "").replace(".__main__", "")
# show the docstring's first line only
descriptions[pkg] = description.splitlines()[0]
        except AttributeError:
pass
for pkg, description in descriptions.items():
print("fonttools %-25s %s" % (pkg, description), file=sys.stderr)
if __name__ == "__main__":
print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr)
main()
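
An example session (output abridged; the exact list depends on which submodules import cleanly):

#   $ fonttools help
#   fonttools feaLib        Add features from a feature file (.fea) into an OTF font
#   fonttools merge         Merge multiple fonts into one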


@@ -0,0 +1,248 @@
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import (
computeMegaGlyphOrder,
computeMegaCmap,
renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
from fontTools.misc.loggingTools import Timer
from functools import reduce
import sys
import logging
log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
class Merger(object):
"""Font merger.
This class merges multiple files into a single OpenType font, taking into
account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
cross-font metrics (for example ``hhea.ascent`` is set to the maximum value
across all the fonts).
    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    heights), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft.1`` and ``parenright.1``,
    and a lookup will be inserted into the ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft.1``, etc.
Restrictions:
- All fonts must have the same units per em.
- If duplicate glyph disambiguation takes place as described above then the
fonts must have a ``GSUB`` table.
Attributes:
        options: An Options instance; its ``drop_tables`` list, for example, is honored during the merge.
"""
def __init__(self, options=None):
if not options:
options = Options()
self.options = options
def _openFonts(self, fontfiles):
fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
for font, fontfile in zip(fonts, fontfiles):
font._merger__fontfile = fontfile
font._merger__name = font["name"].getDebugName(4)
return fonts
def merge(self, fontfiles):
"""Merges fonts together.
Args:
fontfiles: A list of file names to be merged
Returns:
A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
this to write it out to an OTF file.
"""
#
# Settle on a mega glyph order.
#
fonts = self._openFonts(fontfiles)
glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
computeMegaGlyphOrder(self, glyphOrders)
# Take first input file sfntVersion
sfntVersion = fonts[0].sfntVersion
# Reload fonts and set new glyph names on them.
fonts = self._openFonts(fontfiles)
for font, glyphOrder in zip(fonts, glyphOrders):
font.setGlyphOrder(glyphOrder)
if "CFF " in font:
renameCFFCharStrings(self, glyphOrder, font["CFF "])
cmaps = [font["cmap"] for font in fonts]
self.duplicateGlyphsPerFont = [{} for _ in fonts]
computeMegaCmap(self, cmaps)
mega = ttLib.TTFont(sfntVersion=sfntVersion)
mega.setGlyphOrder(self.glyphOrder)
for font in fonts:
self._preMerge(font)
self.fonts = fonts
allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
allTags.remove("GlyphOrder")
for tag in sorted(allTags):
if tag in self.options.drop_tables:
continue
with timer("merge '%s'" % tag):
tables = [font.get(tag, NotImplemented) for font in fonts]
log.info("Merging '%s'.", tag)
clazz = ttLib.getTableClass(tag)
table = clazz(tag).merge(self, tables)
# XXX Clean this up and use: table = mergeObjects(tables)
if table is not NotImplemented and table is not False:
mega[tag] = table
log.info("Merged '%s'.", tag)
else:
log.info("Dropped '%s'.", tag)
del self.duplicateGlyphsPerFont
del self.fonts
self._postMerge(mega)
return mega
def mergeObjects(self, returnTable, logic, tables):
# Right now we don't use self at all. Will use in the future
# for options and logging.
allKeys = set.union(
set(),
*(vars(table).keys() for table in tables if table is not NotImplemented),
)
for key in allKeys:
log.info(" %s", key)
try:
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic["*"]
except KeyError:
raise Exception(
"Don't know how to merge key %s of class %s"
% (key, returnTable.__class__.__name__)
)
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
if value is not NotImplemented:
setattr(returnTable, key, value)
return returnTable
def _preMerge(self, font):
layoutPreMerge(font)
def _postMerge(self, font):
layoutPostMerge(font)
if "OS/2" in font:
# https://github.com/fonttools/fonttools/issues/2538
# TODO: Add an option to disable this?
font["OS/2"].recalcAvgCharWidth(font)
__all__ = ["Options", "Merger", "main"]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
"""Merge multiple fonts into one"""
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
options = Options()
args = options.parse_opts(args)
fontfiles = []
if options.input_file:
with open(options.input_file) as inputfile:
fontfiles = [
line.strip()
for line in inputfile.readlines()
if not line.lstrip().startswith("#")
]
for g in args:
fontfiles.append(g)
if len(fontfiles) < 1:
print(
"usage: fonttools merge [font1 ... fontN] [--input-file=filelist.txt] [--output-file=merged.ttf] [--import-file=tables.ttx]",
file=sys.stderr,
)
print(
" [--drop-tables=tags] [--verbose] [--timing]",
file=sys.stderr,
)
print("", file=sys.stderr)
print(" font1 ... fontN Files to merge.", file=sys.stderr)
print(
" --input-file=<filename> Read files to merge from a text file, each path new line. # Comment lines allowed.",
file=sys.stderr,
)
print(
" --output-file=<filename> Specify output file name (default: merged.ttf).",
file=sys.stderr,
)
print(
" --import-file=<filename> TTX file to import after merging. This can be used to set metadata.",
file=sys.stderr,
)
print(
" --drop-tables=<table tags> Comma separated list of table tags to skip, case sensitive.",
file=sys.stderr,
)
print(
" --verbose Output progress information.",
file=sys.stderr,
)
print(" --timing Output progress timing.", file=sys.stderr)
return 1
configLogger(level=logging.INFO if options.verbose else logging.WARNING)
if options.timing:
timer.logger.setLevel(logging.DEBUG)
else:
timer.logger.disabled = True
merger = Merger(options=options)
font = merger.merge(fontfiles)
if options.import_file:
font.importXML(options.import_file)
with timer("compile and save font"):
font.save(options.output_file)
if __name__ == "__main__":
sys.exit(main())
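
The merge can also be driven from Python; a minimal sketch with hypothetical font paths:

from fontTools.merge import Merger

merger = Merger()
merged = merger.merge(["NotoSans-Regular.ttf", "NotoSansArabic-Regular.ttf"])
merged.save("merged.ttf")  # merge() returns a TTFont; save() writes it out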


@@ -0,0 +1,6 @@
import sys
from fontTools.merge import main
if __name__ == "__main__":
sys.exit(main())

Some files were not shown because too many files have changed in this diff