Commit cffb31c1ef (parent 70a5c3465c): "up follow livre"
12198 changed files with 2562132 additions and 35 deletions
venv/lib/python3.13/site-packages/fontTools/varLib/__init__.py (new file, 1596 lines)
(File diff suppressed because it is too large.)
@@ -0,0 +1,6 @@
import sys
from fontTools.varLib import main


if __name__ == "__main__":
    sys.exit(main())
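
This six-line hunk (its file header is not captured above) is a module-runner shim that delegates to fontTools.varLib.main(). Assuming it is varLib/__main__.py, a minimal sketch of the equivalent programmatic call (the designspace path is hypothetical):

from fontTools.varLib import main

# Same as: python -m fontTools.varLib MyFamily.designspace
main(["MyFamily.designspace"])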
24 binary files not shown.
venv/lib/python3.13/site-packages/fontTools/varLib/avar.py (new file, 260 lines)
@@ -0,0 +1,260 @@
from fontTools.varLib import _add_avar, load_designspace
from fontTools.varLib.models import VariationModel
from fontTools.varLib.varStore import VarStoreInstancer
from fontTools.misc.fixedTools import fixedToFloat as fi2fl
from fontTools.misc.cliTools import makeOutputFileName
from itertools import product
import logging

log = logging.getLogger("fontTools.varLib.avar")


def _denormalize(v, axis):
    if v >= 0:
        return axis.defaultValue + v * (axis.maxValue - axis.defaultValue)
    else:
        return axis.defaultValue + v * (axis.defaultValue - axis.minValue)
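
# Illustrative sketch (not part of the file): _denormalize() on a
# hypothetical weight axis with min=100, default=400, max=900.
from types import SimpleNamespace

_wght_axis = SimpleNamespace(minValue=100, defaultValue=400, maxValue=900)
assert _denormalize(0.5, _wght_axis) == 650  # halfway from default to max
assert _denormalize(-0.5, _wght_axis) == 250  # halfway from default to min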


def _pruneLocations(locations, poles, axisTags):
    # Now we have all the input locations; find which ones are
    # not needed and remove them.

    # Note: This algorithm is heavily tied to how VariationModel
    # is implemented. It assumes that the input was extracted from a
    # VariationModel-generated object, like an ItemVariationStore
    # created by fontmake using varLib.models.VariationModel.
    #
    # Its correctness has not been rigorously proven; edge cases
    # that it mishandles may exist.

    model = VariationModel(locations, axisTags)
    modelMapping = model.mapping
    modelSupports = model.supports
    pins = {tuple(k.items()): None for k in poles}
    for location in poles:
        i = locations.index(location)
        i = modelMapping[i]
        support = modelSupports[i]
        supportAxes = set(support.keys())
        for axisTag, (minV, _, maxV) in support.items():
            for v in (minV, maxV):
                if v in (-1, 0, 1):
                    continue
                for pin in pins.keys():
                    pinLocation = dict(pin)
                    pinAxes = set(pinLocation.keys())
                    if pinAxes != supportAxes:
                        continue
                    if axisTag not in pinAxes:
                        continue
                    if pinLocation[axisTag] == v:
                        break
                else:
                    # No pin found. Go through the previous masters
                    # and find a suitable pin. Going backwards is
                    # better because it can find a pin that is close
                    # to the pole in more dimensions, reducing
                    # the total number of pins needed.
                    for candidateIdx in range(i - 1, -1, -1):
                        candidate = modelSupports[candidateIdx]
                        candidateAxes = set(candidate.keys())
                        if candidateAxes != supportAxes:
                            continue
                        if axisTag not in candidateAxes:
                            continue
                        candidate = {
                            k: defaultV for k, (_, defaultV, _) in candidate.items()
                        }
                        if candidate[axisTag] == v:
                            pins[tuple(candidate.items())] = None
                            break
                    else:
                        assert False, "No pin found"
    return [dict(t) for t in pins.keys()]


def mappings_from_avar(font, denormalize=True):
    fvarAxes = font["fvar"].axes
    axisMap = {a.axisTag: a for a in fvarAxes}
    axisTags = [a.axisTag for a in fvarAxes]
    axisIndexes = {a.axisTag: i for i, a in enumerate(fvarAxes)}
    if "avar" not in font:
        return {}, {}
    avar = font["avar"]
    axisMaps = {
        tag: seg
        for tag, seg in avar.segments.items()
        if seg and seg != {-1: -1, 0: 0, 1: 1}
    }
    mappings = []

    if getattr(avar, "majorVersion", 1) == 2:
        varStore = avar.table.VarStore
        regions = varStore.VarRegionList.Region

        # Find all the input locations; this finds "poles", which are the
        # locations of the peaks, and "corners", which are the locations
        # of the corners of the regions. These two sets of locations
        # together constitute the input locations to consider.

        poles = {(): None}  # Just using it as an ordered set
        inputLocations = set({()})
        for varData in varStore.VarData:
            regionIndices = varData.VarRegionIndex
            for regionIndex in regionIndices:
                peakLocation = []
                corners = []
                region = regions[regionIndex]
                for axisIndex, axis in enumerate(region.VarRegionAxis):
                    if axis.PeakCoord == 0:
                        continue
                    axisTag = axisTags[axisIndex]
                    peakLocation.append((axisTag, axis.PeakCoord))
                    corner = []
                    if axis.StartCoord != 0:
                        corner.append((axisTag, axis.StartCoord))
                    if axis.EndCoord != 0:
                        corner.append((axisTag, axis.EndCoord))
                    corners.append(corner)
                corners = set(product(*corners))
                peakLocation = tuple(peakLocation)
                poles[peakLocation] = None
                inputLocations.add(peakLocation)
                inputLocations.update(corners)

        # Sort them by number of axes, then by axis order
        inputLocations = [
            dict(t)
            for t in sorted(
                inputLocations,
                key=lambda t: (len(t), tuple(axisIndexes[tag] for tag, _ in t)),
            )
        ]
        poles = [dict(t) for t in poles.keys()]
        inputLocations = _pruneLocations(inputLocations, list(poles), axisTags)

        # Find the output locations, at input locations
        varIdxMap = avar.table.VarIdxMap
        instancer = VarStoreInstancer(varStore, fvarAxes)
        for location in inputLocations:
            instancer.setLocation(location)
            outputLocation = {}
            for axisIndex, axisTag in enumerate(axisTags):
                varIdx = axisIndex
                if varIdxMap is not None:
                    varIdx = varIdxMap[varIdx]
                delta = instancer[varIdx]
                if delta != 0:
                    v = location.get(axisTag, 0)
                    v = v + fi2fl(delta, 14)
                    # See https://github.com/fonttools/fonttools/pull/3598#issuecomment-2266082009
                    # v = max(-1, min(1, v))
                    outputLocation[axisTag] = v
            mappings.append((location, outputLocation))

        # Remove base master we added, if it maps to the default location
        assert mappings[0][0] == {}
        if mappings[0][1] == {}:
            mappings.pop(0)

    if denormalize:
        for tag, seg in axisMaps.items():
            if tag not in axisMap:
                raise ValueError(f"Unknown axis tag {tag}")
            denorm = lambda v: _denormalize(v, axisMap[tag])
            axisMaps[tag] = {denorm(k): denorm(v) for k, v in seg.items()}

        for i, (inputLoc, outputLoc) in enumerate(mappings):
            inputLoc = {
                tag: _denormalize(val, axisMap[tag]) for tag, val in inputLoc.items()
            }
            outputLoc = {
                tag: _denormalize(val, axisMap[tag]) for tag, val in outputLoc.items()
            }
            mappings[i] = (inputLoc, outputLoc)

    return axisMaps, mappings
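
# Illustrative usage sketch (not part of the file); "MyVF.ttf" is a
# hypothetical variable-font path.
def _example_mappings_from_avar():
    from fontTools.ttLib import TTFont

    font = TTFont("MyVF.ttf")
    segments, mappings = mappings_from_avar(font)  # denormalized by default
    for inputLoc, outputLoc in mappings:  # avar version 2 records only
        print(inputLoc, "->", outputLoc)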


def main(args=None):
    """Add `avar` table from designspace file to variable font."""

    if args is None:
        import sys

        args = sys.argv[1:]

    from fontTools import configLogger
    from fontTools.ttLib import TTFont
    from fontTools.designspaceLib import DesignSpaceDocument
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.avar",
        description="Add `avar` table from designspace file to variable font.",
    )
    parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
    parser.add_argument(
        "designspace",
        metavar="family.designspace",
        help="Designspace file.",
        nargs="?",
        default=None,
    )
    parser.add_argument(
        "-o",
        "--output-file",
        type=str,
        help="Output font file name.",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )

    options = parser.parse_args(args)

    configLogger(level=("INFO" if options.verbose else "WARNING"))

    font = TTFont(options.font)
    if "fvar" not in font:
        log.error("Not a variable font.")
        return 1

    if options.designspace is None:
        from pprint import pprint

        segments, mappings = mappings_from_avar(font)
        pprint(segments)
        pprint(mappings)
        print(len(mappings), "mappings")
        return

    axisTags = [a.axisTag for a in font["fvar"].axes]

    ds = load_designspace(options.designspace, require_sources=False)

    if "avar" in font:
        log.warning("avar table already present, overwriting.")
        del font["avar"]

    _add_avar(font, ds.axes, ds.axisMappings, axisTags)

    if options.output_file is None:
        outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")
    else:
        outfile = options.output_file
    if outfile:
        log.info("Saving %s", outfile)
        font.save(outfile)


if __name__ == "__main__":
    import sys

    sys.exit(main())
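
A minimal sketch of driving this tool programmatically rather than via the fonttools CLI (all paths are hypothetical):

from fontTools.varLib.avar import main

# Equivalent to: fonttools varLib.avar MyVF.ttf family.designspace -o out.ttf
main(["MyVF.ttf", "family.designspace", "-o", "out.ttf"])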
venv/lib/python3.13/site-packages/fontTools/varLib/avarPlanner.py (new file, 1004 lines)
(File diff suppressed because it is too large.)
venv/lib/python3.13/site-packages/fontTools/varLib/builder.py (new file, 215 lines)
@@ -0,0 +1,215 @@
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot

# VariationStore


def buildVarRegionAxis(axisSupport):
    self = ot.VarRegionAxis()
    self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
    return self


def buildSparseVarRegionAxis(axisIndex, axisSupport):
    self = ot.SparseVarRegionAxis()
    self.AxisIndex = axisIndex
    self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
    return self


def buildVarRegion(support, axisTags):
    assert all(tag in axisTags for tag in support.keys()), (
        "Unknown axis tag found.",
        support,
        axisTags,
    )
    self = ot.VarRegion()
    self.VarRegionAxis = []
    for tag in axisTags:
        self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0))))
    return self


def buildSparseVarRegion(support, axisTags):
    assert all(tag in axisTags for tag in support.keys()), (
        "Unknown axis tag found.",
        support,
        axisTags,
    )
    self = ot.SparseVarRegion()
    self.SparseVarRegionAxis = []
    for i, tag in enumerate(axisTags):
        if tag not in support:
            continue
        self.SparseVarRegionAxis.append(
            buildSparseVarRegionAxis(i, support.get(tag, (0, 0, 0)))
        )
    self.SparseRegionCount = len(self.SparseVarRegionAxis)
    return self


def buildVarRegionList(supports, axisTags):
    self = ot.VarRegionList()
    self.RegionAxisCount = len(axisTags)
    self.Region = []
    for support in supports:
        self.Region.append(buildVarRegion(support, axisTags))
    self.RegionCount = len(self.Region)
    return self


def buildSparseVarRegionList(supports, axisTags):
    self = ot.SparseVarRegionList()
    self.RegionAxisCount = len(axisTags)
    self.Region = []
    for support in supports:
        self.Region.append(buildSparseVarRegion(support, axisTags))
    self.RegionCount = len(self.Region)
    return self


def _reorderItem(lst, mapping):
    return [lst[i] for i in mapping]


def VarData_calculateNumShorts(self, optimize=False):
    count = self.VarRegionCount
    items = self.Item
    bit_lengths = [0] * count
    for item in items:
        # The "+ (i < -1)" magic is to handle two's-complement.
        # That is, we want to get back 7 for -128, whereas
        # bit_length() returns 8. Similarly for -65536.
        # The reason "i < -1" is used instead of "i < 0" is that
        # the latter would make it return 0 for "-1" instead of 1.
        bl = [(i + (i < -1)).bit_length() for i in item]
        bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
    # The addition of 8, instead of 7, is to account for the sign bit.
    # This "((b + 8) >> 3) if b else 0", when combined with the above
    # "(i + (i < -1)).bit_length()", is a faster way to compute byte-lengths
    # conforming to:
    #
    # byte_length = (0 if i == 0 else
    #                1 if -128 <= i < 128 else
    #                2 if -65536 <= i < 65536 else
    #                ...)
    byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]

    # https://github.com/fonttools/fonttools/issues/2279
    longWords = any(b > 2 for b in byte_lengths)

    if optimize:
        # Reorder columns such that wider columns come before narrower columns
        mapping = []
        mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1)

        byte_lengths = _reorderItem(byte_lengths, mapping)
        self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
        self.VarRegionCount = len(self.VarRegionIndex)
        for i in range(len(items)):
            items[i] = _reorderItem(items[i], mapping)

    if longWords:
        self.NumShorts = (
            max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1
        )
        self.NumShorts |= 0x8000
    else:
        self.NumShorts = (
            max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1
        )

    self.VarRegionCount = len(self.VarRegionIndex)
    return self
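
# Worked sketch (not part of the file) of the byte-length computation above:
for _i, _expected in [(0, 0), (-1, 1), (127, 1), (-128, 1), (128, 2), (-129, 2)]:
    _b = (_i + (_i < -1)).bit_length()
    assert (((_b + 8) >> 3) if _b else 0) == _expected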


ot.VarData.calculateNumShorts = VarData_calculateNumShorts


def VarData_CalculateNumShorts(self, optimize=True):
    """Deprecated name for VarData_calculateNumShorts() which
    defaults to optimize=True. Use varData.calculateNumShorts()
    or varData.optimize()."""
    return VarData_calculateNumShorts(self, optimize=optimize)


def VarData_optimize(self):
    return VarData_calculateNumShorts(self, optimize=True)


ot.VarData.optimize = VarData_optimize


def buildVarData(varRegionIndices, items, optimize=True):
    self = ot.VarData()
    self.VarRegionIndex = list(varRegionIndices)
    regionCount = self.VarRegionCount = len(self.VarRegionIndex)
    records = self.Item = []
    if items:
        for item in items:
            assert len(item) == regionCount
            records.append(list(item))
    self.ItemCount = len(self.Item)
    self.calculateNumShorts(optimize=optimize)
    return self


def buildVarStore(varRegionList, varDataList):
    self = ot.VarStore()
    self.Format = 1
    self.VarRegionList = varRegionList
    self.VarData = list(varDataList)
    self.VarDataCount = len(self.VarData)
    return self


def buildMultiVarData(varRegionIndices, items):
    self = ot.MultiVarData()
    self.Format = 1
    self.VarRegionIndex = list(varRegionIndices)
    regionCount = self.VarRegionCount = len(self.VarRegionIndex)
    records = self.Item = []
    if items:
        for item in items:
            assert len(item) == regionCount
            records.append(list(item))
    self.ItemCount = len(self.Item)
    return self


def buildMultiVarStore(varRegionList, multiVarDataList):
    self = ot.MultiVarStore()
    self.Format = 1
    self.SparseVarRegionList = varRegionList
    self.MultiVarData = list(multiVarDataList)
    self.MultiVarDataCount = len(self.MultiVarData)
    return self


# Variation helpers


def buildVarIdxMap(varIdxes, glyphOrder):
    self = ot.VarIdxMap()
    self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)}
    return self


def buildDeltaSetIndexMap(varIdxes):
    mapping = list(varIdxes)
    if all(i == v for i, v in enumerate(mapping)):
        return None
    self = ot.DeltaSetIndexMap()
    self.mapping = mapping
    self.Format = 1 if len(mapping) > 0xFFFF else 0
    return self


def buildVarDevTable(varIdx):
    self = ot.Device()
    self.DeltaFormat = 0x8000
    self.StartSize = varIdx >> 16
    self.EndSize = varIdx & 0xFFFF
    return self
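
These builders compose bottom-up: axis supports become regions, regions a region list, delta rows a VarData, and the VarData plus region list a VarStore. A minimal sketch, assuming a single weight axis:

from fontTools.varLib.builder import (
    buildVarData,
    buildVarRegionList,
    buildVarStore,
)

axisTags = ["wght"]
supports = [{"wght": (0.0, 1.0, 1.0)}]  # one region peaking at wght=1
regionList = buildVarRegionList(supports, axisTags)
varData = buildVarData([0], [[100], [-50]])  # two delta-set rows, one region
store = buildVarStore(regionList, [varData])
assert store.VarDataCount == 1 and store.VarRegionList.RegionCount == 1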
venv/lib/python3.13/site-packages/fontTools/varLib/cff.py (new file, 631 lines)
@@ -0,0 +1,631 @@
from collections import namedtuple
from fontTools.cffLib import (
    maxStackLimit,
    TopDictIndex,
    buildOrder,
    topDictOperators,
    topDictOperators2,
    privateDictOperators,
    privateDictOperators2,
    FDArrayIndex,
    FontDict,
    VarStoreData,
)
from io import BytesIO
from fontTools.cffLib.specializer import specializeCommands, commandsToProgram
from fontTools.ttLib import newTable
from fontTools import varLib
from fontTools.varLib.models import allEqual
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.misc.roundTools import roundFunc
from fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor
from fontTools.pens.t2CharStringPen import T2CharStringPen
from functools import partial

from .errors import (
    VarLibCFFDictMergeError,
    VarLibCFFPointTypeMergeError,
    VarLibCFFHintTypeMergeError,
    VarLibMergeError,
)


# Backwards compatibility
MergeDictError = VarLibCFFDictMergeError
MergeTypeError = VarLibCFFPointTypeMergeError


def addCFFVarStore(varFont, varModel, varDataList, masterSupports):
    fvarTable = varFont["fvar"]
    axisKeys = [axis.axisTag for axis in fvarTable.axes]
    varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)
    varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)

    topDict = varFont["CFF2"].cff.topDictIndex[0]
    topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)
    if topDict.FDArray[0].vstore is None:
        fdArray = topDict.FDArray
        for fontDict in fdArray:
            if hasattr(fontDict, "Private"):
                fontDict.Private.vstore = topDict.VarStore


@deprecateFunction("Use fontTools.cffLib.CFFToCFF2.convertCFFToCFF2 instead.")
def convertCFFtoCFF2(varFont):
    from fontTools.cffLib.CFFToCFF2 import convertCFFToCFF2

    return convertCFFToCFF2(varFont)


def conv_to_int(num):
    if isinstance(num, float) and num.is_integer():
        return int(num)
    return num


pd_blend_fields = (
    "BlueValues",
    "OtherBlues",
    "FamilyBlues",
    "FamilyOtherBlues",
    "BlueScale",
    "BlueShift",
    "BlueFuzz",
    "StdHW",
    "StdVW",
    "StemSnapH",
    "StemSnapV",
)


def get_private(regionFDArrays, fd_index, ri, fd_map):
    region_fdArray = regionFDArrays[ri]
    region_fd_map = fd_map[fd_index]
    if ri in region_fd_map:
        region_fdIndex = region_fd_map[ri]
        private = region_fdArray[region_fdIndex].Private
    else:
        private = None
    return private


def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
    """
    I step through the FontDicts in the FDArray of the varfont TopDict.
    For each varfont FontDict:

    * step through each key in FontDict.Private.
    * For each key, step through each relevant source font Private dict, and
      build a list of values to blend.

    The 'relevant' source fonts are selected by first getting the right
    submodel using ``vsindex_dict[vsindex]``. The indices of the
    ``subModel.locations`` are mapped to source font list indices by
    assuming the latter order is the same as the order of the
    ``var_model.locations``. I can then get the index of each subModel
    location in the list of ``var_model.locations``.
    """

    topDict = top_dicts[0]
    region_top_dicts = top_dicts[1:]
    if hasattr(region_top_dicts[0], "FDArray"):
        regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
    else:
        regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
    for fd_index, font_dict in enumerate(topDict.FDArray):
        private_dict = font_dict.Private
        vsindex = getattr(private_dict, "vsindex", 0)
        # At the moment, no PrivateDict has a vsindex key, but let's support
        # how it should work. See comment at end of
        # merge_charstrings() - still need to optimize use of vsindex.
        sub_model, _ = vsindex_dict[vsindex]
        master_indices = []
        for loc in sub_model.locations[1:]:
            i = var_model.locations.index(loc) - 1
            master_indices.append(i)
        pds = [private_dict]
        last_pd = private_dict
        for ri in master_indices:
            pd = get_private(regionFDArrays, fd_index, ri, fd_map)
            # If the region font doesn't have this FontDict, just reference
            # the last one used.
            if pd is None:
                pd = last_pd
            else:
                last_pd = pd
            pds.append(pd)
        num_masters = len(pds)
        for key, value in private_dict.rawDict.items():
            dataList = []
            if key not in pd_blend_fields:
                continue
            if isinstance(value, list):
                try:
                    values = [pd.rawDict[key] for pd in pds]
                except KeyError:
                    print(
                        "Warning: {key} in default font Private dict is "
                        "missing from another font, and was "
                        "discarded.".format(key=key)
                    )
                    continue
                try:
                    values = zip(*values)
                except IndexError:
                    raise VarLibCFFDictMergeError(key, value, values)
                """
                Row 0 contains the first value from each master.
                Convert each row from absolute values to relative
                values from the previous row.
                e.g. for three masters, a list of values was:
                master 0 OtherBlues = [-217,-205]
                master 1 OtherBlues = [-234,-222]
                master 2 OtherBlues = [-188,-176]
                The call to zip() converts this to:
                [(-217, -234, -188), (-205, -222, -176)]
                and is converted finally to:
                OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]]
                """
                prev_val_list = [0] * num_masters
                any_points_differ = False
                for val_list in values:
                    rel_list = [
                        (val - prev_val_list[i]) for (i, val) in enumerate(val_list)
                    ]
                    if (not any_points_differ) and not allEqual(rel_list):
                        any_points_differ = True
                    prev_val_list = val_list
                    deltas = sub_model.getDeltas(rel_list)
                    # For PrivateDict BlueValues, the default font
                    # values are absolute, not relative to the prior value.
                    deltas[0] = val_list[0]
                    dataList.append(deltas)
                # If there are no blend values, then
                # we can collapse the blend lists.
                if not any_points_differ:
                    dataList = [data[0] for data in dataList]
            else:
                values = [pd.rawDict[key] for pd in pds]
                if not allEqual(values):
                    dataList = sub_model.getDeltas(values)
                else:
                    dataList = values[0]

            # Convert numbers with no decimal part to an int
            if isinstance(dataList, list):
                for i, item in enumerate(dataList):
                    if isinstance(item, list):
                        for j, jtem in enumerate(item):
                            dataList[i][j] = conv_to_int(jtem)
                    else:
                        dataList[i] = conv_to_int(item)
            else:
                dataList = conv_to_int(dataList)

            private_dict.rawDict[key] = dataList
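
# Worked sketch (not part of the file) of the zip() step described in the
# docstring above: per-master value lists become per-entry master tuples.
_values = [[-217, -205], [-234, -222], [-188, -176]]  # three masters
assert list(zip(*_values)) == [(-217, -234, -188), (-205, -222, -176)]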


def _cff_or_cff2(font):
    if "CFF " in font:
        return font["CFF "]
    return font["CFF2"]


def getfd_map(varFont, fonts_list):
    """Since a subset source font may have fewer FontDicts in its
    FDArray than the default font, we have to match up the FontDicts in
    the different fonts. We do this with the FDSelect array, and by
    assuming that the same glyph will reference matching FontDicts in
    each source font. We return a mapping from fdIndex in the default
    font to a dictionary which maps each master list index of each
    region font to the equivalent fdIndex in the region font."""
    fd_map = {}
    default_font = fonts_list[0]
    region_fonts = fonts_list[1:]
    num_regions = len(region_fonts)
    topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]
    if not hasattr(topDict, "FDSelect"):
        # All glyphs reference only one FontDict.
        # Map the FD index for regions to index 0.
        fd_map[0] = {ri: 0 for ri in range(num_regions)}
        return fd_map

    gname_mapping = {}
    default_fdSelect = topDict.FDSelect
    glyphOrder = default_font.getGlyphOrder()
    for gid, fdIndex in enumerate(default_fdSelect):
        gname_mapping[glyphOrder[gid]] = fdIndex
        if fdIndex not in fd_map:
            fd_map[fdIndex] = {}
    for ri, region_font in enumerate(region_fonts):
        region_glyphOrder = region_font.getGlyphOrder()
        region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]
        if not hasattr(region_topDict, "FDSelect"):
            # All the glyphs share the same FontDict. Pick any glyph.
            default_fdIndex = gname_mapping[region_glyphOrder[0]]
            fd_map[default_fdIndex][ri] = 0
        else:
            region_fdSelect = region_topDict.FDSelect
            for gid, fdIndex in enumerate(region_fdSelect):
                default_fdIndex = gname_mapping[region_glyphOrder[gid]]
                region_map = fd_map[default_fdIndex]
                if ri not in region_map:
                    region_map[ri] = fdIndex
    return fd_map


CVarData = namedtuple("CVarData", "varDataList masterSupports vsindex_dict")


def merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder):
    topDict = varFont["CFF2"].cff.topDictIndex[0]
    top_dicts = [topDict] + [
        _cff_or_cff2(ttFont).cff.topDictIndex[0] for ttFont in ordered_fonts_list[1:]
    ]
    num_masters = len(model.mapping)
    cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)
    fd_map = getfd_map(varFont, ordered_fonts_list)
    merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)
    addCFFVarStore(varFont, model, cvData.varDataList, cvData.masterSupports)


def _get_cs(charstrings, glyphName, filterEmpty=False):
    if glyphName not in charstrings:
        return None
    cs = charstrings[glyphName]

    if filterEmpty:
        cs.decompile()
        if cs.program == []:  # CFF2 empty charstring
            return None
        elif (
            len(cs.program) <= 2
            and cs.program[-1] == "endchar"
            and (len(cs.program) == 1 or type(cs.program[0]) in (int, float))
        ):  # CFF1 empty charstring
            return None

    return cs


def _add_new_vsindex(
    model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
):
    varTupleIndexes = []
    for support in model.supports[1:]:
        if support not in masterSupports:
            masterSupports.append(support)
        varTupleIndexes.append(masterSupports.index(support))
    var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)
    vsindex = len(vsindex_dict)
    vsindex_by_key[key] = vsindex
    vsindex_dict[vsindex] = (model, [key])
    varDataList.append(var_data)
    return vsindex


def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
    vsindex_dict = {}
    vsindex_by_key = {}
    varDataList = []
    masterSupports = []
    default_charstrings = top_dicts[0].CharStrings
    for gid, gname in enumerate(glyphOrder):
        # interpret empty non-default masters as missing glyphs from a sparse master
        all_cs = [
            _get_cs(td.CharStrings, gname, i != 0) for i, td in enumerate(top_dicts)
        ]
        model, model_cs = masterModel.getSubModel(all_cs)
        # create the first pass CFF2 charstring, from
        # the default charstring.
        default_charstring = model_cs[0]
        var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
        # We need to override outlineExtractor because these
        # charstrings do have widths in the 'program'; we need to drop these
        # values rather than raise an assertion error for them.
        default_charstring.outlineExtractor = MergeOutlineExtractor
        default_charstring.draw(var_pen)

        # Add the coordinates from all the other regions to the
        # blend lists in the CFF2 charstring.
        region_cs = model_cs[1:]
        for region_idx, region_charstring in enumerate(region_cs, start=1):
            var_pen.restart(region_idx)
            region_charstring.outlineExtractor = MergeOutlineExtractor
            region_charstring.draw(var_pen)

        # Collapse each coordinate list to a blend operator and its args.
        new_cs = var_pen.getCharString(
            private=default_charstring.private,
            globalSubrs=default_charstring.globalSubrs,
            var_model=model,
            optimize=True,
        )
        default_charstrings[gname] = new_cs

        if not region_cs:
            continue

        if (not var_pen.seen_moveto) or ("blend" not in new_cs.program):
            # If this is not a marking glyph, or if there are no blend
            # arguments, then we can use vsindex 0. No need to
            # check if we need a new vsindex.
            continue

        # If the charstring required a new model, create
        # a VarData table to go with, and set vsindex.
        key = tuple(v is not None for v in all_cs)
        try:
            vsindex = vsindex_by_key[key]
        except KeyError:
            vsindex = _add_new_vsindex(
                model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
            )
        # We do not need to check for an existing new_cs.private.vsindex,
        # as we know it doesn't exist yet.
        if vsindex != 0:
            new_cs.program[:0] = [vsindex, "vsindex"]

    # If there is no variation in any of the charstrings, then vsindex_dict
    # never gets built. This could still be needed if there is variation
    # in the PrivateDict, so we will build the default data for vsindex = 0.
    if not vsindex_dict:
        key = (True,) * num_masters
        _add_new_vsindex(
            masterModel, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList
        )
    cvData = CVarData(
        varDataList=varDataList,
        masterSupports=masterSupports,
        vsindex_dict=vsindex_dict,
    )
    # XXX To do: optimize use of vsindex between the PrivateDicts and
    # charstrings
    return cvData
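
# Sketch (not part of the file): the vsindex key used above is simply the
# sparseness pattern of a glyph across masters, so glyphs present in the
# same subset of masters share one VarData/vsindex.
_all_cs = ["cs0", None, "cs2"]  # glyph missing from master 1
assert tuple(v is not None for v in _all_cs) == (True, False, True)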


class CFFToCFF2OutlineExtractor(T2OutlineExtractor):
    """This class is used to remove the initial width from the CFF
    charstring without trying to add the width to self.nominalWidthX,
    which is None."""

    def popallWidth(self, evenOdd=0):
        args = self.popall()
        if not self.gotWidth:
            if evenOdd ^ (len(args) % 2):
                args = args[1:]
            self.width = self.defaultWidthX
            self.gotWidth = 1
        return args


class MergeOutlineExtractor(CFFToCFF2OutlineExtractor):
    """Used to extract the charstring commands - including hints - from a
    CFF charstring in order to merge it as another set of region data
    into a CFF2 variable font charstring."""

    def __init__(
        self,
        pen,
        localSubrs,
        globalSubrs,
        nominalWidthX,
        defaultWidthX,
        private=None,
        blender=None,
    ):
        super().__init__(
            pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender
        )

    def countHints(self):
        args = self.popallWidth()
        self.hintCount = self.hintCount + len(args) // 2
        return args

    def _hint_op(self, type, args):
        self.pen.add_hint(type, args)

    def op_hstem(self, index):
        args = self.countHints()
        self._hint_op("hstem", args)

    def op_vstem(self, index):
        args = self.countHints()
        self._hint_op("vstem", args)

    def op_hstemhm(self, index):
        args = self.countHints()
        self._hint_op("hstemhm", args)

    def op_vstemhm(self, index):
        args = self.countHints()
        self._hint_op("vstemhm", args)

    def _get_hintmask(self, index):
        if not self.hintMaskBytes:
            args = self.countHints()
            if args:
                self._hint_op("vstemhm", args)
            self.hintMaskBytes = (self.hintCount + 7) // 8
        hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
        return index, hintMaskBytes

    def op_hintmask(self, index):
        index, hintMaskBytes = self._get_hintmask(index)
        self.pen.add_hintmask("hintmask", [hintMaskBytes])
        return hintMaskBytes, index

    def op_cntrmask(self, index):
        index, hintMaskBytes = self._get_hintmask(index)
        self.pen.add_hintmask("cntrmask", [hintMaskBytes])
        return hintMaskBytes, index


class CFF2CharStringMergePen(T2CharStringPen):
    """Pen to merge Type 2 CharStrings."""

    def __init__(
        self, default_commands, glyphName, num_masters, master_idx, roundTolerance=0.01
    ):
        # For roundTolerance see https://github.com/fonttools/fonttools/issues/2838
        super().__init__(
            width=None, glyphSet=None, CFF2=True, roundTolerance=roundTolerance
        )
        self.pt_index = 0
        self._commands = default_commands
        self.m_index = master_idx
        self.num_masters = num_masters
        self.prev_move_idx = 0
        self.seen_moveto = False
        self.glyphName = glyphName
        self.round = roundFunc(roundTolerance, round=round)

    def add_point(self, point_type, pt_coords):
        if self.m_index == 0:
            self._commands.append([point_type, [pt_coords]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != point_type:
                raise VarLibCFFPointTypeMergeError(
                    point_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
                )
            cmd[1].append(pt_coords)
        self.pt_index += 1

    def add_hint(self, hint_type, args):
        if self.m_index == 0:
            self._commands.append([hint_type, [args]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != hint_type:
                raise VarLibCFFHintTypeMergeError(
                    hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
                )
            cmd[1].append(args)
        self.pt_index += 1

    def add_hintmask(self, hint_type, abs_args):
        # For hintmask, fonttools.cffLib.specializer.py expects
        # each of these to be represented by two sequential commands:
        # first holding only the operator name, with an empty arg list,
        # second with an empty string as the op name, and the mask arg list.
        if self.m_index == 0:
            self._commands.append([hint_type, []])
            self._commands.append(["", [abs_args]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != hint_type:
                raise VarLibCFFHintTypeMergeError(
                    hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName
                )
            self.pt_index += 1
            cmd = self._commands[self.pt_index]
            cmd[1].append(abs_args)
        self.pt_index += 1

    def _moveTo(self, pt):
        if not self.seen_moveto:
            self.seen_moveto = True
        pt_coords = self._p(pt)
        self.add_point("rmoveto", pt_coords)
        # I set prev_move_idx here because add_point()
        # can change self.pt_index.
        self.prev_move_idx = self.pt_index - 1

    def _lineTo(self, pt):
        pt_coords = self._p(pt)
        self.add_point("rlineto", pt_coords)

    def _curveToOne(self, pt1, pt2, pt3):
        _p = self._p
        pt_coords = _p(pt1) + _p(pt2) + _p(pt3)
        self.add_point("rrcurveto", pt_coords)

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def restart(self, region_idx):
        self.pt_index = 0
        self.m_index = region_idx
        self._p0 = (0, 0)

    def getCommands(self):
        return self._commands

    def reorder_blend_args(self, commands, get_delta_func):
        """
        We first re-order the master coordinate values.
        For a moveto to lineto, the args are now arranged as::

            [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]

        We re-arrange this to::

            [ [master_0 x, master_1 x, master_2 x],
              [master_0 y, master_1 y, master_2 y]
            ]

        If the master values are all the same, we collapse the list to
        a single value instead of a list.

        We then convert this to::

            [ [master_0 x] + [x delta tuple] + [numBlends=1]
              [master_0 y] + [y delta tuple] + [numBlends=1]
            ]
        """
        for cmd in commands:
            # arg[i] is the set of arguments for this operator from master i.
            args = cmd[1]
            m_args = zip(*args)
            # m_args[n] is now all num_master args for the n'th argument
            # of this operation.
            cmd[1] = list(m_args)
        lastOp = None
        for cmd in commands:
            op = cmd[0]
            # masks are represented by two cmd's: first has only op names,
            # second has only args.
            if lastOp in ["hintmask", "cntrmask"]:
                coord = list(cmd[1])
                if not allEqual(coord):
                    raise VarLibMergeError(
                        "Hintmask values cannot differ between source fonts."
                    )
                cmd[1] = [coord[0][0]]
            else:
                coords = cmd[1]
                new_coords = []
                for coord in coords:
                    if allEqual(coord):
                        new_coords.append(coord[0])
                    else:
                        # convert to deltas
                        deltas = get_delta_func(coord)[1:]
                        coord = [coord[0]] + deltas
                        coord.append(1)
                        new_coords.append(coord)
                cmd[1] = new_coords
            lastOp = op
        return commands

    def getCharString(
        self, private=None, globalSubrs=None, var_model=None, optimize=True
    ):
        commands = self._commands
        commands = self.reorder_blend_args(
            commands, partial(var_model.getDeltas, round=self.round)
        )
        if optimize:
            commands = specializeCommands(
                commands, generalizeFirst=False, maxstack=maxStackLimit
            )
        program = commandsToProgram(commands)
        charString = T2CharString(
            program=program, private=private, globalSubrs=globalSubrs
        )
        return charString
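
To make the blend conversion in reorder_blend_args() concrete, a small sketch (the delta values here are made up for illustration):

args = [(10, 20), (12, 22), (11, 21)]  # (x, y) point args from masters 0..2
columns = list(zip(*args))             # transpose to per-coordinate columns
assert columns == [(10, 12, 11), (20, 22, 21)]
deltas = [2, 1]                        # hypothetical get_delta_func(columns[0])[1:]
blended = [columns[0][0]] + deltas + [1]  # [default, deltas..., numBlends]
assert blended == [10, 2, 1, 1]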
venv/lib/python3.13/site-packages/fontTools/varLib/errors.py (new file, 219 lines)
@@ -0,0 +1,219 @@
import textwrap


class VarLibError(Exception):
    """Base exception for the varLib module."""


class VarLibValidationError(VarLibError):
    """Raised when input data is invalid from varLib's point of view."""


class VarLibMergeError(VarLibError):
    """Raised when input data cannot be merged into a variable font."""

    def __init__(self, merger=None, **kwargs):
        self.merger = merger
        if not kwargs:
            kwargs = {}
        if "stack" in kwargs:
            self.stack = kwargs["stack"]
            del kwargs["stack"]
        else:
            self.stack = []
        self.cause = kwargs

    @property
    def reason(self):
        return self.__doc__

    def _master_name(self, ix):
        if self.merger is not None:
            ttf = self.merger.ttfs[ix]
            if "name" in ttf and ttf["name"].getBestFullName():
                return ttf["name"].getBestFullName()
            elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):
                return ttf.reader.file.name
        return f"master number {ix}"

    @property
    def offender(self):
        if "expected" in self.cause and "got" in self.cause:
            index = [x == self.cause["expected"] for x in self.cause["got"]].index(
                False
            )
            master_name = self._master_name(index)
            if "location" in self.cause:
                master_name = f"{master_name} ({self.cause['location']})"
            return index, master_name
        return None, None

    @property
    def details(self):
        if "expected" in self.cause and "got" in self.cause:
            offender_index, offender = self.offender
            got = self.cause["got"][offender_index]
            return f"Expected to see {self.stack[0]}=={self.cause['expected']!r}, instead saw {got!r}\n"
        return ""

    def __str__(self):
        offender_index, offender = self.offender
        location = ""
        if offender:
            location = f"\n\nThe problem is likely to be in {offender}:\n"
        context = "".join(reversed(self.stack))
        basic = textwrap.fill(
            f"Couldn't merge the fonts, because {self.reason}. "
            f"This happened while performing the following operation: {context}",
            width=78,
        )
        return "\n\n" + basic + location + self.details


class ShouldBeConstant(VarLibMergeError):
    """some values were different, but should have been the same"""

    @property
    def details(self):
        basic_message = super().details

        if self.stack[0] != ".FeatureCount" or self.merger is None:
            return basic_message

        assert self.stack[0] == ".FeatureCount"
        offender_index, _ = self.offender
        bad_ttf = self.merger.ttfs[offender_index]
        good_ttf = next(
            ttf
            for ttf in self.merger.ttfs
            if self.stack[-1] in ttf
            and ttf[self.stack[-1]].table.FeatureList.FeatureCount
            == self.cause["expected"]
        )

        good_features = [
            x.FeatureTag
            for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        bad_features = [
            x.FeatureTag
            for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        return basic_message + (
            "\nIncompatible features between masters.\n"
            f"Expected: {', '.join(good_features)}.\n"
            f"Got: {', '.join(bad_features)}.\n"
        )


class FoundANone(VarLibMergeError):
    """one of the values in a list was empty when it shouldn't have been"""

    @property
    def offender(self):
        index = [x is None for x in self.cause["got"]].index(True)
        return index, self._master_name(index)

    @property
    def details(self):
        cause, stack = self.cause, self.stack
        return f"{stack[0]}=={cause['got']}\n"


class NotANone(VarLibMergeError):
    """one of the values in a list was not empty when it should have been"""

    @property
    def offender(self):
        index = [x is not None for x in self.cause["got"]].index(True)
        return index, self._master_name(index)

    @property
    def details(self):
        cause, stack = self.cause, self.stack
        return f"{stack[0]}=={cause['got']}\n"


class MismatchedTypes(VarLibMergeError):
    """data had inconsistent types"""


class LengthsDiffer(VarLibMergeError):
    """a list of objects had inconsistent lengths"""


class KeysDiffer(VarLibMergeError):
    """a list of objects had different keys"""


class InconsistentGlyphOrder(VarLibMergeError):
    """the glyph order was inconsistent between masters"""


class InconsistentExtensions(VarLibMergeError):
    """the masters use extension lookups in inconsistent ways"""


class UnsupportedFormat(VarLibMergeError):
    """an OpenType subtable (%s) had a format I didn't expect"""

    def __init__(self, merger=None, **kwargs):
        super().__init__(merger, **kwargs)
        if not self.stack:
            self.stack = [".Format"]

    @property
    def reason(self):
        s = self.__doc__ % self.cause["subtable"]
        if "value" in self.cause:
            s += f" ({self.cause['value']!r})"
        return s


class InconsistentFormats(UnsupportedFormat):
    """an OpenType subtable (%s) had inconsistent formats between masters"""


class VarLibCFFMergeError(VarLibError):
    pass


class VarLibCFFDictMergeError(VarLibCFFMergeError):
    """Raised when a CFF PrivateDict cannot be merged."""

    def __init__(self, key, value, values):
        error_msg = (
            f"For the Private Dict key '{key}', the default font value list:"
            f"\n\t{value}\nhad a different number of values than a region font:"
        )
        for region_value in values:
            error_msg += f"\n\t{region_value}"
        self.args = (error_msg,)


class VarLibCFFPointTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of point type differences."""

    def __init__(self, point_type, pt_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in "
            f"master index {m_index} differs from the default font point type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VarLibCFFHintTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of hint type differences."""

    def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in "
            f"master index {m_index} differs from the default font hint type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VariationModelError(VarLibError):
    """Raised when a variation model is faulty."""
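
A small sketch of how these exceptions render when constructed directly, with no merger attached (the got/expected values are made up):

from fontTools.varLib.errors import ShouldBeConstant

err = ShouldBeConstant(got=[2, 2, 3], expected=2, stack=[".FeatureCount"])
print(err)  # names "master number 2" as the likely offender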
@@ -0,0 +1,703 @@
"""Module to build FeatureVariation tables:
|
||||
https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#featurevariations-table
|
||||
|
||||
NOTE: The API is experimental and subject to change.
|
||||
"""
|
||||
|
||||
from fontTools.misc.dictTools import hashdict
|
||||
from fontTools.misc.intTools import bit_count
|
||||
from fontTools.ttLib import newTable
|
||||
from fontTools.ttLib.tables import otTables as ot
|
||||
from fontTools.ttLib.ttVisitor import TTVisitor
|
||||
from fontTools.otlLib.builder import buildLookup, buildSingleSubstSubtable
|
||||
from collections import OrderedDict
|
||||
|
||||
from .errors import VarLibError, VarLibValidationError
|
||||
|
||||
|
||||
def addFeatureVariations(font, conditionalSubstitutions, featureTag="rvrn"):
|
||||
"""Add conditional substitutions to a Variable Font.
|
||||
|
||||
The `conditionalSubstitutions` argument is a list of (Region, Substitutions)
|
||||
tuples.
|
||||
|
||||
A Region is a list of Boxes. A Box is a dict mapping axisTags to
|
||||
(minValue, maxValue) tuples. Irrelevant axes may be omitted and they are
|
||||
interpretted as extending to end of axis in each direction. A Box represents
|
||||
an orthogonal 'rectangular' subset of an N-dimensional design space.
|
||||
A Region represents a more complex subset of an N-dimensional design space,
|
||||
ie. the union of all the Boxes in the Region.
|
||||
For efficiency, Boxes within a Region should ideally not overlap, but
|
||||
functionality is not compromised if they do.
|
||||
|
||||
The minimum and maximum values are expressed in normalized coordinates.
|
||||
|
||||
A Substitution is a dict mapping source glyph names to substitute glyph names.
|
||||
|
||||
Example:
|
||||
|
||||
# >>> f = TTFont(srcPath)
|
||||
# >>> condSubst = [
|
||||
# ... # A list of (Region, Substitution) tuples.
|
||||
# ... ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}),
|
||||
# ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
|
||||
# ... ]
|
||||
# >>> addFeatureVariations(f, condSubst)
|
||||
# >>> f.save(dstPath)
|
||||
|
||||
The `featureTag` parameter takes either a str or a iterable of str (the single str
|
||||
is kept for backwards compatibility), and defines which feature(s) will be
|
||||
associated with the feature variations.
|
||||
Note, if this is "rvrn", then the substitution lookup will be inserted at the
|
||||
beginning of the lookup list so that it is processed before others, otherwise
|
||||
for any other feature tags it will be appended last.
|
||||
"""
|
||||
|
||||
# process first when "rvrn" is the only listed tag
|
||||
featureTags = [featureTag] if isinstance(featureTag, str) else sorted(featureTag)
|
||||
processLast = "rvrn" not in featureTags or len(featureTags) > 1
|
||||
|
||||
_checkSubstitutionGlyphsExist(
|
||||
glyphNames=set(font.getGlyphOrder()),
|
||||
substitutions=conditionalSubstitutions,
|
||||
)
|
||||
|
||||
substitutions = overlayFeatureVariations(conditionalSubstitutions)
|
||||
|
||||
# turn substitution dicts into tuples of tuples, so they are hashable
|
||||
conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable(
|
||||
substitutions
|
||||
)
|
||||
if "GSUB" not in font:
|
||||
font["GSUB"] = buildGSUB()
|
||||
else:
|
||||
existingTags = _existingVariableFeatures(font["GSUB"].table).intersection(
|
||||
featureTags
|
||||
)
|
||||
if existingTags:
|
||||
raise VarLibError(
|
||||
f"FeatureVariations already exist for feature tag(s): {existingTags}"
|
||||
)
|
||||
|
||||
# setup lookups
|
||||
lookupMap = buildSubstitutionLookups(
|
||||
font["GSUB"].table, allSubstitutions, processLast
|
||||
)
|
||||
|
||||
# addFeatureVariationsRaw takes a list of
|
||||
# ( {condition}, [ lookup indices ] )
|
||||
# so rearrange our lookups to match
|
||||
conditionsAndLookups = []
|
||||
for conditionSet, substitutions in conditionalSubstitutions:
|
||||
conditionsAndLookups.append(
|
||||
(conditionSet, [lookupMap[s] for s in substitutions])
|
||||
)
|
||||
|
||||
addFeatureVariationsRaw(font, font["GSUB"].table, conditionsAndLookups, featureTags)
|
||||
|
||||
# Update OS/2.usMaxContext in case the font didn't have features before, but
|
||||
# does now, if the OS/2 table exists. The table may be required, but
|
||||
# fontTools needs to be able to deal with non-standard fonts. Since feature
|
||||
# variations are always 1:1 mappings, we can set the value to at least 1
|
||||
# instead of recomputing it with `otlLib.maxContextCalc.maxCtxFont()`.
|
||||
if (os2 := font.get("OS/2")) is not None:
|
||||
os2.usMaxContext = max(1, os2.usMaxContext)
|
||||


def _existingVariableFeatures(table):
    existingFeatureVarsTags = set()
    if hasattr(table, "FeatureVariations") and table.FeatureVariations is not None:
        features = table.FeatureList.FeatureRecord
        for fvr in table.FeatureVariations.FeatureVariationRecord:
            for ftsr in fvr.FeatureTableSubstitution.SubstitutionRecord:
                existingFeatureVarsTags.add(features[ftsr.FeatureIndex].FeatureTag)
    return existingFeatureVarsTags


def _checkSubstitutionGlyphsExist(glyphNames, substitutions):
    referencedGlyphNames = set()
    for _, substitution in substitutions:
        referencedGlyphNames |= substitution.keys()
        referencedGlyphNames |= set(substitution.values())
    missing = referencedGlyphNames - glyphNames
    if missing:
        raise VarLibValidationError(
            "Missing glyphs are referenced in conditional substitution rules:"
            f" {', '.join(missing)}"
        )
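
# Sketch (not part of the file): the validation above flags glyphs that a
# substitution references but the font does not contain.
def _example_check_substitution_glyphs():
    subs = [([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"})]
    try:
        _checkSubstitutionGlyphsExist({"dollar"}, subs)  # "dollar.rvrn" missing
    except VarLibValidationError as e:
        print(e)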


def overlayFeatureVariations(conditionalSubstitutions):
    """Compute overlaps between all conditional substitutions.

    The `conditionalSubstitutions` argument is a list of (Region, Substitutions)
    tuples.

    A Region is a list of Boxes. A Box is a dict mapping axisTags to
    (minValue, maxValue) tuples. Irrelevant axes may be omitted, and they are
    interpreted as extending to the end of the axis in each direction. A Box
    represents an orthogonal 'rectangular' subset of an N-dimensional design
    space. A Region represents a more complex subset of an N-dimensional design
    space, i.e. the union of all the Boxes in the Region.
    For efficiency, Boxes within a Region should ideally not overlap, but
    functionality is not compromised if they do.

    The minimum and maximum values are expressed in normalized coordinates.

    A Substitution is a dict mapping source glyph names to substitute glyph names.

    The returned data is in a similar but different format. Overlaps of distinct
    substitution Boxes (*not* Regions) are explicitly listed as distinct rules,
    and rules with the same Box are merged. The more specific rules appear
    earlier in the resulting list. Moreover, instead of just a dictionary of
    substitutions, a list of dictionaries is returned for substitutions
    corresponding to each unique space, with each dictionary being identical
    to one of the input substitution dictionaries. These dictionaries are not
    merged, to allow data sharing when they are converted into font tables.

    Example::

        >>> condSubst = [
        ...     # A list of (Region, Substitution) tuples.
        ...     ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
        ...     ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
        ...     ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}),
        ...     ([{"wght": (0.5, 1.0), "wdth": (-1, 1.0)}], {"dollar": "dollar.rvrn"}),
        ... ]
        >>> from pprint import pprint
        >>> pprint(overlayFeatureVariations(condSubst))
        [({'wdth': (0.5, 1.0), 'wght': (0.5, 1.0)},
          [{'dollar': 'dollar.rvrn'}, {'cent': 'cent.rvrn'}]),
         ({'wdth': (0.5, 1.0)}, [{'cent': 'cent.rvrn'}]),
         ({'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}])]

    """

    # Merge same-substitutions rules, as this creates fewer lookups.
    merged = OrderedDict()
    for value, key in conditionalSubstitutions:
        key = hashdict(key)
        if key in merged:
            merged[key].extend(value)
        else:
            merged[key] = value
    conditionalSubstitutions = [(v, dict(k)) for k, v in merged.items()]
    del merged

    # Merge same-region rules, as this is cheaper.
    # Also convert boxes to hashdict()
    #
    # Reversing is such that earlier entries win in case of conflicting substitution
    # rules for the same region.
    merged = OrderedDict()
    for key, value in reversed(conditionalSubstitutions):
        key = tuple(
            sorted(
                (hashdict(cleanupBox(k)) for k in key),
                key=lambda d: tuple(sorted(d.items())),
            )
        )
        if key in merged:
            merged[key].update(value)
        else:
            merged[key] = dict(value)
    conditionalSubstitutions = list(reversed(merged.items()))
    del merged

    # Overlay
    #
    # Rank is the bit-set of the index of all contributing layers.
    initMapInit = ((hashdict(), 0),)  # Initializer representing the entire space
    boxMap = OrderedDict(initMapInit)  # Map from Box to Rank
    for i, (currRegion, _) in enumerate(conditionalSubstitutions):
        newMap = OrderedDict(initMapInit)
        currRank = 1 << i
        for box, rank in boxMap.items():
            for currBox in currRegion:
                intersection, remainder = overlayBox(currBox, box)
                if intersection is not None:
                    intersection = hashdict(intersection)
                    newMap[intersection] = newMap.get(intersection, 0) | rank | currRank
                if remainder is not None:
                    remainder = hashdict(remainder)
                    newMap[remainder] = newMap.get(remainder, 0) | rank
        boxMap = newMap

    # Generate output
    items = []
    for box, rank in sorted(
        boxMap.items(), key=(lambda BoxAndRank: -bit_count(BoxAndRank[1]))
    ):
        # Skip any box that doesn't have any substitution.
        if rank == 0:
            continue
        substsList = []
        i = 0
        while rank:
            if rank & 1:
                substsList.append(conditionalSubstitutions[i][1])
            rank >>= 1
            i += 1
        items.append((dict(box), substsList))
    return items
|
||||
|
||||
#
# Terminology:
#
# A 'Box' is a dict representing an orthogonal "rectangular" bit of N-dimensional space.
# The keys in the dict are axis tags, the values are (minValue, maxValue) tuples.
# Missing dimensions (keys) are substituted by the default min and max values
# from the corresponding axes.
#


def overlayBox(top, bot):
    """Overlays ``top`` box on top of ``bot`` box.

    Returns two items:

    * Box for intersection of ``top`` and ``bot``, or None if they don't intersect.
    * Box for remainder of ``bot``.  Remainder box might not be exact (since the
      remainder might not be a simple box), but is inclusive of the exact
      remainder.
    """

    # Intersection
    intersection = {}
    intersection.update(top)
    intersection.update(bot)
    for axisTag in set(top) & set(bot):
        min1, max1 = top[axisTag]
        min2, max2 = bot[axisTag]
        minimum = max(min1, min2)
        maximum = min(max1, max2)
        if not minimum < maximum:
            return None, bot  # Do not intersect
        intersection[axisTag] = minimum, maximum

    # Remainder
    #
    # Remainder is empty if each of bot's axis ranges lies within that of the
    # intersection.
    #
    # Remainder is shrunk if every axis range of bot but exactly one lies within
    # that of the intersection, and that one axis extrudes out of the
    # intersection on only one side.
    #
    # Bot is returned in full as remainder otherwise, as the true remainder is
    # not representable as a single box.

    remainder = dict(bot)
    extruding = False
    fullyInside = True
    for axisTag in top:
        if axisTag in bot:
            continue
        extruding = True
        fullyInside = False
        break
    for axisTag in bot:
        if axisTag not in top:
            continue  # Axis range lies fully within
        min1, max1 = intersection[axisTag]
        min2, max2 = bot[axisTag]
        if min1 <= min2 and max2 <= max1:
            continue  # Axis range lies fully within

        # Bot's range doesn't fully lie within that of top's for this axis.
        # We know they intersect, so it cannot lie fully without either; so they
        # overlap.

        # If we have had an overlapping axis before, remainder is not
        # representable as a box, so return full bottom and go home.
        if extruding:
            return intersection, bot
        extruding = True
        fullyInside = False

        # Otherwise, cut remainder on this axis and continue.
        if min1 <= min2:
            # Right side survives.
            minimum = max(max1, min2)
            maximum = max2
        elif max2 <= max1:
            # Left side survives.
            minimum = min2
            maximum = min(min1, max2)
        else:
            # Remainder leaks out from both sides.  Can't cut either.
            return intersection, bot

        remainder[axisTag] = minimum, maximum

    if fullyInside:
        # bot is fully within intersection.  Remainder is empty.
        return intersection, None

    return intersection, remainder


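# Added illustration (hypothetical boxes, not part of the original module):
# overlaying a box that clips only the top of ``bot``'s weight range yields the
# intersection plus a single-box remainder for the surviving lower part.
def _overlayBox_example():
    intersection, remainder = overlayBox({"wght": (0.5, 1.0)}, {"wght": (0.0, 0.8)})
    assert intersection == {"wght": (0.5, 0.8)}
    assert remainder == {"wght": (0.0, 0.5)}

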
def cleanupBox(box):
    """Return a sparse copy of `box`, without redundant (default) values.

    >>> cleanupBox({})
    {}
    >>> cleanupBox({'wdth': (0.0, 1.0)})
    {'wdth': (0.0, 1.0)}
    >>> cleanupBox({'wdth': (-1.0, 1.0)})
    {}

    """
    return {tag: limit for tag, limit in box.items() if limit != (-1.0, 1.0)}


#
# Low level implementation
#


def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag="rvrn"):
    """Low level implementation of addFeatureVariations that directly
    models the possibilities of the FeatureVariations table."""

    featureTags = [featureTag] if isinstance(featureTag, str) else sorted(featureTag)
    processLast = "rvrn" not in featureTags or len(featureTags) > 1

    #
    # if a <featureTag> feature is not present:
    #     make empty <featureTag> feature
    #     sort features, get <featureTag> feature index
    #     add <featureTag> feature to all scripts
    # if a <featureTag> feature is present:
    #     reuse <featureTag> feature index
    # make lookups
    # add feature variations
    #
    if table.Version < 0x00010001:
        table.Version = 0x00010001  # allow table.FeatureVariations

    varFeatureIndices = set()

    existingTags = {
        feature.FeatureTag
        for feature in table.FeatureList.FeatureRecord
        if feature.FeatureTag in featureTags
    }

    newTags = set(featureTags) - existingTags
    if newTags:
        varFeatures = []
        for featureTag in sorted(newTags):
            varFeature = buildFeatureRecord(featureTag, [])
            table.FeatureList.FeatureRecord.append(varFeature)
            varFeatures.append(varFeature)
        table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)

        sortFeatureList(table)

        for varFeature in varFeatures:
            varFeatureIndex = table.FeatureList.FeatureRecord.index(varFeature)

            for scriptRecord in table.ScriptList.ScriptRecord:
                if scriptRecord.Script.DefaultLangSys is None:
                    # We need to have a default LangSys to attach variations to.
                    langSys = ot.LangSys()
                    langSys.LookupOrder = None
                    langSys.ReqFeatureIndex = 0xFFFF
                    langSys.FeatureIndex = []
                    langSys.FeatureCount = 0
                    scriptRecord.Script.DefaultLangSys = langSys
                langSystems = [lsr.LangSys for lsr in scriptRecord.Script.LangSysRecord]
                for langSys in [scriptRecord.Script.DefaultLangSys] + langSystems:
                    langSys.FeatureIndex.append(varFeatureIndex)
                    langSys.FeatureCount = len(langSys.FeatureIndex)
            varFeatureIndices.add(varFeatureIndex)

    if existingTags:
        # indices may have changed if we inserted new features and sorted feature list
        # so we must do this after the above
        varFeatureIndices.update(
            index
            for index, feature in enumerate(table.FeatureList.FeatureRecord)
            if feature.FeatureTag in existingTags
        )

    axisIndices = {
        axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes)
    }

    hasFeatureVariations = (
        hasattr(table, "FeatureVariations") and table.FeatureVariations is not None
    )

    featureVariationRecords = []
    for conditionSet, lookupIndices in conditionalSubstitutions:
        conditionTable = []
        for axisTag, (minValue, maxValue) in sorted(conditionSet.items()):
            if minValue > maxValue:
                raise VarLibValidationError(
                    "A condition set has a minimum value above the maximum value."
                )
            ct = buildConditionTable(axisIndices[axisTag], minValue, maxValue)
            conditionTable.append(ct)
        records = []
        for varFeatureIndex in sorted(varFeatureIndices):
            existingLookupIndices = table.FeatureList.FeatureRecord[
                varFeatureIndex
            ].Feature.LookupListIndex
            combinedLookupIndices = (
                existingLookupIndices + lookupIndices
                if processLast
                else lookupIndices + existingLookupIndices
            )

            records.append(
                buildFeatureTableSubstitutionRecord(
                    varFeatureIndex, combinedLookupIndices
                )
            )
        if hasFeatureVariations and (
            fvr := findFeatureVariationRecord(table.FeatureVariations, conditionTable)
        ):
            fvr.FeatureTableSubstitution.SubstitutionRecord.extend(records)
            fvr.FeatureTableSubstitution.SubstitutionCount = len(
                fvr.FeatureTableSubstitution.SubstitutionRecord
            )
        else:
            featureVariationRecords.append(
                buildFeatureVariationRecord(conditionTable, records)
            )

    if hasFeatureVariations:
        if table.FeatureVariations.Version != 0x00010000:
            raise VarLibError(
                "Unsupported FeatureVariations table version: "
                f"0x{table.FeatureVariations.Version:08x} (expected 0x00010000)."
            )
        table.FeatureVariations.FeatureVariationRecord.extend(featureVariationRecords)
        table.FeatureVariations.FeatureVariationCount = len(
            table.FeatureVariations.FeatureVariationRecord
        )
    else:
        table.FeatureVariations = buildFeatureVariations(featureVariationRecords)


#
# Building GSUB/FeatureVariations internals
#


def buildGSUB():
    """Build a GSUB table from scratch."""
    fontTable = newTable("GSUB")
    gsub = fontTable.table = ot.GSUB()
    gsub.Version = 0x00010001  # allow gsub.FeatureVariations

    gsub.ScriptList = ot.ScriptList()
    gsub.ScriptList.ScriptRecord = []
    gsub.FeatureList = ot.FeatureList()
    gsub.FeatureList.FeatureRecord = []
    gsub.LookupList = ot.LookupList()
    gsub.LookupList.Lookup = []

    srec = ot.ScriptRecord()
    srec.ScriptTag = "DFLT"
    srec.Script = ot.Script()
    srec.Script.DefaultLangSys = None
    srec.Script.LangSysRecord = []
    srec.Script.LangSysCount = 0

    langrec = ot.LangSysRecord()
    langrec.LangSys = ot.LangSys()
    langrec.LangSys.ReqFeatureIndex = 0xFFFF
    langrec.LangSys.FeatureIndex = []
    srec.Script.DefaultLangSys = langrec.LangSys

    gsub.ScriptList.ScriptRecord.append(srec)
    gsub.ScriptList.ScriptCount = 1
    gsub.FeatureVariations = None

    return fontTable


def makeSubstitutionsHashable(conditionalSubstitutions):
    """Turn all the substitution dictionaries into sorted tuples of tuples so
    they are hashable, to detect duplicates so we don't write out redundant
    data."""
    allSubstitutions = set()
    condSubst = []
    for conditionSet, substitutionMaps in conditionalSubstitutions:
        substitutions = []
        for substitutionMap in substitutionMaps:
            subst = tuple(sorted(substitutionMap.items()))
            substitutions.append(subst)
            allSubstitutions.add(subst)
        condSubst.append((conditionSet, substitutions))
    return condSubst, sorted(allSubstitutions)


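# Added illustration (hypothetical data, not part of the original module): each
# substitution dict becomes a sorted tuple of (source, substitute) pairs.
def _makeSubstitutionsHashable_example():
    condSubst = [({"wght": (0.5, 1.0)}, [{"dollar": "dollar.rvrn"}])]
    condSubst, allSubstitutions = makeSubstitutionsHashable(condSubst)
    assert allSubstitutions == [(("dollar", "dollar.rvrn"),)]

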
class ShifterVisitor(TTVisitor):
    def __init__(self, shift):
        self.shift = shift


@ShifterVisitor.register_attr(ot.Feature, "LookupListIndex")  # GSUB/GPOS
def visit(visitor, obj, attr, value):
    shift = visitor.shift
    value = [l + shift for l in value]
    setattr(obj, attr, value)


@ShifterVisitor.register_attr(
    (ot.SubstLookupRecord, ot.PosLookupRecord), "LookupListIndex"
)
def visit(visitor, obj, attr, value):
    setattr(obj, attr, visitor.shift + value)


def buildSubstitutionLookups(gsub, allSubstitutions, processLast=False):
    """Build the lookups for the glyph substitutions, return a dict mapping
    the substitution to lookup indices."""

    # Insert lookups at the beginning of the lookup vector
    # https://github.com/googlefonts/fontmake/issues/950

    firstIndex = len(gsub.LookupList.Lookup) if processLast else 0
    lookupMap = {}
    for i, substitutionMap in enumerate(allSubstitutions):
        lookupMap[substitutionMap] = firstIndex + i

    if not processLast:
        # Shift all lookup indices in gsub by len(allSubstitutions)
        shift = len(allSubstitutions)
        visitor = ShifterVisitor(shift)
        visitor.visit(gsub.FeatureList.FeatureRecord)
        visitor.visit(gsub.LookupList.Lookup)

    for i, subst in enumerate(allSubstitutions):
        substMap = dict(subst)
        lookup = buildLookup([buildSingleSubstSubtable(substMap)])
        if processLast:
            gsub.LookupList.Lookup.append(lookup)
        else:
            gsub.LookupList.Lookup.insert(i, lookup)
        assert gsub.LookupList.Lookup[lookupMap[subst]] is lookup
    gsub.LookupList.LookupCount = len(gsub.LookupList.Lookup)
    return lookupMap


def buildFeatureVariations(featureVariationRecords):
    """Build the FeatureVariations subtable."""
    fv = ot.FeatureVariations()
    fv.Version = 0x00010000
    fv.FeatureVariationRecord = featureVariationRecords
    fv.FeatureVariationCount = len(featureVariationRecords)
    return fv


def buildFeatureRecord(featureTag, lookupListIndices):
    """Build a FeatureRecord."""
    fr = ot.FeatureRecord()
    fr.FeatureTag = featureTag
    fr.Feature = ot.Feature()
    fr.Feature.LookupListIndex = lookupListIndices
    fr.Feature.populateDefaults()
    return fr


def buildFeatureVariationRecord(conditionTable, substitutionRecords):
    """Build a FeatureVariationRecord."""
    fvr = ot.FeatureVariationRecord()
    if len(conditionTable) != 0:
        fvr.ConditionSet = ot.ConditionSet()
        fvr.ConditionSet.ConditionTable = conditionTable
        fvr.ConditionSet.ConditionCount = len(conditionTable)
    else:
        fvr.ConditionSet = None
    fvr.FeatureTableSubstitution = ot.FeatureTableSubstitution()
    fvr.FeatureTableSubstitution.Version = 0x00010000
    fvr.FeatureTableSubstitution.SubstitutionRecord = substitutionRecords
    fvr.FeatureTableSubstitution.SubstitutionCount = len(substitutionRecords)
    return fvr


def buildFeatureTableSubstitutionRecord(featureIndex, lookupListIndices):
    """Build a FeatureTableSubstitutionRecord."""
    ftsr = ot.FeatureTableSubstitutionRecord()
    ftsr.FeatureIndex = featureIndex
    ftsr.Feature = ot.Feature()
    ftsr.Feature.LookupListIndex = lookupListIndices
    ftsr.Feature.LookupCount = len(lookupListIndices)
    return ftsr


def buildConditionTable(axisIndex, filterRangeMinValue, filterRangeMaxValue):
    """Build a ConditionTable."""
    ct = ot.ConditionTable()
    ct.Format = 1
    ct.AxisIndex = axisIndex
    ct.FilterRangeMinValue = filterRangeMinValue
    ct.FilterRangeMaxValue = filterRangeMaxValue
    return ct


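# Added sketch (hypothetical values, not part of the original module): a
# Format 1 condition on the axis at fvar index 0, covering the upper half of
# its normalized range.
def _buildConditionTable_example():
    ct = buildConditionTable(0, 0.5, 1.0)
    assert (ct.Format, ct.AxisIndex) == (1, 0)

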
def findFeatureVariationRecord(featureVariations, conditionTable):
    """Find a FeatureVariationRecord that has the same conditionTable."""
    if featureVariations.Version != 0x00010000:
        raise VarLibError(
            "Unsupported FeatureVariations table version: "
            f"0x{featureVariations.Version:08x} (expected 0x00010000)."
        )

    for fvr in featureVariations.FeatureVariationRecord:
        if conditionTable == fvr.ConditionSet.ConditionTable:
            return fvr

    return None


def sortFeatureList(table):
    """Sort the feature list by feature tag, and remap the feature indices
    elsewhere. This is needed after the feature list has been modified.
    """
    # decorate, sort, undecorate, because we need to make an index remapping table
    tagIndexFea = [
        (fea.FeatureTag, index, fea)
        for index, fea in enumerate(table.FeatureList.FeatureRecord)
    ]
    tagIndexFea.sort()
    table.FeatureList.FeatureRecord = [fea for tag, index, fea in tagIndexFea]
    featureRemap = dict(
        zip([index for tag, index, fea in tagIndexFea], range(len(tagIndexFea)))
    )

    # Remap the feature indices
    remapFeatures(table, featureRemap)


def remapFeatures(table, featureRemap):
    """Go through the scripts list, and remap feature indices."""
    for scriptIndex, script in enumerate(table.ScriptList.ScriptRecord):
        defaultLangSys = script.Script.DefaultLangSys
        if defaultLangSys is not None:
            _remapLangSys(defaultLangSys, featureRemap)
        for langSysRecordIndex, langSysRec in enumerate(script.Script.LangSysRecord):
            langSys = langSysRec.LangSys
            _remapLangSys(langSys, featureRemap)

    if hasattr(table, "FeatureVariations") and table.FeatureVariations is not None:
        for fvr in table.FeatureVariations.FeatureVariationRecord:
            for ftsr in fvr.FeatureTableSubstitution.SubstitutionRecord:
                ftsr.FeatureIndex = featureRemap[ftsr.FeatureIndex]


def _remapLangSys(langSys, featureRemap):
    if langSys.ReqFeatureIndex != 0xFFFF:
        langSys.ReqFeatureIndex = featureRemap[langSys.ReqFeatureIndex]
    langSys.FeatureIndex = [featureRemap[index] for index in langSys.FeatureIndex]


if __name__ == "__main__":
    import doctest, sys

    sys.exit(doctest.testmod().failed)
113
venv/lib/python3.13/site-packages/fontTools/varLib/hvar.py
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
from fontTools.misc.roundTools import noRound
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.varLib import HVAR_FIELDS, VVAR_FIELDS, _add_VHVAR
from fontTools.varLib import builder, models, varStore
from fontTools.misc.fixedTools import fixedToFloat as fi2fl
from fontTools.misc.cliTools import makeOutputFileName
from functools import partial
import logging

log = logging.getLogger("fontTools.varLib.hvar")


def _get_advance_metrics(font, axisTags, tableFields):
    # There are two ways we can go from here:
    # 1. For each glyph, at each master peak, compute the value of the
    #    advance width at that peak.  Then pass these all to a VariationModel
    #    builder to compute back the deltas.
    # 2. For each master peak, pull out the deltas of the advance width directly,
    #    and feed these to the VarStoreBuilder, forgoing the remodeling step.
    # We'll go with the second option, as it's simpler, faster, and more direct.
    gvar = font["gvar"]
    vhAdvanceDeltasAndSupports = {}
    glyphOrder = font.getGlyphOrder()
    phantomIndex = tableFields.phantomIndex
    for glyphName in glyphOrder:
        supports = []
        deltas = []
        variations = gvar.variations.get(glyphName, [])

        for tv in variations:
            supports.append(tv.axes)
            phantoms = tv.coordinates[-4:]
            phantoms = phantoms[phantomIndex * 2 : phantomIndex * 2 + 2]
            assert len(phantoms) == 2
            phantoms[0] = phantoms[0][phantomIndex] if phantoms[0] is not None else 0
            phantoms[1] = phantoms[1][phantomIndex] if phantoms[1] is not None else 0
            deltas.append(phantoms[1] - phantoms[0])

        vhAdvanceDeltasAndSupports[glyphName] = (deltas, supports)

    vOrigDeltasAndSupports = None  # TODO

    return vhAdvanceDeltasAndSupports, vOrigDeltasAndSupports


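# Added illustration (hypothetical delta values, not part of the original
# module): the last four points of a gvar TupleVariation are the phantom
# points (left, right, top, bottom).  For HVAR (phantomIndex == 0) the advance
# delta is the x difference between the right and left phantom deltas.
def _phantom_delta_example():
    coordinates = [(10, 0), (60, 0), (0, 0), (80, 0), (0, 0), (0, -5)]
    left, right = coordinates[-4:][0:2]
    assert right[0] - left[0] == 80

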
def add_HVAR(font):
    if "HVAR" in font:
        del font["HVAR"]
    axisTags = [axis.axisTag for axis in font["fvar"].axes]
    getAdvanceMetrics = partial(_get_advance_metrics, font, axisTags, HVAR_FIELDS)
    _add_VHVAR(font, axisTags, HVAR_FIELDS, getAdvanceMetrics)


def add_VVAR(font):
    if "VVAR" in font:
        del font["VVAR"]
    # axisTags must be computed before it is passed to partial() below.
    axisTags = [axis.axisTag for axis in font["fvar"].axes]
    getAdvanceMetrics = partial(_get_advance_metrics, font, axisTags, VVAR_FIELDS)
    _add_VHVAR(font, axisTags, VVAR_FIELDS, getAdvanceMetrics)


def main(args=None):
    """Add `HVAR` table to variable font."""

    if args is None:
        import sys

        args = sys.argv[1:]

    from fontTools import configLogger
    from fontTools.designspaceLib import DesignSpaceDocument
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.hvar",
        description="Add `HVAR` table to variable font.",
    )
    parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
    parser.add_argument(
        "-o",
        "--output-file",
        type=str,
        help="Output font file name.",
    )

    options = parser.parse_args(args)

    configLogger(level="WARNING")

    font = TTFont(options.font)
    if "fvar" not in font:
        log.error("Not a variable font.")
        return 1

    add_HVAR(font)
    if "vmtx" in font:
        add_VVAR(font)

    if options.output_file is None:
        outfile = makeOutputFileName(options.font, overWrite=True, suffix=".hvar")
    else:
        outfile = options.output_file
    if outfile:
        log.info("Saving %s", outfile)
        font.save(outfile)


if __name__ == "__main__":
    import sys

    sys.exit(main())
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,5 @@
|
|||
import sys
from fontTools.varLib.instancer import main

if __name__ == "__main__":
    sys.exit(main())
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,190 @@
|
|||
from fontTools.ttLib.tables import otTables as ot
|
||||
from copy import deepcopy
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.varLib.instancer")
|
||||
|
||||
|
||||
def _featureVariationRecordIsUnique(rec, seen):
|
||||
conditionSet = []
|
||||
conditionSets = (
|
||||
rec.ConditionSet.ConditionTable if rec.ConditionSet is not None else []
|
||||
)
|
||||
for cond in conditionSets:
|
||||
if cond.Format != 1:
|
||||
            # can't tell whether this is a duplicate; assume it is unique
|
||||
return True
|
||||
conditionSet.append(
|
||||
(cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue)
|
||||
)
|
||||
# besides the set of conditions, we also include the FeatureTableSubstitution
|
||||
# version to identify unique FeatureVariationRecords, even though only one
|
||||
# version is currently defined. It's theoretically possible that multiple
|
||||
    # records with the same conditions but a different substitution table version may be
|
||||
# present in the same font for backward compatibility.
|
||||
recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet)
|
||||
if recordKey in seen:
|
||||
return False
|
||||
else:
|
||||
seen.add(recordKey) # side effect
|
||||
return True
|
||||
|
||||
|
||||
def _limitFeatureVariationConditionRange(condition, axisLimit):
|
||||
minValue = condition.FilterRangeMinValue
|
||||
maxValue = condition.FilterRangeMaxValue
|
||||
|
||||
if (
|
||||
minValue > maxValue
|
||||
or minValue > axisLimit.maximum
|
||||
or maxValue < axisLimit.minimum
|
||||
):
|
||||
# condition invalid or out of range
|
||||
return
|
||||
|
||||
return tuple(
|
||||
axisLimit.renormalizeValue(v, extrapolate=False) for v in (minValue, maxValue)
|
||||
)
|
||||
|
||||
|
||||
def _instantiateFeatureVariationRecord(
|
||||
record, recIdx, axisLimits, fvarAxes, axisIndexMap
|
||||
):
|
||||
applies = True
|
||||
shouldKeep = False
|
||||
newConditions = []
|
||||
from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances
|
||||
|
||||
default_triple = NormalizedAxisTripleAndDistances(-1, 0, +1)
|
||||
if record.ConditionSet is None:
|
||||
record.ConditionSet = ot.ConditionSet()
|
||||
record.ConditionSet.ConditionTable = []
|
||||
record.ConditionSet.ConditionCount = 0
|
||||
for i, condition in enumerate(record.ConditionSet.ConditionTable):
|
||||
if condition.Format == 1:
|
||||
axisIdx = condition.AxisIndex
|
||||
axisTag = fvarAxes[axisIdx].axisTag
|
||||
|
||||
minValue = condition.FilterRangeMinValue
|
||||
maxValue = condition.FilterRangeMaxValue
|
||||
triple = axisLimits.get(axisTag, default_triple)
|
||||
|
||||
if not (minValue <= triple.default <= maxValue):
|
||||
applies = False
|
||||
|
||||
# if condition not met, remove entire record
|
||||
if triple.minimum > maxValue or triple.maximum < minValue:
|
||||
newConditions = None
|
||||
break
|
||||
|
||||
if axisTag in axisIndexMap:
|
||||
# remap axis index
|
||||
condition.AxisIndex = axisIndexMap[axisTag]
|
||||
|
||||
# remap condition limits
|
||||
newRange = _limitFeatureVariationConditionRange(condition, triple)
|
||||
if newRange:
|
||||
# keep condition with updated limits
|
||||
minimum, maximum = newRange
|
||||
condition.FilterRangeMinValue = minimum
|
||||
condition.FilterRangeMaxValue = maximum
|
||||
shouldKeep = True
|
||||
if minimum != -1 or maximum != +1:
|
||||
newConditions.append(condition)
|
||||
else:
|
||||
# condition out of range, remove entire record
|
||||
newConditions = None
|
||||
break
|
||||
|
||||
else:
|
||||
log.warning(
|
||||
"Condition table {0} of FeatureVariationRecord {1} has "
|
||||
"unsupported format ({2}); ignored".format(i, recIdx, condition.Format)
|
||||
)
|
||||
applies = False
|
||||
newConditions.append(condition)
|
||||
|
||||
if newConditions is not None and shouldKeep:
|
||||
record.ConditionSet.ConditionTable = newConditions
|
||||
if not newConditions:
|
||||
record.ConditionSet = None
|
||||
shouldKeep = True
|
||||
else:
|
||||
shouldKeep = False
|
||||
|
||||
# Does this *always* apply?
|
||||
universal = shouldKeep and not newConditions
|
||||
|
||||
return applies, shouldKeep, universal
|
||||
|
||||
|
||||
def _instantiateFeatureVariations(table, fvarAxes, axisLimits):
|
||||
pinnedAxes = set(axisLimits.pinnedLocation())
|
||||
axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes]
|
||||
axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder}
|
||||
|
||||
featureVariationApplied = False
|
||||
uniqueRecords = set()
|
||||
newRecords = []
|
||||
defaultsSubsts = None
|
||||
|
||||
for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord):
|
||||
applies, shouldKeep, universal = _instantiateFeatureVariationRecord(
|
||||
record, i, axisLimits, fvarAxes, axisIndexMap
|
||||
)
|
||||
|
||||
if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords):
|
||||
newRecords.append(record)
|
||||
|
||||
if applies and not featureVariationApplied:
|
||||
assert record.FeatureTableSubstitution.Version == 0x00010000
|
||||
defaultsSubsts = deepcopy(record.FeatureTableSubstitution)
|
||||
for default, rec in zip(
|
||||
defaultsSubsts.SubstitutionRecord,
|
||||
record.FeatureTableSubstitution.SubstitutionRecord,
|
||||
):
|
||||
default.Feature = deepcopy(
|
||||
table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature
|
||||
)
|
||||
table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = deepcopy(
|
||||
rec.Feature
|
||||
)
|
||||
# Set variations only once
|
||||
featureVariationApplied = True
|
||||
|
||||
# Further records don't have a chance to apply after a universal record
|
||||
if universal:
|
||||
break
|
||||
|
||||
# Insert a catch-all record to reinstate the old features if necessary
|
||||
if featureVariationApplied and newRecords and not universal:
|
||||
defaultRecord = ot.FeatureVariationRecord()
|
||||
defaultRecord.ConditionSet = ot.ConditionSet()
|
||||
defaultRecord.ConditionSet.ConditionTable = []
|
||||
defaultRecord.ConditionSet.ConditionCount = 0
|
||||
defaultRecord.FeatureTableSubstitution = defaultsSubsts
|
||||
|
||||
newRecords.append(defaultRecord)
|
||||
|
||||
if newRecords:
|
||||
table.FeatureVariations.FeatureVariationRecord = newRecords
|
||||
table.FeatureVariations.FeatureVariationCount = len(newRecords)
|
||||
else:
|
||||
del table.FeatureVariations
|
||||
# downgrade table version if there are no FeatureVariations left
|
||||
table.Version = 0x00010000
|
||||
|
||||
|
||||
def instantiateFeatureVariations(varfont, axisLimits):
|
||||
for tableTag in ("GPOS", "GSUB"):
|
||||
if tableTag not in varfont or not getattr(
|
||||
varfont[tableTag].table, "FeatureVariations", None
|
||||
):
|
||||
continue
|
||||
log.info("Instantiating FeatureVariations of %s table", tableTag)
|
||||
_instantiateFeatureVariations(
|
||||
varfont[tableTag].table, varfont["fvar"].axes, axisLimits
|
||||
)
|
||||
# remove unreferenced lookups
|
||||
varfont[tableTag].prune_lookups()
|
||||
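
# Added usage sketch (hypothetical font path; AxisLimits comes from
# fontTools.varLib.instancer, as assumed by this module's companions):
def _instantiateFeatureVariations_example():
    from fontTools.ttLib import TTFont
    from fontTools.varLib.instancer import AxisLimits

    varfont = TTFont("MyFont-VF.ttf")
    axisLimits = AxisLimits({"wght": 400}).normalize(varfont)
    instantiateFeatureVariations(varfont, axisLimits)
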
|
|
@ -0,0 +1,388 @@
|
|||
"""Helpers for instantiating name table records."""
|
||||
|
||||
from contextlib import contextmanager
|
||||
from copy import deepcopy
|
||||
from enum import IntEnum
|
||||
import re
|
||||
|
||||
|
||||
class NameID(IntEnum):
|
||||
FAMILY_NAME = 1
|
||||
SUBFAMILY_NAME = 2
|
||||
UNIQUE_FONT_IDENTIFIER = 3
|
||||
FULL_FONT_NAME = 4
|
||||
VERSION_STRING = 5
|
||||
POSTSCRIPT_NAME = 6
|
||||
TYPOGRAPHIC_FAMILY_NAME = 16
|
||||
TYPOGRAPHIC_SUBFAMILY_NAME = 17
|
||||
VARIATIONS_POSTSCRIPT_NAME_PREFIX = 25
|
||||
|
||||
|
||||
ELIDABLE_AXIS_VALUE_NAME = 2
|
||||
|
||||
|
||||
def getVariationNameIDs(varfont):
|
||||
used = []
|
||||
if "fvar" in varfont:
|
||||
fvar = varfont["fvar"]
|
||||
for axis in fvar.axes:
|
||||
used.append(axis.axisNameID)
|
||||
for instance in fvar.instances:
|
||||
used.append(instance.subfamilyNameID)
|
||||
if instance.postscriptNameID != 0xFFFF:
|
||||
used.append(instance.postscriptNameID)
|
||||
if "STAT" in varfont:
|
||||
stat = varfont["STAT"].table
|
||||
for axis in stat.DesignAxisRecord.Axis if stat.DesignAxisRecord else ():
|
||||
used.append(axis.AxisNameID)
|
||||
for value in stat.AxisValueArray.AxisValue if stat.AxisValueArray else ():
|
||||
used.append(value.ValueNameID)
|
||||
elidedFallbackNameID = getattr(stat, "ElidedFallbackNameID", None)
|
||||
if elidedFallbackNameID is not None:
|
||||
used.append(elidedFallbackNameID)
|
||||
# nameIDs <= 255 are reserved by OT spec so we don't touch them
|
||||
return {nameID for nameID in used if nameID > 255}
|
||||
|
||||
|
||||
@contextmanager
|
||||
def pruningUnusedNames(varfont):
|
||||
from . import log
|
||||
|
||||
origNameIDs = getVariationNameIDs(varfont)
|
||||
|
||||
yield
|
||||
|
||||
log.info("Pruning name table")
|
||||
exclude = origNameIDs - getVariationNameIDs(varfont)
|
||||
varfont["name"].names[:] = [
|
||||
record for record in varfont["name"].names if record.nameID not in exclude
|
||||
]
|
||||
if "ltag" in varfont:
|
||||
# Drop the whole 'ltag' table if all the language-dependent Unicode name
|
||||
# records that reference it have been dropped.
|
||||
# TODO: Only prune unused ltag tags, renumerating langIDs accordingly.
|
||||
# Note ltag can also be used by feat or morx tables, so check those too.
|
||||
if not any(
|
||||
record
|
||||
for record in varfont["name"].names
|
||||
if record.platformID == 0 and record.langID != 0xFFFF
|
||||
):
|
||||
del varfont["ltag"]
|
||||
|
||||
|
||||
def updateNameTable(varfont, axisLimits):
|
||||
"""Update instatiated variable font's name table using STAT AxisValues.
|
||||
|
||||
Raises ValueError if the STAT table is missing or an Axis Value table is
|
||||
missing for requested axis locations.
|
||||
|
||||
First, collect all STAT AxisValues that match the new default axis locations
|
||||
(excluding "elided" ones); concatenate the strings in design axis order,
|
||||
while giving priority to "synthetic" values (Format 4), to form the
|
||||
typographic subfamily name associated with the new default instance.
|
||||
Finally, update all related records in the name table, making sure that
|
||||
    legacy family/sub-family names conform to the R/I/B/BI (Regular, Italic,
|
||||
Bold, Bold Italic) naming model.
|
||||
|
||||
Example: Updating a partial variable font:
|
||||
| >>> ttFont = TTFont("OpenSans[wdth,wght].ttf")
|
||||
| >>> updateNameTable(ttFont, {"wght": (400, 900), "wdth": 75})
|
||||
|
||||
The name table records will be updated in the following manner:
|
||||
NameID 1 familyName: "Open Sans" --> "Open Sans Condensed"
|
||||
NameID 2 subFamilyName: "Regular" --> "Regular"
|
||||
NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> \
|
||||
"3.000;GOOG;OpenSans-Condensed"
|
||||
NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed"
|
||||
NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed"
|
||||
NameID 16 Typographic Family name: None --> "Open Sans"
|
||||
NameID 17 Typographic Subfamily name: None --> "Condensed"
|
||||
|
||||
References:
|
||||
https://docs.microsoft.com/en-us/typography/opentype/spec/stat
|
||||
https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
|
||||
"""
|
||||
from . import AxisLimits, axisValuesFromAxisLimits
|
||||
|
||||
if "STAT" not in varfont:
|
||||
raise ValueError("Cannot update name table since there is no STAT table.")
|
||||
stat = varfont["STAT"].table
|
||||
if not stat.AxisValueArray:
|
||||
raise ValueError("Cannot update name table since there are no STAT Axis Values")
|
||||
fvar = varfont["fvar"]
|
||||
|
||||
# The updated name table will reflect the new 'zero origin' of the font.
|
||||
# If we're instantiating a partial font, we will populate the unpinned
|
||||
# axes with their default axis values from fvar.
|
||||
axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont)
|
||||
partialDefaults = axisLimits.defaultLocation()
|
||||
fvarDefaults = {a.axisTag: a.defaultValue for a in fvar.axes}
|
||||
defaultAxisCoords = AxisLimits({**fvarDefaults, **partialDefaults})
|
||||
assert all(v.minimum == v.maximum for v in defaultAxisCoords.values())
|
||||
|
||||
axisValueTables = axisValuesFromAxisLimits(stat, defaultAxisCoords)
|
||||
checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords.pinnedLocation())
|
||||
|
||||
# ignore "elidable" axis values, should be omitted in application font menus.
|
||||
axisValueTables = [
|
||||
v for v in axisValueTables if not v.Flags & ELIDABLE_AXIS_VALUE_NAME
|
||||
]
|
||||
axisValueTables = _sortAxisValues(axisValueTables)
|
||||
_updateNameRecords(varfont, axisValueTables)
|
||||
|
||||
|
||||
def checkAxisValuesExist(stat, axisValues, axisCoords):
|
||||
seen = set()
|
||||
designAxes = stat.DesignAxisRecord.Axis
|
||||
hasValues = set()
|
||||
for value in stat.AxisValueArray.AxisValue:
|
||||
if value.Format in (1, 2, 3):
|
||||
hasValues.add(designAxes[value.AxisIndex].AxisTag)
|
||||
elif value.Format == 4:
|
||||
for rec in value.AxisValueRecord:
|
||||
hasValues.add(designAxes[rec.AxisIndex].AxisTag)
|
||||
|
||||
for axisValueTable in axisValues:
|
||||
axisValueFormat = axisValueTable.Format
|
||||
if axisValueTable.Format in (1, 2, 3):
|
||||
axisTag = designAxes[axisValueTable.AxisIndex].AxisTag
|
||||
if axisValueFormat == 2:
|
||||
axisValue = axisValueTable.NominalValue
|
||||
else:
|
||||
axisValue = axisValueTable.Value
|
||||
if axisTag in axisCoords and axisValue == axisCoords[axisTag]:
|
||||
seen.add(axisTag)
|
||||
elif axisValueTable.Format == 4:
|
||||
for rec in axisValueTable.AxisValueRecord:
|
||||
axisTag = designAxes[rec.AxisIndex].AxisTag
|
||||
if axisTag in axisCoords and rec.Value == axisCoords[axisTag]:
|
||||
seen.add(axisTag)
|
||||
|
||||
missingAxes = (set(axisCoords) - seen) & hasValues
|
||||
if missingAxes:
|
||||
missing = ", ".join(f"'{i}': {axisCoords[i]}" for i in missingAxes)
|
||||
raise ValueError(f"Cannot find Axis Values {{{missing}}}")
|
||||
|
||||
|
||||
def _sortAxisValues(axisValues):
|
||||
# Sort by axis index, remove duplicates and ensure that format 4 AxisValues
|
||||
# are dominant.
|
||||
# The MS Spec states: "if a format 1, format 2 or format 3 table has a
|
||||
# (nominal) value used in a format 4 table that also has values for
|
||||
# other axes, the format 4 table, being the more specific match, is used",
|
||||
# https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4
|
||||
results = []
|
||||
seenAxes = set()
|
||||
# Sort format 4 axes so the tables with the most AxisValueRecords are first
|
||||
format4 = sorted(
|
||||
[v for v in axisValues if v.Format == 4],
|
||||
key=lambda v: len(v.AxisValueRecord),
|
||||
reverse=True,
|
||||
)
|
||||
|
||||
for val in format4:
|
||||
axisIndexes = set(r.AxisIndex for r in val.AxisValueRecord)
|
||||
minIndex = min(axisIndexes)
|
||||
if not seenAxes & axisIndexes:
|
||||
seenAxes |= axisIndexes
|
||||
results.append((minIndex, val))
|
||||
|
||||
for val in axisValues:
|
||||
if val in format4:
|
||||
continue
|
||||
axisIndex = val.AxisIndex
|
||||
if axisIndex not in seenAxes:
|
||||
seenAxes.add(axisIndex)
|
||||
results.append((axisIndex, val))
|
||||
|
||||
return [axisValue for _, axisValue in sorted(results)]
|
||||
|
||||
|
||||
def _updateNameRecords(varfont, axisValues):
|
||||
# Update nametable based on the axisValues using the R/I/B/BI model.
|
||||
nametable = varfont["name"]
|
||||
stat = varfont["STAT"].table
|
||||
|
||||
axisValueNameIDs = [a.ValueNameID for a in axisValues]
|
||||
ribbiNameIDs = [n for n in axisValueNameIDs if _isRibbi(nametable, n)]
|
||||
nonRibbiNameIDs = [n for n in axisValueNameIDs if n not in ribbiNameIDs]
|
||||
elidedNameID = stat.ElidedFallbackNameID
|
||||
elidedNameIsRibbi = _isRibbi(nametable, elidedNameID)
|
||||
|
||||
getName = nametable.getName
|
||||
platforms = set((r.platformID, r.platEncID, r.langID) for r in nametable.names)
|
||||
for platform in platforms:
|
||||
if not all(getName(i, *platform) for i in (1, 2, elidedNameID)):
|
||||
# Since no family name and subfamily name records were found,
|
||||
# we cannot update this set of name Records.
|
||||
continue
|
||||
|
||||
subFamilyName = " ".join(
|
||||
getName(n, *platform).toUnicode() for n in ribbiNameIDs
|
||||
)
|
||||
if nonRibbiNameIDs:
|
||||
typoSubFamilyName = " ".join(
|
||||
getName(n, *platform).toUnicode() for n in axisValueNameIDs
|
||||
)
|
||||
else:
|
||||
typoSubFamilyName = None
|
||||
|
||||
        # If neither subFamilyName nor typographic SubFamilyName exists,
|
||||
# we will use the STAT's elidedFallbackName
|
||||
if not typoSubFamilyName and not subFamilyName:
|
||||
if elidedNameIsRibbi:
|
||||
subFamilyName = getName(elidedNameID, *platform).toUnicode()
|
||||
else:
|
||||
typoSubFamilyName = getName(elidedNameID, *platform).toUnicode()
|
||||
|
||||
familyNameSuffix = " ".join(
|
||||
getName(n, *platform).toUnicode() for n in nonRibbiNameIDs
|
||||
)
|
||||
|
||||
_updateNameTableStyleRecords(
|
||||
varfont,
|
||||
familyNameSuffix,
|
||||
subFamilyName,
|
||||
typoSubFamilyName,
|
||||
*platform,
|
||||
)
|
||||
|
||||
|
||||
def _isRibbi(nametable, nameID):
|
||||
englishRecord = nametable.getName(nameID, 3, 1, 0x409)
|
||||
    return englishRecord is not None and englishRecord.toUnicode() in (
        "Regular",
        "Italic",
        "Bold",
        "Bold Italic",
    )
|
||||
|
||||
|
||||
def _updateNameTableStyleRecords(
|
||||
varfont,
|
||||
familyNameSuffix,
|
||||
subFamilyName,
|
||||
typoSubFamilyName,
|
||||
platformID=3,
|
||||
platEncID=1,
|
||||
langID=0x409,
|
||||
):
|
||||
# TODO (Marc F) It may be nice to make this part a standalone
|
||||
# font renamer in the future.
|
||||
nametable = varfont["name"]
|
||||
platform = (platformID, platEncID, langID)
|
||||
|
||||
currentFamilyName = nametable.getName(
|
||||
NameID.TYPOGRAPHIC_FAMILY_NAME, *platform
|
||||
) or nametable.getName(NameID.FAMILY_NAME, *platform)
|
||||
|
||||
currentStyleName = nametable.getName(
|
||||
NameID.TYPOGRAPHIC_SUBFAMILY_NAME, *platform
|
||||
) or nametable.getName(NameID.SUBFAMILY_NAME, *platform)
|
||||
|
||||
if not all([currentFamilyName, currentStyleName]):
|
||||
raise ValueError(f"Missing required NameIDs 1 and 2 for platform {platform}")
|
||||
|
||||
currentFamilyName = currentFamilyName.toUnicode()
|
||||
currentStyleName = currentStyleName.toUnicode()
|
||||
|
||||
nameIDs = {
|
||||
NameID.FAMILY_NAME: currentFamilyName,
|
||||
NameID.SUBFAMILY_NAME: subFamilyName or "Regular",
|
||||
}
|
||||
if typoSubFamilyName:
|
||||
nameIDs[NameID.FAMILY_NAME] = f"{currentFamilyName} {familyNameSuffix}".strip()
|
||||
nameIDs[NameID.TYPOGRAPHIC_FAMILY_NAME] = currentFamilyName
|
||||
nameIDs[NameID.TYPOGRAPHIC_SUBFAMILY_NAME] = typoSubFamilyName
|
||||
else:
|
||||
# Remove previous Typographic Family and SubFamily names since they're
|
||||
# no longer required
|
||||
for nameID in (
|
||||
NameID.TYPOGRAPHIC_FAMILY_NAME,
|
||||
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
|
||||
):
|
||||
nametable.removeNames(nameID=nameID)
|
||||
|
||||
newFamilyName = (
|
||||
nameIDs.get(NameID.TYPOGRAPHIC_FAMILY_NAME) or nameIDs[NameID.FAMILY_NAME]
|
||||
)
|
||||
newStyleName = (
|
||||
nameIDs.get(NameID.TYPOGRAPHIC_SUBFAMILY_NAME) or nameIDs[NameID.SUBFAMILY_NAME]
|
||||
)
|
||||
|
||||
nameIDs[NameID.FULL_FONT_NAME] = f"{newFamilyName} {newStyleName}"
|
||||
nameIDs[NameID.POSTSCRIPT_NAME] = _updatePSNameRecord(
|
||||
varfont, newFamilyName, newStyleName, platform
|
||||
)
|
||||
|
||||
uniqueID = _updateUniqueIdNameRecord(varfont, nameIDs, platform)
|
||||
if uniqueID:
|
||||
nameIDs[NameID.UNIQUE_FONT_IDENTIFIER] = uniqueID
|
||||
|
||||
for nameID, string in nameIDs.items():
|
||||
assert string, nameID
|
||||
nametable.setName(string, nameID, *platform)
|
||||
|
||||
if "fvar" not in varfont:
|
||||
nametable.removeNames(NameID.VARIATIONS_POSTSCRIPT_NAME_PREFIX)
|
||||
|
||||
|
||||
def _updatePSNameRecord(varfont, familyName, styleName, platform):
|
||||
# Implementation based on Adobe Technical Note #5902 :
|
||||
# https://wwwimages2.adobe.com/content/dam/acom/en/devnet/font/pdfs/5902.AdobePSNameGeneration.pdf
|
||||
nametable = varfont["name"]
|
||||
|
||||
family_prefix = nametable.getName(
|
||||
NameID.VARIATIONS_POSTSCRIPT_NAME_PREFIX, *platform
|
||||
)
|
||||
if family_prefix:
|
||||
family_prefix = family_prefix.toUnicode()
|
||||
else:
|
||||
family_prefix = familyName
|
||||
|
||||
psName = f"{family_prefix}-{styleName}"
|
||||
# Remove any characters other than uppercase Latin letters, lowercase
|
||||
# Latin letters, digits and hyphens.
|
||||
psName = re.sub(r"[^A-Za-z0-9-]", r"", psName)
|
||||
|
||||
if len(psName) > 127:
|
||||
# Abbreviating the stylename so it fits within 127 characters whilst
|
||||
# conforming to every vendor's specification is too complex. Instead
|
||||
# we simply truncate the psname and add the required "..."
|
||||
return f"{psName[:124]}..."
|
||||
return psName
|
||||
|
||||
|
||||
def _updateUniqueIdNameRecord(varfont, nameIDs, platform):
|
||||
nametable = varfont["name"]
|
||||
currentRecord = nametable.getName(NameID.UNIQUE_FONT_IDENTIFIER, *platform)
|
||||
if not currentRecord:
|
||||
return None
|
||||
|
||||
# Check if full name and postscript name are a substring of currentRecord
|
||||
for nameID in (NameID.FULL_FONT_NAME, NameID.POSTSCRIPT_NAME):
|
||||
nameRecord = nametable.getName(nameID, *platform)
|
||||
if not nameRecord:
|
||||
continue
|
||||
if nameRecord.toUnicode() in currentRecord.toUnicode():
|
||||
return currentRecord.toUnicode().replace(
|
||||
nameRecord.toUnicode(), nameIDs[nameRecord.nameID]
|
||||
)
|
||||
|
||||
# Create a new string since we couldn't find any substrings.
|
||||
fontVersion = _fontVersion(varfont, platform)
|
||||
achVendID = varfont["OS/2"].achVendID
|
||||
    # Remove non-ASCII characters and trailing spaces
|
||||
vendor = re.sub(r"[^\x00-\x7F]", "", achVendID).strip()
|
||||
psName = nameIDs[NameID.POSTSCRIPT_NAME]
|
||||
return f"{fontVersion};{vendor};{psName}"
|
||||
|
||||
|
||||
def _fontVersion(font, platform=(3, 1, 0x409)):
|
||||
nameRecord = font["name"].getName(NameID.VERSION_STRING, *platform)
|
||||
if nameRecord is None:
|
||||
return f'{font["head"].fontRevision:.3f}'
|
||||
# "Version 1.101; ttfautohint (v1.8.1.43-b0c9)" --> "1.101"
|
||||
# Also works fine with inputs "Version 1.101" or "1.101" etc
|
||||
versionNumber = nameRecord.toUnicode().split(";")[0]
|
||||
    # str.removeprefix (Python 3.9+) avoids the str.lstrip character-set pitfall
    return versionNumber.removeprefix("Version").strip()
|
||||
|
|
@ -0,0 +1,309 @@
|
|||
from fontTools.varLib.models import supportScalar
|
||||
from fontTools.misc.fixedTools import MAX_F2DOT14
|
||||
from functools import lru_cache
|
||||
|
||||
__all__ = ["rebaseTent"]
|
||||
|
||||
EPSILON = 1 / (1 << 14)
|
||||
|
||||
|
||||
def _reverse_negate(v):
|
||||
return (-v[2], -v[1], -v[0])
|
||||
|
||||
|
||||
def _solve(tent, axisLimit, negative=False):
|
||||
axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
|
||||
lower, peak, upper = tent
|
||||
|
||||
# Mirror the problem such that axisDef <= peak
|
||||
if axisDef > peak:
|
||||
return [
|
||||
(scalar, _reverse_negate(t) if t is not None else None)
|
||||
for scalar, t in _solve(
|
||||
_reverse_negate(tent),
|
||||
axisLimit.reverse_negate(),
|
||||
not negative,
|
||||
)
|
||||
]
|
||||
# axisDef <= peak
|
||||
|
||||
# case 1: The whole deltaset falls outside the new limit; we can drop it
|
||||
#
|
||||
# peak
|
||||
# 1.........................................o..........
|
||||
# / \
|
||||
# / \
|
||||
# / \
|
||||
# / \
|
||||
# 0---|-----------|----------|-------- o o----1
|
||||
# axisMin axisDef axisMax lower upper
|
||||
#
|
||||
if axisMax <= lower and axisMax < peak:
|
||||
return [] # No overlap
|
||||
|
||||
# case 2: Only the peak and outermost bound fall outside the new limit;
|
||||
# we keep the deltaset, update peak and outermost bound and and scale deltas
|
||||
# by the scalar value for the restricted axis at the new limit, and solve
|
||||
# recursively.
|
||||
#
|
||||
# |peak
|
||||
# 1...............................|.o..........
|
||||
# |/ \
|
||||
# / \
|
||||
# /| \
|
||||
# / | \
|
||||
# 0--------------------------- o | o----1
|
||||
# lower | upper
|
||||
# |
|
||||
# axisMax
|
||||
#
|
||||
# Convert to:
|
||||
#
|
||||
# 1............................................
|
||||
# |
|
||||
# o peak
|
||||
# /|
|
||||
# /x|
|
||||
# 0--------------------------- o o upper ----1
|
||||
# lower |
|
||||
# |
|
||||
# axisMax
|
||||
if axisMax < peak:
|
||||
mult = supportScalar({"tag": axisMax}, {"tag": tent})
|
||||
tent = (lower, axisMax, axisMax)
|
||||
return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)]
|
||||
|
||||
# lower <= axisDef <= peak <= axisMax
|
||||
|
||||
gain = supportScalar({"tag": axisDef}, {"tag": tent})
|
||||
out = [(gain, None)]
|
||||
|
||||
# First, the positive side
|
||||
|
||||
# outGain is the scalar of axisMax at the tent.
|
||||
outGain = supportScalar({"tag": axisMax}, {"tag": tent})
|
||||
|
||||
# Case 3a: Gain is more than outGain. The tent down-slope crosses
|
||||
# the axis into negative. We have to split it into multiples.
|
||||
#
|
||||
# | peak |
|
||||
# 1...................|.o.....|..............
|
||||
# |/x\_ |
|
||||
# gain................+....+_.|..............
|
||||
# /| |y\|
|
||||
# ................../.|....|..+_......outGain
|
||||
# / | | | \
|
||||
# 0---|-----------o | | | o----------1
|
||||
# axisMin lower | | | upper
|
||||
# | | |
|
||||
# axisDef | axisMax
|
||||
# |
|
||||
# crossing
|
||||
if gain >= outGain:
|
||||
# Note that this is the branch taken if both gain and outGain are 0.
|
||||
|
||||
# Crossing point on the axis.
|
||||
crossing = peak + (1 - gain) * (upper - peak)
|
||||
|
||||
loc = (max(lower, axisDef), peak, crossing)
|
||||
scalar = 1
|
||||
|
||||
# The part before the crossing point.
|
||||
out.append((scalar - gain, loc))
|
||||
|
||||
# The part after the crossing point may use one or two tents,
|
||||
# depending on whether upper is before axisMax or not, in one
|
||||
# case we need to keep it down to eternity.
|
||||
|
||||
# Case 3a1, similar to case 1neg; just one tent needed, as in
|
||||
# the drawing above.
|
||||
if upper >= axisMax:
|
||||
loc = (crossing, axisMax, axisMax)
|
||||
scalar = outGain
|
||||
|
||||
out.append((scalar - gain, loc))
|
||||
|
||||
# Case 3a2: Similar to case 2neg; two tents needed, to keep
|
||||
# down to eternity.
|
||||
#
|
||||
# | peak |
|
||||
# 1...................|.o................|...
|
||||
# |/ \_ |
|
||||
# gain................+....+_............|...
|
||||
# /| | \xxxxxxxxxxy|
|
||||
# / | | \_xxxxxyyyy|
|
||||
# / | | \xxyyyyyy|
|
||||
# 0---|-----------o | | o-------|--1
|
||||
# axisMin lower | | upper |
|
||||
# | | |
|
||||
# axisDef | axisMax
|
||||
# |
|
||||
# crossing
|
||||
else:
|
||||
# A tent's peak cannot fall on axis default. Nudge it.
|
||||
if upper == axisDef:
|
||||
upper += EPSILON
|
||||
|
||||
# Downslope.
|
||||
loc1 = (crossing, upper, axisMax)
|
||||
scalar1 = 0
|
||||
|
||||
# Eternity justify.
|
||||
loc2 = (upper, axisMax, axisMax)
|
||||
scalar2 = 0
|
||||
|
||||
out.append((scalar1 - gain, loc1))
|
||||
out.append((scalar2 - gain, loc2))
|
||||
|
||||
else:
|
||||
# Special-case if peak is at axisMax.
|
||||
if axisMax == peak:
|
||||
upper = peak
|
||||
|
||||
# Case 3:
|
||||
# We keep delta as is and only scale the axis upper to achieve
|
||||
# the desired new tent if feasible.
|
||||
#
|
||||
# peak
|
||||
# 1.....................o....................
|
||||
# / \_|
|
||||
# ..................../....+_.........outGain
|
||||
# / | \
|
||||
# gain..............+......|..+_.............
|
||||
# /| | | \
|
||||
# 0---|-----------o | | | o----------1
|
||||
# axisMin lower| | | upper
|
||||
# | | newUpper
|
||||
# axisDef axisMax
|
||||
#
|
||||
newUpper = peak + (1 - gain) * (upper - peak)
|
||||
assert axisMax <= newUpper # Because outGain > gain
|
||||
# Disabled because ots doesn't like us:
|
||||
# https://github.com/fonttools/fonttools/issues/3350
|
||||
if False and newUpper <= axisDef + (axisMax - axisDef) * 2:
|
||||
upper = newUpper
|
||||
if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper:
|
||||
# we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience
|
||||
upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14
|
||||
assert peak < upper
|
||||
|
||||
loc = (max(axisDef, lower), peak, upper)
|
||||
scalar = 1
|
||||
|
||||
out.append((scalar - gain, loc))
|
||||
|
||||
# Case 4: New limit doesn't fit; we need to chop into two tents,
|
||||
# because the shape of a triangle with part of one side cut off
|
||||
# cannot be represented as a triangle itself.
|
||||
#
|
||||
# | peak |
|
||||
# 1.........|......o.|....................
|
||||
# ..........|...../x\|.............outGain
|
||||
# | |xxy|\_
|
||||
# | /xxxy| \_
|
||||
# | |xxxxy| \_
|
||||
# | /xxxxy| \_
|
||||
# 0---|-----|-oxxxxxx| o----------1
|
||||
# axisMin | lower | upper
|
||||
# | |
|
||||
# axisDef axisMax
|
||||
#
|
||||
else:
|
||||
loc1 = (max(axisDef, lower), peak, axisMax)
|
||||
scalar1 = 1
|
||||
|
||||
loc2 = (peak, axisMax, axisMax)
|
||||
scalar2 = outGain
|
||||
|
||||
out.append((scalar1 - gain, loc1))
|
||||
# Don't add a dirac delta!
|
||||
if peak < axisMax:
|
||||
out.append((scalar2 - gain, loc2))
|
||||
|
||||
# Now, the negative side
|
||||
|
||||
# Case 1neg: Lower extends beyond axisMin: we chop. Simple.
|
||||
#
|
||||
# | |peak
|
||||
# 1..................|...|.o.................
|
||||
# | |/ \
|
||||
# gain...............|...+...\...............
|
||||
# |x_/| \
|
||||
# |/ | \
|
||||
# _/| | \
|
||||
# 0---------------o | | o----------1
|
||||
# lower | | upper
|
||||
# | |
|
||||
# axisMin axisDef
|
||||
#
|
||||
if lower <= axisMin:
|
||||
loc = (axisMin, axisMin, axisDef)
|
||||
scalar = supportScalar({"tag": axisMin}, {"tag": tent})
|
||||
|
||||
out.append((scalar - gain, loc))
|
||||
|
||||
    # Case 2neg: Lower is between axisMin and axisDef: we add two
|
||||
# tents to keep it down all the way to eternity.
|
||||
#
|
||||
# | |peak
|
||||
# 1...|...............|.o.................
|
||||
# | |/ \
|
||||
# gain|...............+...\...............
|
||||
# |yxxxxxxxxxxxxx/| \
|
||||
# |yyyyyyxxxxxxx/ | \
|
||||
# |yyyyyyyyyyyx/ | \
|
||||
# 0---|-----------o | o----------1
|
||||
# axisMin lower | upper
|
||||
# |
|
||||
# axisDef
|
||||
#
|
||||
else:
|
||||
# A tent's peak cannot fall on axis default. Nudge it.
|
||||
if lower == axisDef:
|
||||
lower -= EPSILON
|
||||
|
||||
# Downslope.
|
||||
loc1 = (axisMin, lower, axisDef)
|
||||
scalar1 = 0
|
||||
|
||||
# Eternity justify.
|
||||
loc2 = (axisMin, axisMin, lower)
|
||||
scalar2 = 0
|
||||
|
||||
out.append((scalar1 - gain, loc1))
|
||||
out.append((scalar2 - gain, loc2))
|
||||
|
||||
return out
|
||||
|
||||
|
||||
@lru_cache(128)
|
||||
def rebaseTent(tent, axisLimit):
|
||||
"""Given a tuple (lower,peak,upper) "tent" and new axis limits
|
||||
(axisMin,axisDefault,axisMax), solves how to represent the tent
|
||||
under the new axis configuration. All values are in normalized
|
||||
-1,0,+1 coordinate system. Tent values can be outside this range.
|
||||
|
||||
Return value is a list of tuples. Each tuple is of the form
|
||||
    (scalar,tent), where scalar is a multiplier to multiply any
|
||||
delta-sets by, and tent is a new tent for that output delta-set.
|
||||
If tent value is None, that is a special deltaset that should
|
||||
be always-enabled (called "gain")."""
|
||||
|
||||
axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
|
||||
assert -1 <= axisMin <= axisDef <= axisMax <= +1
|
||||
|
||||
lower, peak, upper = tent
|
||||
assert -2 <= lower <= peak <= upper <= +2
|
||||
|
||||
assert peak != 0
|
||||
|
||||
sols = _solve(tent, axisLimit)
|
||||
|
||||
n = lambda v: axisLimit.renormalizeValue(v)
|
||||
sols = [
|
||||
(scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None)
|
||||
for scalar, v in sols
|
||||
if scalar
|
||||
]
|
||||
|
||||
return sols
|
||||
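
# Added worked example (values chosen for illustration, not part of the
# original module): limiting the axis to (0, 0, 1) leaves a tent peaked at the
# old maximum untouched, so its delta-set survives with scalar 1.
def _rebaseTent_example():
    from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances

    axisLimit = NormalizedAxisTripleAndDistances(0, 0, 1)
    assert rebaseTent((0, 1, 1), axisLimit) == [(1, (0, 1, 1))]
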
1209
venv/lib/python3.13/site-packages/fontTools/varLib/interpolatable.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,396 @@
|
|||
from fontTools.ttLib.ttGlyphSet import LerpGlyphSet
from fontTools.pens.basePen import AbstractPen, BasePen, DecomposingPen
from fontTools.pens.pointPen import AbstractPointPen, SegmentToPointPen
from fontTools.pens.recordingPen import RecordingPen, DecomposingRecordingPen
from fontTools.misc.transform import Transform
from collections import defaultdict, deque
from math import sqrt, copysign, atan2, pi
from enum import Enum
import itertools

import logging

log = logging.getLogger("fontTools.varLib.interpolatable")


class InterpolatableProblem:
    NOTHING = "nothing"
    MISSING = "missing"
    OPEN_PATH = "open_path"
    PATH_COUNT = "path_count"
    NODE_COUNT = "node_count"
    NODE_INCOMPATIBILITY = "node_incompatibility"
    CONTOUR_ORDER = "contour_order"
    WRONG_START_POINT = "wrong_start_point"
    KINK = "kink"
    UNDERWEIGHT = "underweight"
    OVERWEIGHT = "overweight"

    severity = {
        MISSING: 1,
        OPEN_PATH: 2,
        PATH_COUNT: 3,
        NODE_COUNT: 4,
        NODE_INCOMPATIBILITY: 5,
        CONTOUR_ORDER: 6,
        WRONG_START_POINT: 7,
        KINK: 8,
        UNDERWEIGHT: 9,
        OVERWEIGHT: 10,
        NOTHING: 11,
    }


def sort_problems(problems):
    """Sort problems by severity, then by glyph name, then by problem message."""
    return dict(
        sorted(
            problems.items(),
            key=lambda _: -min(
                (
                    (InterpolatableProblem.severity[p["type"]] + p.get("tolerance", 0))
                    for p in _[1]
                ),
            ),
            reverse=True,
        )
    )


def rot_list(l, k):
    """Rotate list by k items forward. Ie. item at position 0 will be
    at position k in returned list. Negative k is allowed."""
    return l[-k:] + l[:-k]


class PerContourPen(BasePen):
    def __init__(self, Pen, glyphset=None):
        BasePen.__init__(self, glyphset)
        self._glyphset = glyphset
        self._Pen = Pen
        self._pen = None
        self.value = []

    def _moveTo(self, p0):
        self._newItem()
        self._pen.moveTo(p0)

    def _lineTo(self, p1):
        self._pen.lineTo(p1)

    def _qCurveToOne(self, p1, p2):
        self._pen.qCurveTo(p1, p2)

    def _curveToOne(self, p1, p2, p3):
        self._pen.curveTo(p1, p2, p3)

    def _closePath(self):
        self._pen.closePath()
        self._pen = None

    def _endPath(self):
        self._pen.endPath()
        self._pen = None

    def _newItem(self):
        self._pen = pen = self._Pen()
        self.value.append(pen)


class PerContourOrComponentPen(PerContourPen):
    def addComponent(self, glyphName, transformation):
        self._newItem()
        self.value[-1].addComponent(glyphName, transformation)


class SimpleRecordingPointPen(AbstractPointPen):
    def __init__(self):
        self.value = []

    def beginPath(self, identifier=None, **kwargs):
        pass

    def endPath(self) -> None:
        pass

    def addPoint(self, pt, segmentType=None):
        self.value.append((pt, False if segmentType is None else True))


def vdiff_hypot2(v0, v1):
    s = 0
    for x0, x1 in zip(v0, v1):
        d = x1 - x0
        s += d * d
    return s


def vdiff_hypot2_complex(v0, v1):
    s = 0
    for x0, x1 in zip(v0, v1):
        d = x1 - x0
        s += d.real * d.real + d.imag * d.imag
        # This does the same but seems to be slower:
        # s += (d * d.conjugate()).real
    return s


def matching_cost(G, matching):
    return sum(G[i][j] for i, j in enumerate(matching))


def min_cost_perfect_bipartite_matching_scipy(G):
    n = len(G)
    rows, cols = linear_sum_assignment(G)
    assert (rows == list(range(n))).all()
    # Convert numpy array and integer to Python types,
    # to ensure that this is JSON-serializable.
    cols = list(int(e) for e in cols)
    return list(cols), matching_cost(G, cols)


def min_cost_perfect_bipartite_matching_munkres(G):
    n = len(G)
    cols = [None] * n
    for row, col in Munkres().compute(G):
        cols[row] = col
    return cols, matching_cost(G, cols)


def min_cost_perfect_bipartite_matching_bruteforce(G):
    n = len(G)

    if n > 6:
        raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")

    # Otherwise just brute-force
    permutations = itertools.permutations(range(n))
    best = list(next(permutations))
    best_cost = matching_cost(G, best)
    for p in permutations:
        cost = matching_cost(G, p)
        if cost < best_cost:
            best, best_cost = list(p), cost
    return best, best_cost


try:
    from scipy.optimize import linear_sum_assignment

    min_cost_perfect_bipartite_matching = min_cost_perfect_bipartite_matching_scipy
except ImportError:
    try:
        from munkres import Munkres

        min_cost_perfect_bipartite_matching = (
            min_cost_perfect_bipartite_matching_munkres
        )
    except ImportError:
        min_cost_perfect_bipartite_matching = (
            min_cost_perfect_bipartite_matching_bruteforce
        )


def contour_vector_from_stats(stats):
    # Don't change the order of items here.
    # It's okay to add to the end, but otherwise, other
    # code depends on it. Search for "covariance".
    size = sqrt(abs(stats.area))
    return (
        copysign((size), stats.area),
        stats.meanX,
        stats.meanY,
        stats.stddevX * 2,
        stats.stddevY * 2,
        stats.correlation * size,
    )


def matching_for_vectors(m0, m1):
    n = len(m0)

    identity_matching = list(range(n))

    costs = [[vdiff_hypot2(v0, v1) for v1 in m1] for v0 in m0]
    (
        matching,
        matching_cost,
    ) = min_cost_perfect_bipartite_matching(costs)
    identity_cost = sum(costs[i][i] for i in range(n))
    return matching, matching_cost, identity_cost


def points_characteristic_bits(points):
    bits = 0
    for pt, b in reversed(points):
        bits = (bits << 1) | b
    return bits


_NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR = 4


def points_complex_vector(points):
    vector = []
    if not points:
        return vector
    points = [complex(*pt) for pt, _ in points]
    n = len(points)
    assert _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR == 4
    points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1])
    while len(points) < _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR:
        points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1])
    for i in range(n):
        # The weights are magic numbers.

        # The point itself
        p0 = points[i]
        vector.append(p0)

        # The vector to the next point
        p1 = points[i + 1]
        d0 = p1 - p0
        vector.append(d0 * 3)

        # The turn vector
        p2 = points[i + 2]
        d1 = p2 - p1
        vector.append(d1 - d0)

        # The angle to the next point, as a cross product;
        # Square root of, to match dimensionality of distance.
        cross = d0.real * d1.imag - d0.imag * d1.real
        cross = copysign(sqrt(abs(cross)), cross)
        vector.append(cross * 4)

    return vector


def add_isomorphisms(points, isomorphisms, reverse):
    reference_bits = points_characteristic_bits(points)
    n = len(points)

    # if points[0][0] == points[-1][0]:
    #   abort

    if reverse:
        points = points[::-1]
        bits = points_characteristic_bits(points)
    else:
        bits = reference_bits

    vector = points_complex_vector(points)

    assert len(vector) % n == 0
    mult = len(vector) // n
    mask = (1 << n) - 1

    for i in range(n):
        b = ((bits << (n - i)) & mask) | (bits >> i)
        if b == reference_bits:
            isomorphisms.append(
                (rot_list(vector, -i * mult), n - 1 - i if reverse else i, reverse)
            )


def find_parents_and_order(glyphsets, locations, *, discrete_axes=set()):
    parents = [None] + list(range(len(glyphsets) - 1))
    order = list(range(len(glyphsets)))
    if locations:
        # Order base master first
        bases = [
            i
            for i, l in enumerate(locations)
            if all(v == 0 for k, v in l.items() if k not in discrete_axes)
        ]
        if bases:
            logging.info("Found %s base masters: %s", len(bases), bases)
        else:
            logging.warning("No base master location found")

        # Form a minimum spanning tree of the locations
        try:
            from scipy.sparse.csgraph import minimum_spanning_tree

            graph = [[0] * len(locations) for _ in range(len(locations))]
            axes = set()
            for l in locations:
                axes.update(l.keys())
            axes = sorted(axes)
            vectors = [tuple(l.get(k, 0) for k in axes) for l in locations]
            for i, j in itertools.combinations(range(len(locations)), 2):
                i_discrete_location = {
                    k: v for k, v in zip(axes, vectors[i]) if k in discrete_axes
                }
                j_discrete_location = {
                    k: v for k, v in zip(axes, vectors[j]) if k in discrete_axes
                }
                if i_discrete_location != j_discrete_location:
                    continue
                graph[i][j] = vdiff_hypot2(vectors[i], vectors[j])

            tree = minimum_spanning_tree(graph, overwrite=True)
            rows, cols = tree.nonzero()
            graph = defaultdict(set)
            for row, col in zip(rows, cols):
                graph[row].add(col)
                graph[col].add(row)

            # Traverse graph from the base and assign parents
            parents = [None] * len(locations)
            order = []
            visited = set()
            queue = deque(bases)
            while queue:
                i = queue.popleft()
                visited.add(i)
                order.append(i)
                for j in sorted(graph[i]):
                    if j not in visited:
                        parents[j] = i
                        queue.append(j)
            assert len(order) == len(
                parents
            ), "Not all masters are reachable; report an issue"

        except ImportError:
            pass

    log.info("Parents: %s", parents)
    log.info("Order: %s", order)
    return parents, order


def transform_from_stats(stats, inverse=False):
    # https://cookierobotics.com/007/
    a = stats.varianceX
    b = stats.covariance
    c = stats.varianceY

    delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5
    lambda1 = (a + c) * 0.5 + delta  # Major eigenvalue
    lambda2 = (a + c) * 0.5 - delta  # Minor eigenvalue
    theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)
    trans = Transform()

    if lambda2 < 0:
        # XXX This is a hack.
        # The problem is that the covariance matrix is singular.
        # This happens when the contour is a line, or a circle.
        # In that case, the covariance matrix is not a good
        # representation of the contour.
        # We should probably detect this earlier and avoid
        # computing the covariance matrix in the first place.
        # But for now, we just avoid the division by zero.
        lambda2 = 0

    if inverse:
        trans = trans.translate(-stats.meanX, -stats.meanY)
        trans = trans.rotate(-theta)
        trans = trans.scale(1 / sqrt(lambda1), 1 / sqrt(lambda2))
    else:
        trans = trans.scale(sqrt(lambda1), sqrt(lambda2))
        trans = trans.rotate(theta)
        trans = trans.translate(stats.meanX, stats.meanY)

    return trans
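For orientation, the matching helpers above can be exercised standalone on a small cost matrix. A minimal sketch; the module path is an assumption, since the file header of this hunk was suppressed (in fontTools these helpers live in fontTools.varLib.interpolatableHelpers):

from fontTools.varLib.interpolatableHelpers import (  # assumed module path
    min_cost_perfect_bipartite_matching,
)

# Cost of assigning contour i of master A to contour j of master B.
G = [
    [0.0, 5.0, 2.0],
    [5.0, 0.0, 3.0],
    [2.0, 3.0, 0.0],
]
matching, cost = min_cost_perfect_bipartite_matching(G)
print(matching, cost)  # identity matching [0, 1, 2] with total cost 0.0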
File diff suppressed because it is too large
@@ -0,0 +1,82 @@
from .interpolatableHelpers import *
import logging

log = logging.getLogger("fontTools.varLib.interpolatable")


def test_contour_order(glyph0, glyph1):
    # We try matching both the StatisticsControlPen vector
    # and the StatisticsPen vector.
    #
    # If either method found an identity matching, accept it.
    # This is crucial for fonts like Kablammo[MORF].ttf and
    # Nabla[EDPT,EHLT].ttf, since they really confuse the
    # StatisticsPen vector because of their area=0 contours.

    n = len(glyph0.controlVectors)
    matching = None
    matching_cost = 0
    identity_cost = 0
    done = n <= 1
    if not done:
        m0Control = glyph0.controlVectors
        m1Control = glyph1.controlVectors
        (
            matching_control,
            matching_cost_control,
            identity_cost_control,
        ) = matching_for_vectors(m0Control, m1Control)
        done = matching_cost_control == identity_cost_control
    if not done:
        m0Green = glyph0.greenVectors
        m1Green = glyph1.greenVectors
        (
            matching_green,
            matching_cost_green,
            identity_cost_green,
        ) = matching_for_vectors(m0Green, m1Green)
        done = matching_cost_green == identity_cost_green

    if not done:
        # See if reversing contours in one master helps.
        # That's a common problem. Then the wrong_start_point
        # test will fix them.
        #
        # Reverse the sign of the area (0); the rest stay the same.
        if not done:
            m1ControlReversed = [(-m[0],) + m[1:] for m in m1Control]
            (
                matching_control_reversed,
                matching_cost_control_reversed,
                identity_cost_control_reversed,
            ) = matching_for_vectors(m0Control, m1ControlReversed)
            done = matching_cost_control_reversed == identity_cost_control_reversed
        if not done:
            m1GreenReversed = [(-m[0],) + m[1:] for m in m1Green]
            (
                matching_control_reversed,
                matching_cost_green_reversed,
                identity_cost_green_reversed,
            ) = matching_for_vectors(m0Green, m1GreenReversed)
            done = matching_cost_green_reversed == identity_cost_green_reversed

    if not done:
        # Otherwise, use the worst of the two matchings.
        if (
            matching_cost_control / identity_cost_control
            < matching_cost_green / identity_cost_green
        ):
            matching = matching_control
            matching_cost = matching_cost_control
            identity_cost = identity_cost_control
        else:
            matching = matching_green
            matching_cost = matching_cost_green
            identity_cost = identity_cost_green

    this_tolerance = matching_cost / identity_cost if identity_cost else 1
    log.debug(
        "test-contour-order: tolerance %g",
        this_tolerance,
    )
    return this_tolerance, matching
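test_contour_order only needs objects exposing controlVectors and greenVectors sequences, so a simple stand-in suffices to see it flag swapped contours. A minimal sketch (module path assumed from fontTools' layout; the vectors are made-up 3-tuples rather than real 6-element stat vectors):

from types import SimpleNamespace
from fontTools.varLib.interpolatableTestContourOrder import test_contour_order

a = SimpleNamespace(controlVectors=[(1, 0, 0), (2, 5, 5)],
                    greenVectors=[(1, 0, 0), (2, 5, 5)])
b = SimpleNamespace(controlVectors=[(2, 5, 5), (1, 0, 0)],
                    greenVectors=[(2, 5, 5), (1, 0, 0)])
tolerance, matching = test_contour_order(a, b)
print(tolerance, matching)  # 0.0 [1, 0]: same contours, listed in swapped order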
@@ -0,0 +1,107 @@
from .interpolatableHelpers import *


def test_starting_point(glyph0, glyph1, ix, tolerance, matching):
    if matching is None:
        matching = list(range(len(glyph0.isomorphisms)))
    contour0 = glyph0.isomorphisms[ix]
    contour1 = glyph1.isomorphisms[matching[ix]]
    m0Vectors = glyph0.greenVectors
    m1Vectors = [glyph1.greenVectors[i] for i in matching]

    c0 = contour0[0]
    # Next few lines duplicated below.
    costs = [vdiff_hypot2_complex(c0[0], c1[0]) for c1 in contour1]
    min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1])
    first_cost = costs[0]
    proposed_point = contour1[min_cost_idx][1]
    reverse = contour1[min_cost_idx][2]

    if min_cost < first_cost * tolerance:
        # c0 is the first isomorphism of the m0 master
        # contour1 is list of all isomorphisms of the m1 master
        #
        # If the two shapes are both circle-ish and slightly
        # rotated, we detect wrong start point. This is for
        # example the case hundreds of times in
        # RobotoSerif-Italic[GRAD,opsz,wdth,wght].ttf
        #
        # If the proposed point is only one off from the first
        # point (and not reversed), try harder:
        #
        # Find the major eigenvector of the covariance matrix,
        # and rotate the contours by that angle. Then find the
        # closest point again. If it matches this time, let it
        # pass.

        num_points = len(glyph1.points[ix])
        leeway = 3
        if not reverse and (
            proposed_point <= leeway or proposed_point >= num_points - leeway
        ):
            # Try harder

            # Recover the covariance matrix from the GreenVectors.
            # This is a 2x2 matrix.
            transforms = []
            for vector in (m0Vectors[ix], m1Vectors[ix]):
                meanX = vector[1]
                meanY = vector[2]
                stddevX = vector[3] * 0.5
                stddevY = vector[4] * 0.5
                correlation = vector[5]
                if correlation:
                    correlation /= abs(vector[0])

                # https://cookierobotics.com/007/
                a = stddevX * stddevX  # VarianceX
                c = stddevY * stddevY  # VarianceY
                b = correlation * stddevX * stddevY  # Covariance

                delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5
                lambda1 = (a + c) * 0.5 + delta  # Major eigenvalue
                lambda2 = (a + c) * 0.5 - delta  # Minor eigenvalue
                theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)
                trans = Transform()
                # Don't translate here. We are working on the complex-vector
                # that includes more than just the points. It's horrible what
                # we are doing anyway...
                # trans = trans.translate(meanX, meanY)
                trans = trans.rotate(theta)
                trans = trans.scale(sqrt(lambda1), sqrt(lambda2))
                transforms.append(trans)

            trans = transforms[0]
            new_c0 = (
                [complex(*trans.transformPoint((pt.real, pt.imag))) for pt in c0[0]],
            ) + c0[1:]
            trans = transforms[1]
            new_contour1 = []
            for c1 in contour1:
                new_c1 = (
                    [
                        complex(*trans.transformPoint((pt.real, pt.imag)))
                        for pt in c1[0]
                    ],
                ) + c1[1:]
                new_contour1.append(new_c1)

            # Next few lines duplicated from above.
            costs = [
                vdiff_hypot2_complex(new_c0[0], new_c1[0]) for new_c1 in new_contour1
            ]
            min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1])
            first_cost = costs[0]
            if min_cost < first_cost * tolerance:
                # Don't report this
                # min_cost = first_cost
                # reverse = False
                # proposed_point = 0  # new_contour1[min_cost_idx][1]
                pass

    this_tolerance = min_cost / first_cost if first_cost else 1
    log.debug(
        "test-starting-point: tolerance %g",
        this_tolerance,
    )
    return this_tolerance, proposed_point, reverse
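The covariance-to-ellipse step above is compact enough to check numerically on its own. A standalone sketch of the same formulas (https://cookierobotics.com/007/), with made-up variance values:

from math import atan2, pi, sqrt

a, b, c = 4.0, 1.5, 1.0  # varianceX, covariance, varianceY
delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5
lambda1 = (a + c) * 0.5 + delta  # major eigenvalue
lambda2 = (a + c) * 0.5 - delta  # minor eigenvalue
theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)
print(sqrt(lambda1), sqrt(lambda2), theta)  # ellipse semi-axes and tilt angle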
@@ -0,0 +1,124 @@
"""
|
||||
Interpolate OpenType Layout tables (GDEF / GPOS / GSUB).
|
||||
"""
|
||||
|
||||
from fontTools.ttLib import TTFont
|
||||
from fontTools.varLib import models, VarLibError, load_designspace, load_masters
|
||||
from fontTools.varLib.merger import InstancerMerger
|
||||
import os.path
|
||||
import logging
|
||||
from copy import deepcopy
|
||||
from pprint import pformat
|
||||
|
||||
log = logging.getLogger("fontTools.varLib.interpolate_layout")
|
||||
|
||||
|
||||
def interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False):
|
||||
"""
|
||||
Interpolate GPOS from a designspace file and location.
|
||||
|
||||
If master_finder is set, it should be a callable that takes master
|
||||
filename as found in designspace file and map it to master font
|
||||
binary as to be opened (eg. .ttf or .otf).
|
||||
|
||||
If mapped is False (default), then location is mapped using the
|
||||
map element of the axes in designspace file. If mapped is True,
|
||||
it is assumed that location is in designspace's internal space and
|
||||
no mapping is performed.
|
||||
"""
|
||||
if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
|
||||
pass
|
||||
else: # Assume a file path
|
||||
from fontTools.designspaceLib import DesignSpaceDocument
|
||||
|
||||
designspace = DesignSpaceDocument.fromfile(designspace)
|
||||
|
||||
ds = load_designspace(designspace)
|
||||
log.info("Building interpolated font")
|
||||
|
||||
log.info("Loading master fonts")
|
||||
master_fonts = load_masters(designspace, master_finder)
|
||||
font = deepcopy(master_fonts[ds.base_idx])
|
||||
|
||||
log.info("Location: %s", pformat(loc))
|
||||
if not mapped:
|
||||
loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()}
|
||||
log.info("Internal location: %s", pformat(loc))
|
||||
loc = models.normalizeLocation(loc, ds.internal_axis_supports)
|
||||
log.info("Normalized location: %s", pformat(loc))
|
||||
|
||||
# Assume single-model for now.
|
||||
model = models.VariationModel(ds.normalized_master_locs)
|
||||
assert 0 == model.mapping[ds.base_idx]
|
||||
|
||||
merger = InstancerMerger(font, model, loc)
|
||||
|
||||
log.info("Building interpolated tables")
|
||||
# TODO GSUB/GDEF
|
||||
merger.mergeTables(font, master_fonts, ["GPOS"])
|
||||
return font
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Interpolate GDEF/GPOS/GSUB tables for a point on a designspace"""
|
||||
from fontTools import configLogger
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
"fonttools varLib.interpolate_layout",
|
||||
description=main.__doc__,
|
||||
)
|
||||
parser.add_argument(
|
||||
"designspace_filename", metavar="DESIGNSPACE", help="Input TTF files"
|
||||
)
|
||||
parser.add_argument(
|
||||
"locations",
|
||||
metavar="LOCATION",
|
||||
type=str,
|
||||
nargs="+",
|
||||
help="Axis locations (e.g. wdth=120",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-o",
|
||||
"--output",
|
||||
metavar="OUTPUT",
|
||||
help="Output font file (defaults to <designspacename>-instance.ttf)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l",
|
||||
"--loglevel",
|
||||
metavar="LEVEL",
|
||||
default="INFO",
|
||||
help="Logging level (defaults to INFO)",
|
||||
)
|
||||
|
||||
args = parser.parse_args(args)
|
||||
|
||||
if not args.output:
|
||||
args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf"
|
||||
|
||||
configLogger(level=args.loglevel)
|
||||
|
||||
finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace(
|
||||
".ufo", ".ttf"
|
||||
)
|
||||
|
||||
loc = {}
|
||||
for arg in args.locations:
|
||||
tag, val = arg.split("=")
|
||||
loc[tag] = float(val)
|
||||
|
||||
font = interpolate_layout(args.designspace_filename, loc, finder)
|
||||
log.info("Saving font %s", args.output)
|
||||
font.save(args.output)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) > 1:
|
||||
sys.exit(main())
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
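The public entry point above can also be driven directly from Python. A minimal sketch with hypothetical paths (master_finder defaults to the identity, so the designspace must point at the actual master binaries):

from fontTools.varLib.interpolate_layout import interpolate_layout

font = interpolate_layout("MyFamily.designspace", {"wght": 650})
font.save("MyFamily-wght650-gpos.ttf")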
19845
venv/lib/python3.13/site-packages/fontTools/varLib/iup.c
Normal file
File diff suppressed because it is too large
Binary file not shown.
490
venv/lib/python3.13/site-packages/fontTools/varLib/iup.py
Normal file
@@ -0,0 +1,490 @@
try:
    import cython
except (AttributeError, ImportError):
    # if cython not installed, use mock module with no-op decorators and types
    from fontTools.misc import cython
COMPILED = cython.compiled

from typing import (
    Sequence,
    Tuple,
    Union,
)
from numbers import Integral, Real


_Point = Tuple[Real, Real]
_Delta = Tuple[Real, Real]
_PointSegment = Sequence[_Point]
_DeltaSegment = Sequence[_Delta]
_DeltaOrNone = Union[_Delta, None]
_DeltaOrNoneSegment = Sequence[_DeltaOrNone]
_Endpoints = Sequence[Integral]


MAX_LOOKBACK = 8


@cython.cfunc
@cython.locals(
    j=cython.int,
    n=cython.int,
    x1=cython.double,
    x2=cython.double,
    d1=cython.double,
    d2=cython.double,
    scale=cython.double,
    x=cython.double,
    d=cython.double,
)
def iup_segment(
    coords: _PointSegment, rc1: _Point, rd1: _Delta, rc2: _Point, rd2: _Delta
):  # -> _DeltaSegment:
    """Given two reference coordinates `rc1` & `rc2` and their respective
    delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of
    coordinates `coords`."""

    # rc1 = reference coord 1
    # rd1 = reference delta 1
    out_arrays = [None, None]
    for j in 0, 1:
        out_arrays[j] = out = []
        x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j]

        if x1 == x2:
            n = len(coords)
            if d1 == d2:
                out.extend([d1] * n)
            else:
                out.extend([0] * n)
            continue

        if x1 > x2:
            x1, x2 = x2, x1
            d1, d2 = d2, d1

        # x1 < x2
        scale = (d2 - d1) / (x2 - x1)
        for pair in coords:
            x = pair[j]

            if x <= x1:
                d = d1
            elif x >= x2:
                d = d2
            else:
                # Interpolate
                #
                # NOTE: we assign an explicit intermediate variable here in
                # order to disable a fused mul-add optimization. See:
                #
                # - https://godbolt.org/z/YsP4T3TqK,
                # - https://github.com/fonttools/fonttools/issues/3703
                nudge = (x - x1) * scale
                d = d1 + nudge

            out.append(d)

    return zip(*out_arrays)


def iup_contour(deltas: _DeltaOrNoneSegment, coords: _PointSegment) -> _DeltaSegment:
    """For the contour given in `coords`, interpolate any missing
    delta values in delta vector `deltas`.

    Returns fully filled-out delta vector."""

    assert len(deltas) == len(coords)
    if None not in deltas:
        return deltas

    n = len(deltas)
    # indices of points with explicit deltas
    indices = [i for i, v in enumerate(deltas) if v is not None]
    if not indices:
        # All deltas are None. Return 0,0 for all.
        return [(0, 0)] * n

    out = []
    it = iter(indices)
    start = next(it)
    if start != 0:
        # Initial segment that wraps around
        i1, i2, ri1, ri2 = 0, start, start, indices[-1]
        out.extend(
            iup_segment(
                coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
            )
        )
    out.append(deltas[start])
    for end in it:
        if end - start > 1:
            i1, i2, ri1, ri2 = start + 1, end, start, end
            out.extend(
                iup_segment(
                    coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
                )
            )
        out.append(deltas[end])
        start = end
    if start != n - 1:
        # Final segment that wraps around
        i1, i2, ri1, ri2 = start + 1, n, start, indices[0]
        out.extend(
            iup_segment(
                coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
            )
        )

    assert len(deltas) == len(out), (len(deltas), len(out))
    return out


def iup_delta(
    deltas: _DeltaOrNoneSegment, coords: _PointSegment, ends: _Endpoints
) -> _DeltaSegment:
    """For the outline given in `coords`, with contour endpoints given
    in sorted increasing order in `ends`, interpolate any missing
    delta values in delta vector `deltas`.

    Returns fully filled-out delta vector."""

    assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
    n = len(coords)
    ends = ends + [n - 4, n - 3, n - 2, n - 1]
    out = []
    start = 0
    for end in ends:
        end += 1
        contour = iup_contour(deltas[start:end], coords[start:end])
        out.extend(contour)
        start = end

    return out


# Optimizer


@cython.cfunc
@cython.inline
@cython.locals(
    i=cython.int,
    j=cython.int,
    # tolerance=cython.double,  # https://github.com/fonttools/fonttools/issues/3282
    x=cython.double,
    y=cython.double,
    p=cython.double,
    q=cython.double,
)
@cython.returns(int)
def can_iup_in_between(
    deltas: _DeltaSegment,
    coords: _PointSegment,
    i: Integral,
    j: Integral,
    tolerance: Real,
):  # -> bool:
    """Return true if the deltas for points at `i` and `j` (`i < j`) can be
    successfully used to interpolate deltas for points in between them within
    provided error tolerance."""

    assert j - i >= 2
    interp = iup_segment(coords[i + 1 : j], coords[i], deltas[i], coords[j], deltas[j])
    deltas = deltas[i + 1 : j]

    return all(
        abs(complex(x - p, y - q)) <= tolerance
        for (x, y), (p, q) in zip(deltas, interp)
    )


@cython.locals(
    cj=cython.double,
    dj=cython.double,
    lcj=cython.double,
    ldj=cython.double,
    ncj=cython.double,
    ndj=cython.double,
    force=cython.int,
    forced=set,
)
def _iup_contour_bound_forced_set(
    deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0
) -> set:
    """The forced set is a conservative set of points on the contour that must be encoded
    explicitly (ie. cannot be interpolated). Calculating this set allows for significantly
    speeding up the dynamic-programming, as well as resolve circularity in DP.

    The set is precise; that is, if an index is in the returned set, then there is no way
    that IUP can generate delta for that point, given `coords` and `deltas`.
    """
    assert len(deltas) == len(coords)

    n = len(deltas)
    forced = set()
    # Track "last" and "next" points on the contour as we sweep.
    for i in range(len(deltas) - 1, -1, -1):
        ld, lc = deltas[i - 1], coords[i - 1]
        d, c = deltas[i], coords[i]
        nd, nc = deltas[i - n + 1], coords[i - n + 1]

        for j in (0, 1):  # For X and for Y
            cj = c[j]
            dj = d[j]
            lcj = lc[j]
            ldj = ld[j]
            ncj = nc[j]
            ndj = nd[j]

            if lcj <= ncj:
                c1, c2 = lcj, ncj
                d1, d2 = ldj, ndj
            else:
                c1, c2 = ncj, lcj
                d1, d2 = ndj, ldj

            force = False

            # If the two coordinates are the same, then the interpolation
            # algorithm produces the same delta if both deltas are equal,
            # and zero if they differ.
            #
            # This test has to be before the next one.
            if c1 == c2:
                if abs(d1 - d2) > tolerance and abs(dj) > tolerance:
                    force = True

            # If coordinate for current point is between coordinate of adjacent
            # points on the two sides, but the delta for current point is NOT
            # between delta for those adjacent points (considering tolerance
            # allowance), then there is no way that current point can be IUP-ed.
            # Mark it forced.
            elif c1 <= cj <= c2:  # and c1 != c2
                if not (min(d1, d2) - tolerance <= dj <= max(d1, d2) + tolerance):
                    force = True

            # Otherwise, the delta should either match the closest, or have the
            # same sign as the interpolation of the two deltas.
            else:  # cj < c1 or c2 < cj
                if d1 != d2:
                    if cj < c1:
                        if (
                            abs(dj) > tolerance
                            and abs(dj - d1) > tolerance
                            and ((dj - tolerance < d1) != (d1 < d2))
                        ):
                            force = True
                    else:  # c2 < cj
                        if (
                            abs(dj) > tolerance
                            and abs(dj - d2) > tolerance
                            and ((d2 < dj + tolerance) != (d1 < d2))
                        ):
                            force = True

            if force:
                forced.add(i)
                break

    return forced


@cython.locals(
    i=cython.int,
    j=cython.int,
    best_cost=cython.double,
    best_j=cython.int,
    cost=cython.double,
    forced=set,
    tolerance=cython.double,
)
def _iup_contour_optimize_dp(
    deltas: _DeltaSegment,
    coords: _PointSegment,
    forced=set(),
    tolerance: Real = 0,
    lookback: Integral = None,
):
    """Straightforward Dynamic-Programming. For each index i, find least-costly encoding of
    points 0 to i where i is explicitly encoded. We find this by considering all previous
    explicit points j and check whether interpolation can fill points between j and i.

    Note that solution always encodes last point explicitly. Higher-level is responsible
    for removing that restriction.

    As major speedup, we stop looking further whenever we see a "forced" point."""

    n = len(deltas)
    if lookback is None:
        lookback = n
    lookback = min(lookback, MAX_LOOKBACK)
    costs = {-1: 0}
    chain = {-1: None}
    for i in range(0, n):
        best_cost = costs[i - 1] + 1

        costs[i] = best_cost
        chain[i] = i - 1

        if i - 1 in forced:
            continue

        for j in range(i - 2, max(i - lookback, -2), -1):
            cost = costs[j] + 1

            if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance):
                costs[i] = best_cost = cost
                chain[i] = j

            if j in forced:
                break

    return chain, costs


def _rot_list(l: list, k: int):
    """Rotate list by k items forward. Ie. item at position 0 will be
    at position k in returned list. Negative k is allowed."""
    n = len(l)
    k %= n
    if not k:
        return l
    return l[n - k :] + l[: n - k]


def _rot_set(s: set, k: int, n: int):
    k %= n
    if not k:
        return s
    return {(v + k) % n for v in s}


def iup_contour_optimize(
    deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0.0
) -> _DeltaOrNoneSegment:
    """For contour with coordinates `coords`, optimize a set of delta
    values `deltas` within error `tolerance`.

    Returns delta vector that has most number of None items instead of
    the input delta.
    """

    n = len(deltas)

    # Get the easy cases out of the way:

    # If all are within tolerance distance of 0, encode nothing:
    if all(abs(complex(*p)) <= tolerance for p in deltas):
        return [None] * n

    # If there's exactly one point, return it:
    if n == 1:
        return deltas

    # If all deltas are exactly the same, return just one (the first one):
    d0 = deltas[0]
    if all(d0 == d for d in deltas):
        return [d0] + [None] * (n - 1)

    # Else, solve the general problem using Dynamic Programming.

    forced = _iup_contour_bound_forced_set(deltas, coords, tolerance)
    # The _iup_contour_optimize_dp() routine returns the optimal encoding
    # solution given the constraint that the last point is always encoded.
    # To remove this constraint, we use two different methods, depending on
    # whether forced set is non-empty or not:

    # Debugging: Make the next if always take the second branch and observe
    # if the font size changes (reduced); that would mean the forced-set
    # has members it should not have.
    if forced:
        # Forced set is non-empty: rotate the contour start point
        # such that the last point in the list is a forced point.
        k = (n - 1) - max(forced)
        assert k >= 0

        deltas = _rot_list(deltas, k)
        coords = _rot_list(coords, k)
        forced = _rot_set(forced, k, n)

        # Debugging: Pass a set() instead of forced variable to the next call
        # to exercise forced-set computation for under-counting.
        chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance)

        # Assemble solution.
        solution = set()
        i = n - 1
        while i is not None:
            solution.add(i)
            i = chain[i]
        solution.remove(-1)

        # if not forced <= solution:
        #     print("coord", coords)
        #     print("deltas", deltas)
        #     print("len", len(deltas))
        assert forced <= solution, (forced, solution)

        deltas = [deltas[i] if i in solution else None for i in range(n)]

        deltas = _rot_list(deltas, -k)
    else:
        # Repeat the contour an extra time, solve the new case, then look for solutions of the
        # circular n-length problem in the solution for new linear case. I cannot prove that
        # this always produces the optimal solution...
        chain, costs = _iup_contour_optimize_dp(
            deltas + deltas, coords + coords, forced, tolerance, n
        )
        best_sol, best_cost = None, n + 1

        for start in range(n - 1, len(costs) - 1):
            # Assemble solution.
            solution = set()
            i = start
            while i > start - n:
                solution.add(i % n)
                i = chain[i]
            if i == start - n:
                cost = costs[start] - costs[start - n]
                if cost <= best_cost:
                    best_sol, best_cost = solution, cost

        # if not forced <= best_sol:
        #     print("coord", coords)
        #     print("deltas", deltas)
        #     print("len", len(deltas))
        assert forced <= best_sol, (forced, best_sol)

        deltas = [deltas[i] if i in best_sol else None for i in range(n)]

    return deltas


def iup_delta_optimize(
    deltas: _DeltaSegment,
    coords: _PointSegment,
    ends: _Endpoints,
    tolerance: Real = 0.0,
) -> _DeltaOrNoneSegment:
    """For the outline given in `coords`, with contour endpoints given
    in sorted increasing order in `ends`, optimize a set of delta
    values `deltas` within error `tolerance`.

    Returns delta vector that has most number of None items instead of
    the input delta.
    """
    assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
    n = len(coords)
    ends = ends + [n - 4, n - 3, n - 2, n - 1]
    out = []
    start = 0
    for end in ends:
        contour = iup_contour_optimize(
            deltas[start : end + 1], coords[start : end + 1], tolerance
        )
        assert len(contour) == end - start + 1
        out.extend(contour)
        start = end + 1

    return out
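A small worked example of the IUP round trip defined above: when every point of a contour moves by the same delta, the optimizer keeps one explicit delta and iup_contour reconstructs the rest.

from fontTools.varLib.iup import iup_contour, iup_contour_optimize

coords = [(0, 0), (100, 0), (100, 100), (0, 100)]  # a square contour
deltas = [(10, 10)] * 4  # uniform movement of all four points

sparse = iup_contour_optimize(deltas, coords, tolerance=0.5)
print(sparse)  # [(10, 10), None, None, None]
assert list(iup_contour(sparse, coords)) == deltas  # round-trips exactly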
1717
venv/lib/python3.13/site-packages/fontTools/varLib/merger.py
Normal file
File diff suppressed because it is too large
642
venv/lib/python3.13/site-packages/fontTools/varLib/models.py
Normal file
@@ -0,0 +1,642 @@
"""Variation fonts interpolation models."""
|
||||
|
||||
__all__ = [
|
||||
"normalizeValue",
|
||||
"normalizeLocation",
|
||||
"supportScalar",
|
||||
"piecewiseLinearMap",
|
||||
"VariationModel",
|
||||
]
|
||||
|
||||
from fontTools.misc.roundTools import noRound
|
||||
from .errors import VariationModelError
|
||||
|
||||
|
||||
def nonNone(lst):
|
||||
return [l for l in lst if l is not None]
|
||||
|
||||
|
||||
def allNone(lst):
|
||||
return all(l is None for l in lst)
|
||||
|
||||
|
||||
def allEqualTo(ref, lst, mapper=None):
|
||||
if mapper is None:
|
||||
return all(ref == item for item in lst)
|
||||
|
||||
mapped = mapper(ref)
|
||||
return all(mapped == mapper(item) for item in lst)
|
||||
|
||||
|
||||
def allEqual(lst, mapper=None):
|
||||
if not lst:
|
||||
return True
|
||||
it = iter(lst)
|
||||
try:
|
||||
first = next(it)
|
||||
except StopIteration:
|
||||
return True
|
||||
return allEqualTo(first, it, mapper=mapper)
|
||||
|
||||
|
||||
def subList(truth, lst):
|
||||
assert len(truth) == len(lst)
|
||||
return [l for l, t in zip(lst, truth) if t]
|
||||
|
||||
|
||||
def normalizeValue(v, triple, extrapolate=False):
|
||||
"""Normalizes value based on a min/default/max triple.
|
||||
|
||||
>>> normalizeValue(400, (100, 400, 900))
|
||||
0.0
|
||||
>>> normalizeValue(100, (100, 400, 900))
|
||||
-1.0
|
||||
>>> normalizeValue(650, (100, 400, 900))
|
||||
0.5
|
||||
"""
|
||||
lower, default, upper = triple
|
||||
if not (lower <= default <= upper):
|
||||
raise ValueError(
|
||||
f"Invalid axis values, must be minimum, default, maximum: "
|
||||
f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
|
||||
)
|
||||
if not extrapolate:
|
||||
v = max(min(v, upper), lower)
|
||||
|
||||
if v == default or lower == upper:
|
||||
return 0.0
|
||||
|
||||
if (v < default and lower != default) or (v > default and upper == default):
|
||||
return (v - default) / (default - lower)
|
||||
else:
|
||||
assert (v > default and upper != default) or (
|
||||
v < default and lower == default
|
||||
), f"Ooops... v={v}, triple=({lower}, {default}, {upper})"
|
||||
return (v - default) / (upper - default)
|
||||
|
||||
|
||||
def normalizeLocation(location, axes, extrapolate=False, *, validate=False):
|
||||
"""Normalizes location based on axis min/default/max values from axes.
|
||||
|
||||
>>> axes = {"wght": (100, 400, 900)}
|
||||
>>> normalizeLocation({"wght": 400}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 100}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": 900}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 650}, axes)
|
||||
{'wght': 0.5}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> axes = {"wght": (0, 0, 1000)}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": -1}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 500}, axes)
|
||||
{'wght': 0.5}
|
||||
>>> normalizeLocation({"wght": 1001}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> axes = {"wght": (0, 1000, 1000)}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": -1}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": 500}, axes)
|
||||
{'wght': -0.5}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 1001}, axes)
|
||||
{'wght': 0.0}
|
||||
"""
|
||||
if validate:
|
||||
assert set(location.keys()) <= set(axes.keys()), set(location.keys()) - set(
|
||||
axes.keys()
|
||||
)
|
||||
out = {}
|
||||
for tag, triple in axes.items():
|
||||
v = location.get(tag, triple[1])
|
||||
out[tag] = normalizeValue(v, triple, extrapolate=extrapolate)
|
||||
return out
|
||||
|
||||
|
||||
def supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None):
|
||||
"""Returns the scalar multiplier at location, for a master
|
||||
with support. If ot is True, then a peak value of zero
|
||||
for support of an axis means "axis does not participate". That
|
||||
is how OpenType Variation Font technology works.
|
||||
|
||||
If extrapolate is True, axisRanges must be a dict that maps axis
|
||||
names to (axisMin, axisMax) tuples.
|
||||
|
||||
>>> supportScalar({}, {})
|
||||
1.0
|
||||
>>> supportScalar({'wght':.2}, {})
|
||||
1.0
|
||||
>>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
|
||||
0.1
|
||||
>>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
|
||||
0.375
|
||||
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
|
||||
-1.0
|
||||
>>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
|
||||
-1.0
|
||||
>>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
|
||||
1.5
|
||||
>>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
|
||||
-0.5
|
||||
"""
|
||||
if extrapolate and axisRanges is None:
|
||||
raise TypeError("axisRanges must be passed when extrapolate is True")
|
||||
scalar = 1.0
|
||||
for axis, (lower, peak, upper) in support.items():
|
||||
if ot:
|
||||
# OpenType-specific case handling
|
||||
if peak == 0.0:
|
||||
continue
|
||||
if lower > peak or peak > upper:
|
||||
continue
|
||||
if lower < 0.0 and upper > 0.0:
|
||||
continue
|
||||
v = location.get(axis, 0.0)
|
||||
else:
|
||||
assert axis in location
|
||||
v = location[axis]
|
||||
if v == peak:
|
||||
continue
|
||||
|
||||
if extrapolate:
|
||||
axisMin, axisMax = axisRanges[axis]
|
||||
if v < axisMin and lower <= axisMin:
|
||||
if peak <= axisMin and peak < upper:
|
||||
scalar *= (v - upper) / (peak - upper)
|
||||
continue
|
||||
elif axisMin < peak:
|
||||
scalar *= (v - lower) / (peak - lower)
|
||||
continue
|
||||
elif axisMax < v and axisMax <= upper:
|
||||
if axisMax <= peak and lower < peak:
|
||||
scalar *= (v - lower) / (peak - lower)
|
||||
continue
|
||||
elif peak < axisMax:
|
||||
scalar *= (v - upper) / (peak - upper)
|
||||
continue
|
||||
|
||||
if v <= lower or upper <= v:
|
||||
scalar = 0.0
|
||||
break
|
||||
|
||||
if v < peak:
|
||||
scalar *= (v - lower) / (peak - lower)
|
||||
else: # v > peak
|
||||
scalar *= (v - upper) / (peak - upper)
|
||||
return scalar
|
||||
|
||||
|
||||
class VariationModel(object):
|
||||
"""Locations must have the base master at the origin (ie. 0).
|
||||
|
||||
If axis-ranges are not provided, values are assumed to be normalized to
|
||||
the range [-1, 1].
|
||||
|
||||
If the extrapolate argument is set to True, then values are extrapolated
|
||||
outside the axis range.
|
||||
|
||||
>>> from pprint import pprint
|
||||
>>> axisRanges = {'wght': (-180, +180), 'wdth': (-1, +1)}
|
||||
>>> locations = [ \
|
||||
{'wght':100}, \
|
||||
{'wght':-100}, \
|
||||
{'wght':-180}, \
|
||||
{'wdth':+.3}, \
|
||||
{'wght':+120,'wdth':.3}, \
|
||||
{'wght':+120,'wdth':.2}, \
|
||||
{}, \
|
||||
{'wght':+180,'wdth':.3}, \
|
||||
{'wght':+180}, \
|
||||
]
|
||||
>>> model = VariationModel(locations, axisOrder=['wght'], axisRanges=axisRanges)
|
||||
>>> pprint(model.locations)
|
||||
[{},
|
||||
{'wght': -100},
|
||||
{'wght': -180},
|
||||
{'wght': 100},
|
||||
{'wght': 180},
|
||||
{'wdth': 0.3},
|
||||
{'wdth': 0.3, 'wght': 180},
|
||||
{'wdth': 0.3, 'wght': 120},
|
||||
{'wdth': 0.2, 'wght': 120}]
|
||||
>>> pprint(model.deltaWeights)
|
||||
[{},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0, 4: 1.0, 5: 1.0},
|
||||
{0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
|
||||
{0: 1.0,
|
||||
3: 0.75,
|
||||
4: 0.25,
|
||||
5: 0.6666666666666667,
|
||||
6: 0.4444444444444445,
|
||||
7: 0.6666666666666667}]
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, locations, axisOrder=None, extrapolate=False, *, axisRanges=None
|
||||
):
|
||||
if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
|
||||
raise VariationModelError("Locations must be unique.")
|
||||
|
||||
self.origLocations = locations
|
||||
self.axisOrder = axisOrder if axisOrder is not None else []
|
||||
self.extrapolate = extrapolate
|
||||
if axisRanges is None:
|
||||
if extrapolate:
|
||||
axisRanges = self.computeAxisRanges(locations)
|
||||
else:
|
||||
allAxes = {axis for loc in locations for axis in loc.keys()}
|
||||
axisRanges = {axis: (-1, 1) for axis in allAxes}
|
||||
self.axisRanges = axisRanges
|
||||
|
||||
locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
|
||||
keyFunc = self.getMasterLocationsSortKeyFunc(
|
||||
locations, axisOrder=self.axisOrder
|
||||
)
|
||||
self.locations = sorted(locations, key=keyFunc)
|
||||
|
||||
# Mapping from user's master order to our master order
|
||||
self.mapping = [self.locations.index(l) for l in locations]
|
||||
self.reverseMapping = [locations.index(l) for l in self.locations]
|
||||
|
||||
self._computeMasterSupports()
|
||||
self._subModels = {}
|
||||
|
||||
def getSubModel(self, items):
|
||||
"""Return a sub-model and the items that are not None.
|
||||
|
||||
The sub-model is necessary for working with the subset
|
||||
of items when some are None.
|
||||
|
||||
The sub-model is cached."""
|
||||
if None not in items:
|
||||
return self, items
|
||||
key = tuple(v is not None for v in items)
|
||||
subModel = self._subModels.get(key)
|
||||
if subModel is None:
|
||||
subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
|
||||
self._subModels[key] = subModel
|
||||
return subModel, subList(key, items)
|
||||
|
||||
@staticmethod
|
||||
def computeAxisRanges(locations):
|
||||
axisRanges = {}
|
||||
allAxes = {axis for loc in locations for axis in loc.keys()}
|
||||
for loc in locations:
|
||||
for axis in allAxes:
|
||||
value = loc.get(axis, 0)
|
||||
axisMin, axisMax = axisRanges.get(axis, (value, value))
|
||||
axisRanges[axis] = min(value, axisMin), max(value, axisMax)
|
||||
return axisRanges
|
||||
|
||||
@staticmethod
|
||||
def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
|
||||
if {} not in locations:
|
||||
raise VariationModelError("Base master not found.")
|
||||
axisPoints = {}
|
||||
for loc in locations:
|
||||
if len(loc) != 1:
|
||||
continue
|
||||
axis = next(iter(loc))
|
||||
value = loc[axis]
|
||||
if axis not in axisPoints:
|
||||
axisPoints[axis] = {0.0}
|
||||
assert (
|
||||
value not in axisPoints[axis]
|
||||
), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
|
||||
axisPoints[axis].add(value)
|
||||
|
||||
def getKey(axisPoints, axisOrder):
|
||||
def sign(v):
|
||||
return -1 if v < 0 else +1 if v > 0 else 0
|
||||
|
||||
def key(loc):
|
||||
rank = len(loc)
|
||||
onPointAxes = [
|
||||
axis
|
||||
for axis, value in loc.items()
|
||||
if axis in axisPoints and value in axisPoints[axis]
|
||||
]
|
||||
orderedAxes = [axis for axis in axisOrder if axis in loc]
|
||||
orderedAxes.extend(
|
||||
[axis for axis in sorted(loc.keys()) if axis not in axisOrder]
|
||||
)
|
||||
return (
|
||||
rank, # First, order by increasing rank
|
||||
-len(onPointAxes), # Next, by decreasing number of onPoint axes
|
||||
tuple(
|
||||
axisOrder.index(axis) if axis in axisOrder else 0x10000
|
||||
for axis in orderedAxes
|
||||
), # Next, by known axes
|
||||
tuple(orderedAxes), # Next, by all axes
|
||||
tuple(
|
||||
sign(loc[axis]) for axis in orderedAxes
|
||||
), # Next, by signs of axis values
|
||||
tuple(
|
||||
abs(loc[axis]) for axis in orderedAxes
|
||||
), # Next, by absolute value of axis values
|
||||
)
|
||||
|
||||
return key
|
||||
|
||||
ret = getKey(axisPoints, axisOrder)
|
||||
return ret
|
||||
|
||||
def reorderMasters(self, master_list, mapping):
|
||||
# For changing the master data order without
|
||||
# recomputing supports and deltaWeights.
|
||||
new_list = [master_list[idx] for idx in mapping]
|
||||
self.origLocations = [self.origLocations[idx] for idx in mapping]
|
||||
locations = [
|
||||
{k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations
|
||||
]
|
||||
self.mapping = [self.locations.index(l) for l in locations]
|
||||
self.reverseMapping = [locations.index(l) for l in self.locations]
|
||||
self._subModels = {}
|
||||
return new_list
|
||||
|
||||
def _computeMasterSupports(self):
|
||||
self.supports = []
|
||||
regions = self._locationsToRegions()
|
||||
for i, region in enumerate(regions):
|
||||
locAxes = set(region.keys())
|
||||
# Walk over previous masters now
|
||||
for prev_region in regions[:i]:
|
||||
# Master with different axes do not participte
|
||||
if set(prev_region.keys()) != locAxes:
|
||||
continue
|
||||
# If it's NOT in the current box, it does not participate
|
||||
relevant = True
|
||||
for axis, (lower, peak, upper) in region.items():
|
||||
if not (
|
||||
prev_region[axis][1] == peak
|
||||
or lower < prev_region[axis][1] < upper
|
||||
):
|
||||
relevant = False
|
||||
break
|
||||
if not relevant:
|
||||
continue
|
||||
|
||||
# Split the box for new master; split in whatever direction
|
||||
# that has largest range ratio.
|
||||
#
|
||||
# For symmetry, we actually cut across multiple axes
|
||||
# if they have the largest, equal, ratio.
|
||||
# https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804
|
||||
|
||||
bestAxes = {}
|
||||
bestRatio = -1
|
||||
for axis in prev_region.keys():
|
||||
val = prev_region[axis][1]
|
||||
assert axis in region
|
||||
lower, locV, upper = region[axis]
|
||||
newLower, newUpper = lower, upper
|
||||
if val < locV:
|
||||
newLower = val
|
||||
ratio = (val - locV) / (lower - locV)
|
||||
elif locV < val:
|
||||
newUpper = val
|
||||
ratio = (val - locV) / (upper - locV)
|
||||
else: # val == locV
|
||||
# Can't split box in this direction.
|
||||
continue
|
||||
if ratio > bestRatio:
|
||||
bestAxes = {}
|
||||
bestRatio = ratio
|
||||
if ratio == bestRatio:
|
||||
bestAxes[axis] = (newLower, locV, newUpper)
|
||||
|
||||
for axis, triple in bestAxes.items():
|
||||
region[axis] = triple
|
||||
self.supports.append(region)
|
||||
self._computeDeltaWeights()
|
||||
|
||||
def _locationsToRegions(self):
|
||||
locations = self.locations
|
||||
axisRanges = self.axisRanges
|
||||
|
||||
regions = []
|
||||
for loc in locations:
|
||||
region = {}
|
||||
for axis, locV in loc.items():
|
||||
if locV > 0:
|
||||
region[axis] = (0, locV, axisRanges[axis][1])
|
||||
else:
|
||||
region[axis] = (axisRanges[axis][0], locV, 0)
|
||||
regions.append(region)
|
||||
return regions
|
||||
|
||||
def _computeDeltaWeights(self):
|
||||
self.deltaWeights = []
|
||||
for i, loc in enumerate(self.locations):
|
||||
deltaWeight = {}
|
||||
# Walk over previous masters now, populate deltaWeight
|
||||
for j, support in enumerate(self.supports[:i]):
|
||||
scalar = supportScalar(loc, support)
|
||||
if scalar:
|
||||
deltaWeight[j] = scalar
|
||||
self.deltaWeights.append(deltaWeight)
|
||||
|
||||
def getDeltas(self, masterValues, *, round=noRound):
|
||||
assert len(masterValues) == len(self.deltaWeights), (
|
||||
            len(masterValues),
            len(self.deltaWeights),
        )
        mapping = self.reverseMapping
        out = []
        for i, weights in enumerate(self.deltaWeights):
            delta = masterValues[mapping[i]]
            for j, weight in weights.items():
                if weight == 1:
                    delta -= out[j]
                else:
                    delta -= out[j] * weight
            out.append(round(delta))
        return out

    def getDeltasAndSupports(self, items, *, round=noRound):
        model, items = self.getSubModel(items)
        return model.getDeltas(items, round=round), model.supports

    def getScalars(self, loc):
        """Return scalars for each delta, for the given location.
        If interpolating many master-values at the same location,
        this function allows speed up by fetching the scalars once
        and using them with interpolateFromMastersAndScalars()."""
        return [
            supportScalar(
                loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges
            )
            for support in self.supports
        ]

    def getMasterScalars(self, targetLocation):
        """Return multipliers for each master, for the given location.
        If interpolating many master-values at the same location,
        this function allows speed up by fetching the scalars once
        and using them with interpolateFromValuesAndScalars().

        Note that the scalars used in interpolateFromMastersAndScalars(),
        are *not* the same as the ones returned here. They are the result
        of getScalars()."""
        out = self.getScalars(targetLocation)
        for i, weights in reversed(list(enumerate(self.deltaWeights))):
            for j, weight in weights.items():
                out[j] -= out[i] * weight

        out = [out[self.mapping[i]] for i in range(len(out))]
        return out

    @staticmethod
    def interpolateFromValuesAndScalars(values, scalars):
        """Interpolate from values and scalars coefficients.

        If the values are master-values, then the scalars should be
        fetched from getMasterScalars().

        If the values are deltas, then the scalars should be fetched
        from getScalars(); in which case this is the same as
        interpolateFromDeltasAndScalars().
        """
        v = None
        assert len(values) == len(scalars)
        for value, scalar in zip(values, scalars):
            if not scalar:
                continue
            contribution = value * scalar
            if v is None:
                v = contribution
            else:
                v += contribution
        return v

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        """Interpolate from deltas and scalars fetched from getScalars()."""
        return VariationModel.interpolateFromValuesAndScalars(deltas, scalars)

    def interpolateFromDeltas(self, loc, deltas):
        """Interpolate from deltas, at location loc."""
        scalars = self.getScalars(loc)
        return self.interpolateFromDeltasAndScalars(deltas, scalars)

    def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
        """Interpolate from master-values, at location loc."""
        scalars = self.getMasterScalars(loc)
        return self.interpolateFromValuesAndScalars(masterValues, scalars)

    def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
        """Interpolate from master-values, and scalars fetched from
        getScalars(), which is useful when you want to interpolate
        multiple master-values with the same location."""
        deltas = self.getDeltas(masterValues, round=round)
        return self.interpolateFromDeltasAndScalars(deltas, scalars)
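
# A minimal usage sketch for VariationModel interpolation (hypothetical master
# values; not part of the original module): with masters at the default and at
# wght=1, a value can be interpolated at an intermediate location:
#
#     model = VariationModel([{}, {"wght": 1.0}])
#     model.interpolateFromMasters({"wght": 0.5}, [100, 120])  # -> 110.0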


def piecewiseLinearMap(v, mapping):
    keys = mapping.keys()
    if not keys:
        return v
    if v in keys:
        return mapping[v]
    k = min(keys)
    if v < k:
        return v + mapping[k] - k
    k = max(keys)
    if v > k:
        return v + mapping[k] - k
    # Interpolate
    a = max(k for k in keys if k < v)
    b = min(k for k in keys if k > v)
    va = mapping[a]
    vb = mapping[b]
    return va + (vb - va) * (v - a) / (b - a)
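
# For example (an avar-style segment map; values assumed for illustration):
#
#     piecewiseLinearMap(0.25, {0.0: 0.0, 0.5: 0.8, 1.0: 1.0})  # -> 0.4
#
# 0.25 falls halfway between the 0.0 and 0.5 stops, so the result is halfway
# between the mapped values 0.0 and 0.8.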


def main(args=None):
    """Normalize locations on a given designspace"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.models",
        description=main.__doc__,
    )
    parser.add_argument(
        "--loglevel",
        metavar="LEVEL",
        default="INFO",
        help="Logging level (defaults to INFO)",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)
    group.add_argument(
        "-l",
        "--locations",
        metavar="LOCATION",
        nargs="+",
        help="Master locations as comma-separated coordinates. One must be all zeros.",
    )

    args = parser.parse_args(args)

    configLogger(level=args.loglevel)
    from pprint import pprint

    if args.designspace:
        from fontTools.designspaceLib import DesignSpaceDocument

        doc = DesignSpaceDocument()
        doc.read(args.designspace)
        locs = [s.location for s in doc.sources]
        print("Original locations:")
        pprint(locs)
        doc.normalize()
        print("Normalized locations:")
        locs = [s.location for s in doc.sources]
        pprint(locs)
    else:
        axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
        locs = [
            dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations
        ]

        model = VariationModel(locs)
        print("Sorted locations:")
        pprint(model.locations)
        print("Supports:")
        pprint(model.supports)


if __name__ == "__main__":
    import doctest, sys

    if len(sys.argv) > 1:
        sys.exit(main())

    sys.exit(doctest.testmod().failed)

@@ -0,0 +1,253 @@
from fontTools.misc.roundTools import noRound, otRound
from fontTools.misc.intTools import bit_count
from fontTools.misc.vector import Vector
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
import fontTools.varLib.varStore  # For monkey-patching
from fontTools.varLib.builder import (
    buildVarRegionList,
    buildSparseVarRegionList,
    buildSparseVarRegion,
    buildMultiVarStore,
    buildMultiVarData,
)
from fontTools.misc.iterTools import batched
from functools import partial
from collections import defaultdict
from heapq import heappush, heappop


NO_VARIATION_INDEX = ot.NO_VARIATION_INDEX
ot.MultiVarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX


def _getLocationKey(loc):
    return tuple(sorted(loc.items(), key=lambda kv: kv[0]))


class OnlineMultiVarStoreBuilder(object):
    def __init__(self, axisTags):
        self._axisTags = axisTags
        self._regionMap = {}
        self._regionList = buildSparseVarRegionList([], axisTags)
        self._store = buildMultiVarStore(self._regionList, [])
        self._data = None
        self._model = None
        self._supports = None
        self._varDataIndices = {}
        self._varDataCaches = {}
        self._cache = None

    def setModel(self, model):
        self.setSupports(model.supports)
        self._model = model

    def setSupports(self, supports):
        self._model = None
        self._supports = list(supports)
        if not self._supports[0]:
            del self._supports[0]  # Drop base master support
        self._cache = None
        self._data = None

    def finish(self):
        self._regionList.RegionCount = len(self._regionList.Region)
        self._store.MultiVarDataCount = len(self._store.MultiVarData)
        return self._store

    def _add_MultiVarData(self):
        regionMap = self._regionMap
        regionList = self._regionList

        regions = self._supports
        regionIndices = []
        for region in regions:
            key = _getLocationKey(region)
            idx = regionMap.get(key)
            if idx is None:
                varRegion = buildSparseVarRegion(region, self._axisTags)
                idx = regionMap[key] = len(regionList.Region)
                regionList.Region.append(varRegion)
            regionIndices.append(idx)

        # Check if we have one already...
        key = tuple(regionIndices)
        varDataIdx = self._varDataIndices.get(key)
        if varDataIdx is not None:
            self._outer = varDataIdx
            self._data = self._store.MultiVarData[varDataIdx]
            self._cache = self._varDataCaches[key]
            if len(self._data.Item) == 0xFFFF:
                # This is full. Need new one.
                varDataIdx = None

        if varDataIdx is None:
            self._data = buildMultiVarData(regionIndices, [])
            self._outer = len(self._store.MultiVarData)
            self._store.MultiVarData.append(self._data)
            self._varDataIndices[key] = self._outer
            if key not in self._varDataCaches:
                self._varDataCaches[key] = {}
            self._cache = self._varDataCaches[key]

    def storeMasters(self, master_values, *, round=round):
        deltas = self._model.getDeltas(master_values, round=round)
        base = deltas.pop(0)
        return base, self.storeDeltas(deltas, round=noRound)

    def storeDeltas(self, deltas, *, round=round):
        deltas = tuple(round(d) for d in deltas)

        if not any(deltas):
            return NO_VARIATION_INDEX

        deltas_tuple = tuple(tuple(d) for d in deltas)

        if not self._data:
            self._add_MultiVarData()

        varIdx = self._cache.get(deltas_tuple)
        if varIdx is not None:
            return varIdx

        inner = len(self._data.Item)
        if inner == 0xFFFF:
            # Full array. Start new one.
            self._add_MultiVarData()
            return self.storeDeltas(deltas, round=noRound)
        self._data.addItem(deltas, round=noRound)

        varIdx = (self._outer << 16) + inner
        self._cache[deltas_tuple] = varIdx
        return varIdx
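
# A minimal usage sketch (hypothetical model and values; not part of the
# original module):
#
#     from fontTools.varLib.models import VariationModel
#
#     builder = OnlineMultiVarStoreBuilder(["wght"])
#     builder.setModel(VariationModel([{}, {"wght": 1.0}]))
#     base, varIdx = builder.storeMasters([Vector((0, 0)), Vector((10, 5))])
#     store = builder.finish()  # an ot.MultiVarStore referencing varIdx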


def MultiVarData_addItem(self, deltas, *, round=round):
    deltas = tuple(round(d) for d in deltas)

    assert len(deltas) == self.VarRegionCount

    values = []
    for d in deltas:
        values.extend(d)

    self.Item.append(values)
    self.ItemCount = len(self.Item)


ot.MultiVarData.addItem = MultiVarData_addItem


def SparseVarRegion_get_support(self, fvar_axes):
    return {
        fvar_axes[reg.AxisIndex].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)
        for reg in self.SparseVarRegionAxis
    }


ot.SparseVarRegion.get_support = SparseVarRegion_get_support


def MultiVarStore___bool__(self):
    return bool(self.MultiVarData)


ot.MultiVarStore.__bool__ = MultiVarStore___bool__


class MultiVarStoreInstancer(object):
    def __init__(self, multivarstore, fvar_axes, location={}):
        self.fvar_axes = fvar_axes
        assert multivarstore is None or multivarstore.Format == 1
        self._varData = multivarstore.MultiVarData if multivarstore else []
        self._regions = (
            multivarstore.SparseVarRegionList.Region if multivarstore else []
        )
        self.setLocation(location)

    def setLocation(self, location):
        self.location = dict(location)
        self._clearCaches()

    def _clearCaches(self):
        self._scalars = {}

    def _getScalar(self, regionIdx):
        scalar = self._scalars.get(regionIdx)
        if scalar is None:
            support = self._regions[regionIdx].get_support(self.fvar_axes)
            scalar = supportScalar(self.location, support)
            self._scalars[regionIdx] = scalar
        return scalar

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        if not deltas:
            return Vector([])
        assert len(deltas) % len(scalars) == 0, (len(deltas), len(scalars))
        m = len(deltas) // len(scalars)
        delta = Vector([0] * m)
        for d, s in zip(batched(deltas, m), scalars):
            if not s:
                continue
            delta += Vector(d) * s
        return delta

    def __getitem__(self, varidx):
        major, minor = varidx >> 16, varidx & 0xFFFF
        if varidx == NO_VARIATION_INDEX:
            return Vector([])
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
        deltas = varData[major].Item[minor]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)

    def interpolateFromDeltas(self, varDataIndex, deltas):
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)
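
# Variation indices pack an outer (MultiVarData) index and an inner (Item)
# index into one 32-bit value; for example:
#
#     varIdx = (2 << 16) + 7                         # outer 2, inner 7 -> 0x00020007
#     major, minor = varIdx >> 16, varIdx & 0xFFFF   # -> (2, 7)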


def MultiVarStore_subset_varidxes(self, varIdxes):
    return ot.VarStore.subset_varidxes(self, varIdxes, VarData="MultiVarData")


def MultiVarStore_prune_regions(self):
    return ot.VarStore.prune_regions(
        self, VarData="MultiVarData", VarRegionList="SparseVarRegionList"
    )


ot.MultiVarStore.prune_regions = MultiVarStore_prune_regions
ot.MultiVarStore.subset_varidxes = MultiVarStore_subset_varidxes


def MultiVarStore_get_supports(self, major, fvarAxes):
    supports = []
    varData = self.MultiVarData[major]
    for regionIdx in varData.VarRegionIndex:
        region = self.SparseVarRegionList.Region[regionIdx]
        support = region.get_support(fvarAxes)
        supports.append(support)
    return supports


ot.MultiVarStore.get_supports = MultiVarStore_get_supports


def VARC_collect_varidxes(self, varidxes):
    for glyph in self.VarCompositeGlyphs.VarCompositeGlyph:
        for component in glyph.components:
            varidxes.add(component.axisValuesVarIndex)
            varidxes.add(component.transformVarIndex)


def VARC_remap_varidxes(self, varidxes_map):
    for glyph in self.VarCompositeGlyphs.VarCompositeGlyph:
        for component in glyph.components:
            component.axisValuesVarIndex = varidxes_map[component.axisValuesVarIndex]
            component.transformVarIndex = varidxes_map[component.transformVarIndex]


ot.VARC.collect_varidxes = VARC_collect_varidxes
ot.VARC.remap_varidxes = VARC_remap_varidxes

529
venv/lib/python3.13/site-packages/fontTools/varLib/mutator.py
Normal file

@@ -0,0 +1,529 @@
"""
|
||||
Instantiate a variation font. Run, eg:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
|
||||
|
||||
.. warning::
|
||||
``fontTools.varLib.mutator`` is deprecated in favor of :mod:`fontTools.varLib.instancer`
|
||||
which provides equivalent full instancing and also supports partial instancing.
|
||||
Please migrate CLI usage to ``fonttools varLib.instancer`` and API usage to
|
||||
:func:`fontTools.varLib.instancer.instantiateVariableFont`.
|
||||
"""
|
||||
|
||||
from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed
|
||||
from fontTools.misc.loggingTools import deprecateFunction
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.pens.boundsPen import BoundsPen
|
||||
from fontTools.ttLib import TTFont, newTable
|
||||
from fontTools.ttLib.tables import ttProgram
|
||||
from fontTools.ttLib.tables._g_l_y_f import (
|
||||
GlyphCoordinates,
|
||||
flagOverlapSimple,
|
||||
OVERLAP_COMPOUND,
|
||||
)
|
||||
from fontTools.varLib.models import (
|
||||
supportScalar,
|
||||
normalizeLocation,
|
||||
piecewiseLinearMap,
|
||||
)
|
||||
from fontTools.varLib.merger import MutatorMerger
|
||||
from fontTools.varLib.varStore import VarStoreInstancer
|
||||
from fontTools.varLib.mvar import MVAR_ENTRIES
|
||||
from fontTools.varLib.iup import iup_delta
|
||||
import fontTools.subset.cff
|
||||
import os.path
|
||||
import logging
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.varlib.mutator")
|
||||
|
||||
# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
|
||||
OS2_WIDTH_CLASS_VALUES = {}
|
||||
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
|
||||
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
|
||||
half = (prev + curr) / 2
|
||||
OS2_WIDTH_CLASS_VALUES[half] = i
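
# E.g. the buckets end up as {56.25: 1, 68.75: 2, 81.25: 3, 93.75: 4,
# 106.25: 5, 118.75: 6, 137.5: 7, 175.0: 8}; a 'wdth' of 85 falls below
# 93.75, so it maps to usWidthClass 4 (semi-condensed, nominally 87.5%).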


def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
    pd_blend_lists = (
        "BlueValues",
        "OtherBlues",
        "FamilyBlues",
        "FamilyOtherBlues",
        "StemSnapH",
        "StemSnapV",
    )
    pd_blend_values = ("BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW")
    for fontDict in topDict.FDArray:
        pd = fontDict.Private
        vsindex = pd.vsindex if (hasattr(pd, "vsindex")) else 0
        for key, value in pd.rawDict.items():
            if (key in pd_blend_values) and isinstance(value, list):
                delta = interpolateFromDeltas(vsindex, value[1:])
                pd.rawDict[key] = otRound(value[0] + delta)
            elif (key in pd_blend_lists) and isinstance(value[0], list):
                """If any argument in a BlueValues list is a blend list,
                then they all are. The first value of each list is an
                absolute value. The delta tuples are calculated from
                relative master values, hence we need to append all the
                deltas to date to each successive absolute value."""
                delta = 0
                for i, val_list in enumerate(value):
                    delta += otRound(interpolateFromDeltas(vsindex, val_list[1:]))
                    value[i] = val_list[0] + delta


def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
    charstrings = topDict.CharStrings
    for gname in glyphOrder:
        # Interpolate charstring,
        # e.g. replace blend op args with regular args,
        # and use and discard vsindex op.
        charstring = charstrings[gname]
        new_program = []
        vsindex = 0
        last_i = 0
        for i, token in enumerate(charstring.program):
            if token == "vsindex":
                vsindex = charstring.program[i - 1]
                if last_i != 0:
                    new_program.extend(charstring.program[last_i : i - 1])
                last_i = i + 1
            elif token == "blend":
                num_regions = charstring.getNumRegions(vsindex)
                numMasters = 1 + num_regions
                num_args = charstring.program[i - 1]
                # The program list starting at program[i] is now:
                # ..args for following operations
                # num_args values from the default font
                # num_args tuples, each with numMasters-1 delta values
                # num_blend_args
                # 'blend'
                argi = i - (num_args * numMasters + 1)
                end_args = tuplei = argi + num_args
                while argi < end_args:
                    next_ti = tuplei + num_regions
                    deltas = charstring.program[tuplei:next_ti]
                    delta = interpolateFromDeltas(vsindex, deltas)
                    charstring.program[argi] += otRound(delta)
                    tuplei = next_ti
                    argi += 1
                new_program.extend(charstring.program[last_i:end_args])
                last_i = i + 1
        if last_i != 0:
            new_program.extend(charstring.program[last_i:])
            charstring.program = new_program
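
# Illustration of the blend layout (values assumed): with one region and one
# blended operand, a program fragment
#
#     [100, 20, 1, 'blend', 'hmoveto']
#
# carries the default value 100 plus one delta 20; at a location whose region
# scalar is 0.5 it collapses to [110, 'hmoveto'].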


def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
    """Unlike TrueType glyphs, neither advance width nor bounding box
    info is stored in a CFF2 charstring. The width data exists only in
    the hmtx and HVAR tables. Since LSB data cannot be interpolated
    reliably from the master LSB values in the hmtx table, we traverse
    the charstring to determine the actual bounding box."""

    charstrings = topDict.CharStrings
    boundsPen = BoundsPen(glyphOrder)
    hmtx = varfont["hmtx"]
    hvar_table = None
    if "HVAR" in varfont:
        hvar_table = varfont["HVAR"].table
        fvar = varfont["fvar"]
        varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)

    for gid, gname in enumerate(glyphOrder):
        entry = list(hmtx[gname])
        # get width delta.
        if hvar_table:
            if hvar_table.AdvWidthMap:
                width_idx = hvar_table.AdvWidthMap.mapping[gname]
            else:
                width_idx = gid
            width_delta = otRound(varStoreInstancer[width_idx])
        else:
            width_delta = 0

        # get LSB.
        boundsPen.init()
        charstring = charstrings[gname]
        charstring.draw(boundsPen)
        if boundsPen.bounds is None:
            # Happens with non-marking glyphs
            lsb_delta = 0
        else:
            lsb = otRound(boundsPen.bounds[0])
            lsb_delta = entry[1] - lsb

        if lsb_delta or width_delta:
            if width_delta:
                entry[0] = max(0, entry[0] + width_delta)
            if lsb_delta:
                entry[1] = lsb
            hmtx[gname] = tuple(entry)
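
# hmtx entries are (advanceWidth, lsb) pairs, so e.g. a width delta of +6 on
# an entry (498, 40) yields (504, 40), and a recomputed LSB replaces slot 1.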


@deprecateFunction(
    "use fontTools.varLib.instancer.instantiateVariableFont instead "
    "for either full or partial instancing",
)
def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
    """Generate a static instance from a variable TTFont and a dictionary
    defining the desired location along the variable font's axes.
    The location values must be specified as user-space coordinates, e.g.:

    .. code-block::

        {'wght': 400, 'wdth': 100}

    By default, a new TTFont object is returned. If ``inplace`` is True, the
    input varfont is modified and reduced to a static font.

    When the overlap parameter is defined as True,
    OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
    https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
    """
    if not inplace:
        # make a copy to leave input varfont unmodified
        stream = BytesIO()
        varfont.save(stream)
        stream.seek(0)
        varfont = TTFont(stream)

    fvar = varfont["fvar"]
    axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
    loc = normalizeLocation(location, axes)
    if "avar" in varfont:
        maps = varfont["avar"].segments
        loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
    # Quantize to F2Dot14, to avoid surprise interpolations.
    loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
    # Location is normalized now
    log.info("Normalized location: %s", loc)

    if "gvar" in varfont:
        log.info("Mutating glyf/gvar tables")
        gvar = varfont["gvar"]
        glyf = varfont["glyf"]
        hMetrics = varfont["hmtx"].metrics
        vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
        # get list of glyph names in gvar sorted by component depth
        glyphnames = sorted(
            gvar.variations.keys(),
            key=lambda name: (
                (
                    glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
                    if glyf[name].isComposite()
                    else 0
                ),
                name,
            ),
        )
        for glyphname in glyphnames:
            variations = gvar.variations[glyphname]
            coordinates, _ = glyf._getCoordinatesAndControls(
                glyphname, hMetrics, vMetrics
            )
            origCoords, endPts = None, None
            for var in variations:
                scalar = supportScalar(loc, var.axes)
                if not scalar:
                    continue
                delta = var.coordinates
                if None in delta:
                    if origCoords is None:
                        origCoords, g = glyf._getCoordinatesAndControls(
                            glyphname, hMetrics, vMetrics
                        )
                    delta = iup_delta(delta, origCoords, g.endPts)
                coordinates += GlyphCoordinates(delta) * scalar
            glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
    else:
        glyf = None

    if "DSIG" in varfont:
        del varfont["DSIG"]

    if "cvar" in varfont:
        log.info("Mutating cvt/cvar tables")
        cvar = varfont["cvar"]
        cvt = varfont["cvt "]
        deltas = {}
        for var in cvar.variations:
            scalar = supportScalar(loc, var.axes)
            if not scalar:
                continue
            for i, c in enumerate(var.coordinates):
                if c is not None:
                    deltas[i] = deltas.get(i, 0) + scalar * c
        for i, delta in deltas.items():
            cvt[i] += otRound(delta)

    if "CFF2" in varfont:
        log.info("Mutating CFF2 table")
        glyphOrder = varfont.getGlyphOrder()
        CFF2 = varfont["CFF2"]
        topDict = CFF2.cff.topDictIndex[0]
        vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
        interpolateFromDeltas = vsInstancer.interpolateFromDeltas
        interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
        CFF2.desubroutinize()
        interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
        interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
        del topDict.rawDict["VarStore"]
        del topDict.VarStore

    if "MVAR" in varfont:
        log.info("Mutating MVAR table")
        mvar = varfont["MVAR"].table
        varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
        records = mvar.ValueRecord
        for rec in records:
            mvarTag = rec.ValueTag
            if mvarTag not in MVAR_ENTRIES:
                continue
            tableTag, itemName = MVAR_ENTRIES[mvarTag]
            delta = otRound(varStoreInstancer[rec.VarIdx])
            if not delta:
                continue
            setattr(
                varfont[tableTag],
                itemName,
                getattr(varfont[tableTag], itemName) + delta,
            )

    log.info("Mutating FeatureVariations")
    for tableTag in "GSUB", "GPOS":
        if tableTag not in varfont:
            continue
        table = varfont[tableTag].table
        if not getattr(table, "FeatureVariations", None):
            continue
        variations = table.FeatureVariations
        for record in variations.FeatureVariationRecord:
            applies = True
            for condition in record.ConditionSet.ConditionTable:
                if condition.Format == 1:
                    axisIdx = condition.AxisIndex
                    axisTag = fvar.axes[axisIdx].axisTag
                    Min = condition.FilterRangeMinValue
                    Max = condition.FilterRangeMaxValue
                    v = loc[axisTag]
                    if not (Min <= v <= Max):
                        applies = False
                else:
                    applies = False
                if not applies:
                    break

            if applies:
                assert record.FeatureTableSubstitution.Version == 0x00010000
                for rec in record.FeatureTableSubstitution.SubstitutionRecord:
                    table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = (
                        rec.Feature
                    )
                break
        del table.FeatureVariations

    if "GDEF" in varfont and varfont["GDEF"].table.Version >= 0x00010003:
        log.info("Mutating GDEF/GPOS/GSUB tables")
        gdef = varfont["GDEF"].table
        instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)

        merger = MutatorMerger(varfont, instancer)
        merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"])

        # Downgrade GDEF.
        del gdef.VarStore
        gdef.Version = 0x00010002
        if gdef.MarkGlyphSetsDef is None:
            del gdef.MarkGlyphSetsDef
            gdef.Version = 0x00010000

        if not (
            gdef.LigCaretList
            or gdef.MarkAttachClassDef
            or gdef.GlyphClassDef
            or gdef.AttachList
            or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)
        ):
            del varfont["GDEF"]

    addidef = False
    if glyf:
        for glyph in glyf.glyphs.values():
            if hasattr(glyph, "program"):
                instructions = glyph.program.getAssembly()
                # If GETVARIATION opcode is used in bytecode of any glyph add IDEF
                addidef = any(op.startswith("GETVARIATION") for op in instructions)
                if addidef:
                    break
        if overlap:
            for glyph_name in glyf.keys():
                glyph = glyf[glyph_name]
                # Set OVERLAP_COMPOUND bit for compound glyphs
                if glyph.isComposite():
                    glyph.components[0].flags |= OVERLAP_COMPOUND
                # Set OVERLAP_SIMPLE bit for simple glyphs
                elif glyph.numberOfContours > 0:
                    glyph.flags[0] |= flagOverlapSimple
    if addidef:
        log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
        asm = []
        if "fpgm" in varfont:
            fpgm = varfont["fpgm"]
            asm = fpgm.program.getAssembly()
        else:
            fpgm = newTable("fpgm")
            fpgm.program = ttProgram.Program()
            varfont["fpgm"] = fpgm
        asm.append("PUSHB[000] 145")
        asm.append("IDEF[ ]")
        args = [str(len(loc))]
        for a in fvar.axes:
            args.append(str(floatToFixed(loc[a.axisTag], 14)))
        asm.append("NPUSHW[ ] " + " ".join(args))
        asm.append("ENDF[ ]")
        fpgm.program.fromAssembly(asm)

        # Change maxp attributes as IDEF is added
        if "maxp" in varfont:
            maxp = varfont["maxp"]
            setattr(
                maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0)
            )
            setattr(
                maxp,
                "maxStackElements",
                max(len(loc), getattr(maxp, "maxStackElements", 0)),
            )
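
        # For a single-axis font instanced at normalized wght = 0.6, the
        # generated assembly would read (0.6 * 16384 rounds to 9830 in F2Dot14):
        #
        #     PUSHB[000] 145
        #     IDEF[ ]
        #     NPUSHW[ ] 1 9830
        #     ENDF[ ]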

    if "name" in varfont:
        log.info("Pruning name table")
        exclude = {a.axisNameID for a in fvar.axes}
        for i in fvar.instances:
            exclude.add(i.subfamilyNameID)
            exclude.add(i.postscriptNameID)
        if "ltag" in varfont:
            # Drop the whole 'ltag' table if all its language tags are referenced by
            # name records to be pruned.
            # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
            excludedUnicodeLangIDs = [
                n.langID
                for n in varfont["name"].names
                if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
            ]
            if set(excludedUnicodeLangIDs) == set(range(len(varfont["ltag"].tags))):
                del varfont["ltag"]
        varfont["name"].names[:] = [
            n
            for n in varfont["name"].names
            if n.nameID < 256 or n.nameID not in exclude
        ]

    if "wght" in location and "OS/2" in varfont:
        varfont["OS/2"].usWeightClass = otRound(max(1, min(location["wght"], 1000)))
    if "wdth" in location:
        wdth = location["wdth"]
        for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
            if wdth < percent:
                varfont["OS/2"].usWidthClass = widthClass
                break
        else:
            varfont["OS/2"].usWidthClass = 9
    if "slnt" in location and "post" in varfont:
        varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))

    log.info("Removing variable tables")
    for tag in ("avar", "cvar", "fvar", "gvar", "HVAR", "MVAR", "VVAR", "STAT"):
        if tag in varfont:
            del varfont[tag]

    return varfont
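
# Minimal usage sketch (the file names are hypothetical; this API is
# deprecated in favor of fontTools.varLib.instancer):
#
#     from fontTools.ttLib import TTFont
#
#     vf = TTFont("NotoSansArabic-VF.ttf")
#     static = instantiateVariableFont(vf, {"wght": 140, "wdth": 85})
#     static.save("NotoSansArabic-Instance.ttf")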


def main(args=None):
    """Instantiate a variation font"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.mutator", description="Instantiate a variable font"
    )
    parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
    parser.add_argument(
        "locargs",
        metavar="AXIS=LOC",
        nargs="*",
        help="List of space separated locations. A location consists of "
        "the name of a variation axis, followed by '=' and a number. E.g.: "
        " wght=700 wdth=80. The default is the location of the base master.",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance TTF file (default: INPUT-instance.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    logging_group = parser.add_mutually_exclusive_group(required=False)
    logging_group.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    logging_group.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    parser.add_argument(
        "--no-overlap",
        dest="overlap",
        action="store_false",
        help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags.",
    )
    options = parser.parse_args(args)

    varfilename = options.input
    outfile = (
        os.path.splitext(varfilename)[0] + "-instance.ttf"
        if not options.output
        else options.output
    )
    configLogger(
        level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
    )

    loc = {}
    for arg in options.locargs:
        try:
            tag, val = arg.split("=")
            assert len(tag) <= 4
            loc[tag.ljust(4)] = float(val)
        except (ValueError, AssertionError):
            parser.error("invalid location argument format: %r" % arg)
    log.info("Location: %s", loc)

    log.info("Loading variable font")
    varfont = TTFont(varfilename, recalcTimestamp=options.recalc_timestamp)

    instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)

    log.info("Saving instance font %s", outfile)
    varfont.save(outfile)


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest

    sys.exit(doctest.testmod().failed)

40
venv/lib/python3.13/site-packages/fontTools/varLib/mvar.py
Normal file

@@ -0,0 +1,40 @@
MVAR_ENTRIES = {
    "hasc": ("OS/2", "sTypoAscender"),  # horizontal ascender
    "hdsc": ("OS/2", "sTypoDescender"),  # horizontal descender
    "hlgp": ("OS/2", "sTypoLineGap"),  # horizontal line gap
    "hcla": ("OS/2", "usWinAscent"),  # horizontal clipping ascent
    "hcld": ("OS/2", "usWinDescent"),  # horizontal clipping descent
    "vasc": ("vhea", "ascent"),  # vertical ascender
    "vdsc": ("vhea", "descent"),  # vertical descender
    "vlgp": ("vhea", "lineGap"),  # vertical line gap
    "hcrs": ("hhea", "caretSlopeRise"),  # horizontal caret rise
    "hcrn": ("hhea", "caretSlopeRun"),  # horizontal caret run
    "hcof": ("hhea", "caretOffset"),  # horizontal caret offset
    "vcrs": ("vhea", "caretSlopeRise"),  # vertical caret rise
    "vcrn": ("vhea", "caretSlopeRun"),  # vertical caret run
    "vcof": ("vhea", "caretOffset"),  # vertical caret offset
    "xhgt": ("OS/2", "sxHeight"),  # x height
    "cpht": ("OS/2", "sCapHeight"),  # cap height
    "sbxs": ("OS/2", "ySubscriptXSize"),  # subscript em x size
    "sbys": ("OS/2", "ySubscriptYSize"),  # subscript em y size
    "sbxo": ("OS/2", "ySubscriptXOffset"),  # subscript em x offset
    "sbyo": ("OS/2", "ySubscriptYOffset"),  # subscript em y offset
    "spxs": ("OS/2", "ySuperscriptXSize"),  # superscript em x size
    "spys": ("OS/2", "ySuperscriptYSize"),  # superscript em y size
    "spxo": ("OS/2", "ySuperscriptXOffset"),  # superscript em x offset
    "spyo": ("OS/2", "ySuperscriptYOffset"),  # superscript em y offset
    "strs": ("OS/2", "yStrikeoutSize"),  # strikeout size
    "stro": ("OS/2", "yStrikeoutPosition"),  # strikeout offset
    "unds": ("post", "underlineThickness"),  # underline size
    "undo": ("post", "underlinePosition"),  # underline offset
    #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'),  # gaspRange[0]
    #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'),  # gaspRange[1]
    #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'),  # gaspRange[2]
    #'gsp3': ('gasp', 'gaspRange[3].rangeMaxPPEM'),  # gaspRange[3]
    #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'),  # gaspRange[4]
    #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'),  # gaspRange[5]
    #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'),  # gaspRange[6]
    #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'),  # gaspRange[7]
    #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'),  # gaspRange[8]
    #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'),  # gaspRange[9]
}
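
# Each MVAR value tag maps to the (table, attribute) it adjusts; for example
# MVAR_ENTRIES["xhgt"] == ("OS/2", "sxHeight"), so an interpolated 'xhgt'
# delta is added to varfont["OS/2"].sxHeight (as done in varLib.mutator).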

238
venv/lib/python3.13/site-packages/fontTools/varLib/plot.py
Normal file

@@ -0,0 +1,238 @@
"""Visualize DesignSpaceDocument and resulting VariationModel."""
|
||||
|
||||
from fontTools.varLib.models import VariationModel, supportScalar
|
||||
from fontTools.designspaceLib import DesignSpaceDocument
|
||||
from matplotlib import pyplot
|
||||
from mpl_toolkits.mplot3d import axes3d
|
||||
from itertools import cycle
|
||||
import math
|
||||
import logging
|
||||
import sys
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def stops(support, count=10):
|
||||
a, b, c = support
|
||||
|
||||
return (
|
||||
[a + (b - a) * i / count for i in range(count)]
|
||||
+ [b + (c - b) * i / count for i in range(count)]
|
||||
+ [c]
|
||||
)
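
# For example, stops((-1, 0, 1), count=2) samples each side of the peak:
# [-1.0, -0.5, 0.0, 0.5, 1] -- `count` points per segment plus the end point.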


def _plotLocationsDots(locations, axes, subplot, **kwargs):
    for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
        if len(axes) == 1:
            subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs)
        elif len(axes) == 2:
            subplot.plot(
                [loc.get(axes[0], 0)],
                [loc.get(axes[1], 0)],
                [1.0],
                "o",
                color=color,
                **kwargs,
            )
        else:
            raise AssertionError(len(axes))


def plotLocations(locations, fig, names=None, **kwargs):
    n = len(locations)
    cols = math.ceil(n**0.5)
    rows = math.ceil(n / cols)

    if names is None:
        names = [None] * len(locations)

    model = VariationModel(locations)
    names = [names[model.reverseMapping[i]] for i in range(len(names))]

    axes = sorted(locations[0].keys())
    if len(axes) == 1:
        _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs)
    elif len(axes) == 2:
        _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs)
    else:
        raise ValueError("Only 1 or 2 axes are supported")


def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
    subplot = fig.add_subplot(111)
    for i, (support, color, name) in enumerate(
        zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
    ):
        if name is not None:
            subplot.set_title(name)
        subplot.set_xlabel(axis)
        pyplot.xlim(-1.0, +1.0)

        Xs = support.get(axis, (-1.0, 0.0, +1.0))
        X, Y = [], []
        for x in stops(Xs):
            y = supportScalar({axis: x}, support)
            X.append(x)
            Y.append(y)
        subplot.plot(X, Y, color=color, **kwargs)

    _plotLocationsDots(model.locations, [axis], subplot)


def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
    ax1, ax2 = axes

    axis3D = fig.add_subplot(111, projection="3d")
    for i, (support, color, name) in enumerate(
        zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
    ):
        if name is not None:
            axis3D.set_title(name)
        axis3D.set_xlabel(ax1)
        axis3D.set_ylabel(ax2)
        pyplot.xlim(-1.0, +1.0)
        pyplot.ylim(-1.0, +1.0)

        Xs = support.get(ax1, (-1.0, 0.0, +1.0))
        Ys = support.get(ax2, (-1.0, 0.0, +1.0))
        for x in stops(Xs):
            X, Y, Z = [], [], []
            for y in Ys:
                z = supportScalar({ax1: x, ax2: y}, support)
                X.append(x)
                Y.append(y)
                Z.append(z)
            axis3D.plot(X, Y, Z, color=color, **kwargs)
        for y in stops(Ys):
            X, Y, Z = [], [], []
            for x in Xs:
                z = supportScalar({ax1: x, ax2: y}, support)
                X.append(x)
                Y.append(y)
                Z.append(z)
            axis3D.plot(X, Y, Z, color=color, **kwargs)

    _plotLocationsDots(model.locations, [ax1, ax2], axis3D)


def plotDocument(doc, fig, **kwargs):
    doc.normalize()
    locations = [s.location for s in doc.sources]
    names = [s.name for s in doc.sources]
    plotLocations(locations, fig, names, **kwargs)


def _plotModelFromMasters2D(model, masterValues, fig, **kwargs):
    assert len(model.axisOrder) == 1
    axis = model.axisOrder[0]

    axis_min = min(loc.get(axis, 0) for loc in model.locations)
    axis_max = max(loc.get(axis, 0) for loc in model.locations)

    import numpy as np

    X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100)
    Y = []

    for x in X:
        loc = {axis: x}
        v = model.interpolateFromMasters(loc, masterValues)
        Y.append(v)

    subplot = fig.add_subplot(111)
    subplot.plot(X, Y, "-", **kwargs)


def _plotModelFromMasters3D(model, masterValues, fig, **kwargs):
    assert len(model.axisOrder) == 2
    axis1, axis2 = model.axisOrder[0], model.axisOrder[1]

    axis1_min = min(loc.get(axis1, 0) for loc in model.locations)
    axis1_max = max(loc.get(axis1, 0) for loc in model.locations)
    axis2_min = min(loc.get(axis2, 0) for loc in model.locations)
    axis2_max = max(loc.get(axis2, 0) for loc in model.locations)

    import numpy as np

    X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100)
    Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100)
    X, Y = np.meshgrid(X, Y)
    Z = []

    for row_x, row_y in zip(X, Y):
        z_row = []
        Z.append(z_row)
        for x, y in zip(row_x, row_y):
            loc = {axis1: x, axis2: y}
            v = model.interpolateFromMasters(loc, masterValues)
            z_row.append(v)
    Z = np.array(Z)

    axis3D = fig.add_subplot(111, projection="3d")
    axis3D.plot_surface(X, Y, Z, **kwargs)


def plotModelFromMasters(model, masterValues, fig, **kwargs):
    """Plot a variation model and set of master values corresponding
    to the locations to the model into a pyplot figure. Variation
    model must have axisOrder of size 1 or 2."""
    if len(model.axisOrder) == 1:
        _plotModelFromMasters2D(model, masterValues, fig, **kwargs)
    elif len(model.axisOrder) == 2:
        _plotModelFromMasters3D(model, masterValues, fig, **kwargs)
    else:
        raise ValueError("Only 1 or 2 axes are supported")
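
# A minimal usage sketch (hypothetical axis name and values; not part of the
# original module):
#
#     model = VariationModel([{}, {"A": 1.0}], axisOrder=["A"])
#     fig = pyplot.figure()
#     plotModelFromMasters(model, [100, 120], fig)
#     pyplot.show()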


def main(args=None):
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    # configure the library logger (for >= WARNING)
    configLogger()
    # comment this out to enable debug messages from logger
    # log.setLevel(logging.DEBUG)

    if len(args) < 1:
        print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
        print("  or", file=sys.stderr)
        print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
        print("  or", file=sys.stderr)
        print(
            "usage: fonttools varLib.plot location1=value1 location2=value2 ...",
            file=sys.stderr,
        )
        sys.exit(1)

    fig = pyplot.figure()
    fig.set_tight_layout(True)

    if len(args) == 1 and args[0].endswith(".designspace"):
        doc = DesignSpaceDocument()
        doc.read(args[0])
        plotDocument(doc, fig)
    else:
        axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
        if "=" not in args[0]:
            locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args]
            plotLocations(locs, fig)
        else:
            locations = []
            masterValues = []
            for arg in args:
                loc, v = arg.split("=")
                locations.append(dict(zip(axes, (float(v) for v in loc.split(",")))))
                masterValues.append(float(v))
            model = VariationModel(locations, axes[: len(locations[0])])
            plotModelFromMasters(model, masterValues, fig)

    pyplot.show()


if __name__ == "__main__":
    import sys

    sys.exit(main())

149
venv/lib/python3.13/site-packages/fontTools/varLib/stat.py
Normal file

@@ -0,0 +1,149 @@
"""Extra methods for DesignSpaceDocument to generate its STAT table data."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Dict, List, Union
|
||||
|
||||
import fontTools.otlLib.builder
|
||||
from fontTools.designspaceLib import (
|
||||
AxisLabelDescriptor,
|
||||
DesignSpaceDocument,
|
||||
DesignSpaceDocumentError,
|
||||
LocationLabelDescriptor,
|
||||
)
|
||||
from fontTools.designspaceLib.types import Region, getVFUserRegion, locationInRegion
|
||||
from fontTools.ttLib import TTFont
|
||||
|
||||
|
||||
def buildVFStatTable(ttFont: TTFont, doc: DesignSpaceDocument, vfName: str) -> None:
|
||||
"""Build the STAT table for the variable font identified by its name in
|
||||
the given document.
|
||||
|
||||
Knowing which variable we're building STAT data for is needed to subset
|
||||
the STAT locations to only include what the variable font actually ships.
|
||||
|
||||
.. versionadded:: 5.0
|
||||
|
||||
.. seealso::
|
||||
- :func:`getStatAxes()`
|
||||
- :func:`getStatLocations()`
|
||||
- :func:`fontTools.otlLib.builder.buildStatTable()`
|
||||
"""
|
||||
for vf in doc.getVariableFonts():
|
||||
if vf.name == vfName:
|
||||
break
|
||||
else:
|
||||
raise DesignSpaceDocumentError(
|
||||
f"Cannot find the variable font by name {vfName}"
|
||||
)
|
||||
|
||||
region = getVFUserRegion(doc, vf)
|
||||
|
||||
# if there are not currently any mac names don't add them here, that's inconsistent
|
||||
# https://github.com/fonttools/fonttools/issues/683
|
||||
macNames = any(
|
||||
nr.platformID == 1 for nr in getattr(ttFont.get("name"), "names", ())
|
||||
)
|
||||
|
||||
return fontTools.otlLib.builder.buildStatTable(
|
||||
ttFont,
|
||||
getStatAxes(doc, region),
|
||||
getStatLocations(doc, region),
|
||||
doc.elidedFallbackName if doc.elidedFallbackName is not None else 2,
|
||||
macNames=macNames,
|
||||
)
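
# A minimal usage sketch (names are hypothetical; the designspace must declare
# a <variable-font> with the given name):
#
#     from fontTools.ttLib import TTFont
#     from fontTools.designspaceLib import DesignSpaceDocument
#
#     doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#     vf = TTFont("MyFamily-VF.ttf")
#     buildVFStatTable(vf, doc, "MyFamilyVF")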


def getStatAxes(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:
    """Return a list of axis dicts suitable for use as the ``axes``
    argument to :func:`fontTools.otlLib.builder.buildStatTable()`.

    .. versionadded:: 5.0
    """
    # First, get the axis labels with explicit ordering
    # then append the others in the order they appear.
    maxOrdering = max(
        (axis.axisOrdering for axis in doc.axes if axis.axisOrdering is not None),
        default=-1,
    )
    axisOrderings = []
    for axis in doc.axes:
        if axis.axisOrdering is not None:
            axisOrderings.append(axis.axisOrdering)
        else:
            maxOrdering += 1
            axisOrderings.append(maxOrdering)
    return [
        dict(
            tag=axis.tag,
            name={"en": axis.name, **axis.labelNames},
            ordering=ordering,
            values=[
                _axisLabelToStatLocation(label)
                for label in axis.axisLabels
                if locationInRegion({axis.name: label.userValue}, userRegion)
            ],
        )
        for axis, ordering in zip(doc.axes, axisOrderings)
    ]
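
# E.g. three axes with explicit orderings [None, 0, None] are assigned
# [1, 0, 2]: the explicit 0 is kept, and the unordered axes continue counting
# upward from the maximum explicit ordering.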


def getStatLocations(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:
    """Return a list of location dicts suitable for use as the ``locations``
    argument to :func:`fontTools.otlLib.builder.buildStatTable()`.

    .. versionadded:: 5.0
    """
    axesByName = {axis.name: axis for axis in doc.axes}
    return [
        dict(
            name={"en": label.name, **label.labelNames},
            # Location in the designspace is keyed by axis name
            # Location in buildStatTable by axis tag
            location={
                axesByName[name].tag: value
                for name, value in label.getFullUserLocation(doc).items()
            },
            flags=_labelToFlags(label),
        )
        for label in doc.locationLabels
        if locationInRegion(label.getFullUserLocation(doc), userRegion)
    ]


def _labelToFlags(label: Union[AxisLabelDescriptor, LocationLabelDescriptor]) -> int:
    flags = 0
    if label.olderSibling:
        flags |= 1
    if label.elidable:
        flags |= 2
    return flags
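
# The two bits match the OpenType STAT AxisValue flags: bit 0 is
# OLDER_SIBLING_FONT_ATTRIBUTE, bit 1 is ELIDABLE_AXIS_VALUE_NAME, so a label
# that is both an older sibling and elidable yields flags == 3.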


def _axisLabelToStatLocation(
    label: AxisLabelDescriptor,
) -> Dict:
    label_format = label.getFormat()
    name = {"en": label.name, **label.labelNames}
    flags = _labelToFlags(label)
    if label_format == 1:
        return dict(name=name, value=label.userValue, flags=flags)
    if label_format == 3:
        return dict(
            name=name,
            value=label.userValue,
            linkedValue=label.linkedUserValue,
            flags=flags,
        )
    if label_format == 2:
        res = dict(
            name=name,
            nominalValue=label.userValue,
            flags=flags,
        )
        if label.userMinimum is not None:
            res["rangeMinValue"] = label.userMinimum
        if label.userMaximum is not None:
            res["rangeMaxValue"] = label.userMaximum
        return res
    raise NotImplementedError("Unknown STAT label format")

739
venv/lib/python3.13/site-packages/fontTools/varLib/varStore.py
Normal file

@@ -0,0 +1,739 @@
from fontTools.misc.roundTools import noRound, otRound
from fontTools.misc.intTools import bit_count
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
from fontTools.varLib.builder import (
    buildVarRegionList,
    buildVarStore,
    buildVarRegion,
    buildVarData,
)
from functools import partial
from collections import defaultdict
from heapq import heappush, heappop


NO_VARIATION_INDEX = ot.NO_VARIATION_INDEX
ot.VarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX


def _getLocationKey(loc):
    return tuple(sorted(loc.items(), key=lambda kv: kv[0]))


class OnlineVarStoreBuilder(object):
    def __init__(self, axisTags):
        self._axisTags = axisTags
        self._regionMap = {}
        self._regionList = buildVarRegionList([], axisTags)
        self._store = buildVarStore(self._regionList, [])
        self._data = None
        self._model = None
        self._supports = None
        self._varDataIndices = {}
        self._varDataCaches = {}
        self._cache = None

    def setModel(self, model):
        self.setSupports(model.supports)
        self._model = model

    def setSupports(self, supports):
        self._model = None
        self._supports = list(supports)
        if self._supports and not self._supports[0]:
            del self._supports[0]  # Drop base master support
        self._cache = None
        self._data = None

    def finish(self, optimize=True):
        self._regionList.RegionCount = len(self._regionList.Region)
        self._store.VarDataCount = len(self._store.VarData)
        for data in self._store.VarData:
            data.ItemCount = len(data.Item)
            data.calculateNumShorts(optimize=optimize)
        return self._store

    def _add_VarData(self, num_items=1):
        regionMap = self._regionMap
        regionList = self._regionList

        regions = self._supports
        regionIndices = []
        for region in regions:
            key = _getLocationKey(region)
            idx = regionMap.get(key)
            if idx is None:
                varRegion = buildVarRegion(region, self._axisTags)
                idx = regionMap[key] = len(regionList.Region)
                regionList.Region.append(varRegion)
            regionIndices.append(idx)

        # Check if we have one already...
        key = tuple(regionIndices)
        varDataIdx = self._varDataIndices.get(key)
        if varDataIdx is not None:
            self._outer = varDataIdx
            self._data = self._store.VarData[varDataIdx]
            self._cache = self._varDataCaches[key]
            if len(self._data.Item) + num_items > 0xFFFF:
                # This is full. Need new one.
                varDataIdx = None

        if varDataIdx is None:
            self._data = buildVarData(regionIndices, [], optimize=False)
            self._outer = len(self._store.VarData)
            self._store.VarData.append(self._data)
            self._varDataIndices[key] = self._outer
            if key not in self._varDataCaches:
                self._varDataCaches[key] = {}
            self._cache = self._varDataCaches[key]

    def storeMasters(self, master_values, *, round=round):
        deltas = self._model.getDeltas(master_values, round=round)
        base = deltas.pop(0)
        return base, self.storeDeltas(deltas, round=noRound)

    def storeMastersMany(self, master_values_list, *, round=round):
        deltas_list = [
            self._model.getDeltas(master_values, round=round)
            for master_values in master_values_list
        ]
        base_list = [deltas.pop(0) for deltas in deltas_list]
        return base_list, self.storeDeltasMany(deltas_list, round=noRound)

    def storeDeltas(self, deltas, *, round=round):
        deltas = [round(d) for d in deltas]
        if len(deltas) == len(self._supports) + 1:
            deltas = tuple(deltas[1:])
        else:
            assert len(deltas) == len(self._supports)
            deltas = tuple(deltas)

        if not self._data:
            self._add_VarData()

        varIdx = self._cache.get(deltas)
        if varIdx is not None:
            return varIdx

        inner = len(self._data.Item)
        if inner == 0xFFFF:
            # Full array. Start new one.
            self._add_VarData()
            return self.storeDeltas(deltas, round=noRound)
        self._data.addItem(deltas, round=noRound)

        varIdx = (self._outer << 16) + inner
        self._cache[deltas] = varIdx
        return varIdx

    def storeDeltasMany(self, deltas_list, *, round=round):
        deltas_list = [[round(d) for d in deltas] for deltas in deltas_list]
        deltas_list = tuple(tuple(deltas) for deltas in deltas_list)

        if not self._data:
            self._add_VarData(len(deltas_list))

        varIdx = self._cache.get(deltas_list)
        if varIdx is not None:
            return varIdx

        inner = len(self._data.Item)
        if inner + len(deltas_list) > 0xFFFF:
            # Full array. Start new one.
            self._add_VarData(len(deltas_list))
            return self.storeDeltasMany(deltas_list, round=noRound)
        for i, deltas in enumerate(deltas_list):
            self._data.addItem(deltas, round=noRound)

            varIdx = (self._outer << 16) + inner + i
            self._cache[deltas] = varIdx

        varIdx = (self._outer << 16) + inner
        self._cache[deltas_list] = varIdx

        return varIdx
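
# A minimal numeric sketch (hypothetical model and values; not part of the
# original module): with masters at the default and at wght=1,
#
#     from fontTools.varLib.models import VariationModel
#
#     builder = OnlineVarStoreBuilder(["wght"])
#     builder.setModel(VariationModel([{}, {"wght": 1.0}]))
#     base, varIdx = builder.storeMasters([100, 120])  # base=100, delta 20 stored
#     store = builder.finish()  # ot.VarStore; varIdx = (outer << 16) + inner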


def VarData_addItem(self, deltas, *, round=round):
    deltas = [round(d) for d in deltas]

    countUs = self.VarRegionCount
    countThem = len(deltas)
    if countUs + 1 == countThem:
        deltas = list(deltas[1:])
    else:
        assert countUs == countThem, (countUs, countThem)
        deltas = list(deltas)
    self.Item.append(deltas)
    self.ItemCount = len(self.Item)


ot.VarData.addItem = VarData_addItem


def VarRegion_get_support(self, fvar_axes):
    return {
        fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)
        for i, reg in enumerate(self.VarRegionAxis)
        if reg.PeakCoord != 0
    }


ot.VarRegion.get_support = VarRegion_get_support


def VarStore___bool__(self):
    return bool(self.VarData)


ot.VarStore.__bool__ = VarStore___bool__


class VarStoreInstancer(object):
    def __init__(self, varstore, fvar_axes, location={}):
        self.fvar_axes = fvar_axes
        assert varstore is None or varstore.Format == 1
        self._varData = varstore.VarData if varstore else []
        self._regions = varstore.VarRegionList.Region if varstore else []
        self.setLocation(location)

    def setLocation(self, location):
        self.location = dict(location)
        self._clearCaches()

    def _clearCaches(self):
        self._scalars = {}

    def _getScalar(self, regionIdx):
        scalar = self._scalars.get(regionIdx)
        if scalar is None:
            support = self._regions[regionIdx].get_support(self.fvar_axes)
            scalar = supportScalar(self.location, support)
            self._scalars[regionIdx] = scalar
        return scalar

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        delta = 0.0
        for d, s in zip(deltas, scalars):
            if not s:
                continue
            delta += d * s
        return delta

    def __getitem__(self, varidx):
        major, minor = varidx >> 16, varidx & 0xFFFF
        if varidx == NO_VARIATION_INDEX:
            return 0.0
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
        deltas = varData[major].Item[minor]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)

    def interpolateFromDeltas(self, varDataIndex, deltas):
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)
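
# Continuing the sketch above: instancing the store at normalized wght = 0.5
# gives region scalar supportScalar({"wght": 0.5}, {"wght": (0, 1, 1)}) == 0.5,
# so instancer[varIdx] returns 20 * 0.5 == 10.0, and the instanced value is
# base + 10 == 110.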


#
# Optimizations
#
# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed
# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.


def VarStore_subset_varidxes(
    self,
    varIdxes,
    optimize=True,
    retainFirstMap=False,
    advIdxes=set(),
    *,
    VarData="VarData",
):
    # Sort out used varIdxes by major/minor.
    used = defaultdict(set)
    for varIdx in varIdxes:
        if varIdx == NO_VARIATION_INDEX:
            continue
        major = varIdx >> 16
        minor = varIdx & 0xFFFF
        used[major].add(minor)
    del varIdxes

    #
    # Subset VarData
    #

    varData = getattr(self, VarData)
    newVarData = []
    varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
    for major, data in enumerate(varData):
        usedMinors = used.get(major)
        if usedMinors is None:
            continue
        newMajor = len(newVarData)
        newVarData.append(data)

        items = data.Item
        newItems = []
        if major == 0 and retainFirstMap:
            for minor in range(len(items)):
                newItems.append(
                    items[minor] if minor in usedMinors else [0] * len(items[minor])
                )
                varDataMap[minor] = minor
        else:
            if major == 0:
                minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
            else:
                minors = sorted(usedMinors)
            for minor in minors:
                newMinor = len(newItems)
                newItems.append(items[minor])
                varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor

        data.Item = newItems
        data.ItemCount = len(data.Item)

        if VarData == "VarData":
            data.calculateNumShorts(optimize=optimize)

    setattr(self, VarData, newVarData)
    setattr(self, VarData + "Count", len(newVarData))

    self.prune_regions()

    return varDataMap
|
||||
|
||||
|
||||
ot.VarStore.subset_varidxes = VarStore_subset_varidxes
|
||||
|
||||
|
||||
def VarStore_prune_regions(self, *, VarData="VarData", VarRegionList="VarRegionList"):
|
||||
"""Remove unused VarRegions."""
|
||||
#
|
||||
# Subset VarRegionList
|
||||
#
|
||||
|
||||
# Collect.
|
||||
usedRegions = set()
|
||||
for data in getattr(self, VarData):
|
||||
usedRegions.update(data.VarRegionIndex)
|
||||
# Subset.
|
||||
regionList = getattr(self, VarRegionList)
|
||||
regions = regionList.Region
|
||||
newRegions = []
|
||||
regionMap = {}
|
||||
for i in sorted(usedRegions):
|
||||
regionMap[i] = len(newRegions)
|
||||
newRegions.append(regions[i])
|
||||
regionList.Region = newRegions
|
||||
regionList.RegionCount = len(regionList.Region)
|
||||
# Map.
|
||||
for data in getattr(self, VarData):
|
||||
data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]
|
||||
|
||||
|
||||
ot.VarStore.prune_regions = VarStore_prune_regions
|
||||
|
||||
|
||||
def _visit(self, func):
|
||||
"""Recurse down from self, if type of an object is ot.Device,
|
||||
call func() on it. Works on otData-style classes."""
|
||||
|
||||
if type(self) == ot.Device:
|
||||
func(self)
|
||||
|
||||
elif isinstance(self, list):
|
||||
for that in self:
|
||||
_visit(that, func)
|
||||
|
||||
elif hasattr(self, "getConverters") and not hasattr(self, "postRead"):
|
||||
for conv in self.getConverters():
|
||||
that = getattr(self, conv.name, None)
|
||||
if that is not None:
|
||||
_visit(that, func)
|
||||
|
||||
elif isinstance(self, ot.ValueRecord):
|
||||
for that in self.__dict__.values():
|
||||
_visit(that, func)
|
||||
|
||||
|
||||
def _Device_recordVarIdx(self, s):
|
||||
"""Add VarIdx in this Device table (if any) to the set s."""
|
||||
if self.DeltaFormat == 0x8000:
|
||||
s.add((self.StartSize << 16) + self.EndSize)
|
||||
|
||||
|
||||
def Object_collect_device_varidxes(self, varidxes):
|
||||
adder = partial(_Device_recordVarIdx, s=varidxes)
|
||||
_visit(self, adder)
|
||||
|
||||
|
||||
ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes
|
||||
ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes
|
||||
|
||||
|
||||
def _Device_mapVarIdx(self, mapping, done):
|
||||
"""Map VarIdx in this Device table (if any) through mapping."""
|
||||
if id(self) in done:
|
||||
return
|
||||
done.add(id(self))
|
||||
if self.DeltaFormat == 0x8000:
|
||||
varIdx = mapping[(self.StartSize << 16) + self.EndSize]
|
||||
self.StartSize = varIdx >> 16
|
||||
self.EndSize = varIdx & 0xFFFF
|
||||
|
||||
|
||||
def Object_remap_device_varidxes(self, varidxes_map):
|
||||
mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
|
||||
_visit(self, mapper)
|
||||
|
||||
|
||||
ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes
|
||||
ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes
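
# A minimal usage sketch (added illustration, not part of the original
# module; assumes `font` is a TTFont whose GDEF/GPOS tables reference an
# ItemVariationStore). Collect every VarIdx used by Device tables, subset
# the store to just those, then rewrite the stored references:
#
#     varidxes = set()
#     font["GDEF"].table.collect_device_varidxes(varidxes)
#     font["GPOS"].table.collect_device_varidxes(varidxes)
#     mapping = font["GDEF"].table.VarStore.subset_varidxes(varidxes)
#     font["GDEF"].table.remap_device_varidxes(mapping)
#     font["GPOS"].table.remap_device_varidxes(mapping)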


class _Encoding(object):
    def __init__(self, chars):
        self.chars = chars
        self.width = bit_count(chars)
        self.columns = self._columns(chars)
        self.overhead = self._characteristic_overhead(self.columns)
        self.items = set()

    def append(self, row):
        self.items.add(row)

    def extend(self, lst):
        self.items.update(lst)

    def width_sort_key(self):
        return self.width, self.chars

    @staticmethod
    def _characteristic_overhead(columns):
        """Returns overhead in bytes of encoding this characteristic
        as a VarData."""
        c = 4 + 6  # 4 bytes for LOffset, 6 bytes for VarData header
        c += bit_count(columns) * 2
        return c
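
    # Illustrative example (added note, not from the original source): a
    # characteristic with three active columns costs 4 + 6 + 3 * 2 == 16
    # bytes of fixed overhead, however many rows it ends up holding.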

    @staticmethod
    def _columns(chars):
        cols = 0
        i = 1
        while chars:
            if chars & 0b1111:
                cols |= i
            chars >>= 4
            i <<= 1
        return cols
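
    # Illustrative example (added note): each column occupies one 4-bit
    # nibble of `chars`; this collapses every non-zero nibble to a single
    # bit, e.g. chars == 0b0011_0001 (a word column plus a byte column)
    # yields cols == 0b11.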

    def gain_from_merging(self, other_encoding):
        combined_chars = other_encoding.chars | self.chars
        combined_width = bit_count(combined_chars)
        combined_columns = self.columns | other_encoding.columns
        combined_overhead = _Encoding._characteristic_overhead(combined_columns)
        combined_gain = (
            +self.overhead
            + other_encoding.overhead
            - combined_overhead
            - (combined_width - self.width) * len(self.items)
            - (combined_width - other_encoding.width) * len(other_encoding.items)
        )
        return combined_gain
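
    # Illustrative example (added note): merging a 1-byte-wide encoding of
    # 5 rows into a 2-byte-wide encoding over the same column drops the
    # narrow encoding's 12-byte overhead but widens its 5 rows by one byte
    # each, so with N rows in the wide encoding the gain is
    # 12 + 12 - 12 - (2 - 1) * 5 - (2 - 2) * N == 7 bytes.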


class _EncodingDict(dict):
    def __missing__(self, chars):
        r = self[chars] = _Encoding(chars)
        return r

    def add_row(self, row):
        chars = self._row_characteristics(row)
        self[chars].append(row)

    @staticmethod
    def _row_characteristics(row):
        """Returns encoding characteristics for a row."""
        longWords = False

        chars = 0
        i = 1
        for v in row:
            if v:
                chars += i
            if not (-128 <= v <= 127):
                chars += i * 0b0010
            if not (-32768 <= v <= 32767):
                longWords = True
                break
            i <<= 4

        if longWords:
            # Redo; only allow 2byte/4byte encoding
            chars = 0
            i = 1
            for v in row:
                if v:
                    chars += i * 0b0011
                if not (-32768 <= v <= 32767):
                    chars += i * 0b1100
                i <<= 4

        return chars
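
    # Illustrative example (added note): for row == (1, 0, 300), column 0
    # fits in one byte (nibble 0b0001), column 1 is zero (nibble 0b0000),
    # and column 2 needs a 16-bit word (nibble 0b0011), so the resulting
    # characteristic is 0b0011_0000_0001 == 0x301.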


def VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1):
    """Optimize storage. Returns mapping from old VarIdxes to new ones."""

    # Overview:
    #
    # For each VarData row, we first extend it with zeroes to have
    # one column per region in VarRegionList. We then group the
    # rows into _Encoding objects, by their "characteristic" bitmap.
    # The characteristic bitmap is a binary number representing how
    # many bytes each column of the data takes up to encode. Each
    # column is encoded in four bits. For example, if a column has
    # only values in the range -128..127, it would only have a single
    # bit set in the characteristic bitmap for that column. If it has
    # values in the range -32768..32767, it would have two bits set.
    # The number of ones in the characteristic bitmap is the "width"
    # of the encoding.
    #
    # Each encoding as such has a number of "active" (i.e. non-zero)
    # columns. The overhead of encoding the characteristic bitmap
    # is 10 bytes, plus 2 bytes per active column.
    #
    # When an encoding is merged into another one, if the characteristic
    # of the old encoding is a subset of the new one, then the overhead
    # of the old encoding is completely eliminated. However, each row
    # now would require more bytes to encode, to the tune of one byte
    # per characteristic bit that is active in the new encoding but not
    # in the old one.
    #
    # The "gain" of merging two encodings is how many bytes we save by doing so.
    #
    # High-level algorithm:
    #
    # - Each encoding has a minimal way to encode it. However, because
    #   of the overhead of encoding the characteristic bitmap, it may
    #   be beneficial to merge two encodings together, if there is
    #   gain in doing so. As such, we need to search for the best
    #   such successive merges.
    #
    # Algorithm:
    #
    # - Put all encodings into a "todo" list.
    #
    # - Sort todo list (for stability) by width_sort_key(), which is a tuple
    #   of the following items:
    #   * The "width" of the encoding.
    #   * The characteristic bitmap of the encoding, with higher-numbered
    #     columns compared first.
    #
    # - Make a priority-queue of the gain from combining each two
    #   encodings in the todo list. The priority queue is sorted by
    #   decreasing gain. Only positive gains are included.
    #
    # - While priority queue is not empty:
    #   - Pop the first item from the priority queue,
    #   - Merge the two encodings it represents,
    #   - Remove the two encodings from the todo list,
    #   - Insert positive gains from combining the new encoding with
    #     all existing todo list items into the priority queue,
    #   - If a todo list item with the same characteristic bitmap as
    #     the new encoding exists, remove it from the todo list and
    #     merge it into the new encoding.
    #   - Insert the new encoding into the todo list,
    #
    # - Encode all remaining items in the todo list.
    #
    # The output is then sorted for stability, in the following way:
    # - The VarRegionList of the input is kept intact.
    # - The VarData is sorted by the same width_sort_key() used at the beginning.
    # - Within each VarData, the items are sorted as vectors of numbers.
    #
    # Finally, each VarData is optimized to remove the empty columns and
    # reorder columns as needed.

    # TODO
    # Check that no two VarRegions are the same; if they are, fold them.

    n = len(self.VarRegionList.Region)  # Number of columns
    zeroes = [0] * n

    front_mapping = {}  # Map from old VarIdxes to full row tuples

    encodings = _EncodingDict()

    # Collect all items into a set of full rows (with lots of zeroes.)
    for major, data in enumerate(self.VarData):
        regionIndices = data.VarRegionIndex

        for minor, item in enumerate(data.Item):
            row = list(zeroes)

            if quantization == 1:
                for regionIdx, v in zip(regionIndices, item):
                    row[regionIdx] += v
            else:
                for regionIdx, v in zip(regionIndices, item):
                    row[regionIdx] += (
                        round(v / quantization) * quantization
                    )  # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785

            row = tuple(row)

            if use_NO_VARIATION_INDEX and not any(row):
                front_mapping[(major << 16) + minor] = None
                continue

            encodings.add_row(row)
            front_mapping[(major << 16) + minor] = row

    # Prepare for the main algorithm.
    todo = sorted(encodings.values(), key=_Encoding.width_sort_key)
    del encodings

    # Repeatedly pick two best encodings to combine, and combine them.

    heap = []
    for i, encoding in enumerate(todo):
        for j in range(i + 1, len(todo)):
            other_encoding = todo[j]
            combining_gain = encoding.gain_from_merging(other_encoding)
            if combining_gain > 0:
                heappush(heap, (-combining_gain, i, j))
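
    # Added note: heap entries may refer to encodings that get merged away
    # in the loop below; their slots in `todo` are set to None, and stale
    # entries are skipped lazily when popped.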

    while heap:
        _, i, j = heappop(heap)
        if todo[i] is None or todo[j] is None:
            continue

        encoding, other_encoding = todo[i], todo[j]
        todo[i], todo[j] = None, None

        # Combine the two encodings
        combined_chars = other_encoding.chars | encoding.chars
        combined_encoding = _Encoding(combined_chars)
        combined_encoding.extend(encoding.items)
        combined_encoding.extend(other_encoding.items)

        for k, enc in enumerate(todo):
            if enc is None:
                continue

            # In the unlikely event that the same encoding exists already,
            # combine it.
            if enc.chars == combined_chars:
                combined_encoding.extend(enc.items)
                todo[k] = None
                continue

            combining_gain = combined_encoding.gain_from_merging(enc)
            if combining_gain > 0:
                heappush(heap, (-combining_gain, k, len(todo)))

        todo.append(combined_encoding)

    encodings = [encoding for encoding in todo if encoding is not None]

    # Assemble final store.
    back_mapping = {}  # Mapping from full rows to new VarIdxes
    encodings.sort(key=_Encoding.width_sort_key)
    self.VarData = []
    for encoding in encodings:
        items = sorted(encoding.items)

        while items:
            major = len(self.VarData)
            data = ot.VarData()
            self.VarData.append(data)
            data.VarRegionIndex = range(n)
            data.VarRegionCount = len(data.VarRegionIndex)

            # Each major can only encode up to 0xFFFF entries.
            data.Item, items = items[:0xFFFF], items[0xFFFF:]

            for minor, item in enumerate(data.Item):
                back_mapping[item] = (major << 16) + minor

    # Compile final mapping.
    varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
    for k, v in front_mapping.items():
        varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX

    # Recalculate things and go home.
    self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
    self.VarDataCount = len(self.VarData)
    for data in self.VarData:
        data.ItemCount = len(data.Item)
        data.optimize()

    # Remove unused regions.
    self.prune_regions()

    return varidx_map


ot.VarStore.optimize = VarStore_optimize
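
# A minimal usage sketch (added illustration; mirrors what main() below
# does, assuming `font` is a TTFont with a GDEF ItemVariationStore):
#
#     store = font["GDEF"].table.VarStore
#     varidx_map = store.optimize(quantization=1)
#     font["GDEF"].table.remap_device_varidxes(varidx_map)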


def main(args=None):
    """Optimize a font's GDEF variation store"""
    from argparse import ArgumentParser
    from fontTools import configLogger
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otBase import OTTableWriter

    parser = ArgumentParser(prog="varLib.varStore", description=main.__doc__)
    parser.add_argument("--quantization", type=int, default=1)
    parser.add_argument("fontfile")
    parser.add_argument("outfile", nargs="?")
    options = parser.parse_args(args)

    # TODO: allow user to configure logging via command-line options
    configLogger(level="INFO")

    quantization = options.quantization
    fontfile = options.fontfile
    outfile = options.outfile

    font = TTFont(fontfile)
    gdef = font["GDEF"]
    store = gdef.table.VarStore

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("Before: %7d bytes" % size)

    varidx_map = store.optimize(quantization=quantization)

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("After: %7d bytes" % size)

    if outfile is not None:
        gdef.table.remap_device_varidxes(varidx_map)
        if "GPOS" in font:
            font["GPOS"].table.remap_device_varidxes(varidx_map)

        font.save(outfile)


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest

    sys.exit(doctest.testmod().failed)
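
# Usage note (added illustration; assumes fontTools is installed so the
# module is importable):
#
#     python -m fontTools.varLib.varStore --quantization=4 In.ttf Out.ttf
#
# With no arguments the guard above runs the module doctests instead.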