From 83d922a70893e00274f560f12eca80e8311c7ccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 17 Mar 2023 14:51:30 +0100 Subject: [PATCH 001/734] Initial commit --- .github/workflows/release.yml | 44 +++++++ .github/workflows/tests.yml | 34 +++++ .gitignore | 3 + .pre-commit-config.yaml | 30 +++++ outlines/__init__.py | 37 ++++++ outlines/compile.py | 25 ++++ outlines/graph.py | 229 ++++++++++++++++++++++++++++++++++ outlines/text/__init__.py | 4 + outlines/text/basic.py | 23 ++++ outlines/text/var.py | 72 +++++++++++ pyproject.toml | 32 +++++ requirements.txt | 3 + setup.cfg | 14 +++ tests/test_graph.py | 101 +++++++++++++++ tests/text/test_basic.py | 42 +++++++ tests/text/test_var.py | 31 +++++ 16 files changed, 724 insertions(+) create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/tests.yml create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 outlines/__init__.py create mode 100644 outlines/compile.py create mode 100644 outlines/graph.py create mode 100644 outlines/text/__init__.py create mode 100644 outlines/text/basic.py create mode 100644 outlines/text/var.py create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100644 tests/test_graph.py create mode 100644 tests/text/test_basic.py create mode 100644 tests/text/test_var.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..a9c826b3 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,44 @@ +name: Release + +on: + release: + types: + - created + +jobs: + release-job: + name: Build and publish on PyPi + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Build sdist and wheel + run: | + python -m pip install -U pip + python -m pip install build + python -m build + - name: Check that the package version matches the Release name + run: | + grep -Rq "^Version: ${GITHUB_REF:10}$" outlines.egg-info/PKG-INFO + - name: Check sdist install and imports + run: | + mkdir -p test-sdist + cd test-sdist + python -m venv venv-sdist + venv-sdist/bin/python -m pip install ../dist/outlines-*.tar.gz + venv-sdist/bin/python -c "import outlines" + - name: Check wheel install and imports + run: | + mkdir -p test-wheel + cd test-wheel + python -m venv venv-wheel + venv-wheel/bin/python -m pip install ../dist/outlines-*.whl + venv-wheel/bin/python -c "import outlines" + - name: Publish to PyPi + uses: pypa/gh-action-pypi-publish@v1.4.2 + with: + user: __token__ + password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..937986c7 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,34 @@ +name: Tests + +on: + pull_request: + branches: [main] + push: + branches: [main] + +jobs: + style: + name: Check the code style + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.9 + - uses: pre-commit/action@v2.0.0 + + tests: + name: Run the tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Set up test environment + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Run tests + run: | + pytest diff --git a/.gitignore b/.gitignore new file mode 100644 index 
00000000..14550d04 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +*.egg-info +__pycache__ +*_version.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..831c44e9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,30 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-merge-conflict + - id: debug-statements + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + args: [--profile, black] +- repo: https://github.com/asottile/pyupgrade + rev: v2.31.1 + hooks: + - id: pyupgrade + args: [--py37-plus] +- repo: https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 +- repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.1.1 + hooks: + - id: mypy diff --git a/outlines/__init__.py b/outlines/__init__.py new file mode 100644 index 00000000..c52450b9 --- /dev/null +++ b/outlines/__init__.py @@ -0,0 +1,37 @@ +"""Outlines is a Probabilistic Generative Model Programming language. + +Outlines allows one to build and evaluate graphs that represent interactions between +user-defined strings (usually called "prompts"), images, and generative models. + +Most generative models are probabilistic: their outputs are random variables +whose distribution is determined by the generative model. Chained generative +models are thus probabilistic programs [1]_. By default, compiling an Outlines +graph returns a callable object which will yield different values each time it +is called. + +Deterministic decoding methods are implemented as graph transformations: they +take a probabilistic program as an input and return a graph that represents the +decoding process. Compiling these graphs will produce a callable that returns +the same value each time it is called. + +Outlines supports plugins as long as they take text/images as input and +return strings and/or images. They are represented by operators in the graph. + +The design of Outlines was heavily inspired by `Aesara <https://github.com/aesara-devs/aesara>`_, +a library for defining, optimizing and evaluating mathematical expressions +involving multi-dimensional arrays. A complete integration would be desirable +and is not excluded. + + +References +---------- +.. [1] Dohan, David, et al. "Language model cascades." arXiv preprint arXiv:2207.10342 (2022).
+ +""" +from outlines.compile import compile +from outlines.text import string + +__all__ = [ + "compile", + "string", +] diff --git a/outlines/compile.py b/outlines/compile.py new file mode 100644 index 00000000..7f13885a --- /dev/null +++ b/outlines/compile.py @@ -0,0 +1,25 @@ +from outlines.graph import io_toposort +from outlines.text.var import StringConstant + + +def compile(symbolic_inputs, outputs): + sorted = io_toposort(symbolic_inputs, outputs) + + def fn(*inputs): + storage_map = {s: v for s, v in zip(symbolic_inputs, inputs)} + + for node in sorted: + for i in node.inputs: + if isinstance(i, StringConstant): + storage_map[i] = i.value + inputs = [storage_map[i] for i in node.inputs] + results = node.op.perform(*inputs) + for i, o in enumerate(node.outputs): + storage_map[o] = results[i] + + if len(outputs) == 1: + return storage_map[outputs[0]] + else: + return tuple(storage_map[o] for o in outputs) + + return fn diff --git a/outlines/graph.py b/outlines/graph.py new file mode 100644 index 00000000..e87546bc --- /dev/null +++ b/outlines/graph.py @@ -0,0 +1,229 @@ +"""Graph objects and manipulation functions. + +Manipulating Outlines templates and operations implicitly defines a graph that +can be explored, rewritten and compiled. + +This module defines the basic types these graphs are build from: + +- `Variable` nodes represent constants or results of computation; +- `Op`s represent the operations performed on variables; +- `Apply` nodes represent the application of an `Op` onto one or several + variables. + +This module is heavily inspired by `Aesara List: + """Return a list of this node's parents.""" + raise NotImplementedError() + + +class Variable(Node): + """A `Variable` is a node in an expression graph that represents a variable. + + There are a few kind of `Variable` to be aware of: + + - `StringVariable` subclass of `Variable` that represents a ``str`` object. + + """ + + def __init__( + self, + owner: Optional["Apply"] = None, + index: Optional[int] = None, + name: Optional[str] = None, + ): + if owner is not None and not isinstance(owner, Apply): + raise TypeError("owner must be an Apply instance") + self.owner = owner + + if index is not None and not isinstance(index, int): + raise TypeError("index must be an int") + self.index = index + + if name is not None and not isinstance(name, str): + raise TypeError("name must be a string") + self.name = name + + def __str__(self): + """Return a ``str`` representation of the `Variable`.""" + if self.name is not None: + return self.name + if self.owner is not None: + op = self.owner.op + if self.index == 0: + return f"{str(op)}.out" + else: + return f"{str(op)}.{str(self.index)}" + else: + return f"<{getattr(type(self), '__name__')}>" + + +class Apply(Node): + """A `Node` represents the application of an `Op` to variables. + + It is instantiated by calling the `Op.make_node` method with a list of + inputs. The `Apply` node is in charge of filtering the inputs and outputs. + + Attribute + --------- + op + The operation that produces `outputs` given `inputs`. + inputs + The arguments of the expression modeled by the `Apply` node. + outputs + The outputs of the expression modeled by the `Apply` node. 
+ + """ + + def __init__( + self, op: "Op", inputs: Sequence["Variable"], outputs: Sequence["Variable"] + ): + if not isinstance(inputs, Sequence): + raise TypeError("The inputs of an Apply node must be a sequence type") + + if not isinstance(outputs, Sequence): + raise TypeError("The outputs of an Apply node must be a sequence type") + + self.op = op + self.inputs: List[Variable] = [] + + # Filter inputs + for input in inputs: + if isinstance(input, Variable): + self.inputs.append(input) + else: + raise TypeError( + f"The 'inputs' argument to an Apply node must contain Variable instances, got {input} instead." + ) + + self.outputs: List[Variable] = [] + # Filter outputs + for i, output in enumerate(outputs): + if isinstance(output, Variable): + if output.owner is None: + output.owner = self + output.index = i + elif output.owner is not self or output.index != i: + raise ValueError( + "All outputs passed to an Apply node must belong to it." + ) + self.outputs.append(output) + else: + raise TypeError( + f"The 'outputs' to argument to an Apply node must contain Variable instance, got {output} instead" + ) + + def get_parents(self) -> List[Variable]: + return list(self.inputs) + + +class Op: + """Represents and constructs operations in a graph. + + An `Op` instance has the following responsibilities: + + * Construct `Apply` nodes via the :meth:`Op.make_node` method + * Perform the computation of the modeled operation via the + :meth:`Op.perform` method. + + A user that wants to add new capabilities to the libraries: generative + model, API interactions, tools, etc. will need to subclass `Op` and + implement the :meth:`Op.perform` and :meth:`Op.make_node` methods. + + """ + + def make_node(self, *inputs: Variable) -> Apply: + r"""Construct an `Apply` node that represents the application of this + operation to the given inputs. + + This must be implemented by subclasses as it specifies the input + and output types of the `Apply` node. + + Parameters + ---------- + inputs + The `Variable`\s that represent the inputs of this operation + + Returns + ------- + The constructed `Apply` node. + + """ + raise NotImplementedError + + def __call__(self, *inputs: Variable) -> Union[Variable, List[Variable]]: + """Calls :meth:`Op.make_node` to construct an `Apply` node.""" + + node = self.make_node(*inputs) + if len(node.outputs) == 1: + return node.outputs[0] + else: + return node.outputs + + def perform(self, node: Apply, *inputs): + """Apply the functions to the inputs and return the output. + + Parameters + ---------- + node + The symbolic `Apply` node that represents this computation. + inputs + Sequence of non-symbolic/numeric/text intputs. + + Returns + ------- + The non-symbolic/numerica/text outputs of the function that this + operation represents + + """ + + def __str__(self): + """Return a ``str`` representation of the `Op`.""" + return getattr(type(self), "__name__") + + +def io_toposort( + inputs: Iterable[Variable], outputs: Reversible[Variable] +) -> List[Apply]: + """Sort the graph topologically starting from the inputs to the outputs. + + This function is typically used when compiling the graph, where we need + to apply operators in the correct order to go from the user inputs to + the program outputs. + + Parameters + ---------- + inputs + Graph inputs. + outputs + Graph outputs. 
+ + """ + computed = set(inputs) + todo = [o.owner for o in reversed(outputs) if o.owner] + order = [] + while todo: + node = todo.pop() + if node.outputs[0] in computed: + continue + if all(i in computed or i.owner is None for i in node.inputs): + computed.update(node.outputs) + order.append(node) + else: + todo.append(node) + todo.extend(i.owner for i in node.inputs if i.owner) + + return order diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py new file mode 100644 index 00000000..2e110191 --- /dev/null +++ b/outlines/text/__init__.py @@ -0,0 +1,4 @@ +from .basic import * +from .var import as_string, string + +__all__ = ["string"] diff --git a/outlines/text/basic.py b/outlines/text/basic.py new file mode 100644 index 00000000..4217a461 --- /dev/null +++ b/outlines/text/basic.py @@ -0,0 +1,23 @@ +"""Basic `StringVariable` manipulations.""" +import outlines +from outlines.graph import Apply, Op +from outlines.text.var import StringVariable + +__all__ = ["add"] + + +class Add(Op): + def __init__(self): + pass + + def make_node(self, s, t): + s = outlines.text.as_string(s) + t = outlines.text.as_string(t) + out = StringVariable() + return Apply(self, [s, t], [out]) + + def perform(self, s, t): + return (s + t,) + + +add = Add() diff --git a/outlines/text/var.py b/outlines/text/var.py new file mode 100644 index 00000000..0124d546 --- /dev/null +++ b/outlines/text/var.py @@ -0,0 +1,72 @@ +from functools import singledispatch + +import outlines.text as ot +from outlines.graph import Variable + + +class StringVariable(Variable): + """Subclass to add the string operators to `Variable`.""" + + def __init__(self, owner=None, index=None, name=None): + super().__init__(owner, index, name) + + def __add__(self, other): + return ot.add(self, other) + + def __radd__(self, other): + return ot.add(other, self) + + +string = StringVariable + + +class StringConstant(StringVariable): + """Constant `StringVariable` that corresponds to user input.""" + + def __init__(self, value, name=None): + self.value = value + super().__init__(name=name) + + def __str__(self): + if self.name is not None: + name = self.name + else: + name = "StringConstant" + return f"{name}{{'{self.value}'}}" + + +@singledispatch +def as_string(x, name=None): + """Convert `x` into an equivalent `StringVariable`. + + This function can be used to turn `str`, `int` and `float` instances into a + `StringVariable`. + + Parameters + ---------- + x + The object that will we converted into a `StringVariable`. + name + If a new `StringVariable` instance is created it will be attributed this + name. 
+ + """ + raise TypeError(f"{x} cannot be cast into a string") + + +@as_string.register(str) +def as_string_variable_strings(x, name=None): + return StringConstant(x, name) + + +@as_string.register(Variable) +def as_string_variable_Variable(x, name=None): + if not isinstance(x, StringVariable): + raise TypeError(f"{type(x)} cannot be cast as a `StringVariable`.") + return x + + +@as_string.register(int) +@as_string.register(float) +def as_string_variable_numbers(x, name=None): + return StringConstant(str(x), name) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..7cff8fbc --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,32 @@ +[build-system] +requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[project] +name = "outlines" +authors= [{name = "Normal Computing", email = "support@normalcomputing.com"}] +description = "Probabilistic Generative Model Programming" +requires-python = ">=3.7" +keywords=[ + "normal computing", + "machine learning", + "deep learning", + "language models", + "diffusion models", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Information Technology", + "Intended Audience :: Science/Research", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "mako" +] +dynamic = ["version"] + +[tool.setuptools_scm] +write_to = "outlines/_version.py" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..28311efc --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +-e . +pre-commit +pytest diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..f590378c --- /dev/null +++ b/setup.cfg @@ -0,0 +1,14 @@ +[flake8] +max-line-length = 88 +select = C,E,F,W +ignore = E203,E231,E501,E741,W503,W504,C901,E731 +per-file-ignores = + **/__init__.py:F401,F403 +exclude = + normalai/_version.py + +[tool:pytest] +python_files=test*.py +testpaths=tests +filterwarnings = + error diff --git a/tests/test_graph.py b/tests/test_graph.py new file mode 100644 index 00000000..3d8c1042 --- /dev/null +++ b/tests/test_graph.py @@ -0,0 +1,101 @@ +import pytest + +from outlines.graph import Apply, Op, Variable, io_toposort + + +class MyVar(Variable): + def __init__(self, value): + self.value = value + super().__init__() + + def __eq__(self, other): + return type(self) == type(other) and self.value == other.value + + def __hash__(self): + return hash((type(self), self.value)) + + +class MyOp(Op): + def make_node(self, *inputs): + result = sum(input.value for input in inputs) + outputs = [MyVar(result)] + return Apply(self, inputs, outputs) + + +op = MyOp() + + +def test_apply_wrong_args(): + with pytest.raises(TypeError): + Apply(op, 1.0, []) + + with pytest.raises(TypeError): + Apply(op, [], 1.0) + + with pytest.raises(TypeError): + Apply(op, [1.0], []) + + with pytest.raises(TypeError): + Apply(op, [], [1.0]) + + +def test_Apply(): + i = Variable(name="i") + o = Variable(name="o") + a = Apply(op, [i], [o]) + assert len(a.inputs) == 1 + assert len(a.outputs) == 1 + assert a.inputs[0].name == "i" + assert a.outputs[0].name == "o" + assert a.outputs[0].owner == a + + +def test_Apply_multiple_inputs(): + i1, i2 = Variable(name="i1"), Variable(name="i2") + o = Variable(name="o") + a = Apply(op, [i1, i2], [o]) + assert len(a.inputs) == 2 + + +def test_Variable_wrong_input(): + owner = "txt" + with 
pytest.raises(TypeError): + Variable(owner) + + owner = Apply(op, [], []) + index = "i" + with pytest.raises(TypeError): + Variable(owner, index) + + owner = Apply(op, [], []) + index = "i" + name = 1 + with pytest.raises(TypeError): + Variable(owner, index, name) + + +def test_Op(): + v1, v2 = MyVar(1), MyVar(2) + node = op.make_node(v1, v2) + assert [x for x in node.inputs] == [v1, v2] + assert [type(x) for x in node.outputs] == [MyVar] + assert node.outputs[0].owner is node and node.outputs[0].index == 0 + + +def test_string_formatting(): + v1, v2 = MyVar(1), MyVar(2) + node = op.make_node(v1, v2) + assert str(node.op) == "MyOp" + assert str(v1) == "<MyVar>" + assert [str(o) for o in node.outputs] == ["MyOp.out"] + + +def test_toposort_simple(): + r1, r2, r5 = MyVar(1), MyVar(2), MyVar(5) + o1 = op(r1, r2) + o1.name = "o1" + o2 = op(o1, r5) + o2.name = "o2" + + res = io_toposort([r1, r2, r5], [o2]) + assert res == [o1.owner, o2.owner] diff --git a/tests/text/test_basic.py b/tests/text/test_basic.py new file mode 100644 index 00000000..c7bcfcfe --- /dev/null +++ b/tests/text/test_basic.py @@ -0,0 +1,42 @@ +import outlines +from outlines.graph import Apply +from outlines.text.basic import Add, add +from outlines.text.var import StringVariable + + +def test_add_symbolic(): + s, t = outlines.string(), outlines.string() + w = add(s, t) + assert isinstance(w, StringVariable) + assert isinstance(w.owner, Apply) + assert isinstance(w.owner.op, Add) + assert len(w.owner.inputs) == 2 + assert len(w.owner.outputs) == 1 + + a = Add() + assert a.perform("a", "string")[0] == "astring" + + w = s + t + assert isinstance(w, StringVariable) + assert isinstance(w.owner, Apply) + assert isinstance(w.owner.op, Add) + assert len(w.owner.inputs) == 2 + assert len(w.owner.outputs) == 1 + + +def test_add_mixed(): + s, t = "a string", outlines.string() + w = s + t + assert isinstance(w, StringVariable) + assert isinstance(w.owner, Apply) + assert isinstance(w.owner.op, Add) + assert len(w.owner.inputs) == 2 + assert len(w.owner.outputs) == 1 + + s, t = outlines.string(), "a string" + w = s + t + assert isinstance(w, StringVariable) + assert isinstance(w.owner, Apply) + assert isinstance(w.owner.op, Add) + assert len(w.owner.inputs) == 2 + assert len(w.owner.outputs) == 1 diff --git a/tests/text/test_var.py b/tests/text/test_var.py new file mode 100644 index 00000000..7f7afc20 --- /dev/null +++ b/tests/text/test_var.py @@ -0,0 +1,31 @@ +import pytest + +import outlines +from outlines.graph import Variable +from outlines.text.var import StringConstant + + +def test_cast(): + with pytest.raises(TypeError): + outlines.text.as_string([]) + + with pytest.raises(TypeError): + outlines.text.as_string(()) + + with pytest.raises(TypeError): + outlines.text.as_string(Variable()) + + s = outlines.text.as_string(1) + assert type(s) == StringConstant + assert s.value == "1" + + s = outlines.text.as_string(1.3) + assert type(s) == StringConstant + assert s.value == "1.3" + + s = outlines.text.as_string("test") + assert type(s) == StringConstant + assert s.value == "test" + + s = outlines.text.string() + outlines.text.as_string(s) From e91d37be124e14b3934d4e7602171492acba2d95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 24 Mar 2023 16:11:53 +0100 Subject: [PATCH 002/734] Parse and transpile scripts --- outlines/graph.py | 2 +- outlines/text/__init__.py | 3 ++- outlines/text/script.py | 49 +++++++++++++++++++++++++++++++++ tests/text/test_script.py | 49 +++++++++++++++++++++++++++++++++ 4 files
changed, 101 insertions(+), 2 deletions(-) create mode 100644 outlines/text/script.py create mode 100644 tests/text/test_script.py diff --git a/outlines/graph.py b/outlines/graph.py index e87546bc..15c1e7ae 100644 --- a/outlines/graph.py +++ b/outlines/graph.py @@ -1,6 +1,6 @@ """Graph objects and manipulation functions. -Manipulating Outlines templates and operations implicitly defines a graph that +Manipulating Outlines prompts and operations implicitly defines a graph that can be explored, rewritten and compiled. This module defines the basic types these graphs are built from: diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 2e110191..3a4d0106 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,4 +1,5 @@ from .basic import * +from .script import script from .var import as_string, string -__all__ = ["string"] +__all__ = ["string", "script"] diff --git a/outlines/text/script.py b/outlines/text/script.py new file mode 100644 index 00000000..ecfee34b --- /dev/null +++ b/outlines/text/script.py @@ -0,0 +1,49 @@ +from functools import singledispatchmethod +from typing import Dict, Union + +from mako import lexer +from mako.parsetree import Expression, Text + +from outlines.graph import Op +from outlines.text.var import StringVariable, as_string + + +class Script: + """Represents a scripted interaction with generative models. + + The `Script` class provides a convenient way to define an Outlines graph + using the Mako templating language. `Script`s are instantiated by passing a string + that represents the flow of interaction with one or several generative models. + + """ + + def __init__(self, script): + self.parsetree = lexer.Lexer(script).parse() + + def __call__(self, **inputs: Dict[str, Union[StringVariable, Op]]): + nodes = self.parsetree.nodes + graph = self.parse_node(nodes[0], inputs) + for node in self.parsetree.nodes[1:]: + graph = graph + self.parse_node(node, inputs) + + return graph + + @singledispatchmethod + def parse_node(self, node, inputs): + raise NotImplementedError(f"Cannot transpile {node} to an Outlines graph.") + + @parse_node.register(Text) + def parse_Text(self, node, inputs): + return as_string(node.content) + + @parse_node.register(Expression) + def parse_Expression(self, node, inputs): + try: + return as_string(inputs[node.text]) + except KeyError: + raise TypeError( + f"Prompt evaluation missing 1 required argument: '{node.text}'" + ) + + +script = Script diff --git a/tests/text/test_script.py b/tests/text/test_script.py new file mode 100644 index 00000000..81720851 --- /dev/null +++ b/tests/text/test_script.py @@ -0,0 +1,49 @@ +import pytest + +from outlines.text import script, string +from outlines.text.basic import Add +from outlines.text.var import StringConstant, StringVariable + + +def test_template_text(): + with pytest.raises(TypeError): + script("String ${one}")(two="two") + + string = "Test" + t = script(string)() + assert isinstance(t, StringConstant) + assert t.value == "Test" + + t = script("Test ${variable}")(variable="string") + assert t.owner.inputs[0].value == "Test " + assert t.owner.inputs[1].value == "string" + + t = script("Test ${variable}")(variable=1) + assert t.owner.inputs[0].value == "Test " + assert t.owner.inputs[1].value == "1" + + t = script("Test repeated ${variable} ${variable}")(variable="string") + assert isinstance(t.owner.op, Add) + assert t.owner.inputs[1].value == "string" + assert isinstance(t.owner.inputs[0].owner.op, Add) + assert t.owner.inputs[0].owner.inputs[1].value == " "
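+ # The template is transpiled left to right, so the assertions above check + # the nested addition ((("Test repeated " + variable) + " ") + variable).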
+ + t = script("Test ${one} ${two}")(one="1", two="2") + assert t.owner.inputs[1].value == "2" + assert t.owner.inputs[0].owner.inputs[0].owner.inputs[1].value == "1" + + +def test_template_string_variable(): + variable = string() + t = script("Test ${variable}")(variable=variable) + assert isinstance(t.owner.op, Add) + assert isinstance(t.owner.inputs[0], StringConstant) + assert isinstance(t.owner.inputs[1], StringVariable) + assert t.owner.inputs[0].value == "Test " + + variable = string() + t = script("${variable} test")(variable=variable) + assert isinstance(t.owner.op, Add) + assert isinstance(t.owner.inputs[0], StringVariable) + assert isinstance(t.owner.inputs[1], StringConstant) + assert t.owner.inputs[1].value == " test" From a3c4ada1e6e655dd8308c31a59f19f3d8f4726d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 25 Mar 2023 13:18:47 +0100 Subject: [PATCH 003/734] Add tests for the compiler --- outlines/__init__.py | 3 ++- outlines/compile.py | 37 ++++++++++++++++++++++++++++++------- tests/test_compile.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 8 deletions(-) create mode 100644 tests/test_compile.py diff --git a/outlines/__init__.py b/outlines/__init__.py index c52450b9..e69b2a4d 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -29,9 +29,10 @@ """ from outlines.compile import compile -from outlines.text import string +from outlines.text import script, string __all__ = [ "compile", + "script", "string", ] diff --git a/outlines/compile.py b/outlines/compile.py index 7f13885a..bf4c9e3e 100644 --- a/outlines/compile.py +++ b/outlines/compile.py @@ -1,19 +1,42 @@ -from outlines.graph import io_toposort +from typing import Callable, Iterable, Reversible + +from outlines.graph import Variable, io_toposort from outlines.text.var import StringConstant -def compile(symbolic_inputs, outputs): - sorted = io_toposort(symbolic_inputs, outputs) +def compile(inputs: Iterable[Variable], outputs: Reversible[Variable]) -> Callable: + r"""Compile an Outlines graph into an executable function. + + `compile` first sorts the graph defined by the input and output nodes + topologically. It then visits the nodes one by one and executes their + `Op`'s `perform` method, fetching and storing their values in a map. + + Parameters + ---------- + inputs + The symbolic `Variable`\s that represent the inputs of the compiled + program. + outputs + The symbolic `Variable`\s that represent the outputs of the compiled + program. + + Returns + ------- + A function which returns the values of the output nodes when passed the values + of the input nodes as arguments. 
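+ + Examples + -------- + A minimal sketch of the intended usage, mirroring the tests in this patch + (`string` is the symbolic string constructor from `outlines.text`): + + >>> s = string() + >>> fn = compile([s], ["Hello " + s]) + >>> fn("world") + 'Hello world'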
+ + """ + sorted = io_toposort(inputs, outputs) - def fn(*inputs): - storage_map = {s: v for s, v in zip(symbolic_inputs, inputs)} + def fn(*values): + storage_map = {s: v for s, v in zip(inputs, values)} for node in sorted: for i in node.inputs: if isinstance(i, StringConstant): storage_map[i] = i.value - inputs = [storage_map[i] for i in node.inputs] - results = node.op.perform(*inputs) + node_inputs = [storage_map[i] for i in node.inputs] + results = node.op.perform(*node_inputs) for i, o in enumerate(node.outputs): storage_map[o] = results[i] diff --git a/tests/test_compile.py b/tests/test_compile.py new file mode 100644 index 00000000..17a57696 --- /dev/null +++ b/tests/test_compile.py @@ -0,0 +1,32 @@ +from outlines import compile, script, string + + +def test_compile(): + s = string() + out = compile([s], [s]) + assert out("test") == "test" + + s = string() + p = "Test " + s + out = compile([s], [p]) + assert out("test") == "Test test" + + s1 = string() + s2 = string() + p = s1 + s2 + out = compile([s1, s2], [p]) + assert out("one", "two") == "onetwo" + + s1 = string() + s2 = string() + p1 = s1 + s2 + p2 = s1 + "three" + out = compile([s1, s2], [p1, p2]) + assert out("one", "two") == ("onetwo", "onethree") + + +def test_compile_scripts(): + s = string() + o = script("This is a ${var}")(var=s) + out = compile([s], [o]) + assert out("test") == "This is a test" From 682bf7a0cf101398bba1e83d36285d13179e9840 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 26 Mar 2023 21:42:59 +0200 Subject: [PATCH 004/734] Remove `requirements.txt` in favor of pyproject's optional dependencies --- .github/workflows/tests.yml | 2 +- outlines/text/script.py | 37 ++++++++++++++++++++++++++++++++++--- pyproject.toml | 6 ++++++ 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 937986c7..0e6e68bf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -28,7 +28,7 @@ jobs: - name: Set up test environment run: | python -m pip install --upgrade pip - pip install -r requirements.txt + pip install .[test] - name: Run tests run: | pytest diff --git a/outlines/text/script.py b/outlines/text/script.py index ecfee34b..986c875e 100644 --- a/outlines/text/script.py +++ b/outlines/text/script.py @@ -5,6 +5,7 @@ from mako.parsetree import Expression, Text from outlines.graph import Op +from outlines.text.models import LanguageModel from outlines.text.var import StringVariable, as_string @@ -21,6 +22,13 @@ def __init__(self, script): self.parsetree = lexer.Lexer(script).parse() def __call__(self, **inputs: Dict[str, Union[StringVariable, Op]]): + """Create an Outlines graph from a Mako template. + + When one calls a `Script` instance with arguments that represent + variables in the template, Outlines parses the template and iteratively + builds the graph it represents before returning it. + + """ nodes = self.parsetree.nodes graph = self.parse_node(nodes[0], inputs) for node in self.parsetree.nodes[1:]: @@ -33,13 +41,36 @@ def parse_node(self, node, inputs): raise NotImplementedError(f"Cannot transpile {node} to an Outlines graph.") @parse_node.register(Text) - def parse_Text(self, node, inputs): + def parse_Text(self, node, inputs, graph): + """Parse Mako's `Text` nodes. + + `Text` nodes corresponds to `StringConstants` in Outline's language. 
+ + """ return as_string(node.content) @parse_node.register(Expression) - def parse_Expression(self, node, inputs): + def parse_Expression(self, node, inputs, graph): + """Parse Mako's `Expression` nodes. + + We first fetch the argument that the user passed to the `__call__` + method that corresponds to the current variable name. Then we check if + this argument has already been seen; if that's the case we assume the + user is referencing the output of a previously-run LM and add the + corresponding node. + + """ try: - return as_string(inputs[node.text]) + user_input = inputs[node.text] + if isinstance(user_input, LanguageModel): + try: + return self.model_outputs[node.text] + except KeyError: + output = user_input(graph) + self.model_outputs[node.text] = output + return output + else: + return as_string(inputs[node.text]) except KeyError: raise TypeError( f"Prompt evaluation missing 1 required argument: '{node.text}'" diff --git a/pyproject.toml b/pyproject.toml index 7cff8fbc..dd0dcc49 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,5 +28,11 @@ dependencies = [ ] dynamic = ["version"] +[project.optional-dependencies] +test = [ + "pre-commit", + "pytest" +] + [tool.setuptools_scm] write_to = "outlines/_version.py" From d937034d705485b4b443bf6358d367babd2c4414 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 25 Mar 2023 22:26:19 +0100 Subject: [PATCH 005/734] Add `LanguageModel` Op --- outlines/text/models/__init__.py | 1 + outlines/text/models/model.py | 57 ++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 outlines/text/models/__init__.py create mode 100644 outlines/text/models/model.py diff --git a/outlines/text/models/__init__.py b/outlines/text/models/__init__.py new file mode 100644 index 00000000..43357b6b --- /dev/null +++ b/outlines/text/models/__init__.py @@ -0,0 +1 @@ +from .model import LanguageModel diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py new file mode 100644 index 00000000..e3b9c331 --- /dev/null +++ b/outlines/text/models/model.py @@ -0,0 +1,57 @@ +from outlines.graph import Op, Apply +from outlines.text.var import as_string, StringVariable + + +class LanguageModel(Op): + + def make_node(self, prompt): + prompt = as_string(prompt) + out = StringVariable() + return Apply(self, [prompt], [out]) + + def perform(self, prompt): + return self.sample(prompt) + + def sample(self, prompt): + return (f"2x{prompt}",) + + def logprob(self, prompt, context): + """Return the log-probability of each token in the vocabulary given the + input prompt and the current context (previously generated tokens). + + Parameters + ---------- + prompt + The input to the language model, parameter of the distribution. + context + A sequence that contains the previously generated tokens that + are part of the context window. This sequence can be shorter + than the total sequence generated so far if the context length + has been reached. + + Returns + ------- + A sequence that represents the log-probability distribution over the + tokens. + + """ + raise NotImplementedError + + def encode(self, sequence: str): + """Encode the given sequence. + + Defaults to a pass-through so it does not have to be implemented by + subclasses that represent an integration to an API that take text as an + input. + + """ + return sequence + + def decode(self, ids) -> str: + """Decode a list of ids to a string. 
+ + Defaults to a pass-through so it does not have to be implemented by + subclasses that represent an integration to an API that returns text. + + """ + return ids From 16e11fb8c609fbac1b13064337fd4d79e3f2caa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 25 Mar 2023 22:26:46 +0100 Subject: [PATCH 006/734] Transpile scripts containing language models --- outlines/text/models/model.py | 5 ++--- outlines/text/script.py | 7 ++++--- tests/text/test_script.py | 19 +++++++++++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py index e3b9c331..7423bf0c 100644 --- a/outlines/text/models/model.py +++ b/outlines/text/models/model.py @@ -1,9 +1,8 @@ -from outlines.graph import Op, Apply -from outlines.text.var import as_string, StringVariable +from outlines.graph import Apply, Op +from outlines.text.var import StringVariable, as_string class LanguageModel(Op): - def make_node(self, prompt): prompt = as_string(prompt) out = StringVariable() diff --git a/outlines/text/script.py b/outlines/text/script.py index 986c875e..448583e6 100644 --- a/outlines/text/script.py +++ b/outlines/text/script.py @@ -20,6 +20,7 @@ class Script: def __init__(self, script): self.parsetree = lexer.Lexer(script).parse() + self.model_outputs = {} def __call__(self, **inputs: Dict[str, Union[StringVariable, Op]]): """Create an Outlines graph from a Mako template. @@ -30,14 +31,14 @@ def __call__(self, **inputs: Dict[str, Union[StringVariable, Op]]): """ nodes = self.parsetree.nodes - graph = self.parse_node(nodes[0], inputs) + graph = self.parse_node(nodes[0], inputs, "") for node in self.parsetree.nodes[1:]: - graph = graph + self.parse_node(node, inputs) + graph = graph + self.parse_node(node, inputs, graph) return graph @singledispatchmethod - def parse_node(self, node, inputs): + def parse_node(self, node, inputs, graph): raise NotImplementedError(f"Cannot transpile {node} to an Outlines graph.") @parse_node.register(Text) diff --git a/tests/text/test_script.py b/tests/text/test_script.py index 81720851..ef8079de 100644 --- a/tests/text/test_script.py +++ b/tests/text/test_script.py @@ -47,3 +47,22 @@ def test_template_string_variable(): assert isinstance(t.owner.inputs[0], StringVariable) assert isinstance(t.owner.inputs[1], StringConstant) assert t.owner.inputs[1].value == " test" + + +def test_template_language_model(): + from outlines.compile import compile + from outlines.text.models import LanguageModel + + # Single occurrence + lm = LanguageModel() + t = script("Test ${lm}")(lm=lm) + out = compile([], [t]) + assert out() == "Test 2xTest " + + # The first reference to the language model should + # execute decoding, the following ones should be replaced + # by the result of this evaluation.
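+ # (The evaluation is cached in `Script.model_outputs`, keyed by the + # template variable's name.)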
+ lm = LanguageModel() + t = script("Test ${lm} more text ${lm}")(lm=lm) + out = compile([], [t]) + assert out() == "Test 2xTest more text 2xTest " From a0558f82646247e593fc7f6447ff49ce9f3a20f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 26 Mar 2023 16:45:33 +0200 Subject: [PATCH 007/734] Remove extra line breaks and indents from scripts --- outlines/text/script.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/outlines/text/script.py b/outlines/text/script.py index 448583e6..70667c2d 100644 --- a/outlines/text/script.py +++ b/outlines/text/script.py @@ -1,3 +1,4 @@ +import textwrap from functools import singledispatchmethod from typing import Dict, Union @@ -19,6 +20,7 @@ class Script: """ def __init__(self, script): + script = textwrap.dedent(script).lstrip().rstrip() self.parsetree = lexer.Lexer(script).parse() self.model_outputs = {} From 045c90b9b1a206694af819ae8e82479d0b06bb31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 26 Mar 2023 16:46:21 +0200 Subject: [PATCH 008/734] Remove compilation step in script testing --- tests/text/test_script.py | 26 ++++++++++++++---------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tests/text/test_script.py b/tests/text/test_script.py index ef8079de..1db587d8 100644 --- a/tests/text/test_script.py +++ b/tests/text/test_script.py @@ -2,6 +2,7 @@ from outlines.text import script, string from outlines.text.basic import Add +from outlines.text.models import LanguageModel from outlines.text.var import StringConstant, StringVariable @@ -49,20 +50,29 @@ def test_template_string_variable(): assert t.owner.inputs[1].value == " test" +class MockLanguageModel(LanguageModel): + def sample(self, prompt): + return f"2x{prompt}" + + def test_template_language_model(): - from outlines.compile import compile - from outlines.text.models import LanguageModel + r"""Test the transpilation of scripts that contain one or + several `LanguageModel`\s. + """ # Single occurrence - lm = LanguageModel() + lm = MockLanguageModel() t = script("Test ${lm}")(lm=lm) - out = compile([], [t]) - assert out() == "Test 2xTest " + assert isinstance(t.owner.op, Add) + assert isinstance(t.owner.inputs[1].owner.op, LanguageModel) + + lm_input = t.owner.inputs[1].owner.inputs[0].value + assert lm_input == "Test " + + # The first reference to the language model should + # execute decoding, the following ones should be replaced + # by the result of this evaluation.
- lm = LanguageModel() + lm = MockLanguageModel(name="lm") t = script("Test ${lm} more text ${lm}")(lm=lm) - out = compile([], [t]) - assert out() == "Test 2xTest more text 2xTest " + assert isinstance(t.owner.inputs[1].owner.op, MockLanguageModel) + assert t.owner.inputs[1].owner.inputs[0].value == "Test " From ba27986a0cb8f9bd3038c503843a9d45c7255a12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 26 Mar 2023 16:47:09 +0200 Subject: [PATCH 009/734] Add HuggingFace's GPT2 model --- outlines/text/models/hugging_face.py | 76 ++++++++++++++++++++++++++++ outlines/text/models/model.py | 27 ++++++++-- requirements.txt | 3 -- tests/test_compile.py | 19 +++++++ 4 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 outlines/text/models/hugging_face.py delete mode 100644 requirements.txt diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py new file mode 100644 index 00000000..be3cc26f --- /dev/null +++ b/outlines/text/models/hugging_face.py @@ -0,0 +1,76 @@ +import random +from typing import Dict + +from outlines.text.models.model import LanguageModel + +try: + import jax + from transformers import AutoTokenizer, FlaxAutoModelForCausalLM +except ImportError: + raise ImportError( + "You need to install `transformers` and `flax` to run the GTP2 model." + ) + + +class GPT2(LanguageModel): + def __init__(self): + """Initialize the GPT2 model. + + We use HuggingFace's Flax implementation of GPT2. This method will download + the model's weights if they are not yet cached on your machine. + + # TODO: Download the pre-trained weight when the model is executed instead of + # when the graph is built. + + """ + + random.seed() + self.seed = random.randint(0, 2**32) + super().__init__() + + def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: + """Sample new tokens give the tokenized prompt. + + Since HuggingFace's `generate` method returns the prompt along with the + generated token we need to truncate the returned array of tokens. + + Parameters + ---------- + prompt_tokens + A dictionary that contains the ids of the tokens contained in the input + prompt and the input mask. This is the default output of HuggingFace's + tokenizers. + + + """ + self.model = FlaxAutoModelForCausalLM.from_pretrained("gpt2") + returned_tokens = self.model.generate( + **prompt_tokens, + do_sample=True, + max_new_tokens=100, + prng_key=jax.random.PRNGKey(self.seed), + pad_token_id=self.tokenizer.eos_token_id, + ).sequences + new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] + new_tokens = new_tokens.squeeze() + + return new_tokens + + def encode(self, sequence: str) -> Dict[str, jax.Array]: + """Return a list of token ids from a text sequence. + + Parameters + ---------- + sequence + The text sequence to tokenize. + + Returns + ------- + A dictionary that contains the token ids and the input mask. + """ + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + return self.tokenizer(sequence, return_tensors="jax") + + def decode(self, ids: jax.Array) -> str: + """Return a text sequence from a array of token ids.""" + return self.tokenizer.decode(ids, skip_special_tokens=True) diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py index 7423bf0c..b4a6a9fc 100644 --- a/outlines/text/models/model.py +++ b/outlines/text/models/model.py @@ -3,21 +3,42 @@ class LanguageModel(Op): + """An `Op` that produces a sample from a language model. 
+ + The output of language models in outlines is modeled as a random variable. + Therefore, calling a language model will return a random sequence (via + ancestral sampling) by default. Other decoding methods are constructed + as graph transformations. + + """ + + def __init__(self, name=None): + super().__init__() + self.name = name + def make_node(self, prompt): prompt = as_string(prompt) out = StringVariable() + if self.name is not None: + out.name = self.name + return Apply(self, [prompt], [out]) def perform(self, prompt): - return self.sample(prompt) + tokens = self.encode(prompt) + sampled_tokens = self.sample(tokens) + outputs = self.decode(sampled_tokens) + return (outputs,) - def sample(self, prompt): - return (f"2x{prompt}",) + def sample(self, tokens): + raise NotImplementedError def logprob(self, prompt, context): """Return the log-probability of each token in the vocabulary given the input prompt and the current context (previously generated tokens). + # TODO: Implement `logprob` as a graph transformation? + Parameters ---------- prompt diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 28311efc..00000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ --e . -pre-commit -pytest diff --git a/tests/test_compile.py b/tests/test_compile.py index 17a57696..625d1e69 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -1,3 +1,5 @@ +import pytest + from outlines import compile, script, string @@ -30,3 +32,20 @@ def test_compile_scripts(): o = script("This is a ${var}")(var=s) out = compile([s], [o]) assert out("test") == "This is a test" + + +@pytest.mark.skip +def test_compile_hf(): + """Move when we have found a better way to run these slow examples.""" + import outlines + import outlines.text.models.hugging_face + + gpt2 = outlines.text.models.hugging_face.GPT2() + o = script( + """ + Here is a good joke: ${joke} + And a random fact: ${fact} + """ + )(joke=gpt2, fact=gpt2) + fn = compile([], [o]) + print(fn()) From 8e22dc18458410f3fa320c1473101da5d69dc42e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Mar 2023 10:40:10 +0200 Subject: [PATCH 010/734] Add tests for the `LanguageModel` Op --- tests/text/test_model.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 tests/text/test_model.py diff --git a/tests/text/test_model.py b/tests/text/test_model.py new file mode 100644 index 00000000..10362835 --- /dev/null +++ b/tests/text/test_model.py @@ -0,0 +1,22 @@ +from outlines import string +from outlines.text.models.model import LanguageModel + + +def test_initialize_model(): + llm = LanguageModel(name="llm") + + prompt = string() + out = llm(prompt) + assert isinstance(out.owner.op, LanguageModel) + assert out.owner.inputs[0] == prompt + assert out.name == "llm" + + +class MockLM(LanguageModel): + def sample(self, _): + return "test" + + +def test_sample(): + llm = MockLM() + assert llm.perform("")[0] == "test" From edd83366120f74b44833b7bd4ba6115541112fc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Mar 2023 11:25:56 +0200 Subject: [PATCH 011/734] Simplify `as_string` and add new test --- outlines/text/__init__.py | 2 +- outlines/text/var.py | 12 +++++------- tests/text/__init.py | 0 tests/text/test_var.py | 4 ++++ 4 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 tests/text/__init.py diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 3a4d0106..4769ffbc 100644 --- a/outlines/text/__init__.py +++ 
b/outlines/text/__init__.py @@ -2,4 +2,4 @@ from .script import script from .var import as_string, string -__all__ = ["string", "script"] +__all__ = ["as_string", "string", "script"] diff --git a/outlines/text/var.py b/outlines/text/var.py index 0124d546..f9d43f27 100644 --- a/outlines/text/var.py +++ b/outlines/text/var.py @@ -59,14 +59,12 @@ def as_string_variable_strings(x, name=None): return StringConstant(x, name) -@as_string.register(Variable) -def as_string_variable_Variable(x, name=None): - if not isinstance(x, StringVariable): - raise TypeError(f"{type(x)} cannot be cast as a `StringVariable`.") - return x - - @as_string.register(int) @as_string.register(float) def as_string_variable_numbers(x, name=None): return StringConstant(str(x), name) + + +@as_string.register(StringVariable) +def as_string_variable_StringVariable(x, name=None): + return x diff --git a/tests/text/__init.py b/tests/text/__init.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/text/test_var.py b/tests/text/test_var.py index 7f7afc20..136c4c36 100644 --- a/tests/text/test_var.py +++ b/tests/text/test_var.py @@ -15,6 +15,10 @@ def test_cast(): with pytest.raises(TypeError): outlines.text.as_string(Variable()) + s = outlines.text.as_string(StringConstant("")) + assert isinstance(s, StringConstant) + assert s.value == "" + s = outlines.text.as_string(1) assert type(s) == StringConstant assert s.value == "1" From 7300909b1c6ceb4623db1e57e332ad82da1c1d7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Mar 2023 11:26:17 +0200 Subject: [PATCH 012/734] Add the `ImageVariable` type --- outlines/__init__.py | 1 + outlines/image/__init__.py | 3 ++ outlines/image/models/__init__.py | 0 outlines/image/models/hugging_face.py | 0 outlines/image/var.py | 64 +++++++++++++++++++++++++++ pyproject.toml | 3 +- tests/image/test_var.py | 28 ++++++++++++ tests/text/__init__.py | 0 8 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 outlines/image/__init__.py create mode 100644 outlines/image/models/__init__.py create mode 100644 outlines/image/models/hugging_face.py create mode 100644 outlines/image/var.py create mode 100644 tests/image/test_var.py create mode 100644 tests/text/__init__.py diff --git a/outlines/__init__.py b/outlines/__init__.py index e69b2a4d..697faf5c 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -29,6 +29,7 @@ """ from outlines.compile import compile +from outlines.image import as_image from outlines.text import script, string __all__ = [ diff --git a/outlines/image/__init__.py b/outlines/image/__init__.py new file mode 100644 index 00000000..216d0e7e --- /dev/null +++ b/outlines/image/__init__.py @@ -0,0 +1,3 @@ +from .var import as_image, image + +__all__ = ["as_image", "image"] diff --git a/outlines/image/models/__init__.py b/outlines/image/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/outlines/image/models/hugging_face.py b/outlines/image/models/hugging_face.py new file mode 100644 index 00000000..e69de29b diff --git a/outlines/image/var.py b/outlines/image/var.py new file mode 100644 index 00000000..7a8ec072 --- /dev/null +++ b/outlines/image/var.py @@ -0,0 +1,64 @@ +from functools import singledispatch + +from PIL.Image import Image as PILImage + +from outlines.graph import Variable + + +class ImageVariable(Variable): + """Subclass to add the image operators to `Variable`.""" + + def __init__(self, owner=None, index=None, name=None): + super().__init__(owner, index, name) + + +image = ImageVariable + 
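+# (`image` mirrors the `string = StringVariable` alias in `outlines.text.var`: +# a symbolic image is created with `outlines.image.image()`.)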
+ + +class ImageConstant(ImageVariable): + """Constant `ImageVariable` that corresponds to user input.""" + + def __init__(self, value, name=None): + if not isinstance(value, PILImage): + raise TypeError( + "`ImageConstant` values must be instances of `pillow.Image`." + ) + + self.value = value + super().__init__(name=name) + + def __str__(self): + if self.name is not None: + name = self.name + else: + name = "ImageConstant" + return f"{name}{{'{self.value}'}}" + + +@singledispatch +def as_image(x, name=None): + """Convert `x` into an equivalent `ImageVariable` + + This function can be used to turn `pillow.Image` instances into an + `ImageVariable`. + + Parameters + ---------- + x + The object that will be converted into an `ImageVariable`. + name + If a new `ImageVariable` instance is created it will be assigned this + name. + + """ + raise TypeError(f"{x} cannot be cast into an image") + + +@as_image.register(PILImage) +def as_image_Image(x, name=None): + return ImageConstant(x, name) + + +@as_image.register(ImageVariable) +def as_image_ImageConstant(x, name=None): + return x diff --git a/pyproject.toml b/pyproject.toml index dd0dcc49..42d6edc4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,8 @@ classifiers = [ "Topic :: Scientific/Engineering :: Artificial Intelligence", ] dependencies = [ - "mako" + "mako", + "pillow", ] dynamic = ["version"] diff --git a/tests/image/test_var.py b/tests/image/test_var.py new file mode 100644 index 00000000..8b2faecc --- /dev/null +++ b/tests/image/test_var.py @@ -0,0 +1,28 @@ +import pytest +from PIL.Image import Image as PILImage + +import outlines +from outlines.image.var import ImageConstant +from outlines.text.var import Variable + + +def test_cast(): + with pytest.raises(TypeError): + outlines.as_image("") + + with pytest.raises(TypeError): + outlines.as_image(Variable()) + + with pytest.raises(TypeError): + outlines.as_image(ImageConstant("")) + + img = PILImage() + s = outlines.as_image(img) + assert isinstance(s, ImageConstant) + assert isinstance(s.value, type(img)) + + i = ImageConstant(img) + outlines.as_image(i) + + i = outlines.image.image() + outlines.as_image(i) diff --git a/tests/text/__init__.py b/tests/text/__init__.py new file mode 100644 index 00000000..e69de29b From 5510b943bca7b25cec855b05071863e01d5f6bc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3?= Louf?= Date: Mon, 27 Mar 2023 13:01:41 +0200 Subject: [PATCH 013/734] Add HuggingFace's StableDiffusion implementation --- outlines/image/models/hugging_face.py | 23 +++++++++++ outlines/image/models/model.py | 47 +++++++++++++++++++++ outlines/text/models/hugging_face.py | 4 +-- tests/test_compile.py | 15 +++++++ 4 files changed, 87 insertions(+), 2 deletions(-) create mode 100644 outlines/image/models/model.py diff --git a/outlines/image/models/hugging_face.py b/outlines/image/models/hugging_face.py index e69de29b..dc23dd1f 100644 --- a/outlines/image/models/hugging_face.py +++ b/outlines/image/models/hugging_face.py @@ -0,0 +1,23 @@ +from outlines.image.models.model import ImageModel + +try: + from diffusers import StableDiffusionPipeline +except ImportError: + raise ImportError( + "You need to install `torch` and `diffusers` to run the StableDiffusion model."
+ ) + + +class StableDiffusion(ImageModel): + """A `StableDiffusion` distributed random image.""" + + def __init__(self, name=None): + super().__init__(name) + + def sample(self, prompt): + """Use HuggingFace's `StableDiffusion` pipeline to sample a new image.""" + pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + pipe = pipe.to("cuda") + image = pipe(prompt).images[0] + + return image diff --git a/outlines/image/models/model.py b/outlines/image/models/model.py new file mode 100644 index 00000000..37383690 --- /dev/null +++ b/outlines/image/models/model.py @@ -0,0 +1,47 @@ +from typing import Tuple + +from PIL.Image import Image as PILImage + +from outlines.graph import Apply, Op, Variable +from outlines.image.var import ImageVariable +from outlines.text.var import as_string + + +class ImageModel(Op): + """An `Op` that produces a sample image from a generative model. + + The output of generative models in outlines is modeled as a random variable. + Therefore, calling an image generative model will return a random image by + default. + + Attributes + ---------- + name + The `Op`'s name in the graph. + + """ + + def __init__(self, name=None): + super().__init__() + self.name = name + + def make_node(self, prompt: Variable) -> Apply: # type: ignore + prompt = as_string(prompt) + out = ImageVariable() + if self.name is not None: + out.name = self.name + + return Apply(self, [prompt], [out]) + + def perform(self, prompt: str) -> Tuple[PILImage]: # type: ignore + """Perform the operations represented by this `Op` on the input prompt. + + This defaults to sampling a new image. Other decoding methods act by + patching this method. + + """ + return (self.sample(prompt),) + + def sample(self, prompt: str) -> PILImage: + """Sample a new image given the input prompt.""" + raise NotImplementedError diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index be3cc26f..913c3e6f 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -13,7 +13,7 @@ class GPT2(LanguageModel): - def __init__(self): + def __init__(self, name=None): """Initialize the GPT2 model. We use HuggingFace's Flax implementation of GPT2. This method will download @@ -26,7 +26,7 @@ def __init__(self): random.seed() self.seed = random.randint(0, 2**32) - super().__init__() + super().__init__(name) def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: """Sample new tokens given the tokenized prompt. diff --git a/tests/test_compile.py b/tests/test_compile.py index 625d1e69..2cbdce6e 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -49,3 +49,18 @@ def test_compile_hf(): )(joke=gpt2, fact=gpt2) fn = compile([], [o]) print(fn()) + + +@pytest.mark.skip +def test_compile_diffusers(): + """Move when we have found a better way to run these slow examples.""" + import outlines + import outlines.image.models.hugging_face as hugging_face + + sd = hugging_face.StableDiffusion() + o = outlines.text.as_string( + "Image of a Pokemon jumping off a skyscraper with a parachute. High resolution. 4k.
In the style of Van Gohg" +    ) +    img = sd(o) +    fn = compile([], [img]) +    o = fn() From 110bcba8beebfb57a12c05abf274f82ae9f93317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 10:21:37 +0200 Subject: [PATCH 014/734] Update the `Op` base class's types --- outlines/graph.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/outlines/graph.py b/outlines/graph.py index 15c1e7ae..0fa17fb8 100644 --- a/outlines/graph.py +++ b/outlines/graph.py @@ -13,7 +13,7 @@ This module is heavily inspired by `Aesara Union[Variable, List[Variable]]: else: return node.outputs -    def perform(self, node: Apply, *inputs): +    def perform(self, inputs: Tuple[Any]) -> Tuple[Any]: """Apply the functions to the inputs and return the output. Parameters ---------- -        node -            The symbolic `Apply` node that represents this computation. inputs Sequence of non-symbolic/numeric/text inputs. Returns ------- The non-symbolic/numeric/text outputs of the function that this -        operation represents +        operation represents as a tuple. """ +        raise NotImplementedError def __str__(self): """Return a ``str`` representation of the `Op`.""" From 7cf702efbfe68f991113e8f18714da4957c58401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 10:40:01 +0200 Subject: [PATCH 015/734] Generalize the HF integration to all available diffusers --- outlines/image/models/hugging_face.py | 7 ++++--- tests/test_compile.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/outlines/image/models/hugging_face.py b/outlines/image/models/hugging_face.py index dc23dd1f..7d749b11 100644 --- a/outlines/image/models/hugging_face.py +++ b/outlines/image/models/hugging_face.py @@ -8,15 +8,16 @@ ) -class StableDiffusion(ImageModel): +class HFDiffuser(ImageModel): """A `StableDiffusion` distributed random image.""" -    def __init__(self, name=None): +    def __init__(self, model_name: str, name=None): +        self.model_name = model_name super().__init__(name) def sample(self, prompt): """Use HuggingFace's `StableDiffusion` pipeline to sample a new image.""" -        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +        pipe = StableDiffusionPipeline.from_pretrained(self.model_name) pipe = pipe.to("cuda") image = pipe(prompt).images[0] return image diff --git a/tests/test_compile.py b/tests/test_compile.py index 2cbdce6e..0d01c76d 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -57,7 +57,7 @@ def test_compile_diffusers(): import outlines import outlines.image.models.hugging_face as hugging_face -    sd = hugging_face.StableDiffusion() +    sd = hugging_face.HFDiffuser("runwayml/stable-diffusion-v1-5") o = outlines.text.as_string( "Image of a Pokemon jumping off a skyscraper with a parachute. High resolution. 4k.
In the style of Van Gohg" ) From a7aa17cb3b7f9f7cfc5722e537dbbeb2d4a9b255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 10:40:25 +0200 Subject: [PATCH 016/734] Ignore `transformers`'s `FutureWarning`s in tests --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index f590378c..4af3825c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,3 +12,4 @@ python_files=test*.py testpaths=tests filterwarnings = error + ignore::FutureWarning:transformers.* From 78d91dfdc3f6336a8ac1db0e986d0fe4c3fa0822 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Mar 2023 17:49:55 +0200 Subject: [PATCH 017/734] Add name to GPT2/HF language model --- outlines/text/models/hugging_face.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index 913c3e6f..483a647f 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -23,10 +23,9 @@ def __init__(self, name=None): # when the graph is built. """ - random.seed() self.seed = random.randint(0, 2**32) - super().__init__(name) + super().__init__(name="HuggingFace GPT2") def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: """Sample new tokens give the tokenized prompt. @@ -47,7 +46,7 @@ def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: returned_tokens = self.model.generate( **prompt_tokens, do_sample=True, - max_new_tokens=100, + max_new_tokens=20, prng_key=jax.random.PRNGKey(self.seed), pad_token_id=self.tokenizer.eos_token_id, ).sequences From 6b3a5c1586dcd33eb0772b3f0bc53efbc91deb85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 28 Mar 2023 10:21:17 +0200 Subject: [PATCH 018/734] Move pytest configuration to pyproject.toml --- pyproject.toml | 7 +++++++ setup.cfg | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 42d6edc4..e1bc1c66 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,3 +37,10 @@ test = [ [tool.setuptools_scm] write_to = "outlines/_version.py" + +[tool.pytest.ini_options] +testpaths = ["tests"] +filterwarnings = [ + "error", + "ignore::FutureWarning:transformers.*" +] diff --git a/setup.cfg b/setup.cfg index 4af3825c..3eced887 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,10 +6,3 @@ per-file-ignores = **/__init__.py:F401,F403 exclude = normalai/_version.py - -[tool:pytest] -python_files=test*.py -testpaths=tests -filterwarnings = - error - ignore::FutureWarning:transformers.* From 186ab3398fb37a664aaa7e46a6cc6652632102eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 28 Mar 2023 10:18:59 +0200 Subject: [PATCH 019/734] Name `Op`s with their name in the script --- outlines/text/models/model.py | 8 ++++++++ outlines/text/script.py | 2 +- tests/text/test_script.py | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py index b4a6a9fc..72ec023c 100644 --- a/outlines/text/models/model.py +++ b/outlines/text/models/model.py @@ -16,6 +16,14 @@ def __init__(self, name=None): super().__init__() self.name = name + def __call__(self, prompt, name=None): + res = super().__call__(prompt) + + if name is not None: + res.name = name + + return res + def make_node(self, prompt): prompt = as_string(prompt) out = StringVariable() diff --git a/outlines/text/script.py b/outlines/text/script.py index 70667c2d..0baadf0b 100644 --- 
a/outlines/text/script.py +++ b/outlines/text/script.py @@ -69,7 +69,7 @@ def parse_Expression(self, node, inputs, graph): try: return self.model_outputs[node.text] except KeyError: - output = user_input(graph) + output = user_input(graph, name=node.text) self.model_outputs[node.text] = output return output else: diff --git a/tests/text/test_script.py b/tests/text/test_script.py index 1db587d8..a30711a5 100644 --- a/tests/text/test_script.py +++ b/tests/text/test_script.py @@ -65,6 +65,7 @@ def test_template_language_model(): t = script("Test ${lm}")(lm=lm) assert isinstance(t.owner.op, Add) assert isinstance(t.owner.inputs[1].owner.op, LanguageModel) + assert t.owner.inputs[1].name == "lm" lm_input = t.owner.inputs[1].owner.inputs[0].value assert lm_input == "Test " From abe997e85999b133348657773811c429f0e08a2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Mar 2023 17:50:09 +0200 Subject: [PATCH 020/734] Add interactive execution mode Since execution can take some time we would like to display current progress to the user. We use the `rich` library to show live updates as the program is being executed. --- outlines/__init__.py | 4 +- outlines/compile.py | 48 ------ outlines/program.py | 216 ++++++++++++++++++++++++++ pyproject.toml | 1 + tests/{text/__init.py => __init__.py} | 0 tests/image/__init__.py | 0 tests/test_compile.py | 48 ++++-- 7 files changed, 254 insertions(+), 63 deletions(-) delete mode 100644 outlines/compile.py create mode 100644 outlines/program.py rename tests/{text/__init.py => __init__.py} (100%) create mode 100644 tests/image/__init__.py diff --git a/outlines/__init__.py b/outlines/__init__.py index 697faf5c..18c869bd 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -28,12 +28,12 @@ .. [1] Dohan, David, et al. "Language model cascades." arXiv preprint arXiv:2207.10342 (2022). """ -from outlines.compile import compile from outlines.image import as_image from outlines.text import script, string +from outlines.program import program __all__ = [ - "compile", + "program", "script", "string", ] diff --git a/outlines/compile.py b/outlines/compile.py deleted file mode 100644 index bf4c9e3e..00000000 --- a/outlines/compile.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Callable, Iterable, Reversible - -from outlines.graph import Variable, io_toposort -from outlines.text.var import StringConstant - - -def compile(inputs: Iterable[Variable], outputs: Reversible[Variable]) -> Callable: - r"""Compile an Outlines graph into an executable function. - - `compile` first sorts the graph defined by the input and output nodes - topologically. It then visits the nodes one by one and executes their - `Op`'s `perform` method, fetching and storing their values in a map. - - Parameters - ---------- - inputs - The symbolic `Variable`\s that represent the inputs of the compiled - program. - outputs - The symbolic `Variable`\s that represent the outputs of the compiled - program. - - Returns - ------- - A function which returns the values of the output nodes when passed the values - of the input nodes as arguments. 
- - """ - sorted = io_toposort(inputs, outputs) - - def fn(*values): - storage_map = {s: v for s, v in zip(inputs, values)} - - for node in sorted: - for i in node.inputs: - if isinstance(i, StringConstant): - storage_map[i] = i.value - node_inputs = [storage_map[i] for i in node.inputs] - results = node.op.perform(*node_inputs) - for i, o in enumerate(node.outputs): - storage_map[o] = results[i] - - if len(outputs) == 1: - return storage_map[outputs[0]] - else: - return tuple(storage_map[o] for o in outputs) - - return fn diff --git a/outlines/program.py b/outlines/program.py new file mode 100644 index 00000000..40ebcce6 --- /dev/null +++ b/outlines/program.py @@ -0,0 +1,216 @@ +import itertools +import textwrap +import time +from functools import singledispatchmethod +from typing import Iterable, Reversible + +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel + +from outlines.graph import Variable, io_toposort +from outlines.text.models.model import LanguageModel +from outlines.text.var import StringConstant + +COLORS = itertools.cycle(["deep_sky_blue2", "gold3", "deep_pink2"]) + + +class Program: + """ """ + + def __init__(self, inputs: Iterable[Variable], outputs: Reversible[Variable]): + self.inputs = inputs + self.outputs = outputs + self.frames = io_toposort(inputs, outputs) + + self.language_models = list( + { + node.op + for node in self.frames + if node.op is not None and isinstance(node.op, LanguageModel) + } + ) + self.lm_colors = {lm: next(COLORS) for lm in self.language_models} + + self.console = Console() + + def build_layout(self) -> Layout: + """Create the layout for the command line interface. + + +-------------------------------------+ + | Logo + instructions | + +-------------------------------------+ + | List of Ops | Executed trace | + | + parameters | | + +-------------------------------------+ + + """ + layout = Layout(name="root") + layout.split_column(Layout(name="header", size=12), Layout(name="execution")) + layout["execution"].split_row( + Layout(name="models"), Layout(name="script", ratio=4) + ) + + return layout + + def print_ops_description(self) -> Panel: + """Create the model panel. + + The `model` panel displays each `Op` used in the program and their + parameters in the color that was assigned to them. The color matches + the color used in the `script` panel for the text they generate. 
+ + """ + model_str = "\n".join( + [f"[{self.lm_colors[lm]}] {lm.name} [/]" for lm in self.language_models] + ) + return Panel( + model_str, + border_style="bright_black", + title="[bright_black]Models[/]", + title_align="left", + ) + + def print_header(self) -> str: + """Display the program's header in the console.""" + + welcome_ascii = textwrap.dedent( + r""" + ___ _ _ _ + / _ \ _ _| |_| (_)_ __ ___ ___ + | | | | | | | __| | | '_ \ / _ \/ __| + | |_| | |_| | |_| | | | | | __/\__ \ + \___/ \____|\__|_|_|_| |_|\___||___/ + """ + ) + + text = f"[bold green]{welcome_ascii}[/bold green]\n\n" + text += "[bright_black]Type Ctrl-C to interrupt the execution and return the current trace.[/]\n" + + return text + + def print_trace( + self, script: str = "", elapsed_time_s: float = 0, words: int = 0 + ) -> Panel: + """Display the current script.""" + subtitle_str = f"[bright_black]Words:[/] [bold red]{words}[/] | " + subtitle_str += ( + f"[bright_black]Time Elapsed:[/][bold yellow] {elapsed_time_s:.1f}s [/]" + ) + return Panel( + script, + border_style="bright_black", + title="[bright_black]Script[/]", + title_align="left", + subtitle=subtitle_str, + subtitle_align="right", + ) + + def execute_frames(self, *values): + storage_map = {s: v for s, v in zip(self.inputs, values)} + script_fmt = "" + trace = {"script": "", "nodes": {}} + + start_time = time.time() + time_elapsed_s = 0 + + # Corner case where the users only passes strings + if len(self.frames) == 0: + trace["script"] = "".join(values) + + try: + with Live(self.layout, console=self.console) as live: + self.layout["script"].update(self.print_trace()) + live.update(self.layout) + + for node in self.frames: + input_fmt = self.process_frame_inputs(node, storage_map) + script_fmt += input_fmt + self.layout["script"].update( + self.print_trace( + script_fmt, time_elapsed_s, len(script_fmt.split()) + ) + ) + live.update(self.layout) + + self.execute_frame(node, storage_map, trace) + time_elapsed_s = time.time() - start_time + + output_fmt = self.process_frame_outputs(node, storage_map) + script_fmt += output_fmt + self.layout["script"].update( + self.print_trace( + script_fmt, time_elapsed_s, len(script_fmt.split()) + ) + ) + live.update(self.layout) + + except KeyboardInterrupt: + pass + except Exception as e: + raise e + finally: + decoded_script = tuple(storage_map[output] for output in self.outputs) + trace["script"] = decoded_script + if len(decoded_script) == 1: + trace["script"] = decoded_script[0] + return trace + return trace + + def process_frame_inputs(self, node, storage_map): + """Process the nodes' inputs. + + If either of the node's inputs is a `StringConstant` we add its + value to the storage map and return its (formatted) value to + be added to the current value of the decoded script. + + """ + input_str, input_fmt = "", "" + for var in node.inputs: + if isinstance(var, StringConstant): + if var not in storage_map: + storage_map[var] = var.value + input_str = var.value + input_fmt = self.format_display(None, input_str) + + return input_fmt + + def execute_frame(self, node, storage_map, trace): + """Execute the current frame.""" + node_inputs = [storage_map[i] for i in node.inputs] + results = node.op.perform(*node_inputs) + for i, o in enumerate(node.outputs): + storage_map[o] = results[i] + trace[o.name] = results[i] + + def process_frame_outputs(self, node, storage_map): + """Process the node's outputs. + + If the node's `Op` is a `LanguageModel` we append its + result to the current value of the decoded script. 
+ + """ + output_str, output_fmt = "", "" + if isinstance(node.op, LanguageModel): + output_str = storage_map[node.outputs[0]] + output_fmt = self.format_display(node.op, output_str) + + return output_fmt + + @singledispatchmethod + def format_display(self, op, text): + return f"[white]{text}[/]" + + @format_display.register(LanguageModel) + def format_display_LanguageModel(self, op, text): + return f"[{self.lm_colors[op]}]{text}[/]" + + def run(self, *values): + self.layout = self.build_layout() + self.layout["header"].update(self.print_header()) + self.layout["models"].update(self.print_ops_description()) + return self.execute_frames(*values) + + +program = Program diff --git a/pyproject.toml b/pyproject.toml index e1bc1c66..1570f8e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ dependencies = [ "mako", "pillow", + "rich" ] dynamic = ["version"] diff --git a/tests/text/__init.py b/tests/__init__.py similarity index 100% rename from tests/text/__init.py rename to tests/__init__.py diff --git a/tests/image/__init__.py b/tests/image/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_compile.py b/tests/test_compile.py index 0d01c76d..bde3e3c6 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -1,37 +1,59 @@ import pytest -from outlines import compile, script, string +from outlines import program +from outlines.text import script, string +from outlines.text.models.model import LanguageModel def test_compile(): s = string() - out = compile([s], [s]) - assert out("test") == "test" + out = program([s], [s]) + assert out.run("test")["script"] == "test" s = string() p = "Test " + s - out = compile([s], [p]) - assert out("test") == "Test test" + out = program([s], [p]) + assert out.run("test")["script"] == "Test test" s1 = string() s2 = string() p = s1 + s2 - out = compile([s1, s2], [p]) - assert out("one", "two") == "onetwo" + out = program([s1, s2], [p]) + assert out.run("one", "two")["script"] == "onetwo" s1 = string() s2 = string() p1 = s1 + s2 p2 = s1 + "three" - out = compile([s1, s2], [p1, p2]) - assert out("one", "two") == ("onetwo", "onethree") + out = program([s1, s2], [p1, p2]) + assert out.run("one", "two")["script"] == ("onetwo", "onethree") def test_compile_scripts(): s = string() o = script("This is a ${var}")(var=s) - out = compile([s], [o]) - assert out("test") == "This is a test" + out = program([s], [o]) + assert out.run("test")["script"] == "This is a test" + + +class MockLanguageModel(LanguageModel): + def __init__(self): + self.name: str = "mock" + + def sample(self, _): + return "This is a LM speaking" + + +def test_compile_mock(): + """Move when we have found a better way to run these slow examples.""" + gpt2 = MockLanguageModel() + o = script( + """ + Here is a good joke: ${joke} + And a random fact: ${fact} + """ + )(joke=gpt2, fact=gpt2) + program([], [o]) @pytest.mark.skip @@ -47,7 +69,7 @@ def test_compile_hf(): And a random fact: ${fact} """ )(joke=gpt2, fact=gpt2) - fn = compile([], [o]) + fn = program([], [o]) print(fn()) @@ -62,5 +84,5 @@ def test_compile_diffusers(): "Image of a Pokemon jumping off a skyscraper with a parachute. High resolution. 4k. 
In the style of Van Gohg" ) img = sd(o) - fn = compile([], [img]) + fn = program([], [img]) o = fn() From 25a50311cf8d3d3fe5fa71522396caef4d06c8fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 13:46:55 +0200 Subject: [PATCH 021/734] Remove `string` from top-level imports --- outlines/__init__.py | 2 +- tests/text/test_basic.py | 9 ++++----- tests/text/test_model.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/outlines/__init__.py b/outlines/__init__.py index 18c869bd..c8a83c63 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -29,8 +29,8 @@ """ from outlines.image import as_image -from outlines.text import script, string from outlines.program import program +from outlines.text import as_string, script __all__ = [ "program", diff --git a/tests/text/test_basic.py b/tests/text/test_basic.py index c7bcfcfe..b15588fe 100644 --- a/tests/text/test_basic.py +++ b/tests/text/test_basic.py @@ -1,11 +1,10 @@ -import outlines from outlines.graph import Apply from outlines.text.basic import Add, add -from outlines.text.var import StringVariable +from outlines.text.var import StringVariable, string def test_add_symbolic(): - s, t = outlines.string(), outlines.string() + s, t = string(), string() w = add(s, t) assert isinstance(w, StringVariable) assert isinstance(w.owner, Apply) @@ -25,7 +24,7 @@ def test_add_symbolic(): def test_add_mixed(): - s, t = "a string", outlines.string() + s, t = "a string", string() w = s + t assert isinstance(w, StringVariable) assert isinstance(w.owner, Apply) @@ -33,7 +32,7 @@ def test_add_mixed(): assert len(w.owner.inputs) == 2 assert len(w.owner.outputs) == 1 - s, t = outlines.string(), "a string" + s, t = string(), "a string" w = s + t assert isinstance(w, StringVariable) assert isinstance(w.owner, Apply) diff --git a/tests/text/test_model.py b/tests/text/test_model.py index 10362835..7ff7721c 100644 --- a/tests/text/test_model.py +++ b/tests/text/test_model.py @@ -1,4 +1,4 @@ -from outlines import string +from outlines.text import string from outlines.text.models.model import LanguageModel From c7a5e72425e46cd7c515e612ac035fe73f018554 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 16:42:31 +0200 Subject: [PATCH 022/734] Add debug execution mode --- outlines/program.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/outlines/program.py b/outlines/program.py index 40ebcce6..c9dd2f0c 100644 --- a/outlines/program.py +++ b/outlines/program.py @@ -62,7 +62,7 @@ def print_ops_description(self) -> Panel: the color used in the `script` panel for the text they generate. """ - model_str = "\n".join( + model_str = "\n\n".join( [f"[{self.lm_colors[lm]}] {lm.name} [/]" for lm in self.language_models] ) return Panel( @@ -158,6 +158,15 @@ def execute_frames(self, *values): return trace return trace + def debug(self, *values): + storage_map = {s: v for s, v in zip(self.inputs, values)} + trace = {"script": "", "nodes": {}} + for node in self.frames: + self.process_frame_inputs(node, storage_map) + self.execute_frame(node, storage_map, trace) + + return storage_map + def process_frame_inputs(self, node, storage_map): """Process the nodes' inputs. 
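The patches above define the execution API used for the rest of the series. The following is a minimal usage sketch, mirroring the assertions in tests/test_compile.py; the local variable names are illustrative only and not part of the patches themselves:

    from outlines import program
    from outlines.text import string

    s = string()              # symbolic input variable
    p = "Hello, " + s         # builds an Add node in the graph

    prog = program([s], [p])  # toposorts the graph into executable frames

    # `run` executes the frames behind a live `rich` display and returns a
    # trace dictionary; the decoded text is stored under the "script" key.
    trace = prog.run("world")
    assert trace["script"] == "Hello, world"

    # `debug` (added in the patch just above) skips the console UI and
    # returns the raw mapping from symbolic variables to computed values.
    storage_map = prog.debug("world")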
From 7105048506238d5a7132697fca43313fef3b9738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 16:43:12 +0200 Subject: [PATCH 023/734] Make it possible to name vars created by `ImageModel`s --- outlines/image/models/__init__.py | 2 ++ outlines/image/models/model.py | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/outlines/image/models/__init__.py b/outlines/image/models/__init__.py index e69de29b..7fc171e1 100644 --- a/outlines/image/models/__init__.py +++ b/outlines/image/models/__init__.py @@ -0,0 +1,2 @@ +from .hugging_face import HFDiffuser +from .model import ImageModel diff --git a/outlines/image/models/model.py b/outlines/image/models/model.py index 37383690..948a6435 100644 --- a/outlines/image/models/model.py +++ b/outlines/image/models/model.py @@ -25,6 +25,14 @@ def __init__(self, name=None): super().__init__() self.name = name + def __call__(self, prompt, name=None): + res = super().__call__(prompt) + + if name is not None: + res.name = name + + return res + def make_node(self, prompt: Variable) -> Apply: # type: ignore prompt = as_string(prompt) out = ImageVariable() From 5bd43ebd01d05b19c0a07ce4a28dacb38fef6f7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 29 Mar 2023 16:44:24 +0200 Subject: [PATCH 024/734] Generalize the HF integration to available Causal LMs --- outlines/image/models/__init__.py | 1 - outlines/text/models/hugging_face.py | 66 ++++++++++++++++++++-------- tests/test_compile.py | 5 +-- 3 files changed, 49 insertions(+), 23 deletions(-) diff --git a/outlines/image/models/__init__.py b/outlines/image/models/__init__.py index 7fc171e1..fbc196b2 100644 --- a/outlines/image/models/__init__.py +++ b/outlines/image/models/__init__.py @@ -1,2 +1 @@ -from .hugging_face import HFDiffuser from .model import ImageModel diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index 483a647f..e63cc387 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -1,19 +1,42 @@ -import random -from typing import Dict - from outlines.text.models.model import LanguageModel try: - import jax - from transformers import AutoTokenizer, FlaxAutoModelForCausalLM + import torch + from transformers import AutoModelForCausalLM, AutoTokenizer except ImportError: raise ImportError( - "You need to install `transformers` and `flax` to run the GTP2 model." + "You need to install `transformers` and `torch` to run HuggingFace's Causal LM models." ) -class GPT2(LanguageModel): - def __init__(self, name=None): +class HFCausalLM(LanguageModel): + """Represent any of HuggingFace's causal language model implementations. + + You should have the `torch` and `transformers` packages installed. First + execution may take a while since the pre-trained weights will be downloaded. + + Available models are listed on https://huggingface.co/models + + Example + ------ + + >> from outlines.text.models import HFCausalLM + >> from outlines.text import string + >> + >> gpt2 = HFCausalLM("gpt2") + >> in = string() + >> out = gpt2(in) + + Attributes + ---------- + model_id + The model string identifier in the `transformers` library. + name + The name of this `Op` in the graph. + + """ + + def __init__(self, model_name: str, name=None): """Initialize the GPT2 model. We use HuggingFace's Flax implementation of GPT2. This method will download @@ -23,11 +46,11 @@ def __init__(self, name=None): # when the graph is built. 
""" - random.seed() - self.seed = random.randint(0, 2**32) - super().__init__(name="HuggingFace GPT2") - def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: + super().__init__(name=f"HuggingFace {model_name}") + self.model_name = model_name + + def sample(self, prompt_tokens: torch.Tensor) -> torch.Tensor: """Sample new tokens give the tokenized prompt. Since HuggingFace's `generate` method returns the prompt along with the @@ -42,20 +65,24 @@ def sample(self, prompt_tokens: Dict[str, jax.Array]) -> jax.Array: """ - self.model = FlaxAutoModelForCausalLM.from_pretrained("gpt2") + self.model = AutoModelForCausalLM.from_pretrained(self.model_name) + + if torch.cuda.is_available(): + self.model = self.model.to("cuda") + prompt_tokens = prompt_tokens.to("cuda") + returned_tokens = self.model.generate( **prompt_tokens, do_sample=True, max_new_tokens=20, - prng_key=jax.random.PRNGKey(self.seed), pad_token_id=self.tokenizer.eos_token_id, - ).sequences + ) new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] new_tokens = new_tokens.squeeze() return new_tokens - def encode(self, sequence: str) -> Dict[str, jax.Array]: + def encode(self, sequence: str) -> torch.Tensor: """Return a list of token ids from a text sequence. Parameters @@ -67,9 +94,10 @@ def encode(self, sequence: str) -> Dict[str, jax.Array]: ------- A dictionary that contains the token ids and the input mask. """ - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - return self.tokenizer(sequence, return_tensors="jax") - def decode(self, ids: jax.Array) -> str: + self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + return self.tokenizer(sequence, return_tensors="pt") + + def decode(self, ids: torch.Tensor) -> str: """Return a text sequence from a array of token ids.""" return self.tokenizer.decode(ids, skip_special_tokens=True) diff --git a/tests/test_compile.py b/tests/test_compile.py index bde3e3c6..3905993c 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -59,10 +59,9 @@ def test_compile_mock(): @pytest.mark.skip def test_compile_hf(): """Move when we have found a better way to run these slow examples.""" - import outlines - import outlines.text.models.hugging_face + import outlines.text.models.hugging_face as hugging_face - gpt2 = outlines.text.models.hugging_face.GPT2() + gpt2 = hugging_face.HFCausaLM() o = script( """ Here is a good joke: ${joke} From 78afd3a991349b7fa7dc1372f54c3e44efaf9cf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 3 Apr 2023 16:41:15 +0200 Subject: [PATCH 025/734] Integration with the OpenAI API (#38) --- outlines/text/models/openai.py | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 outlines/text/models/openai.py diff --git a/outlines/text/models/openai.py b/outlines/text/models/openai.py new file mode 100644 index 00000000..3bd708e1 --- /dev/null +++ b/outlines/text/models/openai.py @@ -0,0 +1,61 @@ +import os +from typing import Optional + +from outlines.text.models import LanguageModel + +try: + import openai + from openai import error +except ImportError: + raise ImportError("You need to install `openai` to run OpenAI's language models.") + + +class OpenAI(LanguageModel): + """Represents any of OpenAI's language models + + You should have the `openai` package installed, and store + you OpenAI key in the `OPENAI_API_KEY` environment variable. 
+ + """ + + def __init__(self, model_name: str, name: Optional[str] = None): + """Initialize the OpenAI model.""" + + try: + self.openai_api_key = os.environ["OPENAI_API_KEY"] + except KeyError: + raise OSError( + "Could not find the `OPENAI_API_KEY` environment variable. Please make sure it is set to your OpenAI key before re-running your model." + ) + + available_models = openai.Model.list() + available_model_names = [model["id"] for model in available_models["data"]] + if model_name not in available_model_names: + raise OSError(f"{model_name} is not a valid OpenAI model name.") + + super().__init__(name=f"OpenAI {model_name}") + self.model_name = model_name + + def sample(self, prompt: str) -> str: + try: + resp = openai.Completion.create( + model=self.model_name, + prompt=prompt, + max_tokens=128, + ) + except error.APIConnectionError as e: + raise OSError(f"Open API failed to connect: {e}") + except error.AuthenticationError as e: + raise OSError( + f"Open API request not authorized: {e}. Check that the token provided is valid." + ) + except error.PermissionError as e: + raise OSError(f"Open API request was not permitted: {e}") + except error.RateLimitError as e: + raise OSError( + f"Open API requests exceeded the rate limit: {e}. Wait before re-running your program." + ) + except error.Timeout as e: + raise OSError(f"Open API request timed out: {e}") + + return resp["choices"][0]["text"] From cd6dcb4b1dc6ab7e51e249d74b398cfb7d9193cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 4 Apr 2023 17:58:38 +0200 Subject: [PATCH 026/734] Use Mako's engine to compose outlines graphs (#39) --- outlines/__init__.py | 7 +-- outlines/text/__init__.py | 4 +- outlines/text/compose.py | 96 ++++++++++++++++++++++++++++++++++++++ outlines/text/script.py | 83 -------------------------------- tests/test_compile.py | 58 +---------------------- tests/text/test_compose.py | 61 ++++++++++++++++++++++++ tests/text/test_script.py | 79 ------------------------------- 7 files changed, 165 insertions(+), 223 deletions(-) create mode 100644 outlines/text/compose.py delete mode 100644 outlines/text/script.py create mode 100644 tests/text/test_compose.py delete mode 100644 tests/text/test_script.py diff --git a/outlines/__init__.py b/outlines/__init__.py index c8a83c63..907b6527 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -30,10 +30,11 @@ """ from outlines.image import as_image from outlines.program import program -from outlines.text import as_string, script +from outlines.text import as_string, compose __all__ = [ + "as_image", + "as_string", "program", - "script", - "string", + "compose", ] diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 4769ffbc..63baf99a 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,5 +1,5 @@ from .basic import * -from .script import script +from .compose import compose from .var import as_string, string -__all__ = ["as_string", "string", "script"] +__all__ = ["as_string", "string", "compose"] diff --git a/outlines/text/compose.py b/outlines/text/compose.py new file mode 100644 index 00000000..fc699053 --- /dev/null +++ b/outlines/text/compose.py @@ -0,0 +1,96 @@ +import collections +import textwrap +from typing import Dict, Union + +from mako.runtime import Context +from mako.template import Template + +from outlines.text.var import StringVariable + + +class OutlinesEncodingBuffer: + """An encoding buffer for Mako's templating engine. + + This is a modified version of Mako's `FastEncodingBuffer`. 
It builds an outlines +    graph when the template is rendered with `StringVariable`s. + +    """ + +    def __init__(self, encoding=None, errors="strict"): +        self.data = collections.deque() +        self.encoding = encoding +        self.delim = "" +        self.errors = errors +        self.write = self.data.append + +    def truncate(self): +        self.data = collections.deque() +        self.write = self.data.append + +    def get_value(self): +        if self.encoding: +            return self.delim.join(self.data).encode(self.encoding, self.errors) +        else: +            output = "" +            for d in self.data: +                if isinstance(d, StringVariable): +                    output = output + d +                else: +                    output = output + str(d) +            return output + + +def compose( +    template: str, **values: Dict[str, Union[str, StringVariable]] +) -> Union[str, StringVariable]: +    r"""Parse a Mako template and translate it into an Outlines graph. + +    Examples +    -------- + +    Outlines follows Mako's syntax + +    >>> import outlines +    >>> outline = outlines.compose("I like ${food} and ${sport}", food="tomatoes", sport="tennis") +    I like tomatoes and tennis + +    When a variable in the template is assigned a `StringVariable` value, the +    `compose` function builds the corresponding outlines graph and returns a +    `StringVariable`: + +    >>> s = outlines.text.string() +    >>> outlines.compose("I like ${food}", food=s) +     + +    It is also possible to use control flow inside templates: + +    >>> examples = ["one", "two", "three"] +    >>> outline = outlines.compose( +    ...     ''' +    ...     % for example in examples: +    ...     Example: ${example} +    ...     % endfor +    ...     ''', +    ...     examples=examples +    ... ) + +    Parameters +    ---------- +    template +        A string that contains a template written in the Mako syntax. +    **values +        Map from the variables in the template to their value. + +    Returns +    ------- +    A string when the values are all strings, a `StringVariable` otherwise. + +    """ +    buf = OutlinesEncodingBuffer() +    ctx = Context(buf, **values) + +    outline = textwrap.dedent(template).lstrip().rstrip() +    mako_template = Template(outline, default_filters=[]) +    mako_template.render_context(ctx) + +    return buf.get_value() diff --git a/outlines/text/script.py b/outlines/text/script.py deleted file mode 100644 index 0baadf0b..00000000 --- a/outlines/text/script.py +++ /dev/null @@ -1,83 +0,0 @@ -import textwrap -from functools import singledispatchmethod -from typing import Dict, Union - -from mako import lexer -from mako.parsetree import Expression, Text - -from outlines.graph import Op -from outlines.text.models import LanguageModel -from outlines.text.var import StringVariable, as_string - - -class Script: -    """Represents a scripted interaction with generative models. - -    The `Script` class provides a convenient way to define Outlines graph using -    the Mako templating languages.`Scripts` are instantiated by passing a string -    that represents the flow of interaction with one or several generative models. - -    """ - -    def __init__(self, script): -        script = textwrap.dedent(script).lstrip().rstrip() -        self.parsetree = lexer.Lexer(script).parse() -        self.model_outputs = {} - -    def __call__(self, **inputs: Dict[str, Union[StringVariable, Op]]): -        """Create an Outlines graph from a Mako template. - -        When one calls a `Script` instance with arguments that represent -        variables in the template, Outlines parses the template and iteratively -        builds the graph it represents before returning it.
- - """ - nodes = self.parsetree.nodes - graph = self.parse_node(nodes[0], inputs, "") - for node in self.parsetree.nodes[1:]: - graph = graph + self.parse_node(node, inputs, graph) - - return graph - - @singledispatchmethod - def parse_node(self, node, inputs, graph): - raise NotImplementedError(f"Cannot transpile {node} to an Outlines graph.") - - @parse_node.register(Text) - def parse_Text(self, node, inputs, graph): - """Parse Mako's `Text` nodes. - - `Text` nodes corresponds to `StringConstants` in Outline's language. - - """ - return as_string(node.content) - - @parse_node.register(Expression) - def parse_Expression(self, node, inputs, graph): - """Parse Mako's `Expression` nodes. - - We first fetch the argument that the user passed to the `__call__` - method that corresponds to the current variable name. Then we check if - this argument has already been seen; if that's the case we assume the - user is referencing the output of a previously-run LM and add the - corresponding node. - - """ - try: - user_input = inputs[node.text] - if isinstance(user_input, LanguageModel): - try: - return self.model_outputs[node.text] - except KeyError: - output = user_input(graph, name=node.text) - self.model_outputs[node.text] = output - return output - else: - return as_string(inputs[node.text]) - except KeyError: - raise TypeError( - f"Prompt evaluation missing 1 required argument: '{node.text}'" - ) - - -script = Script diff --git a/tests/test_compile.py b/tests/test_compile.py index 3905993c..988f50a3 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -1,8 +1,5 @@ -import pytest - from outlines import program -from outlines.text import script, string -from outlines.text.models.model import LanguageModel +from outlines.text import compose, string def test_compile(): @@ -31,57 +28,6 @@ def test_compile(): def test_compile_scripts(): s = string() - o = script("This is a ${var}")(var=s) + o = compose("This is a ${var}", var=s) out = program([s], [o]) assert out.run("test")["script"] == "This is a test" - - -class MockLanguageModel(LanguageModel): - def __init__(self): - self.name: str = "mock" - - def sample(self, _): - return "This is a LM speaking" - - -def test_compile_mock(): - """Move when we have found a better way to run these slow examples.""" - gpt2 = MockLanguageModel() - o = script( - """ - Here is a good joke: ${joke} - And a random fact: ${fact} - """ - )(joke=gpt2, fact=gpt2) - program([], [o]) - - -@pytest.mark.skip -def test_compile_hf(): - """Move when we have found a better way to run these slow examples.""" - import outlines.text.models.hugging_face as hugging_face - - gpt2 = hugging_face.HFCausaLM() - o = script( - """ - Here is a good joke: ${joke} - And a random fact: ${fact} - """ - )(joke=gpt2, fact=gpt2) - fn = program([], [o]) - print(fn()) - - -@pytest.mark.skip -def test_compile_diffusers(): - """Move when we have found a better way to run these slow examples.""" - import outlines - import outlines.image.models.hugging_face as hugging_face - - sd = hugging_face.HFDiffuser("runwayml/stable-diffusion-v1-5") - o = outlines.text.as_string( - "Image of a Pokemon jumping off a skyscraper with a parachute. High resolution. 4k. 
In the style of Van Gohg" - ) - img = sd(o) - fn = program([], [img]) - o = fn() diff --git a/tests/text/test_compose.py b/tests/text/test_compose.py new file mode 100644 index 00000000..c745f8c5 --- /dev/null +++ b/tests/text/test_compose.py @@ -0,0 +1,61 @@ +import pytest + +from outlines.text import compose, string +from outlines.text.basic import Add +from outlines.text.var import StringConstant, StringVariable + + +def test_template_text(): + with pytest.raises(NameError): + compose("String ${one}", two="two") + + t = compose("Test") + assert t == "Test" + + t = compose("Test ${variable}", variable="string") + assert t == "Test string" + + t = compose("Test ${variable}", variable=1) + assert t == "Test 1" + + t = compose("Test repeated ${variable} ${variable}", variable="string") + assert t == "Test repeated string string" + + t = compose("Test ${one} ${two}", one="1", two="2") + assert t == "Test 1 2" + + +def test_template_string_variable(): + variable = string() + t = compose("Test ${variable}", variable=variable) + assert isinstance(t.owner.op, Add) + assert isinstance(t.owner.inputs[0], StringConstant) + assert isinstance(t.owner.inputs[1], StringVariable) + assert t.owner.inputs[0].value == "Test " + + variable = string() + t = compose("${variable} test", variable=variable) + assert isinstance(t.owner.op, Add) + assert isinstance(t.owner.inputs[0], StringVariable) + assert isinstance(t.owner.inputs[1], StringConstant) + assert t.owner.inputs[1].value == " test" + + +def test_template_few_shots(): + wa = string() + examples = [["here", "there"], ["this", "that"]] + prompt = compose( + """ + This is a test + + ${wa} + + % for s, t in examples: + Search: ${s} + Trap: ${t} + % endfor + """, + wa=wa, + examples=examples, + ) + assert isinstance(prompt, StringVariable) diff --git a/tests/text/test_script.py b/tests/text/test_script.py deleted file mode 100644 index a30711a5..00000000 --- a/tests/text/test_script.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest - -from outlines.text import script, string -from outlines.text.basic import Add -from outlines.text.models import LanguageModel -from outlines.text.var import StringConstant, StringVariable - - -def test_template_text(): - with pytest.raises(TypeError): - script("String ${one}")(two="two") - - string = "Test" - t = script(string)() - assert isinstance(t, StringConstant) - assert t.value == "Test" - - t = script("Test ${variable}")(variable="string") - assert t.owner.inputs[0].value == "Test " - assert t.owner.inputs[1].value == "string" - - t = script("Test ${variable}")(variable=1) - assert t.owner.inputs[0].value == "Test " - assert t.owner.inputs[1].value == "1" - - t = script("Test repeated ${variable} ${variable}")(variable="string") - assert isinstance(t.owner.op, Add) - assert t.owner.inputs[1].value == "string" - assert isinstance(t.owner.inputs[0].owner.op, Add) - assert t.owner.inputs[0].owner.inputs[1].value == " " - - t = script("Test ${one} ${two}")(one="1", two="2") - assert t.owner.inputs[1].value == "2" - assert t.owner.inputs[0].owner.inputs[0].owner.inputs[1].value == "1" - - -def test_template_string_variable(): - variable = string() - t = script("Test ${variable}")(variable=variable) - assert isinstance(t.owner.op, Add) - assert isinstance(t.owner.inputs[0], StringConstant) - assert isinstance(t.owner.inputs[1], StringVariable) - assert t.owner.inputs[0].value == "Test " - - variable = string() - t = script("${variable} test")(variable=variable) - assert isinstance(t.owner.op, Add) - assert 
isinstance(t.owner.inputs[0], StringVariable) -    assert isinstance(t.owner.inputs[1], StringConstant) -    assert t.owner.inputs[1].value == " test" - - -class MockLanguageModel(LanguageModel): -    def sample(self, prompt): -        return f"2x{prompt}" - - -def test_template_language_model(): -    r"""Test the transpilation of scripts that contain one or -    several `LanguageModel`\s. -    """ - -    # Single occurence -    lm = MockLanguageModel() -    t = script("Test ${lm}")(lm=lm) -    assert isinstance(t.owner.op, Add) -    assert isinstance(t.owner.inputs[1].owner.op, LanguageModel) -    assert t.owner.inputs[1].name == "lm" - -    lm_input = t.owner.inputs[1].owner.inputs[0].value -    assert lm_input == "Test " - -    # The first reference to the lamguage model should -    # execute decoding, the following ones be replaced -    # by the result of this evaluation. -    lm = MockLanguageModel(name="lm") -    t = script("Test ${lm} more text ${lm}")(lm=lm) -    assert isinstance(t.owner.inputs[1].owner.op, MockLanguageModel) -    assert t.owner.inputs[1].owner.inputs[0].value == "Test " From 48c9e3ff453368676f1ac461c4c53a5f818ab21a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 4 Apr 2023 22:11:59 +0200 Subject: [PATCH 027/734] Expand docstrings and fix typos --- outlines/graph.py | 12 ++++++++---- outlines/image/var.py | 2 +- outlines/text/basic.py | 3 --- outlines/text/var.py | 9 +++++---- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/outlines/graph.py b/outlines/graph.py index 0fa17fb8..1c3e5699 100644 --- a/outlines/graph.py +++ b/outlines/graph.py @@ -1,6 +1,6 @@ """Graph objects and manipulation functions. -Manipulating Outlines prompts and operations implicitly defines a graph that +Manipulating prompts and operations in Outlines implicitly defines a graph that can be explored, rewritten and compiled. This module defines the basic types these graphs are built from: @@ -10,7 +10,10 @@ - `Apply` nodes represent the application of an `Op` onto one or several variables. -This module is heavily inspired by `Aesara `_ uses to represent mathematical +operations on arrays. It is possible that Aesara may be used as a backend for +Outlines in the near future. """ from typing import Any, Iterable, List, Optional, Reversible, Sequence, Tuple, Union @@ -35,7 +38,8 @@ class Variable(Node): There are a few kinds of `Variable` to be aware of: -    - `StringVariable` subclass of `Variable` that represents a ``str`` object. +    - `StringVariable` is a subclass of `Variable` that represents a ``str`` object. +    - `ImageVariable` is a subclass of `Variable` that represents image objects. """ diff --git a/outlines/image/var.py b/outlines/image/var.py index 7a8ec072..dce4d672 100644 --- a/outlines/image/var.py +++ b/outlines/image/var.py @@ -37,7 +37,7 @@ @singledispatch def as_image(x, name=None): -    """Convert `x` into an equivalent `StringVariable` +    """Convert `x` into an equivalent `ImageVariable`. This function can be used to turn `pillow.Image` instances into an `ImageVariable`.
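For reference, a minimal sketch of how the `as_image` dispatch behaves after this docstring fix; the `Image.new` call is standard Pillow, used here only to create a throwaway test image:

    from PIL import Image

    import outlines
    from outlines.image.var import ImageConstant

    img = Image.new("RGB", (8, 8))         # a real Pillow image
    var = outlines.as_image(img)           # dispatches on PIL.Image.Image
    assert isinstance(var, ImageConstant)

    assert outlines.as_image(var) is var   # ImageVariables pass through unchanged

    try:
        outlines.as_image(3.14)            # unregistered types hit the base case
    except TypeError:
        pass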
diff --git a/outlines/text/basic.py b/outlines/text/basic.py index 4217a461..52c59645 100644 --- a/outlines/text/basic.py +++ b/outlines/text/basic.py @@ -7,9 +7,6 @@ class Add(Op): - def __init__(self): - pass - def make_node(self, s, t): s = outlines.text.as_string(s) t = outlines.text.as_string(t) diff --git a/outlines/text/var.py b/outlines/text/var.py index f9d43f27..65b46262 100644 --- a/outlines/text/var.py +++ b/outlines/text/var.py @@ -40,7 +40,8 @@ def as_string(x, name=None): """Convert `x` into an equivalent `StringVariable`. This function can be used to turn `str`, `int` and `float` instances into a - `StringVariable`. + `StringVariable`. It is mainly used in `Op`s' `make_node` method to convert + inputs to a `StringVariable` and add them to the graph. Parameters ---------- @@ -55,16 +56,16 @@ def as_string(x, name=None): @as_string.register(str) -def as_string_variable_strings(x, name=None): +def as_string_strings(x, name=None): return StringConstant(x, name) @as_string.register(int) @as_string.register(float) -def as_string_variable_numbers(x, name=None): +def as_string_numbers(x, name=None): return StringConstant(str(x), name) @as_string.register(StringVariable) -def as_string_variable_StringVariable(x, name=None): +def as_string_StringVariable(x, name=None): return x From 30fafc9fa1000c66ee0c2a25cb87aeccc4633f1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 4 Apr 2023 22:12:16 +0200 Subject: [PATCH 028/734] Simplify the `LanguageModel` class --- outlines/image/models/model.py | 20 +++++++- outlines/text/models/hugging_face.py | 56 ++++++-------------- outlines/text/models/model.py | 77 ++++++++-------------------- outlines/text/models/openai.py | 19 ++++--- tests/text/test_model.py | 12 +---- 5 files changed, 67 insertions(+), 117 deletions(-) diff --git a/outlines/image/models/model.py b/outlines/image/models/model.py index 948a6435..3ffc715f 100644 --- a/outlines/image/models/model.py +++ b/outlines/image/models/model.py @@ -22,10 +22,28 @@ class ImageModel(Op): """ def __init__(self, name=None): + """Instantiate the `ImageModel` `Op`. + + Parameters + ---------- + name + The name of the `Op` in the graph. + + """ super().__init__() self.name = name def __call__(self, prompt, name=None): + """Create the `Apply` node that represents the `Op`'s application to inputs. + + Parameters + ---------- + prompt + The prompt used to condition the generative model's sampling procedure. + name + The name of the output variable in the graph. + + """ res = super().__call__(prompt) if name is not None: @@ -36,8 +54,6 @@ def __call__(self, prompt, name=None): def make_node(self, prompt: Variable) -> Apply: # type: ignore prompt = as_string(prompt) out = ImageVariable() - if self.name is not None: - out.name = self.name return Apply(self, [prompt], [out]) diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index e63cc387..8c5d2cd3 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -29,28 +29,24 @@ class HFCausalLM(LanguageModel): Attributes ---------- - model_id + model_name The model string identifier in the `transformers` library. - name - The name of this `Op` in the graph. """ - def __init__(self, model_name: str, name=None): - """Initialize the GPT2 model. + def __init__(self, model: str): + """Instantiate the model `Op`. - We use HuggingFace's Flax implementation of GPT2. This method will download - the model's weights if they are not yet cached on your machine. 
- -        # TODO: Download the pre-trained weight when the model is executed instead of -        # when the graph is built. +    def __init__(self, model: str): +        """Instantiate the model `Op`. + +        Parameters +        ---------- +        model +            The model id of a model hosted inside a model repo on huggingface.co + +        """ +        super().__init__(name=f"HuggingFace {model}") +        self.model_name = model -    def sample(self, prompt_tokens: torch.Tensor) -> torch.Tensor: +    def perform(self, prompt): -        """Sample new tokens give the tokenized prompt. +        """Sample new tokens given the tokenized prompt. Since HuggingFace's `generate` method returns the prompt along with the @@ -63,15 +59,17 @@ def sample(self, prompt_tokens: torch.Tensor) -> torch.Tensor: prompt and the input mask. This is the default output of HuggingFace's tokenizers. -        """ -        self.model = AutoModelForCausalLM.from_pretrained(self.model_name) +        """ +        tokenizer = AutoTokenizer.from_pretrained(self.model_name) +        model = AutoModelForCausalLM.from_pretrained(self.model_name) + +        prompt_tokens = tokenizer(prompt, return_tensors="pt") if torch.cuda.is_available(): -            self.model = self.model.to("cuda") +            model = model.to("cuda") prompt_tokens = prompt_tokens.to("cuda") -        returned_tokens = self.model.generate( +        returned_tokens = model.generate( **prompt_tokens, do_sample=True, max_new_tokens=20, -            pad_token_id=self.tokenizer.eos_token_id, +            pad_token_id=tokenizer.eos_token_id, ) new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] new_tokens = new_tokens.squeeze() -        return new_tokens - -    def encode(self, sequence: str) -> torch.Tensor: -        """Return a list of token ids from a text sequence. - -        Parameters -        ---------- -        sequence -            The text sequence to tokenize. - -        Returns -        ------- -        A dictionary that contains the token ids and the input mask. -        """ - -        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) -        return self.tokenizer(sequence, return_tensors="pt") - -    def decode(self, ids: torch.Tensor) -> str: -        """Return a text sequence from a array of token ids.""" -        return self.tokenizer.decode(ids, skip_special_tokens=True) +        return tokenizer.decode(new_tokens, skip_special_tokens=True) diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py index 72ec023c..d3ac362e 100644 --- a/outlines/text/models/model.py +++ b/outlines/text/models/model.py @@ -5,18 +5,36 @@ class LanguageModel(Op): """An `Op` that produces a sample from a language model. -    The output of language models in outlines is modeled as a random variable. -    Therefore, calling a language model will return a random sequence (via -    ancestral sampling) by default. Other decoding methods are constructed +    The output of language models in outlines is represented as a random +    variable. Therefore, calling a language model will return a random sequence +    (via ancestral sampling) by default. Other decoding methods are constructed as graph transformations. """ def __init__(self, name=None): +        """Instantiate the `LanguageModel` `Op`. + +        Parameters +        ---------- +        name +            The name of the `Op` in the graph. + +        """ super().__init__() self.name = name def __call__(self, prompt, name=None): +        """Create the `Apply` node that represents the `Op`'s application to inputs. + +        Parameters +        ---------- +        prompt +            The prompt used to condition the language model's sampling procedure. +        name +            The name of the output variable in the graph.
+ + """ res = super().__call__(prompt) if name is not None: @@ -27,59 +45,8 @@ def __call__(self, prompt, name=None): def make_node(self, prompt): prompt = as_string(prompt) out = StringVariable() - if self.name is not None: - out.name = self.name return Apply(self, [prompt], [out]) def perform(self, prompt): - tokens = self.encode(prompt) - sampled_tokens = self.sample(tokens) - outputs = self.decode(sampled_tokens) - return (outputs,) - - def sample(self, tokens): - raise NotImplementedError - - def logprob(self, prompt, context): - """Return the log-probability of each token in the vocabulary given the - input prompt and the current context (previously generated tokens). - - # TODO: Implement `logprob` as a graph transformation? - - Parameters - ---------- - prompt - The input to the language model, parameter of the distribution. - context - A sequence that contains the previously generated tokens that - are part of the context window. This sequence can be shorter - than the total sequence generated so far if the context length - has been reached. - - Returns - ------- - A sequence that represents the log-probability distribution over the - tokens. - - """ - raise NotImplementedError - - def encode(self, sequence: str): - """Encode the given sequence. - - Defaults to a pass-through so it does not have to be implemented by - subclasses that represent an integration to an API that take text as an - input. - - """ - return sequence - - def decode(self, ids) -> str: - """Decode a list of ids to a string. - - Defaults to a pass-through so it does not have to be implemented by - subclasses that represent an integration to an API that returns text. - - """ - return ids + return NotImplementedError diff --git a/outlines/text/models/openai.py b/outlines/text/models/openai.py index 3bd708e1..af0a6f9d 100644 --- a/outlines/text/models/openai.py +++ b/outlines/text/models/openai.py @@ -1,5 +1,4 @@ import os -from typing import Optional from outlines.text.models import LanguageModel @@ -13,12 +12,12 @@ class OpenAI(LanguageModel): """Represents any of OpenAI's language models - You should have the `openai` package installed, and store - you OpenAI key in the `OPENAI_API_KEY` environment variable. + You should have the `openai` package installed, and store you OpenAI key in + the `OPENAI_API_KEY` environment variable. 
""" - def __init__(self, model_name: str, name: Optional[str] = None): + def __init__(self, model: str): """Initialize the OpenAI model.""" try: @@ -30,16 +29,16 @@ def __init__(self, model_name: str, name: Optional[str] = None): available_models = openai.Model.list() available_model_names = [model["id"] for model in available_models["data"]] - if model_name not in available_model_names: - raise OSError(f"{model_name} is not a valid OpenAI model name.") + if model not in available_model_names: + raise OSError(f"{model} is not a valid OpenAI model name.") - super().__init__(name=f"OpenAI {model_name}") - self.model_name = model_name + super().__init__(name=f"OpenAI {model}") + self.model = model - def sample(self, prompt: str) -> str: + def perform(self, prompt): try: resp = openai.Completion.create( - model=self.model_name, + model=self.model, prompt=prompt, max_tokens=128, ) diff --git a/tests/text/test_model.py b/tests/text/test_model.py index 7ff7721c..9b90e2da 100644 --- a/tests/text/test_model.py +++ b/tests/text/test_model.py @@ -9,14 +9,4 @@ def test_initialize_model(): out = llm(prompt) assert isinstance(out.owner.op, LanguageModel) assert out.owner.inputs[0] == prompt - assert out.name == "llm" - - -class MockLM(LanguageModel): - def sample(self, _): - return "test" - - -def test_sample(): - llm = MockLM() - assert llm.perform("")[0] == "test" + assert out.owner.op.name == "llm" From 2f6a7ff54d455728ef333d26f6bef49adfa70013 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 5 Apr 2023 12:38:45 +0200 Subject: [PATCH 029/734] Add decorator to turn python functions into an Op --- outlines/__init__.py | 3 +- outlines/function.py | 73 ++++++++++++++++++++++++++++++++++++++++++ outlines/graph.py | 34 ++++++++++++++++++-- tests/test_function.py | 47 +++++++++++++++++++++++++++ 4 files changed, 154 insertions(+), 3 deletions(-) create mode 100644 outlines/function.py create mode 100644 tests/test_function.py diff --git a/outlines/__init__.py b/outlines/__init__.py index 907b6527..66eb163f 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -28,6 +28,7 @@ .. [1] Dohan, David, et al. "Language model cascades." arXiv preprint arXiv:2207.10342 (2022). """ +from outlines.function import fn from outlines.image import as_image from outlines.program import program from outlines.text import as_string, compose @@ -35,6 +36,6 @@ __all__ = [ "as_image", "as_string", - "program", + "fn" "program", "compose", ] diff --git a/outlines/function.py b/outlines/function.py new file mode 100644 index 00000000..722a39fb --- /dev/null +++ b/outlines/function.py @@ -0,0 +1,73 @@ +"""Functionalities to wrap user-defined functions as Ops. + +The content of this module is heavily inspired by the design of +`Aesara `_. 
+ +""" +import inspect +from typing import Callable, Sequence, Type + +from outlines.graph import Op, Variable +from outlines.text.var import StringVariable + + +class FromFunctionOp(Op): + """Build an outlines Op around a function.""" + + def __init__( + self, + fn: Callable, + input_types: Sequence[Type[Variable]], + output_types: Sequence[Type[Variable]], + ): + self._fn = fn + self.input_types = input_types + self.output_types = output_types + + def __str__(self): + return f"FromFunctionOp({self._fn.__name__})" + + def perform(self, *inputs): + outs = self._fn(*inputs) + if not isinstance(outs, (list, tuple)): + outs = (outs,) + + return outs + + +def fn(function: Callable): + """Decorator that converts a Python function into an Outlines `Op` + that will call the function as its implementation. + + The user must specify the types of the inputs and outputs as type + hints. + + """ + sig = inspect.signature(function) + + inputs = [] + for name, parameter in sig.parameters.items(): + if parameter.annotation == str: + inputs.append(StringVariable) + elif parameter.annotation == inspect._empty: + raise TypeError( + "You need to specify the function's input types as type hints." + ) + else: + raise TypeError( + "The `fn` decorator currently only supports string arguments." + ) + + outputs = [] + if sig.return_annotation == str: + outputs.append(StringVariable) + elif sig.return_annotation == inspect._empty: + raise TypeError( + "You need to specify the function's output types as type hints." + ) + else: + raise TypeError( + "The `fn` decorator currently only supports string return types" + ) + + return FromFunctionOp(function, input_types=inputs, output_types=outputs) diff --git a/outlines/graph.py b/outlines/graph.py index 1c3e5699..d0df7bdc 100644 --- a/outlines/graph.py +++ b/outlines/graph.py @@ -16,7 +16,17 @@ Outlines in the near future. """ -from typing import Any, Iterable, List, Optional, Reversible, Sequence, Tuple, Union +from typing import ( + Any, + Iterable, + List, + Optional, + Reversible, + Sequence, + Tuple, + Type, + Union, +) class Node: @@ -149,6 +159,9 @@ class Op: """ + input_types: Optional[Sequence[Type[Variable]]] = None + output_types: Optional[Sequence[Type[Variable]]] = None + def make_node(self, *inputs: Variable) -> Apply: r"""Construct an `Apply` node that represents the application of this operation to the given inputs. @@ -166,7 +179,24 @@ def make_node(self, *inputs: Variable) -> Apply: The constructed `Apply` node. """ - raise NotImplementedError + if self.input_types is None: + raise NotImplementedError( + "You need to either provide `input_types` and `output_types` or implement the `make_node` method." + ) + + if self.output_types is None: + raise NotImplementedError( + "You need to either provide `input_types` and `output_types` or implement the `make_node` method." + ) + + if len(inputs) != len(self.input_types): + raise ValueError( + f"You need to provide an input type for each input. Got {len(self.input_types)} type definitions and {len(inputs)} inputs." 
+        )
+
+        # Check that the input types are valid
+
+        return Apply(self, inputs, [o() for o in self.output_types])
 
     def __call__(self, *inputs: Variable) -> Union[Variable, List[Variable]]:
         """Calls :meth:`Op.make_node` to construct an `Apply` node."""
diff --git a/tests/test_function.py b/tests/test_function.py
new file mode 100644
index 00000000..8ff31362
--- /dev/null
+++ b/tests/test_function.py
@@ -0,0 +1,47 @@
+import pytest
+
+import outlines
+
+
+def test_function_no_types():
+    with pytest.raises(TypeError, match="input types"):
+
+        @outlines.fn
+        def constant(inp):
+            return "constant"
+
+        constant("")
+
+    with pytest.raises(TypeError, match="only supports string arguments"):
+
+        @outlines.fn
+        def constant(inp: float):
+            return "constant"
+
+        constant("")
+
+    with pytest.raises(TypeError, match="output types"):
+
+        @outlines.fn
+        def constant(inp: str):
+            return "constant"
+
+        constant("")
+
+    with pytest.raises(TypeError, match="only supports string return types"):
+
+        @outlines.fn
+        def constant(inp: str) -> float:
+            return 1
+
+        constant("")
+
+
+def test_function_decorator():
+    @outlines.fn
+    def constant(inp: str) -> str:
+        return "constant"
+
+    inp = outlines.text.string()
+    out = constant(inp)
+    assert str(out.owner.op) == "FromFunctionOp(constant)"
From c4d0cf8d73ef8e0c5aaff849e8786ae495d28274 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 13:20:25 +0200
Subject: [PATCH 030/734] Add a `chain` function to compile an outlines graph

---
 outlines/__init__.py  |  6 ++++--
 outlines/program.py   | 44 ++++++++++++++++++++++++++++++++++++++++++-
 tests/test_compile.py | 32 ++++++++++++++++++++-----------
 3 files changed, 68 insertions(+), 14 deletions(-)

diff --git a/outlines/__init__.py b/outlines/__init__.py
index 66eb163f..2937d8ba 100644
--- a/outlines/__init__.py
+++ b/outlines/__init__.py
@@ -30,12 +30,14 @@
 """
 from outlines.function import fn
 from outlines.image import as_image
-from outlines.program import program
+from outlines.program import chain, program
 from outlines.text import as_string, compose
 
 __all__ = [
+    "chain",
     "as_image",
     "as_string",
-    "fn" "program",
+    "fn",
+    "program",
     "compose",
 ]
diff --git a/outlines/program.py b/outlines/program.py
index c9dd2f0c..9a7fab44 100644
--- a/outlines/program.py
+++ b/outlines/program.py
@@ -2,7 +2,7 @@
 import textwrap
 import time
 from functools import singledispatchmethod
-from typing import Iterable, Reversible
+from typing import Callable, Iterable, Reversible
 
 from rich.console import Console
 from rich.layout import Layout
@@ -223,3 +223,45 @@ def run(self, *values):
 
 
 program = Program
+
+
+def chain(input_vars, output_vars) -> Callable:
+    """Return a function that will compute the outputs of a chain from its inputs.
+
+    Parameters
+    ----------
+    input_vars
+        Sequence of symbolic variables that correspond to the function's
+        parameters.
+    output_vars
+        Symbolic variable(s) representing the expression(s) to compute.
+
+    """
+
+    if not isinstance(input_vars, (list, tuple)):
+        raise Exception(
+            "Input variables of the `chain` function should be contained in a list or a tuple, even when there is a single input."
+        )
+    if not isinstance(output_vars, (list, tuple)):
+        output_vars = (output_vars,)
+
+    sorted_nodes = io_toposort(input_vars, output_vars)
+
+    def function(*inputs):
+        storage_map = {s: v for s, v in zip(input_vars, inputs)}
+
+        for node in sorted_nodes:
+            for i in node.inputs:
+                if isinstance(i, StringConstant):
+                    storage_map[i] = i.value
+            inputs = [storage_map[i] for i in node.inputs]
+            results = node.op.perform(*inputs)
+            for i, o in enumerate(node.outputs):
+                storage_map[o] = results[i]
+
+        if len(output_vars) == 1:
+            return storage_map[output_vars[0]]
+        else:
+            return tuple(storage_map[o] for o in output_vars)
+
+    return function
diff --git a/tests/test_compile.py b/tests/test_compile.py
index 988f50a3..74d72583 100644
--- a/tests/test_compile.py
+++ b/tests/test_compile.py
@@ -1,33 +1,43 @@
-from outlines import program
+import outlines
 from outlines.text import compose, string
 
 
 def test_compile():
     s = string()
-    out = program([s], [s])
-    assert out.run("test")["script"] == "test"
+    chain = outlines.chain([s], s)
+    assert chain("test") == "test"
 
     s = string()
     p = "Test " + s
-    out = program([s], [p])
-    assert out.run("test")["script"] == "Test test"
+    chain = outlines.chain([s], p)
+    assert chain("test") == "Test test"
 
     s1 = string()
     s2 = string()
     p = s1 + s2
-    out = program([s1, s2], [p])
-    assert out.run("one", "two")["script"] == "onetwo"
+    chain = outlines.chain([s1, s2], p)
+    assert chain("one", "two") == "onetwo"
 
     s1 = string()
     s2 = string()
     p1 = s1 + s2
     p2 = s1 + "three"
-    out = program([s1, s2], [p1, p2])
-    assert out.run("one", "two")["script"] == ("onetwo", "onethree")
+    chain = outlines.chain([s1, s2], [p1, p2])
+    assert chain("one", "two") == ("onetwo", "onethree")
 
 
 def test_compile_scripts():
     s = string()
     o = compose("This is a ${var}", var=s)
-    out = program([s], [o])
-    assert out.run("test")["script"] == "This is a test"
+    chain = outlines.chain([s], o)
+    assert chain("test") == "This is a test"
+
+
+def test_eval():
+    s = string()
+    assert s.eval({s: "s"}) == "s"
+
+    s = string()
+    t = string()
+    o = s + t
+    assert o.eval({s: "one", t: "two"}) == "onetwo"
From 33cb839af4fe425c5de3075f559fd4d6cd6d5a2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 13:20:41 +0200
Subject: [PATCH 031/734] Add an `eval` method to evaluate variables

---
 outlines/graph.py | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/outlines/graph.py b/outlines/graph.py
index d0df7bdc..9c23d587 100644
--- a/outlines/graph.py
+++ b/outlines/graph.py
@@ -18,6 +18,7 @@
 """
 from typing import (
     Any,
+    Dict,
     Iterable,
     List,
     Optional,
@@ -84,6 +85,49 @@ def __str__(self):
         else:
             return f"<{getattr(type(self), '__name__')}>"
 
+    def eval(self, inputs_to_values: Optional[Dict] = None):
+        r"""Evaluate the `Variable`.
+
+        This is a quick way to execute an Outlines graph and can be used, for
+        instance, for debugging.
+
+        Parameters
+        ----------
+        inputs_to_values :
+            A dictionary mapping Outlines `Variable`\s to values.
+
+        Examples
+        --------
+
+        When every upstream variable in the graph already has a value we can
+        call :meth:`eval` with no argument:
+
+        >>> import outlines
+        >>> prompt = "This is a test prompt"
+        >>> answer = outlines.text.models.openai.OpenAI("davinci")(prompt)
+        >>> answer.eval()
+
+        Otherwise, we need to pass :meth:`eval` a dictionary that maps symbolic
+        `Variable`\s to the value to substitute for them:
+
+        >>> import outlines
+        >>> prompt = outlines.text.string()
+        >>> answer = outlines.text.models.openai.OpenAI("davinci")(prompt)
+        >>> answer.eval({prompt: "This is a test prompt"})
+
+        """
+        from outlines.program import chain
+
+        if inputs_to_values is None:
+            inputs_to_values = {}
+
+        inputs = tuple(sorted(inputs_to_values.keys(), key=id))
+        args = [inputs_to_values[var] for var in inputs]
+
+        fn = chain(inputs, self)
+
+        return fn(*args)
+
 
 class Apply(Node):
     """An `Apply` node represents the application of an `Op` to variables.
 
From cfdfbd9dc3d55655cd4feee60df6e944e6d1bd57 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 15:25:45 +0200
Subject: [PATCH 032/734] Add `stops_at` parameter for language models

---
 outlines/text/models/model.py  | 4 +++-
 outlines/text/models/openai.py | 6 +++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py
index d3ac362e..9f2f026f 100644
--- a/outlines/text/models/model.py
+++ b/outlines/text/models/model.py
@@ -24,7 +24,7 @@ def __init__(self, name=None):
         super().__init__()
         self.name = name
 
-    def __call__(self, prompt, name=None):
+    def __call__(self, prompt, stops_at=None, name=None):
         """Create the `Apply` node that represents the `Op`'s application to inputs.
 
         Parameters
@@ -37,6 +37,8 @@ def __call__(self, prompt, name=None):
 
         """
         res = super().__call__(prompt)
 
+        self.stops_at = stops_at
+
         if name is not None:
             res.name = name
 
diff --git a/outlines/text/models/openai.py b/outlines/text/models/openai.py
index af0a6f9d..8293fbc7 100644
--- a/outlines/text/models/openai.py
+++ b/outlines/text/models/openai.py
@@ -36,11 +36,15 @@ def __init__(self, model: str):
         self.model = model
 
     def perform(self, prompt):
+        if self.stops_at is not None and len(self.stops_at) > 4:
+            raise Exception("OpenAI's API does not accept more than 4 stop sequences.")
+
         try:
             resp = openai.Completion.create(
                 model=self.model,
                 prompt=prompt,
                 max_tokens=128,
+                stop=self.stops_at,
             )
From 1645a4eb285a1814efc4d865b5411eb425b65147 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 15:26:58 +0200
Subject: [PATCH 033/734] Add meta-prompting examples

---
 examples/meta_prompting.py | 138 +++++++++++++++++++++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100644 examples/meta_prompting.py

diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py
new file mode 100644
index 00000000..2e560785
--- /dev/null
+++ b/examples/meta_prompting.py
@@ -0,0 +1,138 @@
+"""Meta-prompting examples.
+
+References
+----------
+
+.. [0] "Prompting is programming: A Query Language for Large Language Models"
+       https://arxiv.org/abs/2212.06094
+.. 
[1] "Prompt programming For Large Language Models: Beyond the Few-Shot Paradigm" + https://arxiv.org/abs/2102.07350. + +""" +import outlines +from outlines import compose +from outlines.text.models.openai import OpenAI + + +def split_into_steps(question, llm): + prompt = compose( + """ + ${question} + Let's solve this problem by splitting it into steps. + """, + question=question, + ) + return llm(prompt) + + +def fill_in_the_blanks(question, llm): + meta_prompt = compose( + """ + ${question} + To solve this problem, we will analyze each of the options and determine + """, + question=question, + ) + goal = llm(meta_prompt) + + prompt = compose( + """ + ${meta_prompt}${goal}. Let's begin. + """, + meta_prompt=meta_prompt, + goal=goal, + ) + answer = llm(prompt) + + return goal, answer + + +def ask_an_expert(question, llm): + prompt = compose( + """ + ${question} + I entered my question into the Expert Generator + and waited. The Expert Generator will render a + simulation of an expert to answer my question. + The expert could be anyone, dead or alive, real + or fictional; the machine will find the person + most qualified to answer the question. For this + question in particular, the expert must be someone + who has thought a lot about the problem of + artificial intelligence and its alignment. + The Expert Generator beeped, indicating that it has + found the most qualified expert. The name displayed + on the screen: " + """, + question=question, + ) + expert = llm(prompt, stops_at=['""']) + + prompt = compose( + """ + ${prompt}${expert} + I am ready to ask my question. + "${expert} I say, + ${question} + """, + prompt=prompt, + expert=expert, + question=question, + ) + answer = llm(prompt) + return prompt, expert, answer + + +def ask_an_expert_simple(question, llm): + meta_prompt = compose( + """ + Q: ${question} + A: A good person to answer this question would be + """, + question=question, + ) + expert = llm(meta_prompt, stops_at=["/n", "."]) + + prompt = compose( + """ + ${meta_prompt}${expert} + + For instance,${expert} would answer + """, + meta_prompt=meta_prompt, + expert=expert, + ) + answer = llm(prompt) + + return answer + + +llm = OpenAI("text-davinci-001") +fn = outlines.chain([], ask_an_expert_simple("What is the meaning of life?", llm)) +fn = outlines.chain([], split_into_steps("f(x) = x*x. What is f(f(3))?", llm)) +fn = outlines.chain( + [], + ask_an_expert( + "What should humankind do to ensure that artificial general intelligence is aligned?", + llm, + ), +) + +direction = compose( + """ +Directions: In the following question, a related +pair of words or phrases is followed by five +pairs of words or phrases. Choose the pair +that best expresses a relationship similar to +that in the original pair. 
+BRAGGART :: MODESTY
+A) FLEDGLING : EXPERIENCE
+B) EMBEZZLER : GREED
+C) WALLFLOWER : TIMIDITY
+D) INVALID : MALADY
+E) CANDIDATE : AMBITION
+
+"""
+)
+fn = outlines.chain([], split_into_steps(direction, llm))
+fn = outlines.chain([], fill_in_the_blanks(direction, llm))
From 88ec4ff744566b5c9012809c80f9974c8d232c88 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 17:33:39 +0200
Subject: [PATCH 034/734] Fix the `stops_at` argument to LLMs

---
 outlines/text/models/model.py  | 2 --
 outlines/text/models/openai.py | 9 +++++----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py
index 9f2f026f..a48ed075 100644
--- a/outlines/text/models/model.py
+++ b/outlines/text/models/model.py
@@ -37,8 +37,6 @@ def __call__(self, prompt, stops_at=None, name=None):
 
         """
         res = super().__call__(prompt)
 
-        self.stops_at = stops_at
-
         if name is not None:
             res.name = name
 
diff --git a/outlines/text/models/openai.py b/outlines/text/models/openai.py
index 8293fbc7..4cb39c4e 100644
--- a/outlines/text/models/openai.py
+++ b/outlines/text/models/openai.py
@@ -17,7 +17,7 @@ class OpenAI(LanguageModel):
 
     """
 
-    def __init__(self, model: str):
+    def __init__(self, model: str, stops_at=None):
         """Initialize the OpenAI model."""
 
         try:
@@ -32,13 +32,14 @@ def __init__(self, model: str):
         if model not in available_model_names:
             raise OSError(f"{model} is not a valid OpenAI model name.")
 
+        if stops_at is not None and len(stops_at) > 4:
+            raise Exception("OpenAI's API does not accept more than 4 stop sequences.")
+        self.stops_at = stops_at
+
         super().__init__(name=f"OpenAI {model}")
         self.model = model
 
     def perform(self, prompt):
-        if self.stops_at is not None and len(self.stops_at) > 4:
-            raise Exception("OpenAI's API does not accept more than 4 stop sequences.")
-
         try:
             resp = openai.Completion.create(
                 model=self.model,
                 prompt=prompt,
                 max_tokens=128,
                 stop=self.stops_at,
             )
From e9c16585275fefd0b64fdd511d06b823d6d248c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 5 Apr 2023 17:33:45 +0200
Subject: [PATCH 035/734] Refactor the meta-prompting examples

---
 examples/meta_prompting.py | 113 ++++++++++++++++---------
 1 file changed, 66 insertions(+), 47 deletions(-)

diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py
index 2e560785..8723cbd0 100644
--- a/examples/meta_prompting.py
+++ b/examples/meta_prompting.py
@@ -9,12 +9,14 @@ https://arxiv.org/abs/2102.07350. 
""" +import argparse + import outlines from outlines import compose from outlines.text.models.openai import OpenAI -def split_into_steps(question, llm): +def split_into_steps(question, model: str): prompt = compose( """ ${question} @@ -22,18 +24,21 @@ def split_into_steps(question, llm): """, question=question, ) - return llm(prompt) + answer = OpenAI(model)(prompt) + + return prompt, answer -def fill_in_the_blanks(question, llm): +def fill_in_the_blanks(question, model: str): meta_prompt = compose( """ ${question} - To solve this problem, we will analyze each of the options and determine + + In order to solve this problem, we will analyze each of the options and determine """, question=question, ) - goal = llm(meta_prompt) + goal = OpenAI(model, stops_at=["."])(meta_prompt) prompt = compose( """ @@ -42,13 +47,13 @@ def fill_in_the_blanks(question, llm): meta_prompt=meta_prompt, goal=goal, ) - answer = llm(prompt) + answer = OpenAI(model)(prompt) - return goal, answer + return prompt, answer -def ask_an_expert(question, llm): - prompt = compose( +def ask_an_expert(question, model: str): + meta_prompt = compose( """ ${question} I entered my question into the Expert Generator @@ -66,24 +71,24 @@ def ask_an_expert(question, llm): """, question=question, ) - expert = llm(prompt, stops_at=['""']) + expert = OpenAI(model, stops_at=['"'])(meta_prompt) prompt = compose( """ - ${prompt}${expert} + ${prompt}${expert}" I am ready to ask my question. - "${expert} I say, + "${expert}" I say, ${question} """, - prompt=prompt, + prompt=meta_prompt, expert=expert, question=question, ) - answer = llm(prompt) - return prompt, expert, answer + answer = OpenAI(model)(prompt) + return prompt, answer -def ask_an_expert_simple(question, llm): +def ask_an_expert_simple(question, model: str): meta_prompt = compose( """ Q: ${question} @@ -91,48 +96,62 @@ def ask_an_expert_simple(question, llm): """, question=question, ) - expert = llm(meta_prompt, stops_at=["/n", "."]) + expert = OpenAI(model, stops_at=["/n", "."])(meta_prompt) prompt = compose( """ - ${meta_prompt}${expert} + ${meta_prompt}${expert}. For instance,${expert} would answer """, meta_prompt=meta_prompt, expert=expert, ) - answer = llm(prompt) + answer = OpenAI(model)(prompt) - return answer + return prompt, answer -llm = OpenAI("text-davinci-001") -fn = outlines.chain([], ask_an_expert_simple("What is the meaning of life?", llm)) -fn = outlines.chain([], split_into_steps("f(x) = x*x. What is f(f(3))?", llm)) -fn = outlines.chain( - [], - ask_an_expert( - "What should humankind do to ensure that artificial general intelligence is aligned?", - llm, - ), -) +def run_example(model_fn, question, model): + print("\n-----------------------------------------\n") + question_s = outlines.text.string() + fn = outlines.chain([question_s], model_fn(question_s, model)) + prompt, answer = fn(question) + print(f"{prompt}{answer}") -direction = compose( - """ -Directions: In the following question, a related -pair of words or phrases is followed by five -pairs of words or phrases. Choose the pair -that best expresses a relationship similar to -that in the original pair. 
+BRAGGART :: MODESTY
+A) FLEDGLING : EXPERIENCE
+B) EMBEZZLER : GREED
+C) WALLFLOWER : TIMIDITY
+D) INVALID : MALADY
+E) CANDIDATE : AMBITION
+
+    """
+    )
+    alignment_q = "What should humankind do to ensure that artificial general intelligence is aligned?"
+    meaning_q = "What is the meaning of life?"
+
+    run_example(split_into_steps, math_q, args.model)
+    run_example(split_into_steps, sat_q, args.model)
+    run_example(fill_in_the_blanks, sat_q, args.model)
+    run_example(ask_an_expert, alignment_q, args.model)
+    run_example(ask_an_expert_simple, meaning_q, args.model)
From 632e477ad3ce3f3717235a65c6341fbeca5ed2e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 6 Apr 2023 09:57:20 +0200
Subject: [PATCH 036/734] Add a `model` decorator to encapsulate calls to language models

---
 examples/meta_prompting.py             | 109 ++++++++---------
 outlines/program.py                    |   2 +-
 outlines/text/__init__.py              |   3 +-
 outlines/text/models/__init__.py       |   2 +-
 outlines/text/models/hugging_face.py   |   2 +-
 outlines/text/models/language_model.py | 156 +++++++++++++++++++++++++
 outlines/text/models/model.py          |  52 ---------
 tests/text/test_model.py               |  27 ++++-
 8 files changed, 234 insertions(+), 119 deletions(-)
 create mode 100644 outlines/text/models/language_model.py
 delete mode 100644 outlines/text/models/model.py

diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py
index 8723cbd0..3b2e5dc3 100644
--- a/examples/meta_prompting.py
+++ b/examples/meta_prompting.py
@@ -12,48 +12,42 @@
 import argparse
 
 import outlines
-from outlines import compose
-from outlines.text.models.openai import OpenAI
+import outlines.text as text
 
 
-def split_into_steps(question, model: str):
-    prompt = compose(
-        """
-        ${question}
+def split_into_steps(question, model_name: str):
+    @text.model(model_name)
+    def solve(question):
+        """${question}
         Let's solve this problem by splitting it into steps.
-        """,
-        question=question,
-    )
-    answer = OpenAI(model)(prompt)
+        """
+
+    answer, prompt = solve(question)
 
     return prompt, answer
 
 
-def fill_in_the_blanks(question, model: str):
-    meta_prompt = compose(
-        """
-        ${question}
+def fill_in_the_blanks(question, model_name: str):
+    @text.model(model_name, stops_at=["."])
+    def determine_goal(question):
+        """${question}
 
         In order to solve this problem, we will analyze each of the options and determine
-        """,
-        question=question,
-    )
-    goal = OpenAI(model, stops_at=["."])(meta_prompt)
+        """
 
-    prompt = compose(
-        """
-        ${meta_prompt}${goal}. Let's begin.
-        """,
-        meta_prompt=meta_prompt,
-        goal=goal,
-    )
-    answer = OpenAI(model)(prompt)
-
-    return prompt, answer
+    @text.model(model_name, stops_at=["."])
+    def solve(memory):
+        """${memory}. 
Let's begin.""" + _, memory = determine_goal(question) + answer, full_interaction = solve(memory) -def ask_an_expert(question, model: str): - meta_prompt = compose( + return full_interaction, answer + + +def ask_an_expert(question, model_name: str): + @text.model(model_name, stops_at=['"']) + def find_expert(question): """ ${question} I entered my question into the Expert Generator @@ -68,48 +62,43 @@ def ask_an_expert(question, model: str): The Expert Generator beeped, indicating that it has found the most qualified expert. The name displayed on the screen: " - """, - question=question, - ) - expert = OpenAI(model, stops_at=['"'])(meta_prompt) + """ - prompt = compose( + @text.model(model_name) + def get_answer(question, expert, memory): """ - ${prompt}${expert}" + ${memory} I am ready to ask my question. "${expert}" I say, ${question} - """, - prompt=meta_prompt, - expert=expert, - question=question, - ) - answer = OpenAI(model)(prompt) - return prompt, answer + """ + expert, memory = find_expert(question) + answer, full_interaction = get_answer(question, expert, memory) -def ask_an_expert_simple(question, model: str): - meta_prompt = compose( + return full_interaction, answer + + +def ask_an_expert_simple(question, model_name: str): + @text.model(model_name, stops_at=["\n", "."]) + def find_expert(question): """ Q: ${question} A: A good person to answer this question would be - """, - question=question, - ) - expert = OpenAI(model, stops_at=["/n", "."])(meta_prompt) + """ - prompt = compose( + @text.model(model_name) + def get_answer(expert, memory): """ - ${meta_prompt}${expert}. + ${memory}. For instance,${expert} would answer - """, - meta_prompt=meta_prompt, - expert=expert, - ) - answer = OpenAI(model)(prompt) + """ - return prompt, answer + expert, memory = find_expert(question) + answer, full_interaction = get_answer(expert, memory) + + return full_interaction, answer def run_example(model_fn, question, model): @@ -117,7 +106,7 @@ def run_example(model_fn, question, model): question_s = outlines.text.string() fn = outlines.chain([question_s], model_fn(question_s, model)) prompt, answer = fn(question) - print(f"{prompt}{answer}") + print(f"{prompt}") if __name__ == "__main__": @@ -125,14 +114,13 @@ def run_example(model_fn, question, model): parser.add_argument( "--model", type=str, - default="text-davinci-001", + default="openai/text-davinci-001", help="The Large Language Model to use to run the examples.", ) args = parser.parse_args() math_q = "f(x) = x*x. What is f(f(3))?" - sat_q = compose( - """ + sat_q = """ Directions: In the following question, a related pair of words or phrases is followed by five pairs of words or phrases. Choose the pair @@ -146,7 +134,6 @@ def run_example(model_fn, question, model): E) CANDIDATE : AMBITION """ - ) alignment_q = "What should humankind do to ensure that artificial general intelligence is aligned?" meaning_q = "What is the meaning of life?" 
diff --git a/outlines/program.py b/outlines/program.py index 9a7fab44..a3f2f7b8 100644 --- a/outlines/program.py +++ b/outlines/program.py @@ -10,7 +10,7 @@ from rich.panel import Panel from outlines.graph import Variable, io_toposort -from outlines.text.models.model import LanguageModel +from outlines.text.models import LanguageModel from outlines.text.var import StringConstant COLORS = itertools.cycle(["deep_sky_blue2", "gold3", "deep_pink2"]) diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 63baf99a..2b057f03 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,5 +1,6 @@ from .basic import * from .compose import compose +from .models import model from .var import as_string, string -__all__ = ["as_string", "string", "compose"] +__all__ = ["as_string", "model", "string", "compose"] diff --git a/outlines/text/models/__init__.py b/outlines/text/models/__init__.py index 43357b6b..b75a091c 100644 --- a/outlines/text/models/__init__.py +++ b/outlines/text/models/__init__.py @@ -1 +1 @@ -from .model import LanguageModel +from .language_model import LanguageModel, model diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index 8c5d2cd3..9425ae57 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -1,4 +1,4 @@ -from outlines.text.models.model import LanguageModel +from outlines.text.models.language_model import LanguageModel try: import torch diff --git a/outlines/text/models/language_model.py b/outlines/text/models/language_model.py new file mode 100644 index 00000000..40756d3d --- /dev/null +++ b/outlines/text/models/language_model.py @@ -0,0 +1,156 @@ +import inspect + +from outlines.graph import Apply, Op +from outlines.text.compose import compose +from outlines.text.var import StringVariable, as_string + + +class LanguageModel(Op): + """An `Op` that produces a sample from a language model. + + The output of language models in outlines is represented as a random + variable. Therefore, calling a language model will return a random sequence + (via ancestral sampling) by default. Other decoding methods are constructed + as graph transformations. + + """ + + def __init__(self, name=None): + """Instantiate the `LanguageModel` `Op`. + + Parameters + ---------- + name + The name of the `Op` in the graph. + + """ + super().__init__() + self.name = name + + def __call__(self, prompt, stops_at=None, name=None): + """Create the `Apply` node that represents the `Op`'s application to inputs. + + Parameters + ---------- + prompt + The prompt used to condition the language model's sampling procedure. + name + The name of the output variable in the graph. + + """ + res = super().__call__(prompt) + + if name is not None: + res.name = name + + return res + + def make_node(self, prompt): + prompt = as_string(prompt) + out = StringVariable() + + return Apply(self, [prompt], [out]) + + def perform(self, prompt): + return NotImplementedError + + +def model(name: str, stops_at=None): + """Decorator that allows to simplify calls to language models. + + Prompts that are passed to language models are often rendered templates, + and the workflow typically looks like: + + >>> import outlines + >>> from outlines.text.models.openai import OpenAI + >>> + >>> llm = OpenAI("davinci") + >>> tpl = "I have a ${question}" + >>> prompt = outlines.compose(tpl, question="How are you?") + >>> answer = llm(prompt) + + While explicit, these 4 lines have the following defaults: + + 1. 
The prompt is hidden; + 2. The language model instantiation is far from the prompt; prompt templates + are however attached to a specific language model call. + 3. The intent behind the language model call is hidden. + + To encapsulate the logic behind language model calls, we thus define the + template prompt inside a function and decorate the function with a model + specification. When that function is called, the template is rendered using + the arguments passed to the function, and the rendered prompt is passed to + a language model instantiated with the arguments passed to the decorator. + + The previous example is equivalent to the following: + + >>> import outlines + >>> + >>> @outlines.text.model("openai/davinci") + ... def answer(question): + ... "I have a ${question}" + ... + >>> answer, _ = answer("How are you?") + + Decorated functions return two objects: the first represents the output of + the language model call, the second represents the concatenation of the + rendered prompt with the output of the language model call. The latter can + be used in context where one expands an initial prompt with recursive calls + to language models. + + """ + provider_name = name.split("/")[0] + model_name = name[len(provider_name) + 1 :] + + if provider_name == "openai": + from outlines.text.models.openai import OpenAI + + llm = OpenAI(model_name, stops_at) # type:ignore + elif provider_name == "hf": + from outlines.text.models.hugging_face import HFCausalLM + + llm = HFCausalLM(model_name) # type:ignore + else: + raise NameError(f"The model provider {provider_name} is not available.") + + def decorator(fn): + # Get the names of the parameters to the function, which must correspond + # to the variables defined in the template. + var_names = [] + kwargs_data = {} + sig = inspect.signature(fn) + for parameter in sig.parameters.values(): + if parameter.default == inspect._empty: + var_names.append(parameter.name) + else: + kwargs_data[parameter.name] = parameter.default + + # The docstring contains the template that will be rendered to be used + # as a prompt to the language model. + template = inspect.cleandoc(fn.__doc__) + + def wrapper(*args, **kwargs): + """Call the LLM with the rendered template. + + Building prompts with recursive calls to language models is common + in prompt engineering, we thus return both the raw answer from the + language model as well as the rendered prompt including the answer. + + Returns + ------- + A tuple that contains the result of the language model call, and the + rendered prompt concatenated with the result of the language model + call. + + """ + args_data = {name: arg for name, arg in zip(var_names, args)} + kwargs_data.update(kwargs) + data = {**args_data, **kwargs_data} + + prompt = compose(template, **data) + result = llm(prompt) + return result, prompt + result + + return wrapper + + return decorator diff --git a/outlines/text/models/model.py b/outlines/text/models/model.py deleted file mode 100644 index a48ed075..00000000 --- a/outlines/text/models/model.py +++ /dev/null @@ -1,52 +0,0 @@ -from outlines.graph import Apply, Op -from outlines.text.var import StringVariable, as_string - - -class LanguageModel(Op): - """An `Op` that produces a sample from a language model. - - The output of language models in outlines is represented as a random - variable. Therefore, calling a language model will return a random sequence - (via ancestral sampling) by default. Other decoding methods are constructed - as graph transformations. 
- - """ - - def __init__(self, name=None): - """Instantiate the `LanguageModel` `Op`. - - Parameters - ---------- - name - The name of the `Op` in the graph. - - """ - super().__init__() - self.name = name - - def __call__(self, prompt, stops_at=None, name=None): - """Create the `Apply` node that represents the `Op`'s application to inputs. - - Parameters - ---------- - prompt - The prompt used to condition the language model's sampling procedure. - name - The name of the output variable in the graph. - - """ - res = super().__call__(prompt) - - if name is not None: - res.name = name - - return res - - def make_node(self, prompt): - prompt = as_string(prompt) - out = StringVariable() - - return Apply(self, [prompt], [out]) - - def perform(self, prompt): - return NotImplementedError diff --git a/tests/text/test_model.py b/tests/text/test_model.py index 9b90e2da..90a35873 100644 --- a/tests/text/test_model.py +++ b/tests/text/test_model.py @@ -1,8 +1,10 @@ +import pytest + from outlines.text import string -from outlines.text.models.model import LanguageModel +from outlines.text.models.language_model import LanguageModel, model -def test_initialize_model(): +def test_initialize_LanguageModel(): llm = LanguageModel(name="llm") prompt = string() @@ -10,3 +12,24 @@ def test_initialize_model(): assert isinstance(out.owner.op, LanguageModel) assert out.owner.inputs[0] == prompt assert out.owner.op.name == "llm" + + +def test_model_wrong_provide(): + with pytest.raises(NameError, match="not available"): + + @model("aa/model_name") + def test_function(): + """""" + + +@pytest.mark.skip +def test_model(): + @model("openai/text-davinci-001", stops_at=["."]) + def test_function(question, type="bad"): + """You're a witty and sarcastic AI. + + Tell me a ${type} ${question}. + Joke: + """ + + answer, prompt = test_function("joke", type="good") From 2bed272c4a7deae5e33f26c98373b5f206868f23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 6 Apr 2023 10:01:17 +0200 Subject: [PATCH 037/734] Use `inspect.cleandoc` instead of `textwrap.dedent` in `compose` `textwrap.dedent` and `inspect.cleandoc` have a different behavior when it comes to indentation. With `dedent` the indentation is relative to the first line; with `cleandoc` the relative indentation of the first line is discarded. 
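
For instance, given a template whose first line carries no indentation (a
standard-library-only illustration, safe to run as-is):

    import inspect
    import textwrap

    doc = "Title line\n    indented body"

    # `dedent` strips the whitespace prefix common to *all* lines; the
    # unindented first line forces that prefix to be empty, so the body
    # keeps its indentation.
    print(repr(textwrap.dedent(doc)))   # 'Title line\n    indented body'

    # `cleandoc` ignores the first line when computing the margin, which
    # is the behavior docstring-style templates need.
    print(repr(inspect.cleandoc(doc)))  # 'Title line\nindented body'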
--- outlines/text/compose.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/outlines/text/compose.py b/outlines/text/compose.py index fc699053..5d911a17 100644 --- a/outlines/text/compose.py +++ b/outlines/text/compose.py @@ -1,5 +1,5 @@ import collections -import textwrap +import inspect from typing import Dict, Union from mako.runtime import Context @@ -89,7 +89,7 @@ def compose( buf = OutlinesEncodingBuffer() ctx = Context(buf, **values) - outline = textwrap.dedent(template).lstrip().rstrip() + outline = inspect.cleandoc(template) mako_template = Template(outline, default_filters=[]) mako_template.render_context(ctx) From 629c021d5e4be9a73208beef400404f93836c003 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 7 Apr 2023 20:45:54 +0200 Subject: [PATCH 038/734] s/compose/render --- outlines/__init__.py | 4 ++-- outlines/text/__init__.py | 4 ++-- outlines/text/models/language_model.py | 6 +++--- outlines/text/{compose.py => render.py} | 10 +++++----- tests/test_compile.py | 4 ++-- tests/text/test_compose.py | 20 ++++++++++---------- 6 files changed, 24 insertions(+), 24 deletions(-) rename outlines/text/{compose.py => render.py} (89%) diff --git a/outlines/__init__.py b/outlines/__init__.py index 2937d8ba..2f2003e2 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -31,7 +31,7 @@ from outlines.function import fn from outlines.image import as_image from outlines.program import chain, program -from outlines.text import as_string, compose +from outlines.text import as_string, render __all__ = [ "chain", @@ -39,5 +39,5 @@ "as_string", "fn", "program", - "compose", + "render", ] diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 2b057f03..4337b678 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,6 +1,6 @@ from .basic import * -from .compose import compose +from .render import render from .models import model from .var import as_string, string -__all__ = ["as_string", "model", "string", "compose"] +__all__ = ["as_string", "model", "string", "render"] diff --git a/outlines/text/models/language_model.py b/outlines/text/models/language_model.py index 40756d3d..c58a3c40 100644 --- a/outlines/text/models/language_model.py +++ b/outlines/text/models/language_model.py @@ -1,7 +1,7 @@ import inspect from outlines.graph import Apply, Op -from outlines.text.compose import compose +from outlines.text.render import render from outlines.text.var import StringVariable, as_string @@ -66,7 +66,7 @@ def model(name: str, stops_at=None): >>> >>> llm = OpenAI("davinci") >>> tpl = "I have a ${question}" - >>> prompt = outlines.compose(tpl, question="How are you?") + >>> prompt = outlines.render(tpl, question="How are you?") >>> answer = llm(prompt) While explicit, these 4 lines have the following defaults: @@ -147,7 +147,7 @@ def wrapper(*args, **kwargs): kwargs_data.update(kwargs) data = {**args_data, **kwargs_data} - prompt = compose(template, **data) + prompt = render(template, **data) result = llm(prompt) return result, prompt + result diff --git a/outlines/text/compose.py b/outlines/text/render.py similarity index 89% rename from outlines/text/compose.py rename to outlines/text/render.py index 5d911a17..32e9d825 100644 --- a/outlines/text/compose.py +++ b/outlines/text/render.py @@ -40,7 +40,7 @@ def get_value(self): return output -def compose( +def render( template: str, **values: Dict[str, Union[str, StringVariable]] ) -> Union[str, StringVariable]: r"""Parse a Mako template and translate it into an 
Outlines graph.
 
@@ -51,21 +51,21 @@ Outlines follow Mako's syntax
 
     >>> import outlines
-    >>> outline = outlines.compose("I like ${food} and ${sport}", food="tomatoes", sport="tennis")
+    >>> outline = outlines.render("I like ${food} and ${sport}", food="tomatoes", sport="tennis")
     I like tomatoes and tennis
 
     When a variable in the template is assigned a `StringVariable` value, the
-    `compose` function builds the corresponding outlines graph and returns a
+    `render` function builds the corresponding outlines graph and returns a
     `StringVariable`:
 
     >>> s = outlines.text.string()
-    >>> outlines.compose("I like ${food}", food=food)
+    >>> outlines.render("I like ${food}", food=food)
 
     It is also possible to use control flow inside templates:
 
     >>> examples = ["one", "two", "three"]
-    >>> outlines = outlines.compose(
+    >>> outlines = outlines.render(
     ...     '''
     ...     % for example in examples:
     ...     Example: ${example}
diff --git a/tests/test_compile.py b/tests/test_compile.py
index 74d72583..12933005 100644
--- a/tests/test_compile.py
+++ b/tests/test_compile.py
@@ -1,5 +1,5 @@
 import outlines
-from outlines.text import compose, string
+from outlines.text import render, string
 
 
 def test_compile():
@@ -28,7 +28,7 @@ def test_compile():
 
 def test_compile_scripts():
     s = string()
-    o = compose("This is a ${var}", var=s)
+    o = render("This is a ${var}", var=s)
     chain = outlines.chain([s], o)
     assert chain("test") == "This is a test"
diff --git a/tests/text/test_compose.py b/tests/text/test_compose.py
index c745f8c5..f7f161b3 100644
--- a/tests/text/test_compose.py
+++ b/tests/text/test_compose.py
@@ -1,40 +1,40 @@
 import pytest
 
-from outlines.text import compose, string
+from outlines.text import render, string
 from outlines.text.basic import Add
 from outlines.text.var import StringConstant, StringVariable
 
 
 def test_template_text():
     with pytest.raises(NameError):
-        compose("String ${one}", two="two")
+        render("String ${one}", two="two")
 
-    t = compose("Test")
+    t = render("Test")
     assert t == "Test"
 
-    t = compose("Test ${variable}", variable="string")
+    t = render("Test ${variable}", variable="string")
     assert t == "Test string"
 
-    t = compose("Test ${variable}", variable=1)
+    t = render("Test ${variable}", variable=1)
     assert t == "Test 1"
 
-    t = compose("Test repeated ${variable} ${variable}", variable="string")
+    t = render("Test repeated ${variable} ${variable}", variable="string")
     assert t == "Test repeated string string"
 
-    t = compose("Test ${one} ${two}", one="1", two="2")
+    t = render("Test ${one} ${two}", one="1", two="2")
     assert t == "Test 1 2"
 
 
 def test_template_string_variable():
     variable = string()
-    t = compose("Test ${variable}", variable=variable)
+    t = render("Test ${variable}", variable=variable)
     assert isinstance(t.owner.op, Add)
     assert isinstance(t.owner.inputs[0], StringConstant)
     assert isinstance(t.owner.inputs[1], StringVariable)
     assert t.owner.inputs[0].value == "Test "
 
     variable = string()
-    t = compose("${variable} test", variable=variable)
+    t = render("${variable} test", variable=variable)
     assert isinstance(t.owner.op, Add)
     assert isinstance(t.owner.inputs[0], StringVariable)
     assert isinstance(t.owner.inputs[1], StringConstant)
 
 
 def test_template_few_shots():
     wa = string()
     examples = [["here", "there"], ["this", "that"]]
-    prompt = compose(
+    prompt = render(
         """
         This is a test
From 2a2109ff79ca45d37f9de1d9a2f477cd7b21d703 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Fri, 7 Apr 2023 20:54:50 +0200
Subject: [PATCH 
039/734] s/model/completion `@text.model` does not convey what the function is doing, `text.completion` does a much better job at this. --- examples/meta_prompting.py | 25 +++--- outlines/__init__.py | 2 +- outlines/text/__init__.py | 4 +- outlines/text/completion.py | 104 +++++++++++++++++++++++++ outlines/text/models/__init__.py | 2 +- outlines/text/models/language_model.py | 104 ------------------------- tests/text/test_model.py | 8 +- 7 files changed, 124 insertions(+), 125 deletions(-) create mode 100644 outlines/text/completion.py diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 3b2e5dc3..fd0056f3 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -16,7 +16,7 @@ def split_into_steps(question, model_name: str): - @text.model(model_name) + @text.completion(model_name) def solve(question): """${question} Let's solve this problem by splitting it into steps. @@ -28,14 +28,14 @@ def solve(question): def fill_in_the_blanks(question, model_name: str): - @text.model(model_name, stops_at=["."]) + @text.completion(model_name, stops_at=["."]) def determine_goal(question): """${question} In order to solve this problem, we will analyze each of the options and determine """ - @text.model(model_name, stops_at=["."]) + @text.completion(model_name, stops_at=["."]) def solve(memory): """${memory}. Let's begin.""" @@ -46,7 +46,7 @@ def solve(memory): def ask_an_expert(question, model_name: str): - @text.model(model_name, stops_at=['"']) + @text.completion(model_name, stops_at=['"']) def find_expert(question): """ ${question} @@ -64,7 +64,7 @@ def find_expert(question): on the screen: " """ - @text.model(model_name) + @text.completion(model_name) def get_answer(question, expert, memory): """ ${memory} @@ -80,14 +80,14 @@ def get_answer(question, expert, memory): def ask_an_expert_simple(question, model_name: str): - @text.model(model_name, stops_at=["\n", "."]) + @text.completion(model_name, stops_at=["\n", "."]) def find_expert(question): """ Q: ${question} A: A good person to answer this question would be """ - @text.model(model_name) + @text.completion(model_name) def get_answer(expert, memory): """ ${memory}. @@ -114,18 +114,17 @@ def run_example(model_fn, question, model): parser.add_argument( "--model", type=str, - default="openai/text-davinci-001", + default="openai/text-davinci-003", help="The Large Language Model to use to run the examples.", ) args = parser.parse_args() math_q = "f(x) = x*x. What is f(f(3))?" sat_q = """ - Directions: In the following question, a related - pair of words or phrases is followed by five - pairs of words or phrases. Choose the pair - that best expresses a relationship similar to - that in the original pair. + Directions: In the following question, a related pair of words or phrases \ + is followed by five pairs of words or phrases. Choose the pair that best \ + expresses a relationship similar to that in the original pair. 
\
+
+    BRAGGART :: MODESTY
+    A) FLEDGLING : EXPERIENCE
+    B) EMBEZZLER : GREED
+    C) WALLFLOWER : TIMIDITY
+    D) INVALID : MALADY
+    E) CANDIDATE : AMBITION
 
     """
     alignment_q = "What should humankind do to ensure that artificial general intelligence is aligned?"
     meaning_q = "What is the meaning of life?"
 
diff --git a/outlines/__init__.py b/outlines/__init__.py
index 2f2003e2..970abe25 100644
--- a/outlines/__init__.py
+++ b/outlines/__init__.py
@@ -31,7 +31,7 @@
 from outlines.function import fn
 from outlines.image import as_image
 from outlines.program import chain, program
-from outlines.text import as_string, render
+from outlines.text import as_string, completion, render
 
 __all__ = [
     "chain",
diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py
index 4337b678..6396d4b9 100644
--- a/outlines/text/__init__.py
+++ b/outlines/text/__init__.py
@@ -1,6 +1,6 @@
 from .basic import *
+from .completion import completion
 from .render import render
-from .models import model
 from .var import as_string, string
 
-__all__ = ["as_string", "model", "string", "render"]
+__all__ = ["as_string", "completion", "string", "render"]
diff --git a/outlines/text/completion.py b/outlines/text/completion.py
new file mode 100644
index 00000000..a28187db
--- /dev/null
+++ b/outlines/text/completion.py
@@ -0,0 +1,104 @@
+import inspect
+
+from outlines.text.render import render
+
+
+def completion(name: str, stops_at=None):
+    """Decorator that simplifies calls to language models.
+
+    Prompts that are passed to language models are often rendered templates,
+    and the workflow typically looks like:
+
+    >>> import outlines
+    >>> from outlines.text.models.openai import OpenAI
+    >>>
+    >>> llm = OpenAI("davinci")
+    >>> tpl = "I have a ${question}"
+    >>> prompt = outlines.render(tpl, question="How are you?")
+    >>> answer = llm(prompt)
+
+    While explicit, these four lines have the following shortcomings:
+
+    1. The prompt is hidden;
+    2. The language model instantiation is far from the prompt; prompt templates
+       are however attached to a specific language model call.
+    3. The intent behind the language model call is hidden.
+
+    To encapsulate the logic behind language model calls, we thus define the
+    template prompt inside a function and decorate the function with a model
+    specification. When that function is called, the template is rendered using
+    the arguments passed to the function, and the rendered prompt is passed to
+    a language model instantiated with the arguments passed to the decorator.
+
+    The previous example is equivalent to the following:
+
+    >>> import outlines
+    >>>
+    >>> @outlines.text.completion("openai/davinci")
+    ... def answer(question):
+    ...     "I have a ${question}"
+    ...
+    >>> answer, _ = answer("How are you?")
+
+    Decorated functions return two objects: the first represents the output of
+    the language model call, the second represents the concatenation of the
+    rendered prompt with the output of the language model call. The latter can
+    be used in contexts where one expands an initial prompt with recursive calls
+    to language models.
+
+    """
+    provider_name = name.split("/")[0]
+    model_name = name[len(provider_name) + 1 :]
+
+    if provider_name == "openai":
+        from outlines.text.models.openai import OpenAI
+
+        llm = OpenAI(model_name, stops_at)  # type:ignore
+    elif provider_name == "hf":
+        from outlines.text.models.hugging_face import HFCausalLM
+
+        llm = HFCausalLM(model_name)  # type:ignore
+    else:
+        raise NameError(f"The model provider {provider_name} is not available.")
+
+    def decorator(fn):
+        # Get the names of the parameters to the function, which must correspond
+        # to the variables defined in the template. 
+ var_names = [] + kwargs_data = {} + sig = inspect.signature(fn) + for parameter in sig.parameters.values(): + if parameter.default == inspect._empty: + var_names.append(parameter.name) + else: + kwargs_data[parameter.name] = parameter.default + + # The docstring contains the template that will be rendered to be used + # as a prompt to the language model. + template = inspect.cleandoc(fn.__doc__) + + def wrapper(*args, **kwargs): + """Call the LLM with the rendered template. + + Building prompts with recursive calls to language models is common + in prompt engineering, we thus return both the raw answer from the + language model as well as the rendered prompt including the answer. + + Returns + ------- + A tuple that contains the result of the language model call, and the + rendered prompt concatenated with the result of the language model + call. + + """ + args_data = {name: arg for name, arg in zip(var_names, args)} + kwargs_data.update(kwargs) + data = {**args_data, **kwargs_data} + + prompt = render(template, **data) + result = llm(prompt) + return result, prompt + result + + return wrapper + + return decorator diff --git a/outlines/text/models/__init__.py b/outlines/text/models/__init__.py index b75a091c..2a7dc397 100644 --- a/outlines/text/models/__init__.py +++ b/outlines/text/models/__init__.py @@ -1 +1 @@ -from .language_model import LanguageModel, model +from .language_model import LanguageModel diff --git a/outlines/text/models/language_model.py b/outlines/text/models/language_model.py index c58a3c40..a48ed075 100644 --- a/outlines/text/models/language_model.py +++ b/outlines/text/models/language_model.py @@ -1,7 +1,4 @@ -import inspect - from outlines.graph import Apply, Op -from outlines.text.render import render from outlines.text.var import StringVariable, as_string @@ -53,104 +50,3 @@ def make_node(self, prompt): def perform(self, prompt): return NotImplementedError - - -def model(name: str, stops_at=None): - """Decorator that allows to simplify calls to language models. - - Prompts that are passed to language models are often rendered templates, - and the workflow typically looks like: - - >>> import outlines - >>> from outlines.text.models.openai import OpenAI - >>> - >>> llm = OpenAI("davinci") - >>> tpl = "I have a ${question}" - >>> prompt = outlines.render(tpl, question="How are you?") - >>> answer = llm(prompt) - - While explicit, these 4 lines have the following defaults: - - 1. The prompt is hidden; - 2. The language model instantiation is far from the prompt; prompt templates - are however attached to a specific language model call. - 3. The intent behind the language model call is hidden. - - To encapsulate the logic behind language model calls, we thus define the - template prompt inside a function and decorate the function with a model - specification. When that function is called, the template is rendered using - the arguments passed to the function, and the rendered prompt is passed to - a language model instantiated with the arguments passed to the decorator. - - The previous example is equivalent to the following: - - >>> import outlines - >>> - >>> @outlines.text.model("openai/davinci") - ... def answer(question): - ... "I have a ${question}" - ... - >>> answer, _ = answer("How are you?") - - Decorated functions return two objects: the first represents the output of - the language model call, the second represents the concatenation of the - rendered prompt with the output of the language model call. 
The latter can - be used in context where one expands an initial prompt with recursive calls - to language models. - - """ - provider_name = name.split("/")[0] - model_name = name[len(provider_name) + 1 :] - - if provider_name == "openai": - from outlines.text.models.openai import OpenAI - - llm = OpenAI(model_name, stops_at) # type:ignore - elif provider_name == "hf": - from outlines.text.models.hugging_face import HFCausalLM - - llm = HFCausalLM(model_name) # type:ignore - else: - raise NameError(f"The model provider {provider_name} is not available.") - - def decorator(fn): - # Get the names of the parameters to the function, which must correspond - # to the variables defined in the template. - var_names = [] - kwargs_data = {} - sig = inspect.signature(fn) - for parameter in sig.parameters.values(): - if parameter.default == inspect._empty: - var_names.append(parameter.name) - else: - kwargs_data[parameter.name] = parameter.default - - # The docstring contains the template that will be rendered to be used - # as a prompt to the language model. - template = inspect.cleandoc(fn.__doc__) - - def wrapper(*args, **kwargs): - """Call the LLM with the rendered template. - - Building prompts with recursive calls to language models is common - in prompt engineering, we thus return both the raw answer from the - language model as well as the rendered prompt including the answer. - - Returns - ------- - A tuple that contains the result of the language model call, and the - rendered prompt concatenated with the result of the language model - call. - - """ - args_data = {name: arg for name, arg in zip(var_names, args)} - kwargs_data.update(kwargs) - data = {**args_data, **kwargs_data} - - prompt = render(template, **data) - result = llm(prompt) - return result, prompt + result - - return wrapper - - return decorator diff --git a/tests/text/test_model.py b/tests/text/test_model.py index 90a35873..5434fba4 100644 --- a/tests/text/test_model.py +++ b/tests/text/test_model.py @@ -1,7 +1,7 @@ import pytest -from outlines.text import string -from outlines.text.models.language_model import LanguageModel, model +from outlines.text import completion, string +from outlines.text.models.language_model import LanguageModel def test_initialize_LanguageModel(): @@ -17,14 +17,14 @@ def test_initialize_LanguageModel(): def test_model_wrong_provide(): with pytest.raises(NameError, match="not available"): - @model("aa/model_name") + @completion("aa/model_name") def test_function(): """""" @pytest.mark.skip def test_model(): - @model("openai/text-davinci-001", stops_at=["."]) + @completion("openai/text-davinci-001", stops_at=["."]) def test_function(question, type="bad"): """You're a witty and sarcastic AI. 
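For reference, here is a minimal, self-contained sketch of the mechanics the
`completion` decorator implements at this point in the series. `completion_sketch`
and `fake_llm` are hypothetical stand-ins: the real decorator routes `openai/...`
and `hf/...` model names to `LanguageModel` Ops and returns symbolic variables,
whereas this sketch binds arguments with `inspect.signature` and returns plain
strings so it runs without an API key (it needs only `mako`, which the library
already depends on):

    import inspect

    from mako.template import Template


    def fake_llm(prompt: str) -> str:
        # Hypothetical stand-in for the OpenAI/HFCausalLM Ops.
        return " 42"


    def completion_sketch(fn):
        # The docstring is the prompt template and the function signature
        # supplies the template variables, as in `outlines.text.completion`.
        sig = inspect.signature(fn)
        template = inspect.cleandoc(fn.__doc__)

        def wrapper(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            bound.apply_defaults()
            prompt = Template(template).render(**bound.arguments)
            result = fake_llm(prompt)
            # Same two-object return convention as the real decorator.
            return result, prompt + result

        return wrapper


    @completion_sketch
    def answer(question):
        """Q: ${question}
        A:"""


    result, completed = answer("What is 6*7?")
    print(completed)  # Q: What is 6*7?
                      # A: 42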
From e00a76995c972529a881e53d716207d13368dd5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 10 Apr 2023 11:41:23 +0200 Subject: [PATCH 040/734] Add convenience function to render prompts --- examples/meta_prompting.py | 54 +++++++------- outlines/__init__.py | 3 +- outlines/text/__init__.py | 4 +- outlines/text/completion.py | 25 +------ outlines/text/{render.py => prompt.py} | 58 ++++++++++++++- .../text/{test_compose.py => test_prompt.py} | 74 +++++++++++++++++++ 6 files changed, 164 insertions(+), 54 deletions(-) rename outlines/text/{render.py => prompt.py} (59%) rename tests/text/{test_compose.py => test_prompt.py} (50%) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index fd0056f3..206308d7 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -11,7 +11,6 @@ """ import argparse -import outlines import outlines.text as text @@ -22,9 +21,9 @@ def solve(question): Let's solve this problem by splitting it into steps. """ - answer, prompt = solve(question) + _, completed = solve(question) - return prompt, answer + return completed def fill_in_the_blanks(question, model_name: str): @@ -39,10 +38,10 @@ def determine_goal(question): def solve(memory): """${memory}. Let's begin.""" - _, memory = determine_goal(question) - answer, full_interaction = solve(memory) + _, completed = determine_goal(question) + _, completed = solve(completed) - return full_interaction, answer + return completed def ask_an_expert(question, model_name: str): @@ -73,10 +72,10 @@ def get_answer(question, expert, memory): ${question} """ - expert, memory = find_expert(question) - answer, full_interaction = get_answer(question, expert, memory) + expert, completed = find_expert(question) + _, completed = get_answer(question, expert, completed) - return full_interaction, answer + return completed def ask_an_expert_simple(question, model_name: str): @@ -95,18 +94,16 @@ def get_answer(expert, memory): For instance,${expert} would answer """ - expert, memory = find_expert(question) - answer, full_interaction = get_answer(expert, memory) + expert, completed = find_expert(question) + answer, completed = get_answer(expert, completed) - return full_interaction, answer + return completed -def run_example(model_fn, question, model): - print("\n-----------------------------------------\n") - question_s = outlines.text.string() - fn = outlines.chain([question_s], model_fn(question_s, model)) - prompt, answer = fn(question) - print(f"{prompt}") +def run_example(model_fn, question, model_name): + completed = model_fn(question, model_name) + print(f"\n-----------------------") + print(f"{completed}") if __name__ == "__main__": @@ -121,16 +118,17 @@ def run_example(model_fn, question, model): math_q = "f(x) = x*x. What is f(f(3))?" sat_q = """ - Directions: In the following question, a related pair of words or phrases \ - is followed by five pairs of words or phrases. Choose the pair that best \ - expresses a relationship similar to that in the original pair. \ - - BRAGGART :: MODESTY - A) FLEDGLING : EXPERIENCE - B) EMBEZZLER : GREED - C) WALLFLOWER : TIMIDITY - D) INVALID : MALADY - E) CANDIDATE : AMBITION + +Directions: In the following question, a related pair of words or phrases +is followed by five pairs of words or phrases. Choose the pair that best +expresses a relationship similar to that in the original pair. 
+ +BRAGGART :: MODESTY +A) FLEDGLING : EXPERIENCE +B) EMBEZZLER : GREED +C) WALLFLOWER : TIMIDITY +D) INVALID : MALADY +E) CANDIDATE : AMBITION """ alignment_q = "What should humankind do to ensure that artificial general intelligence is aligned?" diff --git a/outlines/__init__.py b/outlines/__init__.py index 970abe25..fbf4c838 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -31,7 +31,7 @@ from outlines.function import fn from outlines.image import as_image from outlines.program import chain, program -from outlines.text import as_string, completion, render +from outlines.text import as_string, completion, prompt, render __all__ = [ "chain", @@ -39,5 +39,6 @@ "as_string", "fn", "program", + "prompt", "render", ] diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 6396d4b9..8651d0a8 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,6 +1,6 @@ from .basic import * from .completion import completion -from .render import render +from .prompt import prompt, render from .var import as_string, string -__all__ = ["as_string", "completion", "string", "render"] +__all__ = ["as_string", "completion", "prompt", "string", "render"] diff --git a/outlines/text/completion.py b/outlines/text/completion.py index a28187db..df41ab8a 100644 --- a/outlines/text/completion.py +++ b/outlines/text/completion.py @@ -1,6 +1,4 @@ -import inspect - -from outlines.text.render import render +from outlines.text.prompt import prompt def completion(name: str, stops_at=None): @@ -62,20 +60,7 @@ def completion(name: str, stops_at=None): raise NameError(f"The model provider {provider_name} is not available.") def decorator(fn): - # Get the names of the parameters to the function, which must correspond - # to the variables defined in the template. - var_names = [] - kwargs_data = {} - sig = inspect.signature(fn) - for parameter in sig.parameters.values(): - if parameter.default == inspect._empty: - var_names.append(parameter.name) - else: - kwargs_data[parameter.name] = parameter.default - - # The docstring contains the template that will be rendered to be used - # as a prompt to the language model. - template = inspect.cleandoc(fn.__doc__) + prompt_fn = prompt(fn) def wrapper(*args, **kwargs): """Call the LLM with the rendered template. @@ -91,11 +76,7 @@ def wrapper(*args, **kwargs): call. """ - args_data = {name: arg for name, arg in zip(var_names, args)} - kwargs_data.update(kwargs) - data = {**args_data, **kwargs_data} - - prompt = render(template, **data) + prompt = prompt_fn(*args, **kwargs) result = llm(prompt) return result, prompt + result diff --git a/outlines/text/render.py b/outlines/text/prompt.py similarity index 59% rename from outlines/text/render.py rename to outlines/text/prompt.py index 32e9d825..1f6fa0a6 100644 --- a/outlines/text/render.py +++ b/outlines/text/prompt.py @@ -1,6 +1,6 @@ import collections import inspect -from typing import Dict, Union +from typing import Callable, Dict, Union from mako.runtime import Context from mako.template import Template @@ -94,3 +94,59 @@ def render( mako_template.render_context(ctx) return buf.get_value() + + +def prompt(fn: Callable): + """Decorator around a function that contains a prompt template. + + This allows to define prompts in the docstring of a function and ease their + manipulation by providing some degree of encapsulation. + + >>> import outlines + >>> + >>> @outlines.prompt + >>> def answer_tpl(question): + ... "I have a ${question}" + ... 
+    >>> prompt = answer_tpl("How are you?")
+
+    This is syntactic sugar and uses the `render` function internally.
+    Therefore, the wrapped functions return `str` when called with `str`
+    arguments only, and a `StringVariable` when at least one argument is a
+    `StringVariable`.
+
+    """
+
+    # Get the names of the parameters to the function, which must correspond
+    # to the variables defined in the template.
+    var_names = []
+    kwargs_data = {}
+    sig = inspect.signature(fn)
+    for parameter in sig.parameters.values():
+        if parameter.default == inspect._empty:
+            var_names.append(parameter.name)
+        else:
+            kwargs_data[parameter.name] = parameter.default
+
+    # The docstring contains the template that will be rendered to be used
+    # as a prompt to the language model.
+    docstring = fn.__doc__
+    if docstring is None:
+        raise TypeError("Could not find a template in the function's docstring.")
+    else:
+        template = inspect.cleandoc(docstring)
+
+    def wrapper(*args, **kwargs):
+        """Render and return the template.
+
+        Returns
+        -------
+        A Python `str` when all arguments are Python `str`, a `StringVariable`
+        otherwise.
+
+        """
+        bound_arguments = sig.bind(*args, **kwargs)
+        bound_arguments.apply_defaults()
+        return render(template, **bound_arguments.arguments)
+
+    return wrapper
diff --git a/tests/text/test_compose.py b/tests/text/test_prompt.py
similarity index 50%
rename from tests/text/test_compose.py
rename to tests/text/test_prompt.py
index f7f161b3..659b0f81 100644
--- a/tests/text/test_compose.py
+++ b/tests/text/test_prompt.py
@@ -1,5 +1,6 @@
 import pytest
 
+import outlines.text as text
 from outlines.text import render, string
 from outlines.text.basic import Add
 from outlines.text.var import StringConstant, StringVariable
@@ -59,3 +60,76 @@ def test_template_few_shots():
         examples=examples,
     )
     assert isinstance(prompt, StringVariable)
+
+
+def test_prompt_basic():
+    @text.prompt
+    def test_tpl(variable):
+        """${variable} test"""
+
+    with pytest.raises(TypeError):
+        test_tpl(v="test")
+
+    p = test_tpl("test")
+    assert p == "test test"
+
+    p = test_tpl(variable="test")
+    assert p == "test test"
+
+    @text.prompt
+    def test_single_quote_tpl(variable):
+        "${variable} test"
+
+    p = test_single_quote_tpl("test")
+    assert p == "test test"
+
+
+def test_prompt_kwargs():
+    @text.prompt
+    def test_kwarg_tpl(var, other_var="other"):
+        """${var} and ${other_var}"""
+
+    p = test_kwarg_tpl("test")
+    assert p == "test and other"
+
+    p = test_kwarg_tpl("test", other_var="kwarg")
+    assert p == "test and kwarg"
+
+    p = test_kwarg_tpl("test", "test")
+    assert p == "test and test"
+
+
+def test_not_prompt():
+    with pytest.raises(TypeError, match="template"):
+
+        @text.prompt
+        def test_empty(variable):
+            pass
+
+    with pytest.raises(TypeError, match="template"):
+
+        @text.prompt
+        def test_only_code(variable):
+            return variable
+
+
+def test_prompt_few_shots():
+    @text.prompt
+    def few_shots_tpl(w, examples):
+        """This is a test
+
+        ${w}
+
+        % for s, t in examples:
+        Search: ${s}
+        Trap: ${t}
+        % endfor
+        """
+
+    prompt = few_shots_tpl("Test", [["a", "b"], ["c", "d"]])
+    assert (
+        prompt == "This is a test\n\nTest\n\nSearch: a\nTrap: b\nSearch: c\nTrap: d\n"
+    )
+
+    prompt = few_shots_tpl(string(), [["a", "b"], ["c", "d"]])
+    assert isinstance(prompt, StringVariable)
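[Editor's note: the wrapper above delegates argument handling to `inspect.Signature.bind`, which is why decorated templates accept positional and keyword arguments interchangeably, as the tests exercise. A minimal standalone sketch of that mechanism, with hypothetical names, not part of any patch:

>>> import inspect
>>>
>>> def tpl(var, other_var="other"):
...     """${var} and ${other_var}"""
...
>>> bound = inspect.signature(tpl).bind("test", other_var="kwarg")
>>> bound.apply_defaults()
>>> dict(bound.arguments)
{'var': 'test', 'other_var': 'kwarg'}

Passing an unknown keyword (e.g. `v="test"`) makes `bind` raise a `TypeError`, which is the behavior `test_prompt_basic` asserts.]

From ca1abe15fb5f5eaf278779f3c86b75a215f7e969 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 10 Apr 2023 15:31:23 +0200
Subject: [PATCH 041/734] Remove whitespaces introduced by linebreaks

---
 outlines/text/prompt.py | 4 ++++
 1 file changed, 4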
insertions(+) diff --git a/outlines/text/prompt.py b/outlines/text/prompt.py index 1f6fa0a6..351ff670 100644 --- a/outlines/text/prompt.py +++ b/outlines/text/prompt.py @@ -1,5 +1,6 @@ import collections import inspect +import re from typing import Callable, Dict, Union from mako.runtime import Context @@ -134,6 +135,9 @@ def prompt(fn: Callable): if docstring is None: raise TypeError("Could not find a template in the function's docstring.") else: + docstring = re.sub( + " +", " ", docstring + ) # Remove extra whitespace due to linebreaks template = inspect.cleandoc(docstring) def wrapper(*args, **kwargs): From e11dc826e7c5058a91fa9999c8fdb19a030e78eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 11 Apr 2023 10:10:13 +0200 Subject: [PATCH 042/734] Add `temperature` and `max_tokens` kwargs for completion --- examples/meta_prompting.py | 2 +- outlines/text/completion.py | 14 +++++++++++--- outlines/text/models/hugging_face.py | 19 ++++++++++++++----- outlines/text/models/openai.py | 13 +++++++++++-- 4 files changed, 37 insertions(+), 11 deletions(-) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 206308d7..a2c2350e 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -102,7 +102,7 @@ def get_answer(expert, memory): def run_example(model_fn, question, model_name): completed = model_fn(question, model_name) - print(f"\n-----------------------") + print("\n-----------------------") print(f"{completed}") diff --git a/outlines/text/completion.py b/outlines/text/completion.py index df41ab8a..94814dfe 100644 --- a/outlines/text/completion.py +++ b/outlines/text/completion.py @@ -1,7 +1,7 @@ from outlines.text.prompt import prompt -def completion(name: str, stops_at=None): +def completion(name: str, *, stops_at=None, max_tokens=None, temperature=None): """Decorator that allows to simplify calls to language models. Prompts that are passed to language models are often rendered templates, @@ -44,6 +44,14 @@ def completion(name: str, stops_at=None): be used in context where one expands an initial prompt with recursive calls to language models. + Parameters + ---------- + stops_at + A list of tokens which, when found, stop the generation. + max_tokens + The maximum number of tokens to generate. + temperature + Value used to module the next token probabilities. """ provider_name = name.split("/")[0] model_name = name[len(provider_name) + 1 :] @@ -51,11 +59,11 @@ def completion(name: str, stops_at=None): if provider_name == "openai": from outlines.text.models.openai import OpenAI - llm = OpenAI(model_name, stops_at) # type:ignore + llm = OpenAI(model_name, stops_at, max_tokens, temperature) # type:ignore elif provider_name == "hf": from outlines.text.models.hugging_face import HFCausalLM - llm = HFCausalLM(model_name) # type:ignore + llm = HFCausalLM(model_name, max_tokens, temperature) # type:ignore else: raise NameError(f"The model provider {provider_name} is not available.") diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py index 9425ae57..1e6656bb 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/text/models/hugging_face.py @@ -34,7 +34,7 @@ class HFCausalLM(LanguageModel): """ - def __init__(self, model: str): + def __init__(self, model: str, max_tokens=None, temperature=None): """Instantiate the model `Op`. 
         Parameters
@@ -43,6 +43,14 @@ def __init__(self, model: str, max_tokens=None, temperature=None):
             The model id of a model hosted inside a model repo on huggingface.co
 
         """
+        if max_tokens is None:
+            max_tokens = 216
+        self.max_tokens = max_tokens
+
+        if temperature is None:
+            temperature = 1.0
+        self.temperature = temperature
+
         super().__init__(name=f"HuggingFace {model}")
         self.model_name = model
@@ -60,8 +68,8 @@ def perform(self, prompt):
         tokenizers.
 
         """
-        tokenizer = AutoTokenizer.from_pretrained(self.model)
-        model = AutoModelForCausalLM.from_pretrained(self.model)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+        model = AutoModelForCausalLM.from_pretrained(self.model_name)
 
         prompt_tokens = tokenizer(prompt, return_tensors="pt")
 
@@ -72,8 +80,9 @@ def perform(self, prompt):
         returned_tokens = model.generate(
             **prompt_tokens,
             do_sample=True,
-            max_new_tokens=20,
-            pad_token_id=self.tokenizer.eos_token_id,
+            temperature=self.temperature,
+            max_new_tokens=self.max_tokens,
+            pad_token_id=tokenizer.eos_token_id,
         )
         new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :]
         new_tokens = new_tokens.squeeze()
diff --git a/outlines/text/models/openai.py b/outlines/text/models/openai.py
index 4cb39c4e..962e2544 100644
--- a/outlines/text/models/openai.py
+++ b/outlines/text/models/openai.py
@@ -17,7 +17,7 @@ class OpenAI(LanguageModel):
 
     """
 
-    def __init__(self, model: str, stops_at=None):
+    def __init__(self, model: str, stops_at=None, max_tokens=None, temperature=None):
         """Initialize the OpenAI model."""
 
         try:
@@ -36,6 +36,14 @@ def __init__(self, model: str, stops_at=None, max_tokens=None, temperature=None)
         raise Exception("OpenAI's API does not accept more than 4 stop sequences.")
         self.stops_at = stops_at
 
+        if max_tokens is None:
+            max_tokens = 216
+        self.max_tokens = max_tokens
+
+        if temperature is None:
+            temperature = 1.0
+        self.temperature = temperature
+
         super().__init__(name=f"OpenAI {model}")
         self.model = model
@@ -44,8 +52,9 @@ def perform(self, prompt):
         try:
             resp = openai.Completion.create(
                 model=self.model,
                 prompt=prompt,
-                max_tokens=128,
+                max_tokens=self.max_tokens,
                 stop=self.stops_at,
+                temperature=self.temperature,
             )
         except error.APIConnectionError as e:
             raise OSError(f"Open API failed to connect: {e}")

From 34492a270e493e1d08ba0ab24ae1bedbd59d4da0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3%A9mi=20Louf?=
Date: Tue, 11 Apr 2023 11:24:33 +0200
Subject: [PATCH 043/734] Fix HF `perform` output

---
 outlines/text/models/hugging_face.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/outlines/text/models/hugging_face.py b/outlines/text/models/hugging_face.py
index 1e6656bb..90d20d47 100644
--- a/outlines/text/models/hugging_face.py
+++ b/outlines/text/models/hugging_face.py
@@ -87,4 +87,4 @@ def perform(self, prompt):
         new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :]
         new_tokens = new_tokens.squeeze()
 
-        return tokenizer.decode(new_tokens, skip_special_tokens=True)
+        return (tokenizer.decode(new_tokens, skip_special_tokens=True),)
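[Editor's note: the tuple wrapping above is dictated by the `Op.perform` contract: the executor in outlines/program.py reads `results[i]` once per output variable, so a bare string would be indexed character by character. A quick illustration of why the fix matters (standalone, not part of the patch):

>>> results = "generated text"     # what `perform` returned before this patch
>>> results[0]
'g'
>>> results = ("generated text",)  # what it returns after
>>> results[0]
'generated text'
]

From 9043cc639c881b465f82af268edc8c99cb37c203 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3%A9mi=20Louf?=
Date: Tue, 11 Apr 2023 11:24:52 +0200
Subject: [PATCH 044/734] Add decorator to simplify text2img generation

---
 outlines/image/__init__.py   |  3 ++-
 outlines/image/generation.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)
 create mode 100644 outlines/image/generation.py

diff --git a/outlines/image/__init__.py b/outlines/image/__init__.py
index 216d0e7e..f7591e18 100644
--- a/outlines/image/__init__.py
+++ b/outlines/image/__init__.py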
@@ -1,3 +1,4 @@
+from .generation import generation
 from .var import as_image, image
 
-__all__ = ["as_image", "image"]
+__all__ = ["as_image", "image", "generation"]
diff --git a/outlines/image/generation.py b/outlines/image/generation.py
new file mode 100644
index 00000000..e6a5c2a3
--- /dev/null
+++ b/outlines/image/generation.py
@@ -0,0 +1,33 @@
+from outlines.text.prompt import prompt
+
+
+def generation(name: str):
+    """Decorator that simplifies calls to image generation models."""
+    provider_name = name.split("/")[0]
+    model_name = name[len(provider_name) + 1 :]
+
+    if provider_name == "hf":
+        from outlines.image.models.hugging_face import HFDiffuser
+
+        generative_model = HFDiffuser(model_name)  # type:ignore
+    else:
+        raise NameError(f"The model provider {provider_name} is not available.")
+
+    def decorator(fn):
+        prompt_fn = prompt(fn)
+
+        def wrapper(*args, **kwargs):
+            """Call the Diffuser with the rendered template.
+
+            Returns
+            -------
+            A `PIL.Image` instance that represents the generated image.
+
+            """
+            prompt = prompt_fn(*args, **kwargs)
+            result = generative_model(prompt)
+            return result
+
+        return wrapper
+
+    return decorator
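[Editor's note: an illustrative use of the new decorator; the model id is hypothetical, and running this requires the `diffusers` package and a GPU, since `HFDiffuser` moves the pipeline to CUDA. At this point in the series the call still goes through the symbolic `ImageModel` graph, so the returned value is an `ImageVariable` to be evaluated with `.eval()`; the next patch removes that layer so the decorated function returns the image directly:

>>> from outlines.image import generation
>>>
>>> @generation("hf/runwayml/stable-diffusion-v1-5")
... def logo(name):
...     """A minimalist logo for ${name}, vector art"""
...
>>> img = logo("Outlines")
]

From 294e3e1459c08334fe2531966d45731ab89442f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3%A9mi=20Louf?=
Date: Wed, 12 Apr 2023 18:38:36 +0200
Subject: [PATCH 045/734] Remove the symbolic components of outlines

---
 outlines/__init__.py                       |  44 +--
 outlines/function.py                       |  73 -----
 outlines/graph.py                          | 306 ------------------
 outlines/{image/generation.py => image.py} |   2 +-
 outlines/image/__init__.py                 |   4 -
 outlines/image/models/__init__.py          |   1 -
 outlines/image/models/model.py             |  71 ----
 outlines/image/var.py                      |  64 ----
 .../hf_diffusers.py}                        |   9 +-
 .../hf_transformers.py}                     |  16 +-
 outlines/{text => }/models/openai.py       |  21 +-
 outlines/program.py                        | 267 ---------------
 outlines/text.py                           | 248 ++++++++++++++
 outlines/text/__init__.py                  |   6 -
 outlines/text/basic.py                     |  20 --
 outlines/text/completion.py                |  93 ------
 outlines/text/models/__init__.py           |   1 -
 outlines/text/models/language_model.py     |  52 ---
 outlines/text/prompt.py                    | 156 ---------
 outlines/text/var.py                       |  71 ----
 tests/image/__init__.py                    |   0
 tests/image/test_var.py                    |  28 --
 tests/test_compile.py                      |  43 ---
 tests/test_function.py                     |  47 ---
 tests/test_graph.py                        | 101 ------
 tests/test_model.py                        |  24 ++
 tests/test_text.py                         | 126 ++++++++
 tests/text/__init__.py                     |   0
 tests/text/test_basic.py                   |  41 ---
 tests/text/test_model.py                   |  35 --
 tests/text/test_prompt.py                  | 135 --------
 tests/text/test_var.py                     |  35 --
 32 files changed, 429 insertions(+), 1711 deletions(-)
 delete mode 100644 outlines/function.py
 delete mode 100644 outlines/graph.py
 rename outlines/{image/generation.py => image.py} (95%)
 delete mode 100644 outlines/image/__init__.py
 delete mode 100644 outlines/image/models/__init__.py
 delete mode 100644 outlines/image/models/model.py
 delete mode 100644 outlines/image/var.py
 rename outlines/{image/models/hugging_face.py => models/hf_diffusers.py} (73%)
 rename outlines/{text/models/hugging_face.py => models/hf_transformers.py} (88%)
 rename outlines/{text => }/models/openai.py (87%)
 delete mode 100644 outlines/program.py
 create mode 100644 outlines/text.py
 delete mode 100644 outlines/text/__init__.py
 delete mode 100644 outlines/text/basic.py
 delete mode 100644 outlines/text/completion.py
 delete mode 100644 outlines/text/models/__init__.py
 delete mode 100644 outlines/text/models/language_model.py
 delete mode 100644 outlines/text/prompt.py
 delete mode 100644 outlines/text/var.py
 delete mode 100644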
tests/image/__init__.py delete mode 100644 tests/image/test_var.py delete mode 100644 tests/test_compile.py delete mode 100644 tests/test_function.py delete mode 100644 tests/test_graph.py create mode 100644 tests/test_model.py create mode 100644 tests/test_text.py delete mode 100644 tests/text/__init__.py delete mode 100644 tests/text/test_basic.py delete mode 100644 tests/text/test_model.py delete mode 100644 tests/text/test_prompt.py delete mode 100644 tests/text/test_var.py diff --git a/outlines/__init__.py b/outlines/__init__.py index fbf4c838..e8375e89 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,44 +1,10 @@ -"""Outlines is a Probabilistic Generative Model Programming language. - -Outlines allows to build and evaluate graphs that represent interactions between -user-defined strings (usually called "prompts"), images, and generative models. - -Most generative models are probabilistic, their outputs are random variables -whose distribution is determined by the generative model. Chained generative -models are thus probabilistic programs [1]_. By default, compiling an Outlines -graph returns a callable object which will yield different values each time it -is called. - -Deterministic decoding methods are implemented as graph transformations: they -take a probabilistic program as an input and return a graph that represents the -decoding process. Compiling these graphs will produce a callable that returns -the same value each time it is called. - -Outlines supports plugins as long as they admit text/images as an input and -return strings and/or images. They are represented by operators in the graph. - -The design of Outlines was heavily inspired by `Aesara `_, -a library for defining, optimizing and evaluating mathematical expressions -involving multi-dimensional arrays. A complete integration would be desirable -and is not excluded. - - -References ----------- -.. [1] Dohan, David, et al. "Language model cascades." arXiv preprint arXiv:2207.10342 (2022). - -""" -from outlines.function import fn -from outlines.image import as_image -from outlines.program import chain, program -from outlines.text import as_string, completion, prompt, render +"""Outlines is a Generative Model Programming Framework.""" +from outlines.image import generation +from outlines.text import completion, prompt, render __all__ = [ - "chain", - "as_image", - "as_string", - "fn", - "program", + "completion", + "generation", "prompt", "render", ] diff --git a/outlines/function.py b/outlines/function.py deleted file mode 100644 index 722a39fb..00000000 --- a/outlines/function.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Functionalities to wrap user-defined functions as Ops. - -The content of this module is heavily inspired by the design of -`Aesara `_. 
- -""" -import inspect -from typing import Callable, Sequence, Type - -from outlines.graph import Op, Variable -from outlines.text.var import StringVariable - - -class FromFunctionOp(Op): - """Build an outlines Op around a function.""" - - def __init__( - self, - fn: Callable, - input_types: Sequence[Type[Variable]], - output_types: Sequence[Type[Variable]], - ): - self._fn = fn - self.input_types = input_types - self.output_types = output_types - - def __str__(self): - return f"FromFunctionOp({self._fn.__name__})" - - def perform(self, *inputs): - outs = self._fn(*inputs) - if not isinstance(outs, (list, tuple)): - outs = (outs,) - - return outs - - -def fn(function: Callable): - """Decorator that converts a Python function into an Outlines `Op` - that will call the function as its implementation. - - The user must specify the types of the inputs and outputs as type - hints. - - """ - sig = inspect.signature(function) - - inputs = [] - for name, parameter in sig.parameters.items(): - if parameter.annotation == str: - inputs.append(StringVariable) - elif parameter.annotation == inspect._empty: - raise TypeError( - "You need to specify the function's input types as type hints." - ) - else: - raise TypeError( - "The `fn` decorator currently only supports string arguments." - ) - - outputs = [] - if sig.return_annotation == str: - outputs.append(StringVariable) - elif sig.return_annotation == inspect._empty: - raise TypeError( - "You need to specify the function's output types as type hints." - ) - else: - raise TypeError( - "The `fn` decorator currently only supports string return types" - ) - - return FromFunctionOp(function, input_types=inputs, output_types=outputs) diff --git a/outlines/graph.py b/outlines/graph.py deleted file mode 100644 index 9c23d587..00000000 --- a/outlines/graph.py +++ /dev/null @@ -1,306 +0,0 @@ -"""Graph objects and manipulation functions. - -Manipulating prompts and operations in Outlines implicitly defines a graph that -can be explored, rewritten and compiled. - -This module defines the basic types these graphs are build from: - -- `Variable` nodes represent constants or results of computation; -- `Op`s represent the operations performed on variables; -- `Apply` nodes represent the application of an `Op` onto one or several - variables. - -This graph structure is a simplified version of the graph `Aesara -`_ uses to represents mathematical -operations on arrays. It is possible that Aesara may be used as a backend for -Outlines in the near future. - -""" -from typing import ( - Any, - Dict, - Iterable, - List, - Optional, - Reversible, - Sequence, - Tuple, - Type, - Union, -) - - -class Node: - r"""A node in an Outlines graph. - - Graphs contain two kinds of nodes: `Variable`\s and `Apply`\s. Each `Node` - keeps track of its parents and edges are thus not represented. - - """ - name: Optional[str] - - def get_parents(self) -> List: - """Return a list of this node's parents.""" - raise NotImplementedError() - - -class Variable(Node): - """A `Variable` is a node in an expression graph that represents a variable. - - There are a few kind of `Variable` to be aware of: - - - `StringVariable` is a subclass of `Variable` that represents a ``str`` object. - - `ImageVariable` is a subclass of `Variable` that represents image objects. 
- - """ - - def __init__( - self, - owner: Optional["Apply"] = None, - index: Optional[int] = None, - name: Optional[str] = None, - ): - if owner is not None and not isinstance(owner, Apply): - raise TypeError("owner must be an Apply instance") - self.owner = owner - - if index is not None and not isinstance(index, int): - raise TypeError("index must be an int") - self.index = index - - if name is not None and not isinstance(name, str): - raise TypeError("name must be a string") - self.name = name - - def __str__(self): - """Return a ``str`` representation of the `Variable`.""" - if self.name is not None: - return self.name - if self.owner is not None: - op = self.owner.op - if self.index == 0: - return f"{str(op)}.out" - else: - return f"{str(op)}.{str(self.index)}" - else: - return f"<{getattr(type(self), '__name__')}>" - - def eval(self, inputs_to_values: Optional[Dict] = None): - r"""Evaluate the `Variable`. - - This is a quick way to execute an Outlines graph, and be used for - instance for debugging. - - Parameters - ---------- - inputs_to_values : - A dictionary mapping Outlines `Variable`\s to values. - - Examples - -------- - - When every upstream variable in the graph already has a value we can - call :meth:`eval` with no argument: - - >>> import outlines - >>> prompt = "This is a test prompt" - >>> answer = outlines.text.model.OpenAI("davinci")(prompt) - >>> answer.eval() - - Otherwise, we need to pass :math:`eval` a dictionnary that maps symbolic - `Variable`\s to the value to substitute for them: - - >>> import outlines - >>> prompt = outlines.text.string() - >>> answer = outlines.text.model.OpenAI("davinci")(prompt) - >>> answer.eval({prompt: "This is a test prompt"}) - - """ - from outlines.program import chain - - if inputs_to_values is None: - inputs_to_values = {} - - inputs = tuple(sorted(inputs_to_values.keys(), key=id)) - args = [inputs_to_values[var] for var in inputs] - - fn = chain(inputs, self) - - return fn(*args) - - -class Apply(Node): - """An `Apply` node represents the application of an `Op` to variables. - - It is instantiated by calling the `Op.make_node` method with a list of - inputs. The `Apply` node is in charge of filtering the inputs and outputs. - - Attribute - --------- - op - The operation that produces `outputs` given `inputs`. - inputs - The arguments of the expression modeled by the `Apply` node. - outputs - The outputs of the expression modeled by the `Apply` node. - - """ - - def __init__( - self, op: "Op", inputs: Sequence["Variable"], outputs: Sequence["Variable"] - ): - if not isinstance(inputs, Sequence): - raise TypeError("The inputs of an Apply node must be a sequence type") - - if not isinstance(outputs, Sequence): - raise TypeError("The outputs of an Apply node must be a sequence type") - - self.op = op - self.inputs: List[Variable] = [] - - # Filter inputs - for input in inputs: - if isinstance(input, Variable): - self.inputs.append(input) - else: - raise TypeError( - f"The 'inputs' argument to an Apply node must contain Variable instances, got {input} instead." - ) - - self.outputs: List[Variable] = [] - # Filter outputs - for i, output in enumerate(outputs): - if isinstance(output, Variable): - if output.owner is None: - output.owner = self - output.index = i - elif output.owner is not self or output.index != i: - raise ValueError( - "All outputs passed to an Apply node must belong to it." 
- ) - self.outputs.append(output) - else: - raise TypeError( - f"The 'outputs' to argument to an Apply node must contain Variable instance, got {output} instead" - ) - - def get_parents(self) -> List[Variable]: - return list(self.inputs) - - -class Op: - """Represents and constructs operations in a graph. - - An `Op` instance has the following responsibilities: - - * Construct `Apply` nodes via the :meth:`Op.make_node` method - * Perform the computation of the modeled operation via the - :meth:`Op.perform` method. - - A user that wants to add new capabilities to the libraries: generative - model, API interactions, tools, etc. will need to subclass `Op` and - implement the :meth:`Op.perform` and :meth:`Op.make_node` methods. - - """ - - input_types: Optional[Sequence[Type[Variable]]] = None - output_types: Optional[Sequence[Type[Variable]]] = None - - def make_node(self, *inputs: Variable) -> Apply: - r"""Construct an `Apply` node that represents the application of this - operation to the given inputs. - - This must be implemented by subclasses as it specifies the input - and output types of the `Apply` node. - - Parameters - ---------- - inputs - The `Variable`\s that represent the inputs of this operation - - Returns - ------- - The constructed `Apply` node. - - """ - if self.input_types is None: - raise NotImplementedError( - "You need to either provide `input_types` and `output_types` or implement the `make_node` method." - ) - - if self.output_types is None: - raise NotImplementedError( - "You need to either provide `input_types` and `output_types` or implement the `make_node` method." - ) - - if len(inputs) != len(self.input_types): - raise ValueError( - f"You need to provide an input type for each input. Got {len(self.input_types)} type definitions and {len(inputs)} inputs." - ) - - # Check that the input types are valid - - return Apply(self, inputs, [o() for o in self.output_types]) - - def __call__(self, *inputs: Variable) -> Union[Variable, List[Variable]]: - """Calls :meth:`Op.make_node` to construct an `Apply` node.""" - - node = self.make_node(*inputs) - if len(node.outputs) == 1: - return node.outputs[0] - else: - return node.outputs - - def perform(self, inputs: Tuple[Any]) -> Tuple[Any]: - """Apply the functions to the inputs and return the output. - - Parameters - ---------- - inputs - Sequence of non-symbolic/numeric/text intputs. - - Returns - ------- - The non-symbolic/numerica/text outputs of the function that this - operation represents as a tuple. - - """ - raise NotImplementedError - - def __str__(self): - """Return a ``str`` representation of the `Op`.""" - return getattr(type(self), "__name__") - - -def io_toposort( - inputs: Iterable[Variable], outputs: Reversible[Variable] -) -> List[Apply]: - """Sort the graph topologically starting from the inputs to the outputs. - - This function is typically used when compiling the graph, where we need - to apply operators in the correct order to go from the user inputs to - the program outputs. - - Parameters - ---------- - inputs - Graph inputs. - outputs - Graph outputs. 
- - """ - computed = set(inputs) - todo = [o.owner for o in reversed(outputs) if o.owner] - order = [] - while todo: - node = todo.pop() - if node.outputs[0] in computed: - continue - if all(i in computed or i.owner is None for i in node.inputs): - computed.update(node.outputs) - order.append(node) - else: - todo.append(node) - todo.extend(i.owner for i in node.inputs if i.owner) - - return order diff --git a/outlines/image/generation.py b/outlines/image.py similarity index 95% rename from outlines/image/generation.py rename to outlines/image.py index e6a5c2a3..a8495713 100644 --- a/outlines/image/generation.py +++ b/outlines/image.py @@ -1,4 +1,4 @@ -from outlines.text.prompt import prompt +from outlines.text import prompt def generation(name: str): diff --git a/outlines/image/__init__.py b/outlines/image/__init__.py deleted file mode 100644 index f7591e18..00000000 --- a/outlines/image/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .generation import generation -from .var import as_image, image - -__all__ = ["as_image", "image", "generation"] diff --git a/outlines/image/models/__init__.py b/outlines/image/models/__init__.py deleted file mode 100644 index fbc196b2..00000000 --- a/outlines/image/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .model import ImageModel diff --git a/outlines/image/models/model.py b/outlines/image/models/model.py deleted file mode 100644 index 3ffc715f..00000000 --- a/outlines/image/models/model.py +++ /dev/null @@ -1,71 +0,0 @@ -from typing import Tuple - -from PIL.Image import Image as PILImage - -from outlines.graph import Apply, Op, Variable -from outlines.image.var import ImageVariable -from outlines.text.var import as_string - - -class ImageModel(Op): - """An `Op` that produces a sample image from generative model. - - The output of generative models in outlines is modeled as a random variable. - Therefore, calling an image generative model will return a random image by - default. - - Attributes - ---------- - name - The `Op`'s name in the graph. - - """ - - def __init__(self, name=None): - """Instantiate the `ImageModel` `Op`. - - Parameters - ---------- - name - The name of the `Op` in the graph. - - """ - super().__init__() - self.name = name - - def __call__(self, prompt, name=None): - """Create the `Apply` node that represents the `Op`'s application to inputs. - - Parameters - ---------- - prompt - The prompt used to condition the generative model's sampling procedure. - name - The name of the output variable in the graph. - - """ - res = super().__call__(prompt) - - if name is not None: - res.name = name - - return res - - def make_node(self, prompt: Variable) -> Apply: # type: ignore - prompt = as_string(prompt) - out = ImageVariable() - - return Apply(self, [prompt], [out]) - - def perform(self, prompt: str) -> Tuple[PILImage]: # type: ignore - """Perform the operations represented by this `Op` on the input prompt. - - This defaults to sampling a new image. Other decoding methods act by - patching this method. 
- - """ - return (self.sample(prompt),) - - def sample(self, prompt: str) -> PILImage: - """Sample a new image given the input prompt.""" - raise NotImplementedError diff --git a/outlines/image/var.py b/outlines/image/var.py deleted file mode 100644 index dce4d672..00000000 --- a/outlines/image/var.py +++ /dev/null @@ -1,64 +0,0 @@ -from functools import singledispatch - -from PIL.Image import Image as PILImage - -from outlines.graph import Variable - - -class ImageVariable(Variable): - """Subclass to add the image operators to `Variable`.""" - - def __init__(self, owner=None, index=None, name=None): - super().__init__(owner, index, name) - - -image = ImageVariable - - -class ImageConstant(ImageVariable): - """Constant `ImageVariable` that corresponds to user input.""" - - def __init__(self, value, name=None): - if not isinstance(value, PILImage): - raise TypeError( - "`ImageConstant` values must be instances of `pillow.Image`." - ) - - self.value = value - super().__init__(name=name) - - def __str__(self): - if self.name is not None: - name = self.name - else: - name = "ImageConstant" - return f"{name}{{'{self.value}'}}" - - -@singledispatch -def as_image(x, name=None): - """Convert `x` into an equivalent `ImageVariable`. - - This function can be used to turn `pillow.Image` instances into an - `ImageVariable`. - - Parameters - ---------- - x - The object that will we converted into a `ImageVariable`. - name - If a new `ImageVariable` instance is created it will be attributed this - name. - - """ - raise TypeError(f"{x} cannot be cast into a string") - - -@as_image.register(PILImage) -def as_image_Image(x, name=None): - return ImageConstant(x, name) - - -@as_image.register(ImageVariable) -def as_image_ImageConstant(x, name=None): - return x diff --git a/outlines/image/models/hugging_face.py b/outlines/models/hf_diffusers.py similarity index 73% rename from outlines/image/models/hugging_face.py rename to outlines/models/hf_diffusers.py index 7d749b11..5c349d65 100644 --- a/outlines/image/models/hugging_face.py +++ b/outlines/models/hf_diffusers.py @@ -1,5 +1,3 @@ -from outlines.image.models.model import ImageModel - try: from diffusers import StableDiffusionPipeline except ImportError: @@ -8,14 +6,13 @@ ) -class HFDiffuser(ImageModel): +class HFDiffuser: """A `StableDiffusion` distributed random image.""" - def __init__(self, model_name: str, name=None): + def __init__(self, model_name: str): self.model_name = model_name - super().__init__(name) - def sample(self, prompt): + def __call__(self, prompt: str) -> str: """Use HuggingFace's `StableDiffusion` pipeline to sample a new image.""" pipe = StableDiffusionPipeline.from_pretrained(self.model_name) pipe = pipe.to("cuda") diff --git a/outlines/text/models/hugging_face.py b/outlines/models/hf_transformers.py similarity index 88% rename from outlines/text/models/hugging_face.py rename to outlines/models/hf_transformers.py index 90d20d47..048ebe5c 100644 --- a/outlines/text/models/hugging_face.py +++ b/outlines/models/hf_transformers.py @@ -1,4 +1,4 @@ -from outlines.text.models.language_model import LanguageModel +from typing import Optional try: import torch @@ -9,7 +9,7 @@ ) -class HFCausalLM(LanguageModel): +class HFCausalLM: """Represent any of HuggingFace's causal language model implementations. You should have the `torch` and `transformers` packages installed. 
First @@ -34,7 +34,12 @@ class HFCausalLM(LanguageModel): """ - def __init__(self, model: str, max_tokens=None, temperature=None): + def __init__( + self, + model: str, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, + ): """Instantiate the model `Op`. Parameters @@ -51,10 +56,9 @@ def __init__(self, model: str, max_tokens=None, temperature=None): temperature = 1.0 self.temperature = temperature - super().__init__(name=f"HuggingFace {model}") self.model_name = model - def perform(self, prompt): + def __call__(self, prompt: str) -> str: """Sample new tokens give the tokenized prompt. Since HuggingFace's `generate` method returns the prompt along with the @@ -87,4 +91,4 @@ def perform(self, prompt): new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] new_tokens = new_tokens.squeeze() - return (tokenizer.decode(new_tokens, skip_special_tokens=True),) + return tokenizer.decode(new_tokens, skip_special_tokens=True) diff --git a/outlines/text/models/openai.py b/outlines/models/openai.py similarity index 87% rename from outlines/text/models/openai.py rename to outlines/models/openai.py index 962e2544..134f81bb 100644 --- a/outlines/text/models/openai.py +++ b/outlines/models/openai.py @@ -1,6 +1,5 @@ import os - -from outlines.text.models import LanguageModel +from typing import List, Optional try: import openai @@ -9,7 +8,7 @@ raise ImportError("You need to install `openai` to run OpenAI's language models.") -class OpenAI(LanguageModel): +class OpenAI: """Represents any of OpenAI's language models You should have the `openai` package installed, and store you OpenAI key in @@ -17,7 +16,13 @@ class OpenAI(LanguageModel): """ - def __init__(self, model: str, stops_at=None, max_tokens=None, temperature=None): + def __init__( + self, + model: str, + stops_at: Optional[List[str]] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, + ): """Initialize the OpenAI model.""" try: @@ -31,6 +36,7 @@ def __init__(self, model: str, stops_at=None, max_tokens=None, temperature=None) available_model_names = [model["id"] for model in available_models["data"]] if model not in available_model_names: raise OSError(f"{model} is not a valid OpenAI model name.") + self.model = model if stops_at is not None and len(stops_at) > 4: raise Exception("OpenAI's API does not accept more than 4 stop sequences.") @@ -44,10 +50,7 @@ def __init__(self, model: str, stops_at=None, max_tokens=None, temperature=None) temperature = 1.0 self.temperature = temperature - super().__init__(name=f"OpenAI {model}") - self.model = model - - def perform(self, prompt): + def __call__(self, prompt: str) -> str: try: resp = openai.Completion.create( model=self.model, @@ -71,4 +74,4 @@ def perform(self, prompt): except error.Timeout as e: raise OSError(f"Open API request timed out: {e}") - return (resp["choices"][0]["text"],) + return resp["choices"][0]["text"] diff --git a/outlines/program.py b/outlines/program.py deleted file mode 100644 index a3f2f7b8..00000000 --- a/outlines/program.py +++ /dev/null @@ -1,267 +0,0 @@ -import itertools -import textwrap -import time -from functools import singledispatchmethod -from typing import Callable, Iterable, Reversible - -from rich.console import Console -from rich.layout import Layout -from rich.live import Live -from rich.panel import Panel - -from outlines.graph import Variable, io_toposort -from outlines.text.models import LanguageModel -from outlines.text.var import StringConstant - -COLORS = 
itertools.cycle(["deep_sky_blue2", "gold3", "deep_pink2"]) - - -class Program: - """ """ - - def __init__(self, inputs: Iterable[Variable], outputs: Reversible[Variable]): - self.inputs = inputs - self.outputs = outputs - self.frames = io_toposort(inputs, outputs) - - self.language_models = list( - { - node.op - for node in self.frames - if node.op is not None and isinstance(node.op, LanguageModel) - } - ) - self.lm_colors = {lm: next(COLORS) for lm in self.language_models} - - self.console = Console() - - def build_layout(self) -> Layout: - """Create the layout for the command line interface. - - +-------------------------------------+ - | Logo + instructions | - +-------------------------------------+ - | List of Ops | Executed trace | - | + parameters | | - +-------------------------------------+ - - """ - layout = Layout(name="root") - layout.split_column(Layout(name="header", size=12), Layout(name="execution")) - layout["execution"].split_row( - Layout(name="models"), Layout(name="script", ratio=4) - ) - - return layout - - def print_ops_description(self) -> Panel: - """Create the model panel. - - The `model` panel displays each `Op` used in the program and their - parameters in the color that was assigned to them. The color matches - the color used in the `script` panel for the text they generate. - - """ - model_str = "\n\n".join( - [f"[{self.lm_colors[lm]}] {lm.name} [/]" for lm in self.language_models] - ) - return Panel( - model_str, - border_style="bright_black", - title="[bright_black]Models[/]", - title_align="left", - ) - - def print_header(self) -> str: - """Display the program's header in the console.""" - - welcome_ascii = textwrap.dedent( - r""" - ___ _ _ _ - / _ \ _ _| |_| (_)_ __ ___ ___ - | | | | | | | __| | | '_ \ / _ \/ __| - | |_| | |_| | |_| | | | | | __/\__ \ - \___/ \____|\__|_|_|_| |_|\___||___/ - """ - ) - - text = f"[bold green]{welcome_ascii}[/bold green]\n\n" - text += "[bright_black]Type Ctrl-C to interrupt the execution and return the current trace.[/]\n" - - return text - - def print_trace( - self, script: str = "", elapsed_time_s: float = 0, words: int = 0 - ) -> Panel: - """Display the current script.""" - subtitle_str = f"[bright_black]Words:[/] [bold red]{words}[/] | " - subtitle_str += ( - f"[bright_black]Time Elapsed:[/][bold yellow] {elapsed_time_s:.1f}s [/]" - ) - return Panel( - script, - border_style="bright_black", - title="[bright_black]Script[/]", - title_align="left", - subtitle=subtitle_str, - subtitle_align="right", - ) - - def execute_frames(self, *values): - storage_map = {s: v for s, v in zip(self.inputs, values)} - script_fmt = "" - trace = {"script": "", "nodes": {}} - - start_time = time.time() - time_elapsed_s = 0 - - # Corner case where the users only passes strings - if len(self.frames) == 0: - trace["script"] = "".join(values) - - try: - with Live(self.layout, console=self.console) as live: - self.layout["script"].update(self.print_trace()) - live.update(self.layout) - - for node in self.frames: - input_fmt = self.process_frame_inputs(node, storage_map) - script_fmt += input_fmt - self.layout["script"].update( - self.print_trace( - script_fmt, time_elapsed_s, len(script_fmt.split()) - ) - ) - live.update(self.layout) - - self.execute_frame(node, storage_map, trace) - time_elapsed_s = time.time() - start_time - - output_fmt = self.process_frame_outputs(node, storage_map) - script_fmt += output_fmt - self.layout["script"].update( - self.print_trace( - script_fmt, time_elapsed_s, len(script_fmt.split()) - ) - ) - 
live.update(self.layout) - - except KeyboardInterrupt: - pass - except Exception as e: - raise e - finally: - decoded_script = tuple(storage_map[output] for output in self.outputs) - trace["script"] = decoded_script - if len(decoded_script) == 1: - trace["script"] = decoded_script[0] - return trace - return trace - - def debug(self, *values): - storage_map = {s: v for s, v in zip(self.inputs, values)} - trace = {"script": "", "nodes": {}} - for node in self.frames: - self.process_frame_inputs(node, storage_map) - self.execute_frame(node, storage_map, trace) - - return storage_map - - def process_frame_inputs(self, node, storage_map): - """Process the nodes' inputs. - - If either of the node's inputs is a `StringConstant` we add its - value to the storage map and return its (formatted) value to - be added to the current value of the decoded script. - - """ - input_str, input_fmt = "", "" - for var in node.inputs: - if isinstance(var, StringConstant): - if var not in storage_map: - storage_map[var] = var.value - input_str = var.value - input_fmt = self.format_display(None, input_str) - - return input_fmt - - def execute_frame(self, node, storage_map, trace): - """Execute the current frame.""" - node_inputs = [storage_map[i] for i in node.inputs] - results = node.op.perform(*node_inputs) - for i, o in enumerate(node.outputs): - storage_map[o] = results[i] - trace[o.name] = results[i] - - def process_frame_outputs(self, node, storage_map): - """Process the node's outputs. - - If the node's `Op` is a `LanguageModel` we append its - result to the current value of the decoded script. - - """ - output_str, output_fmt = "", "" - if isinstance(node.op, LanguageModel): - output_str = storage_map[node.outputs[0]] - output_fmt = self.format_display(node.op, output_str) - - return output_fmt - - @singledispatchmethod - def format_display(self, op, text): - return f"[white]{text}[/]" - - @format_display.register(LanguageModel) - def format_display_LanguageModel(self, op, text): - return f"[{self.lm_colors[op]}]{text}[/]" - - def run(self, *values): - self.layout = self.build_layout() - self.layout["header"].update(self.print_header()) - self.layout["models"].update(self.print_ops_description()) - return self.execute_frames(*values) - - -program = Program - - -def chain(input_vars, output_vars) -> Callable: - """Return a function that will compute the outputs of a chain from its outputs. - - Parameters - ---------- - input_vars - Sequence of symbolic variables that correspond to the function's - parameters. - output_vars - Symbolic variable(s) representing the expression(s) to compute. - - """ - - if not isinstance(input_vars, (list, tuple)): - raise Exception( - "Input variables of the `compile` function should be contained in a list or a tupe, even when there is a single input." 
- ) - if not isinstance(output_vars, (list, tuple)): - output_vars = (output_vars,) - - sorted_nodes = io_toposort(input_vars, output_vars) - - def function(*inputs): - storage_map = {s: v for s, v in zip(input_vars, inputs)} - - for node in sorted_nodes: - for i in node.inputs: - if isinstance(i, StringConstant): - storage_map[i] = i.value - inputs = [storage_map[i] for i in node.inputs] - results = node.op.perform(*inputs) - for i, o in enumerate(node.outputs): - storage_map[o] = results[i] - - if len(output_vars) == 1: - return storage_map[output_vars[0]] - else: - return tuple(storage_map[o] for o in output_vars) - - return function diff --git a/outlines/text.py b/outlines/text.py new file mode 100644 index 00000000..b851472c --- /dev/null +++ b/outlines/text.py @@ -0,0 +1,248 @@ +import inspect +import re +from typing import Any, Callable, Dict, List, Optional, Tuple, cast + +from mako.template import Template + + +def render(template: str, **values: Optional[Dict[str, Any]]) -> str: + r"""Parse a Mako template and translate it into an Outlines graph. + + This function removes extra whitespaces and linebreaks from templates to + allow users to enter prompt more naturally than if they used Python's + constructs directly. See the examples for a detailed explanation. + + Examples + -------- + + Outlines follow Mako's syntax + + >>> import outlines + >>> outline = outlines.render("I like ${food} and ${sport}", food="tomatoes", sport="tennis") + I like tomatoes and tennis + + If the first line of the template is empty, `render` removes it + + >>> from outlines import render + >>> + >>> tpl = ''' + ... A new string''' + >>> tpl + ... '\nA new string' + >>> render(tpl) + ... 'a new string' + + Similarly, `render` ignores linebreaks introduced by placing the closing quotes + underneath the text: + + >>> tpl = ''' + ... A new string + ... ''' + >>> tpl + ... '\nA new string\n' + >>> render(tpl) + ... 'A new string' + + `render` removes the identation in docstrings. This is particularly important + when using prompt functions + + >>> tpl = ''' + ... a string + ... and another string''' + >>> tpl + ... '\n a string\n and another string' + >>> render(tpl) + ... 'a string\nand another string' + + The indentation of the first line is assumed to be the same as the second line's + + >>> tpl = '''a string + ... and another''' + >>> tpl + ... 'a string\n and another' + >>> render(tpl) + ... 'a string\nand another' + + To get a different indentation for the first and the second line, we can start the + prompt on the string's second line: + + >>> tpl = ''' + ... First line + ... Second line''' + >>> render(tpl) + ... 'First Line\n Second Line' + + Finally, `render` removes the indentation introduced when using `\` to + escape linebreaks: + + >>> tpl = ''' + ... Long test \ + ... That we break''' + >>> tpl + '\n Long test That we break' + >>> render(tpl) + 'Long test That we break' + + Parameters + ---------- + template + A string that contains a template written in the Mako syntax. + **values + Map from the variables in the template to their value. + + Returns + ------- + A string that contains the rendered template. 
+ + """ + + # Dedent, and remove extra linebreak + template = inspect.cleandoc(template) + + # Remove extra whitespace due to linebreaks with "\" + # TODO: this will remove indentation, we need to only remove + # whitespaces when the sequence does not start with `\n` + template = re.sub(" +", " ", template) + + mako_template = Template(template) + return mako_template.render(**values) + + +def prompt(fn: Callable) -> Callable: + """Decorate a function that contains a prompt template. + + This allows to define prompts in the docstring of a function and ease their + manipulation by providing some degree of encapsulation. It uses the `render` + function internally to render templates. + + >>> import outlines + >>> + >>> @outlines.prompt + >>> def build_prompt(question): + ... "I have a ${question}" + ... + >>> prompt = build_prompt("How are you?") + + """ + + sig = inspect.signature(fn) + + # The docstring contains the template that will be rendered to be used + # as a prompt to the language model. + docstring = fn.__doc__ + if docstring is None: + raise TypeError("Could not find a template in the function's docstring.") + + def wrapper(*args: Optional[List[str]], **kwargs: Optional[Dict[str, str]]) -> str: + """Render and return the template. + + Returns + ------- + The rendered template as a Python ``str``. + + """ + template = cast(str, docstring) # for typechecking + bound_arguments = sig.bind(*args, **kwargs) + bound_arguments.apply_defaults() + return render(template, **bound_arguments.arguments) + + return wrapper + + +def completion( + name: str, + *, + stops_at: Optional[List[str]] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, +) -> Callable: + """Decorator that simplifies calls to language models. + + Prompts that are passed to language models are often rendered templates, + and the workflow typically looks like: + + >>> import outlines + >>> from outlines.models.openai import OpenAI + >>> + >>> llm = OpenAI("davinci") + >>> tpl = "I have a ${question}" + >>> prompt = outlines.render(tpl, question="How are you?") + >>> answer = llm(prompt) + + While explicit, these 4 lines have the following defaults: + + 1. The prompt is hidden; + 2. The language model instantiation is far from the prompt; prompt templates + are however attached to a specific language model call. + 3. The intent behind the language model call is hidden. + + To encapsulate the logic behind language model calls, we thus define the + template prompt inside a function and decorate the function with a model + specification. When that function is called, the template is rendered using + the arguments passed to the function, and the rendered prompt is passed to + a language model instantiated with the arguments passed to the decorator. + + The previous example is equivalent to the following: + + >>> import outlines + >>> + >>> @outlines.text.model("openai/davinci") + ... def answer(question): + ... "I have a ${question}" + ... + >>> answer, _ = answer("How are you?") + + Decorated functions return two objects: the first represents the output of + the language model call, the second represents the concatenation of the + rendered prompt with the output of the language model call. The latter can + be used in context where one expands an initial prompt with recursive calls + to language models. + + Parameters + ---------- + stops_at + A list of tokens which, when found, stop the generation. + max_tokens + The maximum number of tokens to generate. 
+    temperature
+        Value used to modulate the next token probabilities.
+
+    """
+    provider_name = name.split("/")[0]
+    model_name = name[len(provider_name) + 1 :]
+
+    if provider_name == "openai":
+        from outlines.models.openai import OpenAI
+
+        llm = OpenAI(model_name, stops_at, max_tokens, temperature)  # type:ignore
+    elif provider_name == "hf":
+        from outlines.models.hf_transformers import HFCausalLM
+
+        llm = HFCausalLM(model_name, max_tokens, temperature)  # type:ignore
+    else:
+        raise NameError(f"The model provider {provider_name} is not available.")
+
+    def decorator(fn: Callable):
+        prompt_fn = prompt(fn)
+
+        def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]:
+            """Call the generative model with the rendered template.
+
+            Building prompts with recursive calls to language models is common
+            in prompt engineering; we thus return both the raw answer from the
+            language model as well as the rendered prompt including the answer.
+
+            Returns
+            -------
+            A tuple that contains the result of the language model call, and the
+            rendered prompt concatenated with the result of the language model
+            call.
+
+            """
+            prompt = prompt_fn(*args, **kwargs)
+            result = llm(prompt)
+            return result, prompt + result
+
+        return wrapper
+
+    return decorator
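[Editor's note: a sketch of the recursive-prompting pattern the `completion` docstring above alludes to, chaining two decorated functions through the second return value. The model name and prompts are illustrative, and running this requires an OpenAI API key:

>>> import outlines
>>>
>>> @outlines.completion("openai/text-davinci-001", stops_at=["."])
... def think(question):
...     """${question} Let's think step by step."""
...
>>> @outlines.completion("openai/text-davinci-001", max_tokens=32, temperature=0.0)
... def conclude(memory):
...     """${memory} Therefore, the answer is"""
...
>>> _, memory = think("What is 2 + 2?")
>>> answer, _ = conclude(memory)
]

diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py
deleted file mode 100644
index 8651d0a8..00000000
--- a/outlines/text/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .basic import *
-from .completion import completion
-from .prompt import prompt, render
-from .var import as_string, string
-
-__all__ = ["as_string", "completion", "prompt", "string", "render"]
diff --git a/outlines/text/basic.py b/outlines/text/basic.py
deleted file mode 100644
index 52c59645..00000000
--- a/outlines/text/basic.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""Basic `StringVariable` manipulations."""
-import outlines
-from outlines.graph import Apply, Op
-from outlines.text.var import StringVariable
-
-__all__ = ["add"]
-
-
-class Add(Op):
-    def make_node(self, s, t):
-        s = outlines.text.as_string(s)
-        t = outlines.text.as_string(t)
-        out = StringVariable()
-        return Apply(self, [s, t], [out])
-
-    def perform(self, s, t):
-        return (s + t,)
-
-
-add = Add()
diff --git a/outlines/text/completion.py b/outlines/text/completion.py
deleted file mode 100644
index 94814dfe..00000000
--- a/outlines/text/completion.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from outlines.text.prompt import prompt
-
-
-def completion(name: str, *, stops_at=None, max_tokens=None, temperature=None):
-    """Decorator that allows to simplify calls to language models.
-
-    Prompts that are passed to language models are often rendered templates,
-    and the workflow typically looks like:
-
-    >>> import outlines
-    >>> from outlines.text.models.openai import OpenAI
-    >>>
-    >>> llm = OpenAI("davinci")
-    >>> tpl = "I have a ${question}"
-    >>> prompt = outlines.render(tpl, question="How are you?")
-    >>> answer = llm(prompt)
-
-    While explicit, these 4 lines have the following defaults:
-
-    1. The prompt is hidden;
-    2. The language model instantiation is far from the prompt; prompt templates
-       are however attached to a specific language model call.
-    3. The intent behind the language model call is hidden.
-
-    To encapsulate the logic behind language model calls, we thus define the
-    template prompt inside a function and decorate the function with a model
-    specification.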
When that function is called, the template is rendered using - the arguments passed to the function, and the rendered prompt is passed to - a language model instantiated with the arguments passed to the decorator. - - The previous example is equivalent to the following: - - >>> import outlines - >>> - >>> @outlines.text.model("openai/davinci") - ... def answer(question): - ... "I have a ${question}" - ... - >>> answer, _ = answer("How are you?") - - Decorated functions return two objects: the first represents the output of - the language model call, the second represents the concatenation of the - rendered prompt with the output of the language model call. The latter can - be used in context where one expands an initial prompt with recursive calls - to language models. - - Parameters - ---------- - stops_at - A list of tokens which, when found, stop the generation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - """ - provider_name = name.split("/")[0] - model_name = name[len(provider_name) + 1 :] - - if provider_name == "openai": - from outlines.text.models.openai import OpenAI - - llm = OpenAI(model_name, stops_at, max_tokens, temperature) # type:ignore - elif provider_name == "hf": - from outlines.text.models.hugging_face import HFCausalLM - - llm = HFCausalLM(model_name, max_tokens, temperature) # type:ignore - else: - raise NameError(f"The model provider {provider_name} is not available.") - - def decorator(fn): - prompt_fn = prompt(fn) - - def wrapper(*args, **kwargs): - """Call the LLM with the rendered template. - - Building prompts with recursive calls to language models is common - in prompt engineering, we thus return both the raw answer from the - language model as well as the rendered prompt including the answer. - - Returns - ------- - A tuple that contains the result of the language model call, and the - rendered prompt concatenated with the result of the language model - call. - - """ - prompt = prompt_fn(*args, **kwargs) - result = llm(prompt) - return result, prompt + result - - return wrapper - - return decorator diff --git a/outlines/text/models/__init__.py b/outlines/text/models/__init__.py deleted file mode 100644 index 2a7dc397..00000000 --- a/outlines/text/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .language_model import LanguageModel diff --git a/outlines/text/models/language_model.py b/outlines/text/models/language_model.py deleted file mode 100644 index a48ed075..00000000 --- a/outlines/text/models/language_model.py +++ /dev/null @@ -1,52 +0,0 @@ -from outlines.graph import Apply, Op -from outlines.text.var import StringVariable, as_string - - -class LanguageModel(Op): - """An `Op` that produces a sample from a language model. - - The output of language models in outlines is represented as a random - variable. Therefore, calling a language model will return a random sequence - (via ancestral sampling) by default. Other decoding methods are constructed - as graph transformations. - - """ - - def __init__(self, name=None): - """Instantiate the `LanguageModel` `Op`. - - Parameters - ---------- - name - The name of the `Op` in the graph. - - """ - super().__init__() - self.name = name - - def __call__(self, prompt, stops_at=None, name=None): - """Create the `Apply` node that represents the `Op`'s application to inputs. - - Parameters - ---------- - prompt - The prompt used to condition the language model's sampling procedure. - name - The name of the output variable in the graph. 
- - """ - res = super().__call__(prompt) - - if name is not None: - res.name = name - - return res - - def make_node(self, prompt): - prompt = as_string(prompt) - out = StringVariable() - - return Apply(self, [prompt], [out]) - - def perform(self, prompt): - return NotImplementedError diff --git a/outlines/text/prompt.py b/outlines/text/prompt.py deleted file mode 100644 index 351ff670..00000000 --- a/outlines/text/prompt.py +++ /dev/null @@ -1,156 +0,0 @@ -import collections -import inspect -import re -from typing import Callable, Dict, Union - -from mako.runtime import Context -from mako.template import Template - -from outlines.text.var import StringVariable - - -class OutlinesEncodingBuffer: - """An encoding buffer for Mako's templating engine. - - This is a modified version of Mako's `FastEncodingBuffer`. It build outlines - graph when the template is rendered with `StringVariable`s. - - """ - - def __init__(self, encoding=None, errors="strict"): - self.data = collections.deque() - self.encoding = encoding - self.delim = "" - self.errors = errors - self.write = self.data.append - - def truncate(self): - self.data = collections.deque() - self.write = self.data.append - - def get_value(self): - if self.encoding: - return self.delim.join(self.data).encode(self.encoding, self.errors) - else: - output = "" - for d in self.data: - if isinstance(d, StringVariable): - output = output + d - else: - output = output + str(d) - return output - - -def render( - template: str, **values: Dict[str, Union[str, StringVariable]] -) -> Union[str, StringVariable]: - r"""Parse a Mako template and translate it into an Outlines graph. - - Examples - -------- - - Outlines follow Mako's syntax - - >>> import outlines - >>> outline = outlines.render("I like ${food} and ${sport}", food="tomatoes", sport="tennis") - I like tomatoes and tennis - - When a variable in the template is assigne a `StringVariable` value, the - `render` function builds the corresponding outlines graph and returns a - `StringVariable`: - - >>> s = outlines.text.string() - >>> outlines.render("I like ${food}", food=food) - - - It is also possible to use control flow inside templates: - - >>> examples = ["one", "two", "three"] - >>> outlines = outlines.render( - ... ''' - ... % for example in examples: - ... Example: ${example} - ... % endfor - ... ''', - ... examples=examples - ... ) - - Parameters - ---------- - template - A string that contains a template written in the Mako syntax. - **values - Map from the variables in the template to their value. - - Returns - ------- - A string when the values are all strings, a `StringVariable` otherwise. - - """ - buf = OutlinesEncodingBuffer() - ctx = Context(buf, **values) - - outline = inspect.cleandoc(template) - mako_template = Template(outline, default_filters=[]) - mako_template.render_context(ctx) - - return buf.get_value() - - -def prompt(fn: Callable): - """Decorator around a function that contains a prompt template. - - This allows to define prompts in the docstring of a function and ease their - manipulation by providing some degree of encapsulation. - - >>> import outlines - >>> - >>> @outlines.prompt - >>> def answer_tpl(question): - ... "I have a ${question}" - ... - >>> prompt = answer_tpl("How are you?") - - This is syntactic sugar and uses the `render` function internally. - Therefore, the wrapped functions return `str` when called with `str` - arguments only, and a `StringVariable` when at least one argument is a - `StringVariable`. 
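
    Example
    -------
    (Editor's illustration of the mixed case; mirrors tests/text/test_prompt.py
    in this series, and reuses the `answer_tpl` defined above.)

    >>> s = outlines.text.string()
    >>> p = answer_tpl(s)  # `p` is a `StringVariable` graph node, not a `str`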
- - """ - - # Get the names of the parameters to the function, which must correspond - # to the variables defined in the template. - var_names = [] - kwargs_data = {} - sig = inspect.signature(fn) - for parameter in sig.parameters.values(): - if parameter.default == inspect._empty: - var_names.append(parameter.name) - else: - kwargs_data[parameter.name] = parameter.default - - # The docstring contains the template that will be rendered to be used - # as a prompt to the language model. - docstring = fn.__doc__ - if docstring is None: - raise TypeError("Could not find a template in the function's docstring.") - else: - docstring = re.sub( - " +", " ", docstring - ) # Remove extra whitespace due to linebreaks - template = inspect.cleandoc(docstring) - - def wrapper(*args, **kwargs): - """Render and return the template. - - Returns - ------- - A Python `str` when all arguments are Python `str`, a `StringVariable` - otherwise. - - """ - bound_arguments = sig.bind(*args, **kwargs) - bound_arguments.apply_defaults() - return render(template, **bound_arguments.arguments) - - return wrapper diff --git a/outlines/text/var.py b/outlines/text/var.py deleted file mode 100644 index 65b46262..00000000 --- a/outlines/text/var.py +++ /dev/null @@ -1,71 +0,0 @@ -from functools import singledispatch - -import outlines.text as ot -from outlines.graph import Variable - - -class StringVariable(Variable): - """Subclass to add the string operators to `Variable`.""" - - def __init__(self, owner=None, index=None, name=None): - super().__init__(owner, index, name) - - def __add__(self, other): - return ot.add(self, other) - - def __radd__(self, other): - return ot.add(other, self) - - -string = StringVariable - - -class StringConstant(StringVariable): - """Constant `StringVariable` that corresponds to user input.""" - - def __init__(self, value, name=None): - self.value = value - super().__init__(name=name) - - def __str__(self): - if self.name is not None: - name = self.name - else: - name = "StringConstant" - return f"{name}{{'{self.value}'}}" - - -@singledispatch -def as_string(x, name=None): - """Convert `x` into an equivalent `StringVariable`. - - This function can be used to turn `str`, `int` and `float` instances into a - `StringVariable`. It is mainly used in `Op`s' `make_node` method to convert - inputs to a `StringVariable` and add them to the graph. - - Parameters - ---------- - x - The object that will we converted into a `StringVariable`. - name - If a new `StringVariable` instance is created it will be attributed this - name. 
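
    Examples
    --------
    (Editor's illustration; mirrors tests/text/test_var.py in this series.)

    >>> as_string("test").value
    'test'
    >>> as_string(1.3).value
    '1.3'
    >>> as_string([])  # anything else raises
    TypeError: [] cannot be cast into a string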
- - """ - raise TypeError(f"{x} cannot be cast into a string") - - -@as_string.register(str) -def as_string_strings(x, name=None): - return StringConstant(x, name) - - -@as_string.register(int) -@as_string.register(float) -def as_string_numbers(x, name=None): - return StringConstant(str(x), name) - - -@as_string.register(StringVariable) -def as_string_StringVariable(x, name=None): - return x diff --git a/tests/image/__init__.py b/tests/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/image/test_var.py b/tests/image/test_var.py deleted file mode 100644 index 8b2faecc..00000000 --- a/tests/image/test_var.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -from PIL.Image import Image as PILImage - -import outlines -from outlines.image.var import ImageConstant -from outlines.text.var import Variable - - -def test_cast(): - with pytest.raises(TypeError): - outlines.as_image("") - - with pytest.raises(TypeError): - outlines.as_image(Variable()) - - with pytest.raises(TypeError): - outlines.as_image(ImageConstant("")) - - img = PILImage() - s = outlines.as_image(img) - assert isinstance(s, ImageConstant) - assert isinstance(s.value, type(img)) - - i = ImageConstant(img) - outlines.as_image(i) - - i = outlines.image.image() - outlines.as_image(i) diff --git a/tests/test_compile.py b/tests/test_compile.py deleted file mode 100644 index 12933005..00000000 --- a/tests/test_compile.py +++ /dev/null @@ -1,43 +0,0 @@ -import outlines -from outlines.text import render, string - - -def test_compile(): - s = string() - chain = outlines.chain([s], s) - assert chain("test") == "test" - - s = string() - p = "Test " + s - chain = outlines.chain([s], p) - assert chain("test") == "Test test" - - s1 = string() - s2 = string() - p = s1 + s2 - chain = outlines.chain([s1, s2], p) - assert chain("one", "two") == "onetwo" - - s1 = string() - s2 = string() - p1 = s1 + s2 - p2 = s1 + "three" - chain = outlines.chain([s1, s2], [p1, p2]) - assert chain("one", "two") == ("onetwo", "onethree") - - -def test_compile_scripts(): - s = string() - o = render("This is a ${var}", var=s) - chain = outlines.chain([s], o) - assert chain("test") == "This is a test" - - -def test_eval(): - s = string() - assert s.eval({s: "s"}) == "s" - - s = string() - t = string() - o = s + t - assert o.eval({s: "one", t: "two"}) == "onetwo" diff --git a/tests/test_function.py b/tests/test_function.py deleted file mode 100644 index 8ff31362..00000000 --- a/tests/test_function.py +++ /dev/null @@ -1,47 +0,0 @@ -import pytest - -import outlines - - -def test_function_no_types(): - with pytest.raises(TypeError, match="input types"): - - @outlines.fn - def constant(inp): - return "constant" - - constant("") - - with pytest.raises(TypeError, match="only supports string arguments"): - - @outlines.fn - def constant(inp: float): - return "constant" - - constant("") - - with pytest.raises(TypeError, match="output types"): - - @outlines.fn - def constant(inp: str): - return "constant" - - constant("") - - with pytest.raises(TypeError, match="only supports string return types"): - - @outlines.fn - def constant(inp: str) -> float: - return 1 - - constant("") - - -def test_function_decorator(): - @outlines.fn - def constant(inp: str) -> str: - return "constant" - - inp = outlines.text.string() - out = constant(inp) - assert str(out.owner.op) == "FromFunctionOp(constant)" diff --git a/tests/test_graph.py b/tests/test_graph.py deleted file mode 100644 index 3d8c1042..00000000 --- a/tests/test_graph.py +++ /dev/null @@ -1,101 +0,0 
@@ -import pytest - -from outlines.graph import Apply, Op, Variable, io_toposort - - -class MyVar(Variable): - def __init__(self, value): - self.value = value - super().__init__() - - def __eq__(self, other): - return type(self) == type(other) and self.value == other.value - - def __hash__(self): - return hash((type(self), self.value)) - - -class MyOp(Op): - def make_node(self, *inputs): - result = sum(input.value for input in inputs) - outputs = [MyVar(result)] - return Apply(self, inputs, outputs) - - -op = MyOp() - - -def test_apply_wrong_args(): - with pytest.raises(TypeError): - Apply(op, 1.0, []) - - with pytest.raises(TypeError): - Apply(op, [], 1.0) - - with pytest.raises(TypeError): - Apply(op, [1.0], []) - - with pytest.raises(TypeError): - Apply(op, [], [1.0]) - - -def test_Apply(): - i = Variable(name="i") - o = Variable(name="o") - a = Apply(op, [i], [o]) - assert len(a.inputs) == 1 - assert len(a.outputs) == 1 - assert a.inputs[0].name == "i" - assert a.outputs[0].name == "o" - assert a.outputs[0].owner == a - - -def test_Apply_multiple_inputs(): - i1, i2 = Variable(name="i1"), Variable(name="i2") - o = Variable(name="o") - a = Apply(op, [i1, i2], [o]) - assert len(a.inputs) == 2 - - -def test_Variable_wrong_input(): - owner = "txt" - with pytest.raises(TypeError): - Variable(owner) - - owner = Apply(op, [], []) - index = "i" - with pytest.raises(TypeError): - Variable(owner, index) - - owner = Apply(op, [], []) - index = "i" - name = 1 - with pytest.raises(TypeError): - Variable(owner, index, name) - - -def test_Op(): - v1, v2 = MyVar(1), MyVar(2) - node = op.make_node(v1, v2) - assert [x for x in node.inputs] == [v1, v2] - assert [type(x) for x in node.outputs] == [MyVar] - assert node.outputs[0].owner is node and node.outputs[0].index == 0 - - -def test_string_formatting(): - v1, v2 = MyVar(1), MyVar(2) - node = op.make_node(v1, v2) - assert str(node.op) == "MyOp" - assert str(v1) == "" - assert [str(o) for o in node.outputs] == ["MyOp.out"] - - -def test_toposort_simple(): - r1, r2, r5 = MyVar(1), MyVar(2), MyVar(5) - o1 = op(r1, r2) - o1.name = "o1" - o2 = op(o1, r5) - o2.name = "o2" - - res = io_toposort([r1, r2, r5], [o2]) - assert res == [o1.owner, o2.owner] diff --git a/tests/test_model.py b/tests/test_model.py new file mode 100644 index 00000000..85f6eda1 --- /dev/null +++ b/tests/test_model.py @@ -0,0 +1,24 @@ +import pytest + +import outlines.text as text + + +def test_model_wrong_provide(): + with pytest.raises(NameError, match="not available"): + + @text.completion("aa/model_name") + def test_function(): + """""" + + +@pytest.mark.skip +def test_model(): + @text.completion("openai/text-davinci-001", stops_at=["."]) + def test_function(question, type="bad"): + """You're a witty and sarcastic AI. + + Tell me a ${type} ${question}. 
+ Joke: + """ + + answer, prompt = test_function("joke", type="good") diff --git a/tests/test_text.py b/tests/test_text.py new file mode 100644 index 00000000..beee639b --- /dev/null +++ b/tests/test_text.py @@ -0,0 +1,126 @@ +import pytest + +import outlines.text as text + + +def test_render(): + tpl = """ + A test string""" + assert text.render(tpl) == "A test string" + + tpl = """ + A test string + """ + assert text.render(tpl) == "A test string" + + tpl = """ + A test + Another test + """ + assert text.render(tpl) == "A test\nAnother test" + + tpl = """A test + Another test + """ + assert text.render(tpl) == "A test\nAnother test" + + tpl = """ + A long test \ + that we break \ + in several lines + """ + assert text.render(tpl) == "A long test that we break in several lines" + + +@pytest.mark.xfail(reason="The regex used to strip whitespaces is too aggressive") +def test_render_indented(): + tpl = """ + A test line + An indented line + """ + assert text.render(tpl) == "A test line\n An indented line" + + +@pytest.mark.xfail(reason="Mako adds newlines after for and if blocks") +def test_render_mako(): + """Make sure that we can use basic Mako syntax.""" + examples = ["one", "two"] + prompt = text.render( + """ + % for e in examples: + Example: ${e} + % endfor + """, + examples=examples, + ) + assert prompt == "Example: one\nExample: two" + + examples = ["one", "two"] + prompt = text.render( + """ + % for i, e in enumerate(examples): + Example ${i}: ${e} + % endfor + """, + examples=examples, + ) + assert prompt == "Example 0: one\nExample 1: two" + + tpl = """ + % if is_true: + true + % endif + """ + assert text.render(tpl, is_true=True) == "true" + assert text.render(tpl, is_true=False) == "" + + +def test_prompt_basic(): + @text.prompt + def test_tpl(variable): + """${variable} test""" + + with pytest.raises(TypeError): + test_tpl(v="test") + + p = test_tpl("test") + assert p == "test test" + + p = test_tpl(variable="test") + assert p == "test test" + + @text.prompt + def test_single_quote_tpl(variable): + "${variable} test" + + p = test_tpl("test") + assert p == "test test" + + +def test_prompt_kwargs(): + @text.prompt + def test_kwarg_tpl(var, other_var="other"): + """${var} and ${other_var}""" + + p = test_kwarg_tpl("test") + assert p == "test and other" + + p = test_kwarg_tpl("test", other_var="kwarg") + assert p == "test and kwarg" + + p = test_kwarg_tpl("test", "test") + assert p == "test and test" + + +def test_no_prompt(): + with pytest.raises(TypeError, match="template"): + + @text.prompt + def test_empty(variable): + pass + + with pytest.raises(TypeError, match="template"): + + @text.prompt + def test_only_code(variable): + return variable diff --git a/tests/text/__init__.py b/tests/text/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/text/test_basic.py b/tests/text/test_basic.py deleted file mode 100644 index b15588fe..00000000 --- a/tests/text/test_basic.py +++ /dev/null @@ -1,41 +0,0 @@ -from outlines.graph import Apply -from outlines.text.basic import Add, add -from outlines.text.var import StringVariable, string - - -def test_add_symbolic(): - s, t = string(), string() - w = add(s, t) - assert isinstance(w, StringVariable) - assert isinstance(w.owner, Apply) - assert isinstance(w.owner.op, Add) - assert len(w.owner.inputs) == 2 - assert len(w.owner.outputs) == 1 - - a = Add() - assert a.perform("a", "string")[0] == "astring" - - w = s + t - assert isinstance(w, StringVariable) - assert isinstance(w.owner, Apply) - assert 
isinstance(w.owner.op, Add) - assert len(w.owner.inputs) == 2 - assert len(w.owner.outputs) == 1 - - -def test_add_mixed(): - s, t = "a string", string() - w = s + t - assert isinstance(w, StringVariable) - assert isinstance(w.owner, Apply) - assert isinstance(w.owner.op, Add) - assert len(w.owner.inputs) == 2 - assert len(w.owner.outputs) == 1 - - s, t = string(), "a string" - w = s + t - assert isinstance(w, StringVariable) - assert isinstance(w.owner, Apply) - assert isinstance(w.owner.op, Add) - assert len(w.owner.inputs) == 2 - assert len(w.owner.outputs) == 1 diff --git a/tests/text/test_model.py b/tests/text/test_model.py deleted file mode 100644 index 5434fba4..00000000 --- a/tests/text/test_model.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from outlines.text import completion, string -from outlines.text.models.language_model import LanguageModel - - -def test_initialize_LanguageModel(): - llm = LanguageModel(name="llm") - - prompt = string() - out = llm(prompt) - assert isinstance(out.owner.op, LanguageModel) - assert out.owner.inputs[0] == prompt - assert out.owner.op.name == "llm" - - -def test_model_wrong_provide(): - with pytest.raises(NameError, match="not available"): - - @completion("aa/model_name") - def test_function(): - """""" - - -@pytest.mark.skip -def test_model(): - @completion("openai/text-davinci-001", stops_at=["."]) - def test_function(question, type="bad"): - """You're a witty and sarcastic AI. - - Tell me a ${type} ${question}. - Joke: - """ - - answer, prompt = test_function("joke", type="good") diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py deleted file mode 100644 index 659b0f81..00000000 --- a/tests/text/test_prompt.py +++ /dev/null @@ -1,135 +0,0 @@ -import pytest - -import outlines.text as text -from outlines.text import render, string -from outlines.text.basic import Add -from outlines.text.var import StringConstant, StringVariable - - -def test_template_text(): - with pytest.raises(NameError): - render("String ${one}", two="two") - - t = render("Test") - assert t == "Test" - - t = render("Test ${variable}", variable="string") - assert t == "Test string" - - t = render("Test ${variable}", variable=1) - assert t == "Test 1" - - t = render("Test repeated ${variable} ${variable}", variable="string") - assert t == "Test repeated string string" - - t = render("Test ${one} ${two}", one="1", two="2") - assert t == "Test 1 2" - - -def test_template_string_variable(): - variable = string() - t = render("Test ${variable}", variable=variable) - assert isinstance(t.owner.op, Add) - assert isinstance(t.owner.inputs[0], StringConstant) - assert isinstance(t.owner.inputs[1], StringVariable) - assert t.owner.inputs[0].value == "Test " - - variable = string() - t = render("${variable} test", variable=variable) - assert isinstance(t.owner.op, Add) - assert isinstance(t.owner.inputs[0], StringVariable) - assert isinstance(t.owner.inputs[1], StringConstant) - assert t.owner.inputs[1].value == " test" - - -def test_template_few_shots(): - wa = string() - examples = [["here", "there"], ["this", "that"]] - prompt = render( - """ - This is a test - - ${wa} - - % for s, t in examples: - Search: ${s} - Trap: ${t} - % endfor - """, - wa=wa, - examples=examples, - ) - assert isinstance(prompt, StringVariable) - - -def test_prompt_basic(): - @text.prompt - def test_tpl(variable): - """${variable} test""" - - with pytest.raises(TypeError): - test_tpl(v="test") - - p = test_tpl("test") - assert p == "test test" - - p = test_tpl(variable="test") - assert p 
== "test test" - - @text.prompt - def test_single_quote_tpl(variable): - "${variable} test" - - p = test_tpl("test") - assert p == "test test" - - -def test_prompt_kwargs(): - @text.prompt - def test_kwarg_tpl(var, other_var="other"): - """${var} and ${other_var}""" - - p = test_kwarg_tpl("test") - assert p == "test and other" - - p = test_kwarg_tpl("test", other_var="kwarg") - assert p == "test and kwarg" - - p = test_kwarg_tpl("test", "test") - assert p == "test and test" - - -def test_not_prompt(): - with pytest.raises(TypeError, match="template"): - - @text.prompt - def test_empty(variable): - pass - - with pytest.raises(TypeError, match="template"): - - @text.prompt - def test_only_code(variable): - return variable - - -def test_prompt_few_shots(): - @text.prompt - def few_shots_tpl(w, examples): - """This is a test - - ${w} - - % for s, t in examples: - Search: ${s} - Trap: ${t} - % endfor - """ - - prompt = few_shots_tpl("Test", [["a", "b"], ["c", "d"]]) - assert ( - prompt == "This is a test\n\nTest\n\nSearch: a\nTrap: b\nSearch: c\nTrap: d\n" - ) - - prompt = few_shots_tpl(string(), [["a", "b"], ["c", "d"]]) - assert isinstance(prompt, StringVariable) diff --git a/tests/text/test_var.py b/tests/text/test_var.py deleted file mode 100644 index 136c4c36..00000000 --- a/tests/text/test_var.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -import outlines -from outlines.graph import Variable -from outlines.text.var import StringConstant - - -def test_cast(): - with pytest.raises(TypeError): - outlines.text.as_string([]) - - with pytest.raises(TypeError): - outlines.text.as_string(()) - - with pytest.raises(TypeError): - outlines.text.as_string(Variable()) - - s = outlines.text.as_string(StringConstant("")) - assert isinstance(s, StringConstant) - assert s.value == "" - - s = outlines.text.as_string(1) - assert type(s) == StringConstant - assert s.value == "1" - - s = outlines.text.as_string(1.3) - assert type(s) == StringConstant - assert s.value == "1.3" - - s = outlines.text.as_string("test") - assert type(s) == StringConstant - assert s.value == "test" - - s = outlines.text.string() - outlines.text.as_string(s) From 2989c3682c2140ada5d0205ecc0257d7ef624769 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 12 Apr 2023 21:26:27 +0200 Subject: [PATCH 046/734] Use Jinja2 instead of Mako --- examples/meta_prompting.py | 20 ++++++------- outlines/text.py | 31 +++++-------------- pyproject.toml | 2 +- tests/test_text.py | 61 ++++++++++++++++++++++---------------- 4 files changed, 54 insertions(+), 60 deletions(-) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index a2c2350e..270419d9 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -17,7 +17,7 @@ def split_into_steps(question, model_name: str): @text.completion(model_name) def solve(question): - """${question} + """{{question}} Let's solve this problem by splitting it into steps. """ @@ -29,14 +29,14 @@ def solve(question): def fill_in_the_blanks(question, model_name: str): @text.completion(model_name, stops_at=["."]) def determine_goal(question): - """${question} + """{{question}} In order to solve this problem, we will analyze each of the options and determine """ @text.completion(model_name, stops_at=["."]) def solve(memory): - """${memory}. Let's begin.""" + """{{memory}}. 
Let's begin."""

     _, completed = determine_goal(question)
     _, completed = solve(completed)
@@ -48,7 +48,7 @@ def ask_an_expert(question, model_name: str):
     @text.completion(model_name, stops_at=['"'])
     def find_expert(question):
         """
-        ${question}
+        {{question}}

         I entered my question into the Expert Generator and waited. The Expert
         Generator will render a simulation of an expert to answer my question.
@@ -66,10 +66,10 @@ def find_expert(question):
     @text.completion(model_name)
     def get_answer(question, expert, memory):
         """
-        ${memory}
+        {{memory}}
         I am ready to ask my question.
-        "${expert}" I say,
-        ${question}
+        "{{expert}}" I say,
+        {{question}}
         """

     expert, completed = find_expert(question)
@@ -82,16 +82,16 @@ def ask_an_expert_simple(question, model_name: str):
     @text.completion(model_name, stops_at=["\n", "."])
     def find_expert(question):
         """
-        Q: ${question}
+        Q: {{question}}
         A: A good person to answer this question would be
         """

     @text.completion(model_name)
     def get_answer(expert, memory):
         """
-        ${memory}.
+        {{memory}}.

-        For instance,${expert} would answer
+        For instance,{{expert}} would answer
         """

     expert, completed = find_expert(question)
diff --git a/outlines/text.py b/outlines/text.py
index b851472c..c1fc2000 100644
--- a/outlines/text.py
+++ b/outlines/text.py
@@ -1,12 +1,11 @@
 import inspect
-import re
 from typing import Any, Callable, Dict, List, Optional, Tuple, cast

-from mako.template import Template
+from jinja2 import Template


 def render(template: str, **values: Optional[Dict[str, Any]]) -> str:
-    r"""Parse a Mako template and translate it into an Outlines graph.
+    r"""Parse a Jinja2 template and translate it into an Outlines graph.

     This function removes extra whitespaces and linebreaks from templates to
     allow users to enter prompts more naturally than if they used Python's
@@ -15,10 +14,10 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str:
     Examples
     --------

-    Outlines follow Mako's syntax
+    Outlines follows Jinja2's syntax

     >>> import outlines
-    >>> outline = outlines.render("I like ${food} and ${sport}", food="tomatoes", sport="tennis")
+    >>> outline = outlines.render("I like {{food}} and {{sport}}", food="tomatoes", sport="tennis")
     I like tomatoes and tennis

     If the first line of the template is empty, `render` removes it
@@ -72,21 +71,10 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str:
     >>> render(tpl)
     ... 'First Line\n Second Line'

-    Finally, `render` removes the indentation introduced when using `\` to
-    escape linebreaks:
-
-    >>> tpl = '''
-    ...     Long test \
-    ...     That we break'''
-    >>> tpl
-    '\n    Long test That we break'
-    >>> render(tpl)
-    'Long test That we break'
-
     Parameters
     ----------
     template
-        A string that contains a template written in the Mako syntax.
+        A string that contains a template written with the Jinja2 syntax.
     **values
         Map from the variables in the template to their value.
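
# Editor's aside (not part of the patch): a runnable sketch of two of the
# Jinja2 flags adopted in the hunk below; it assumes only that `jinja2` is
# installed.
from jinja2 import Template

tpl = Template(
    "{% for e in items %}\nExample: {{ e }}\n{% endfor %}",
    trim_blocks=True,    # drop the newline that follows a block tag
    lstrip_blocks=True,  # strip leading whitespace before a block tag
)
assert tpl.render(items=["one", "two"]) == "Example: one\nExample: two\n"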
@@ -99,12 +87,9 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: # Dedent, and remove extra linebreak template = inspect.cleandoc(template) - # Remove extra whitespace due to linebreaks with "\" - # TODO: this will remove indentation, we need to only remove - # whitespaces when the sequence does not start with `\n` - template = re.sub(" +", " ", template) - - mako_template = Template(template) + mako_template = Template( + template, trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=False + ) return mako_template.render(**values) diff --git a/pyproject.toml b/pyproject.toml index 1570f8e1..9c936d8e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ classifiers = [ "Topic :: Scientific/Engineering :: Artificial Intelligence", ] dependencies = [ - "mako", + "jinja2", "pillow", "rich" ] diff --git a/tests/test_text.py b/tests/test_text.py index beee639b..e5766ac6 100644 --- a/tests/test_text.py +++ b/tests/test_text.py @@ -24,6 +24,17 @@ def test_render(): """ assert text.render(tpl) == "A test\nAnother test" + tpl = """ + A test line + An indented line + """ + assert text.render(tpl) == "A test line\n An indented line" + + +@pytest.mark.xfail( + reason="We need a regexp that can match whitespace sequences except those that follow a linebreak" +) +def test_render_escaped_linebreak(): tpl = """ A long test \ that we break \ @@ -32,53 +43,51 @@ def test_render(): assert text.render(tpl) == "A long test that we break in several lines" -@pytest.mark.xfail(reason="The regex used to strip whitespaces is too aggressive") -def test_render_indented(): - tpl = """ - A test line - An indented line +def test_render_jinja(): + """Make sure that we can use basic Jinja2 syntax, and give examples + of how we can use it for basic use cases. 
""" - assert text.render(tpl) == "A test line\n An indented line" - -@pytest.mark.xfail(reason="Mako adds newlines after for and if blocks") -def test_render_mako(): - """Make sure that we can use basic Mako syntax.""" + # Notice the newline after the end of the loop examples = ["one", "two"] prompt = text.render( """ - % for e in examples: - Example: ${e} - % endfor - """, + {% for e in examples %} + Example: {{e}} + {% endfor -%}""", examples=examples, ) - assert prompt == "Example: one\nExample: two" + assert prompt == "Example: one\nExample: two\n" + # We can remove the newline by cloing with -%} examples = ["one", "two"] prompt = text.render( """ - % for i, e in enumerate(examples): - Example ${i}: ${e} - % endfor - """, + {% for e in examples %} + Example: {{e}} + {% endfor -%} + + Final""", examples=examples, ) - assert prompt == "Example 0: one\nExample 1: two" + assert prompt == "Example: one\nExample: two\nFinal" + # Same for conditionals tpl = """ - % if is_true: + {% if is_true %} true - % endif + {% endif -%} + + final """ - assert text.render(tpl, is_true=True) == "true" - assert text.render(tpl, is_true=False) == "" + assert text.render(tpl, is_true=True) == "true\nfinal" + assert text.render(tpl, is_true=False) == "final" def test_prompt_basic(): @text.prompt def test_tpl(variable): - """${variable} test""" + """{{variable}} test""" with pytest.raises(TypeError): test_tpl(v="test") @@ -100,7 +109,7 @@ def test_single_quote_tpl(variable): def test_prompt_kwargs(): @text.prompt def test_kwarg_tpl(var, other_var="other"): - """${var} and ${other_var}""" + """{{var}} and {{other_var}}""" p = test_kwarg_tpl("test") assert p == "test and other" From 38840a4e5273168f7f5b1fb8d2a75dc7c94549e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 12 Apr 2023 22:59:54 +0200 Subject: [PATCH 047/734] Refactor the OpenAI connector --- outlines/models/__init__.py | 2 + outlines/models/openai.py | 158 +++++++++++++++++++++--------------- tests/test_model.py | 24 ------ 3 files changed, 93 insertions(+), 91 deletions(-) create mode 100644 outlines/models/__init__.py delete mode 100644 tests/test_model.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py new file mode 100644 index 00000000..f965bae0 --- /dev/null +++ b/outlines/models/__init__.py @@ -0,0 +1,2 @@ +from .hf_transformers import HuggingFaceCompletion +from .openai import OpenAICompletion diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 134f81bb..a0217009 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,77 +1,101 @@ +import functools import os -from typing import List, Optional +from typing import Callable, List, Optional, Tuple -try: - import openai - from openai import error -except ImportError: - raise ImportError("You need to install `openai` to run OpenAI's language models.") +def OpenAICompletion( + model_name: str, + stop_at: Optional[List[str]] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, +) -> Callable: + """Create a function that will call the OpenAI API. + + You should have the `openai` package installed. Available models are listed + in the `OpenAI documentation `_. -class OpenAI: - """Represents any of OpenAI's language models + Parameters + ---------- + model_name: str + The name of the model as listed in the OpenAI documentation. + stop_at + A list of tokens which, when found, stop the generation. + max_tokens + The maximum number of tokens to generate. 
+ temperature + Value used to module the next token probabilities. - You should have the `openai` package installed, and store you OpenAI key in - the `OPENAI_API_KEY` environment variable. + Returns + ------- + A function that will call OpenAI's API with the given parameters when passed + a prompt. """ + import openai - def __init__( - self, - model: str, - stops_at: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, - ): - """Initialize the OpenAI model.""" + try: + os.environ["OPENAI_API_KEY"] + except KeyError: + raise OSError( + "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " + "OpenAI's APIs. Please make sure it is set before re-running your model." + ) + parameters = validate_parameters(stop_at, max_tokens, temperature) + + def call(prompt: str) -> str: try: - self.openai_api_key = os.environ["OPENAI_API_KEY"] - except KeyError: - raise OSError( - "Could not find the `OPENAI_API_KEY` environment variable. Please make sure it is set to your OpenAI key before re-running your model." - ) - - available_models = openai.Model.list() - available_model_names = [model["id"] for model in available_models["data"]] - if model not in available_model_names: - raise OSError(f"{model} is not a valid OpenAI model name.") - self.model = model - - if stops_at is not None and len(stops_at) > 4: - raise Exception("OpenAI's API does not accept more than 4 stop sequences.") - self.stops_at = stops_at - - if max_tokens is None: - max_tokens = 216 - self.max_tokens = max_tokens - - if temperature is None: - temperature = 1.0 - self.temperature = temperature - - def __call__(self, prompt: str) -> str: - try: - resp = openai.Completion.create( - model=self.model, - prompt=prompt, - max_tokens=self.max_tokens, - stop=self.stops_at, - temperature=self.temperature, - ) - except error.APIConnectionError as e: - raise OSError(f"Open API failed to connect: {e}") - except error.AuthenticationError as e: - raise OSError( - f"Open API request not authorized: {e}. Check that the token provided is valid." - ) - except error.PermissionError as e: - raise OSError(f"Open API request was not permitted: {e}") - except error.RateLimitError as e: - raise OSError( - f"Open API requests exceeded the rate limit: {e}. Wait before re-running your program." 
- ) - except error.Timeout as e: - raise OSError(f"Open API request timed out: {e}") - - return resp["choices"][0]["text"] + result = call_completion_api(model_name, prompt, *parameters) + return result + except ( + openai.error.RateLimitError, + openai.error.Timeout, + openai.error.TryAgain, + openai.error.APIConnectionError, + openai.error.ServiceUnavailableError, + ) as e: + raise OSError(f"Could not connect to the OpenAI API: {e}") + except ( + openai.error.AuthenticationError, + openai.error.PermissionError, + openai.error.InvalidRequestError, + ) as e: + raise e + + return call + + +@functools.lru_cache +def call_completion_api( + model: str, + prompt: str, + stop_sequences: Tuple[str], + max_tokens: int, + temperature: float, +): + import openai + + response = openai.Completion.create( + engine=model, + prompt=prompt, + temperature=temperature, + max_tokens=max_tokens, + stop=stop_sequences, + ) + + return response["choices"][0]["text"] + + +def validate_parameters( + stop_at, max_tokens, temperature +) -> Tuple[Tuple[str], int, float]: + if stop_at is not None and len(stop_at) > 4: + raise TypeError("OpenAI's API does not accept more than 4 stop sequences.") + elif stop_at is not None: + stop_at = tuple(stop_at) + if max_tokens is None: + max_tokens = 216 + if temperature is None: + temperature = 1.0 + + return stop_at, max_tokens, temperature diff --git a/tests/test_model.py b/tests/test_model.py deleted file mode 100644 index 85f6eda1..00000000 --- a/tests/test_model.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -import outlines.text as text - - -def test_model_wrong_provide(): - with pytest.raises(NameError, match="not available"): - - @text.completion("aa/model_name") - def test_function(): - """""" - - -@pytest.mark.skip -def test_model(): - @text.completion("openai/text-davinci-001", stops_at=["."]) - def test_function(question, type="bad"): - """You're a witty and sarcastic AI. - - Tell me a ${type} ${question}. - Joke: - """ - - answer, prompt = test_function("joke", type="good") From 2654f72245166d4f69a7861f30a748b3145ca9a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Apr 2023 14:53:28 +0200 Subject: [PATCH 048/734] Add a model router The logic in the `outlines.text.completion` function is obscured by the necessity to parse model paths and initialize models. We thus create a function that returns the model that corresponds to the input path if such model exists. --- examples/meta_prompting.py | 8 +++--- outlines/models/routers.py | 52 ++++++++++++++++++++++++++++++++++++ outlines/text.py | 25 +++++++---------- tests/models/test_routers.py | 25 +++++++++++++++++ 4 files changed, 90 insertions(+), 20 deletions(-) create mode 100644 outlines/models/routers.py create mode 100644 tests/models/test_routers.py diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 270419d9..e3e83b9b 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -27,14 +27,14 @@ def solve(question): def fill_in_the_blanks(question, model_name: str): - @text.completion(model_name, stops_at=["."]) + @text.completion(model_name, stop_at=["."]) def determine_goal(question): """{{question}} In order to solve this problem, we will analyze each of the options and determine """ - @text.completion(model_name, stops_at=["."]) + @text.completion(model_name, stop_at=["."]) def solve(memory): """{{memory}}. 
Let's begin.""" @@ -45,7 +45,7 @@ def solve(memory): def ask_an_expert(question, model_name: str): - @text.completion(model_name, stops_at=['"']) + @text.completion(model_name, stop_at=['"']) def find_expert(question): """ {{question}} @@ -79,7 +79,7 @@ def get_answer(question, expert, memory): def ask_an_expert_simple(question, model_name: str): - @text.completion(model_name, stops_at=["\n", "."]) + @text.completion(model_name, stop_at=["\n", "."]) def find_expert(question): """ Q: {{question}} diff --git a/outlines/models/routers.py b/outlines/models/routers.py new file mode 100644 index 00000000..dd853f5e --- /dev/null +++ b/outlines/models/routers.py @@ -0,0 +1,52 @@ +"""Route model names to their corresponding implementation.""" +import functools +from typing import Callable, Dict, Tuple + +import outlines.models as models + + +def language_completion(model_path: str) -> Callable: + """Return the model and model name corresponding to the model path. + + Note + ---- + + We return both the model builder and the model name instead of partially + applying the model name to the model builder + + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + + Returns + ------- + The model builder with bound model name. + + """ + + registry: Dict[str, Callable] = { + "openai": models.OpenAICompletion, + "hf": models.HuggingFaceCompletion, + } + + provider, model_name = parse_model_path(model_path) + + try: + model = registry[provider] + except KeyError: + raise ValueError(f"The model provider {provider} is not available.") + + return functools.partial(model, model_name) + + +def parse_model_path(model_path: str) -> Tuple[str, str]: + """Parse a model path in the form 'provider/model_name'""" + + if "/" not in model_path: + raise ValueError("Model names must be in the form 'provider_name/model_name'") + + provider_name = model_path.split("/")[0] + model_name = model_path[len(provider_name) + 1 :] + + return provider_name, model_name diff --git a/outlines/text.py b/outlines/text.py index c1fc2000..2500fab1 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -3,6 +3,8 @@ from jinja2 import Template +import outlines.models.routers as routers + def render(template: str, **values: Optional[Dict[str, Any]]) -> str: r"""Parse a Jinaj2 template and translate it into an Outlines graph. @@ -135,9 +137,9 @@ def wrapper(*args: Optional[List[str]], **kwargs: Optional[Dict[str, str]]) -> s def completion( - name: str, + model_path: str, *, - stops_at: Optional[List[str]] = None, + stop_at: Optional[List[str]] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, ) -> Callable: @@ -185,7 +187,9 @@ def completion( Parameters ---------- - stops_at + model_path + A string of the form "model_provider/model_name" + stop_at A list of tokens which, when found, stop the generation. max_tokens The maximum number of tokens to generate. @@ -193,19 +197,8 @@ def completion( Value used to module the next token probabilities. 
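
    Returns
    -------
    (Editor's addition, summarizing the decorator's behavior as documented
    earlier in this series.) A decorator that turns a prompt-template function
    into a function returning a tuple: the model's answer, and the rendered
    prompt concatenated with that answer.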
""" - provider_name = name.split("/")[0] - model_name = name[len(provider_name) + 1 :] - - if provider_name == "openai": - from outlines.text.models.openai import OpenAI - - llm = OpenAI(model_name, stops_at, max_tokens, temperature) # type:ignore - elif provider_name == "hf": - from outlines.text.models.hugging_face import HFCausalLM - - llm = HFCausalLM(model_name, max_tokens, temperature) # type:ignore - else: - raise NameError(f"The model provider {provider_name} is not available.") + llm_builder = routers.language_completion(model_path) + llm = llm_builder(stop_at=stop_at, max_tokens=max_tokens, temperature=temperature) def decorator(fn: Callable): prompt_fn = prompt(fn) diff --git a/tests/models/test_routers.py b/tests/models/test_routers.py new file mode 100644 index 00000000..601c0a2f --- /dev/null +++ b/tests/models/test_routers.py @@ -0,0 +1,25 @@ +import pytest + +import outlines.models as models +import outlines.models.routers as routers + + +def test_language_model_invalid_provider(): + with pytest.raises(ValueError, match="model provider"): + routers.language_completion("xx/model_name") + + +def test_language_model_router(): + dummy_model_name = "model_name" + llm_builder = routers.language_completion(f"openai/{dummy_model_name}") + assert llm_builder.func == models.OpenAICompletion + assert llm_builder.args == (dummy_model_name,) + + llm_builder = routers.language_completion(f"hf/{dummy_model_name}") + assert llm_builder.func == models.HuggingFaceCompletion + assert llm_builder.args == (dummy_model_name,) + + +def test_invalid_model_path(): + with pytest.raises(ValueError, match="must be in the form"): + routers.parse_model_path("hf") From 3c03b64ee1db61e93736380a74fe3b9612483d21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Apr 2023 14:54:53 +0200 Subject: [PATCH 049/734] Ignore missing imports in mypy --- .pre-commit-config.yaml | 1 + pyproject.toml | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 831c44e9..6e9e526f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,3 +28,4 @@ repos: rev: v1.1.1 hooks: - id: mypy + args: [--allow-redefinition] diff --git a/pyproject.toml b/pyproject.toml index 9c936d8e..9916faef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,3 +45,14 @@ filterwarnings = [ "error", "ignore::FutureWarning:transformers.*" ] + +[[tool.mypy.overrides]] +module = [ + "diffusers", + "jinja2", + "openai", + "pytest", + "torch", + "transformers", +] +ignore_missing_imports = true From 1d330e2a2b92281218209b7ba62281feabf9a3a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Apr 2023 15:09:44 +0200 Subject: [PATCH 050/734] Refactor the HuggingFace `transformers` connector --- outlines/models/hf_transformers.py | 145 +++++++++++++---------------- 1 file changed, 63 insertions(+), 82 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 048ebe5c..a15c315d 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,94 +1,75 @@ -from typing import Optional +import functools +from typing import Callable, Optional -try: - import torch - from transformers import AutoModelForCausalLM, AutoTokenizer -except ImportError: - raise ImportError( - "You need to install `transformers` and `torch` to run HuggingFace's Causal LM models." - ) - -class HFCausalLM: - """Represent any of HuggingFace's causal language model implementations. 
+def HuggingFaceCompletion( + model_name: str, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, +) -> Callable: + """Create a function that will call the `generate` method of a `transformers` model. You should have the `torch` and `transformers` packages installed. First execution may take a while since the pre-trained weights will be downloaded. + Available models are listed on `HuggingFace's model page `_. - Available models are listed on https://huggingface.co/models - - Example - ------ + Note + ---- - >> from outlines.text.models import HFCausalLM - >> from outlines.text import string - >> - >> gpt2 = HFCausalLM("gpt2") - >> in = string() - >> out = gpt2(in) + To my knowledge `tranformers` does not simply allow to stop the generation + after a given sequence has been generated. We will need to implement this + manually for this integration to have the same features as `OpenAICompletion`. - Attributes + Parameters ---------- - model_name - The model string identifier in the `transformers` library. + model_name: str + The name of the model as listed on HuggingFace's models page. + max_tokens + The maximum number of tokens to generate. + temperature + Value used to module the next token probabilities. + + Returns + ------- + A function that will generate tokens from the model when passed a prompt. """ + if max_tokens is None: + max_tokens = 216 + + if temperature is None: + temperature = 1.0 + + def call(prompt: str) -> str: + return call_model_generate_method(model_name, prompt, max_tokens, temperature) + + return call + + +@functools.lru_cache +def call_model_generate_method( + model_name: str, prompt: str, max_tokens: int, temperature: float +) -> str: + import torch + from transformers import AutoModelForCausalLM, AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained(model_name) + model = AutoModelForCausalLM.from_pretrained(model_name) + + prompt_tokens = tokenizer(prompt, return_tensors="pt") + + if torch.cuda.is_available(): + model = model.to("cuda") + prompt_tokens = prompt_tokens.to("cuda") + + returned_tokens = model.generate( + **prompt_tokens, + do_sample=True, + temperature=temperature, + max_new_tokens=max_tokens, + pad_token_id=tokenizer.eos_token_id, + ) + new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] + new_tokens = new_tokens.squeeze() - def __init__( - self, - model: str, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, - ): - """Instantiate the model `Op`. - - Parameters - ---------- - model - The model id of a model hosted inside a model repo on huggingface.co - - """ - if max_tokens is None: - max_tokens = 216 - self.max_tokens = max_tokens - - if temperature is None: - temperature = 1.0 - self.temperature = temperature - - self.model_name = model - - def __call__(self, prompt: str) -> str: - """Sample new tokens give the tokenized prompt. - - Since HuggingFace's `generate` method returns the prompt along with the - generated token we need to truncate the returned array of tokens. - - Parameters - ---------- - prompt_tokens - A dictionary that contains the ids of the tokens contained in the input - prompt and the input mask. This is the default output of HuggingFace's - tokenizers. 
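
# Editor's aside (illustrative, not part of the patch): `generate` returns the
# prompt followed by the completion, so both the old and new implementations
# slice the prompt back off. A minimal sketch of that indexing; note that the
# `shape[1] + 1` used in this file skips one extra position past the prompt:
import torch

prompt_ids = torch.tensor([[11, 12, 13]])            # 3 prompt tokens
returned = torch.tensor([[11, 12, 13, 40, 41, 42]])  # prompt + 3 new tokens
print(returned[:, prompt_ids.shape[1] :])      # tensor([[40, 41, 42]])
print(returned[:, prompt_ids.shape[1] + 1 :])  # tensor([[41, 42]])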
- - """ - tokenizer = AutoTokenizer.from_pretrained(self.model_name) - model = AutoModelForCausalLM.from_pretrained(self.model_name) - - prompt_tokens = tokenizer(prompt, return_tensors="pt") - - if torch.cuda.is_available(): - model = model.to("cuda") - prompt_tokens = prompt_tokens.to("cuda") - - returned_tokens = model.generate( - **prompt_tokens, - do_sample=True, - temperature=self.temperature, - max_new_tokens=self.max_tokens, - pad_token_id=tokenizer.eos_token_id, - ) - new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] - new_tokens = new_tokens.squeeze() - - return tokenizer.decode(new_tokens, skip_special_tokens=True) + return tokenizer.decode(new_tokens, skip_special_tokens=True) From 241ed3e7650d1fe0fec9bd346e529d293e46ad27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Apr 2023 15:45:44 +0200 Subject: [PATCH 051/734] Refactor the HuggingFace `diffuser` connector --- outlines/image.py | 22 ++++++++++----------- outlines/models/__init__.py | 1 + outlines/models/hf_diffusers.py | 33 ++++++++++++++++++-------------- outlines/models/routers.py | 34 +++++++++++++++++++++++++++------ pyproject.toml | 1 + 5 files changed, 59 insertions(+), 32 deletions(-) diff --git a/outlines/image.py b/outlines/image.py index a8495713..ea7c2f75 100644 --- a/outlines/image.py +++ b/outlines/image.py @@ -1,22 +1,20 @@ -from outlines.text import prompt +from typing import Any, Callable, Dict, List +from PIL.Image import Image as PILImage -def generation(name: str): - """Decorator that allows to simplify calls to image generation models.""" - provider_name = name.split("/")[0] - model_name = name[len(provider_name) + 1 :] +import outlines.models.routers as routers +from outlines.text import prompt - if provider_name == "hf": - from outlines.image.models.hugging_face import HFDiffuser - generative_model = HFDiffuser(model_name) # type:ignore - else: - raise NameError(f"The model provider {provider_name} is not available.") +def generation(model_path: str) -> Callable: + """Decorator that allows to simplify calls to image generation models.""" + generative_model_builder = routers.image_generation(model_path) + generative_model = generative_model_builder() - def decorator(fn): + def decorator(fn: Callable): prompt_fn = prompt(fn) - def wrapper(*args, **kwargs): + def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> PILImage: """Call the Diffuser with the rendered template. Returns diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index f965bae0..9b019bb9 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -1,2 +1,3 @@ +from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 5c349d65..39c875dc 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -1,21 +1,26 @@ -try: - from diffusers import StableDiffusionPipeline -except ImportError: - raise ImportError( - "You need to install `torch` and `diffusers` to run the StableDiffusion model." - ) +import functools +from PIL.Image import Image as PILImage -class HFDiffuser: - """A `StableDiffusion` distributed random image.""" - def __init__(self, model_name: str): - self.model_name = model_name +def HuggingFaceDiffuser(model_name: str) -> PILImage: + """Create a function that will call a stable diffusion pipeline. 
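
# Editor's aside — intended usage of the builder defined here (illustrative;
# the model name is an example, and the weights are downloaded on first run):
#
#     sd = HuggingFaceDiffuser("runwayml/stable-diffusion-v1-5")
#     image = sd("A watercolor painting of a lighthouse")  # a PIL image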
- def __call__(self, prompt: str) -> str: - """Use HuggingFace's `StableDiffusion` pipeline to sample a new image.""" - pipe = StableDiffusionPipeline.from_pretrained(self.model_name) - pipe = pipe.to("cuda") + Parameters + ---------- + model_name: str + The name of the model as listed on HuggingFace's models page. + + """ + + @functools.lru_cache + def call(prompt: str) -> str: + import torch + from diffusers import StableDiffusionPipeline + + pipe = StableDiffusionPipeline.from_pretrained(model_name) + if torch.cuda.is_available(): + pipe = pipe.to("cuda") image = pipe(prompt).images[0] return image diff --git a/outlines/models/routers.py b/outlines/models/routers.py index dd853f5e..92c0e2ea 100644 --- a/outlines/models/routers.py +++ b/outlines/models/routers.py @@ -8,12 +8,6 @@ def language_completion(model_path: str) -> Callable: """Return the model and model name corresponding to the model path. - Note - ---- - - We return both the model builder and the model name instead of partially - applying the model name to the model builder - Parameters ---------- model_path @@ -40,6 +34,34 @@ def language_completion(model_path: str) -> Callable: return functools.partial(model, model_name) +def image_generation(model_path: str) -> Callable: + """Return the model and model name corresponding to the model path. + + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + + Returns + ------- + The model builder with bound model name. + + """ + + registry: Dict[str, Callable] = { + "hf": models.HuggingFaceDiffuser, + } + + provider, model_name = parse_model_path(model_path) + + try: + model = registry[provider] + except KeyError: + raise ValueError(f"The model provider {provider} is not available.") + + return functools.partial(model, model_name) + + def parse_model_path(model_path: str) -> Tuple[str, str]: """Parse a model path in the form 'provider/model_name'""" diff --git a/pyproject.toml b/pyproject.toml index 9916faef..ef91c8e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ module = [ "diffusers", "jinja2", "openai", + "PIL.Image", "pytest", "torch", "transformers", From cadbd5221eafec3e209c47f30ece7d1d9e89cab8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Apr 2023 17:38:52 +0200 Subject: [PATCH 052/734] Add Dust math generation code example --- examples/dust/math-generate-code.py | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 examples/dust/math-generate-code.py diff --git a/examples/dust/math-generate-code.py b/examples/dust/math-generate-code.py new file mode 100644 index 00000000..afe3d040 --- /dev/null +++ b/examples/dust/math-generate-code.py @@ -0,0 +1,37 @@ +"""Example from https://dust.tt/spolu/a/d12ac33169""" +import outlines.text as text + +examples = [ + {"question": "What is 37593 * 67?", "code": "37593 * 67"}, + { + "question": "Janet's ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?", + "code": "(16-3-4)*2", + }, + { + "question": "A robe takes 2 bolts of blue fiber and half that much white fiber. 
How many bolts in total does it take?",
        "code": " 2 + 2/2",
    },
]


@text.completion("openai/text-davinci-003", stop_at=["QUESTION"])
def answer_with_code(question, examples):
    """
    {% for example in examples %}
    QUESTION: {{example.question}}
    CODE: {{example.code}}

    {% endfor %}
    QUESTION: {{question}}
    CODE:"""


def execute_code(code):
    result = eval(code)
    return result


question = "Carla is downloading a 200 GB file. She can download 2 GB/minute, but 40% of the way through the download, the download fails. Then Carla has to restart the download from the beginning. How long did it take her to download the file in minutes?"
result_code, _ = answer_with_code(question, examples)
result = execute_code(result_code)
print(result)

From 99adac47e82774515a72c8b05dff02d5e00f5bf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Fri, 14 Apr 2023 11:55:13 +0200
Subject: [PATCH 053/734] Cache model calls using `joblib`

---
 outlines/cache.py                  |  51 +++++++++++++
 outlines/models/hf_diffusers.py    |  28 ++++---
 outlines/models/hf_transformers.py |   8 +-
 outlines/models/openai.py          |   8 +-
 pyproject.toml                     |   2 +
 tests/test_cache.py                | 115 +++++++++++++++++++++++++++++
 6 files changed, 198 insertions(+), 14 deletions(-)
 create mode 100644 outlines/cache.py
 create mode 100644 tests/test_cache.py

diff --git a/outlines/cache.py b/outlines/cache.py
new file mode 100644
index 00000000..f90127fa
--- /dev/null
+++ b/outlines/cache.py
@@ -0,0 +1,51 @@
+import os
+
+import joblib
+
+home_dir = os.path.expanduser("~")
+cache_dir = os.environ.get("OUTLINES_CACHE_DIR", f"{home_dir}/.cache/outlines")
+memory = joblib.Memory(cache_dir, verbose=0)  # type: ignore[attr-defined]
+
+
+def get():
+    """Get the context object that contains previously-computed return values.
+
+    The cache is used to avoid unnecessary computations and API calls, which can
+    be long and expensive for large models.
+
+    The cache directory defaults to `HOMEDIR/.cache/outlines`, but this choice
+    can be overridden by the user by setting the value of the `OUTLINES_CACHE_DIR`
+    environment variable.
+
+    """
+    return memory
+
+
+def disable():
+    """Disable the cache for this session.
+
+    Generative models output different results each time they are called when
+    sampling. This can be a desirable property for some workflows, in which case
+    one can call `outlines.cache.disable` to disable the cache for the session.
+
+    This function does not delete the cache, call `outlines.cache.clear`
+    instead. It also does not overwrite the cache with the values returned
+    during the session.
+
+    Example
+    -------
+
+    `outlines.cache.disable` should be called right after importing outlines:
+
+    >>> import outlines.cache as cache
+    >>> cache.disable()
+
+    """
+    global memory
+    memory = joblib.Memory(None)
+
+
+def clear():
+    """Erase the cache completely."""
+    cache = get()
+    cache.clear()

diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py
index 39c875dc..050e559f 100644
--- a/outlines/models/hf_diffusers.py
+++ b/outlines/models/hf_diffusers.py
@@ -1,7 +1,10 @@
-import functools
-
+"""Integration with HuggingFace's `diffusers` library."""
 from PIL.Image import Image as PILImage

+import outlines.cache as cache
+
+memory = cache.get()
+

 def HuggingFaceDiffuser(model_name: str) -> PILImage:
     """Create a function that will call a stable diffusion pipeline.
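
# Editor's aside (not part of the patch): a self-contained sketch of the
# joblib memoization pattern that `outlines/cache.py` above sets up; the
# cache location here is a throwaway directory:
import tempfile

import joblib

memory = joblib.Memory(tempfile.mkdtemp(), verbose=0)


@memory.cache
def fake_model_call(prompt: str) -> str:
    print("cache miss, computing...")  # only runs for unseen arguments
    return prompt.upper()


fake_model_call("hi")  # prints "cache miss, computing..."
fake_model_call("hi")  # served from the on-disk cache, no print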
@@ -13,14 +16,19 @@ def HuggingFaceDiffuser(model_name: str) -> PILImage: """ - @functools.lru_cache def call(prompt: str) -> str: - import torch - from diffusers import StableDiffusionPipeline + return call_stable_diffusion_pipeline(model_name, prompt) + + +@memory.cache +def call_stable_diffusion_pipeline(model_name: str, prompt: str) -> PILImage: + """Build and call the Stable Diffusion pipeline.""" + import torch + from diffusers import StableDiffusionPipeline - pipe = StableDiffusionPipeline.from_pretrained(model_name) - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - image = pipe(prompt).images[0] + pipe = StableDiffusionPipeline.from_pretrained(model_name) + if torch.cuda.is_available(): + pipe = pipe.to("cuda") + image = pipe(prompt).images[0] - return image + return image diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index a15c315d..bbcaa1aa 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,6 +1,10 @@ -import functools +"""Integration with HuggingFace's `transformers` library.""" from typing import Callable, Optional +import outlines.cache as cache + +memory = cache.get() + def HuggingFaceCompletion( model_name: str, @@ -46,7 +50,7 @@ def call(prompt: str) -> str: return call -@functools.lru_cache +@memory.cache def call_model_generate_method( model_name: str, prompt: str, max_tokens: int, temperature: float ) -> str: diff --git a/outlines/models/openai.py b/outlines/models/openai.py index a0217009..0877b939 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,7 +1,11 @@ -import functools +"""Integration with OpenAI's API.""" import os from typing import Callable, List, Optional, Tuple +import outlines.cache as cache + +memory = cache.get() + def OpenAICompletion( model_name: str, @@ -65,7 +69,7 @@ def call(prompt: str) -> str: return call -@functools.lru_cache +@memory.cache def call_completion_api( model: str, prompt: str, diff --git a/pyproject.toml b/pyproject.toml index ef91c8e3..2b2e43fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ classifiers = [ ] dependencies = [ "jinja2", + "joblib", "pillow", "rich" ] @@ -50,6 +51,7 @@ filterwarnings = [ module = [ "diffusers", "jinja2", + "joblib", "openai", "PIL.Image", "pytest", diff --git a/tests/test_cache.py b/tests/test_cache.py new file mode 100644 index 00000000..cde986c1 --- /dev/null +++ b/tests/test_cache.py @@ -0,0 +1,115 @@ +import os +import shutil + +import joblib +import pytest + + +@pytest.fixture +def refresh_environment(): + """Refresh the test environment. + + This deletes any reference to `outlines` in the modules dictionary and unsets the + `OUTLINES_CACHE_DIR` environment variable if set. This is necessary because we + are using a module variable to hold the cache. 
+
+    """
+    import sys
+
+    for key in list(sys.modules.keys()):
+        if "outlines" in key:
+            del sys.modules[key]
+
+    try:
+        del os.environ["OUTLINES_CACHE_DIR"]
+    except KeyError:
+        pass
+
+
+@pytest.fixture
+def test_cache(refresh_environment):
+    """Initialize a temporary cache and delete it after the test has run."""
+    os.environ["OUTLINES_CACHE_DIR"] = "~/.cache/outlines_tests"
+    import outlines
+
+    memory = outlines.cache.get()
+    assert memory.location == "~/.cache/outlines_tests"
+
+    yield memory
+
+    memory.clear()
+    home_dir = os.path.expanduser("~")
+    shutil.rmtree(f"{home_dir}/.cache/outlines_tests")
+
+
+def test_get_cache(test_cache):
+    import outlines
+
+    memory = outlines.cache.get()
+    assert isinstance(memory, joblib.Memory)
+
+    # If the cache is enabled then the size
+    # of `store` should not increase the
+    # second time `f` is called.
+    store = list()
+
+    @memory.cache
+    def f(x):
+        store.append(1)
+        return x
+
+    f(1)
+    store_size = len(store)
+
+    f(1)
+    assert len(store) == store_size
+
+    f(2)
+    assert len(store) == store_size + 1
+
+
+def test_disable_cache(test_cache):
+    """Make sure that we can disable the cache."""
+    import outlines
+
+    outlines.cache.disable()
+    memory = outlines.cache.get()
+
+    # If the cache is disabled then the size
+    # of `store` should increase every time
+    # `f` is called.
+    store = list()
+
+    @memory.cache
+    def f(x):
+        store.append(1)
+        return x
+
+    f(1)
+    store_size = len(store)
+    f(1)
+    assert len(store) == store_size + 1
+
+
+def test_clear_cache(test_cache):
+    """Make sure that we can clear the cache."""
+
+    store = list()
+
+    @test_cache.cache
+    def f(x):
+        store.append(1)
+        return x
+
+    # The size of `store` does not increase since
+    # `f` is cached after the first run.
+    f(1)
+    store_size = len(store)
+    f(1)
+    assert len(store) == store_size
+
+    # The size of `store` should increase if we call `f`
+    # after clearing the cache.
+    test_cache.clear()
+    f(1)
+    assert len(store) == store_size + 1
From 0b0a90fb0645268a02fef5fbfa33814a26ee63cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Sat, 15 Apr 2023 21:39:28 +0200
Subject: [PATCH 054/734] Extract OpenAI responses outside of API calling function

---
 outlines/models/__init__.py  |  7 +++++++
 outlines/models/openai.py    | 16 ++++++++--------
 outlines/models/routers.py   |  2 +-
 outlines/text.py             | 14 +++++++-------
 tests/models/test_routers.py | 10 +++++-----
 5 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py
index 9b019bb9..686a67e4 100644
--- a/outlines/models/__init__.py
+++ b/outlines/models/__init__.py
@@ -1,3 +1,10 @@
+"""Module that contains all the models integrated in outlines.
+
+We group the models in submodules by provider instead of theme (completion, chat
+completion, diffusers, etc.) and use routing functions everywhere else in the
+codebase.
+
+"""
 from .hf_diffusers import HuggingFaceDiffuser
 from .hf_transformers import HuggingFaceCompletion
 from .openai import OpenAICompletion
diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index 0877b939..18fbeaa1 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -13,7 +13,7 @@ def OpenAICompletion(
     max_tokens: Optional[int] = None,
     temperature: Optional[float] = None,
 ) -> Callable:
-    """Create a function that will call the OpenAI API.
+    """Create a function that will call the completion OpenAI API.
 
     You should have the `openai` package installed. Available models are listed
    in the `OpenAI documentation `_. 
@@ -31,8 +31,8 @@
 
     Returns
     -------
-    A function that will call OpenAI's API with the given parameters when passed
-    a prompt.
+    A function that will call OpenAI's completion API with the given parameters
+    when passed a prompt.
 
     """
     import openai
@@ -45,12 +45,12 @@
             "OpenAI's APIs. Please make sure it is set before re-running your model."
         )
 
-    parameters = validate_parameters(stop_at, max_tokens, temperature)
+    parameters = validate_completion_parameters(stop_at, max_tokens, temperature)
 
     def call(prompt: str) -> str:
         try:
-            result = call_completion_api(model_name, prompt, *parameters)
-            return result
+            response = call_completion_api(model_name, prompt, *parameters)
+            return response["choices"][0]["text"]
         except (
             openai.error.RateLimitError,
             openai.error.Timeout,
@@ -87,10 +87,10 @@ def call_completion_api(
         stop=stop_sequences,
     )
 
-    return response["choices"][0]["text"]
+    return response
 
 
-def validate_parameters(
+def validate_completion_parameters(
     stop_at, max_tokens, temperature
 ) -> Tuple[Tuple[str], int, float]:
     if stop_at is not None and len(stop_at) > 4:
diff --git a/outlines/models/routers.py b/outlines/models/routers.py
index 92c0e2ea..43d4db43 100644
--- a/outlines/models/routers.py
+++ b/outlines/models/routers.py
@@ -5,7 +5,7 @@
 import outlines.models as models
 
 
-def language_completion(model_path: str) -> Callable:
+def text_completion(model_path: str) -> Callable:
     """Return the model and model name corresponding to the model path.
 
     Parameters
diff --git a/outlines/text.py b/outlines/text.py
index 2500fab1..82265dc4 100644
--- a/outlines/text.py
+++ b/outlines/text.py
@@ -149,10 +149,10 @@ def completion(
     and the workflow typically looks like:
 
     >>> import outlines
-    >>> from outlines.models.openai import OpenAI
+    >>> from outlines.models import OpenAICompletion
     >>>
-    >>> llm = OpenAI("davinci")
-    >>> tpl = "I have a ${question}"
+    >>> llm = OpenAICompletion("davinci")
+    >>> tpl = "I have a {{question}}"
     >>> prompt = outlines.render(tpl, question="How are you?")
     >>> answer = llm(prompt)
 
@@ -171,11 +171,11 @@ def completion(
 
     The previous example is equivalent to the following:
 
-    >>> import outlines
+    >>> import outlines.text as text
     >>>
-    >>> @outlines.text.model("openai/davinci")
+    >>> @text.completion("openai/davinci")
     ... def answer(question):
-    ...     "I have a ${question}"
+    ...     "I have a {{question}}"
     ...
     >>> answer, _ = answer("How are you?")
 
@@ -197,7 +197,7 @@ def completion(
         Value used to modulate the next token probabilities. 
""" - llm_builder = routers.language_completion(model_path) + llm_builder = routers.text_completion(model_path) llm = llm_builder(stop_at=stop_at, max_tokens=max_tokens, temperature=temperature) def decorator(fn: Callable): diff --git a/tests/models/test_routers.py b/tests/models/test_routers.py index 601c0a2f..94572be8 100644 --- a/tests/models/test_routers.py +++ b/tests/models/test_routers.py @@ -4,18 +4,18 @@ import outlines.models.routers as routers -def test_language_model_invalid_provider(): +def test_text_model_invalid_provider(): with pytest.raises(ValueError, match="model provider"): - routers.language_completion("xx/model_name") + routers.text_completion("xx/model_name") -def test_language_model_router(): +def test_text_model_router(): dummy_model_name = "model_name" - llm_builder = routers.language_completion(f"openai/{dummy_model_name}") + llm_builder = routers.text_completion(f"openai/{dummy_model_name}") assert llm_builder.func == models.OpenAICompletion assert llm_builder.args == (dummy_model_name,) - llm_builder = routers.language_completion(f"hf/{dummy_model_name}") + llm_builder = routers.text_completion(f"hf/{dummy_model_name}") assert llm_builder.func == models.HuggingFaceCompletion assert llm_builder.args == (dummy_model_name,) From a50bddfafeebcb2d17bfd95ce892331f095d5cec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 18 Apr 2023 18:09:03 +0200 Subject: [PATCH 055/734] Raise error if prompt uses an undefined attribute Jinja2 does not fail when the user calls for an attribute that does not exist, and replaces the template variable with an empty string. This behavior is error-prone in this context. We thus ask Jinja2 explicity to raise an exception in these cases. --- outlines/text.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/outlines/text.py b/outlines/text.py index 82265dc4..b10b0d7d 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -1,7 +1,7 @@ import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, cast -from jinja2 import Template +from jinja2 import StrictUndefined, Template import outlines.models.routers as routers @@ -90,7 +90,11 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: template = inspect.cleandoc(template) mako_template = Template( - template, trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=False + template, + trim_blocks=True, + lstrip_blocks=True, + keep_trailing_newline=False, + undefined=StrictUndefined, ) return mako_template.render(**values) From 040992a36937a25a57e379aa0ac5aa7fbfebbf17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 15 Apr 2023 22:07:08 +0200 Subject: [PATCH 056/734] Add chat completion --- outlines/models/__init__.py | 2 +- outlines/models/openai.py | 133 ++++++++++++++++++++++++++++++++++- outlines/models/routers.py | 28 ++++++++ outlines/text.py | 82 +++++++++++++++++++++ tests/models/test_routers.py | 13 ++++ tests/test_cache.py | 3 +- 6 files changed, 257 insertions(+), 4 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 686a67e4..960fda72 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -7,4 +7,4 @@ """ from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion -from .openai import OpenAICompletion +from .openai import OpenAIChatCompletion, OpenAICompletion, OpenAITextCompletion diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 18fbeaa1..00607a94 100644 
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -1,13 +1,35 @@
 """Integration with OpenAI's API."""
 import os
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Dict, List, Optional, Tuple
 
 import outlines.cache as cache
 
 memory = cache.get()
 
 
-def OpenAICompletion(
+def OpenAICompletion(model_name: str, *args, **kwargs):
+    """Dispatch the model names to their respective completion API.
+
+    This ensures that chat completion models can also be called as text
+    completion models (with no instruction and no history).
+
+    Parameters
+    ----------
+    model_name
+        The name of the model in OpenAI's API.
+
+    """
+    if "text-" in model_name:
+        return OpenAITextCompletion(model_name, *args, **kwargs)
+    elif "gpt-" in model_name:
+        return OpenAIChatCompletion(model_name, *args, **kwargs)
+    else:
+        raise NameError(
+            f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI."
+        )
+
+
+def OpenAITextCompletion(
     model_name: str,
     stop_at: Optional[List[str]] = None,
     max_tokens: Optional[int] = None,
@@ -90,6 +112,113 @@ def call_completion_api(
     return response
 
 
+def OpenAIChatCompletion(
+    model_name: str,
+    stop_at: Optional[List[str]] = None,
+    max_tokens: Optional[int] = None,
+    temperature: Optional[float] = None,
+) -> Callable:
+    """Create a function that will call the chat completion OpenAI API.
+
+    You should have the `openai` package installed. Available models are listed
+    in the `OpenAI documentation `_.
+
+    Parameters
+    ----------
+    model_name: str
+        The name of the model as listed in the OpenAI documentation.
+    stop_at
+        A list of tokens which, when found, stop the generation.
+    max_tokens
+        The maximum number of tokens to generate.
+    temperature
+        Value used to modulate the next token probabilities.
+
+    Returns
+    -------
+    A function that will call OpenAI's chat completion API with the given
+    parameters when passed a prompt.
+
+    """
+    import openai
+
+    try:
+        os.environ["OPENAI_API_KEY"]
+    except KeyError:
+        raise OSError(
+            "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call "
+            "OpenAI's APIs. Please make sure it is set before re-running your model."
+        )
+
+    parameters = validate_completion_parameters(stop_at, max_tokens, temperature)
+
+    def call(
+        query: str,
+        state: List[Tuple[str, str]] = [],
+    ) -> str:
+        try:
+            messages = create_chat_completion_messages(state)
+            api_response = call_chat_completion_api(model_name, messages, *parameters)
+            response = api_response["choices"][0]["message"]["content"]
+            return response
+
+        except (
+            openai.error.RateLimitError,
+            openai.error.Timeout,
+            openai.error.TryAgain,
+            openai.error.APIConnectionError,
+            openai.error.ServiceUnavailableError,
+        ) as e:
+            raise OSError(f"Could not connect to the OpenAI API: {e}")
+        except (
+            openai.error.AuthenticationError,
+            openai.error.PermissionError,
+            openai.error.InvalidRequestError,
+        ) as e:
+            raise e
+
+    return call
+
+
+@memory.cache
+def call_chat_completion_api(
+    model: str,
+    messages: List[Dict[str, str]],
+    stop_sequences: Tuple[str],
+    max_tokens: int,
+    temperature: float,
+):
+    import openai
+
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens,
+        stop=stop_sequences,
+    )
+
+    return response
+
+
+def create_chat_completion_messages(
+    state: List[Tuple[str, str]] = [],
+) -> List[Dict[str, str]]:
+    """Create chat completion messages in a form compatible with OpenAI's API. 
+ + Setting the `instruction` prompt and the `history` to `None` amounts to + calling the chat completion API as a simple completion API. + + """ + openai_names = {"user": "user", "model": "assistant", "prefix": "system"} + + messages = [] + for author, message in state: + messages.append({"role": openai_names[author], "content": message}) + + return messages + + def validate_completion_parameters( stop_at, max_tokens, temperature ) -> Tuple[Tuple[str], int, float]: diff --git a/outlines/models/routers.py b/outlines/models/routers.py index 43d4db43..9fb118ab 100644 --- a/outlines/models/routers.py +++ b/outlines/models/routers.py @@ -34,6 +34,34 @@ def text_completion(model_path: str) -> Callable: return functools.partial(model, model_name) +def chat_completion(model_path: str) -> Callable: + """Return the model and model name corresponding to the model path. + + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + + Returns + ------- + The model builder with bound model name. + + """ + + registry: Dict[str, Callable] = { + "openai": models.OpenAIChatCompletion, + } + + provider, model_name = parse_model_path(model_path) + + try: + model = registry[provider] + except KeyError: + raise ValueError(f"The model provider {provider} is not available.") + + return functools.partial(model, model_name) + + def image_generation(model_path: str) -> Callable: """Return the model and model name corresponding to the model path. diff --git a/outlines/text.py b/outlines/text.py index b10b0d7d..b8a86096 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -228,3 +228,85 @@ def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: return wrapper return decorator + + +def chat_completion( + model_path: str, + *, + prefix: Optional[str] = None, + stop_at: Optional[List[str]] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, +) -> Tuple[Callable, List[Tuple[str, str]]]: + """Call language models with a chat interaction pattern. + + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + prefix + The prefix used at the beginning of the prompt, called "system" in the + OpenAI API for instance. + stop_at + A list of tokens which, when found, stop the generation. + max_tokens + The maximum number of tokens to generate. + temperature + Value used to module the next token probabilities. + + """ + llm_builder = routers.chat_completion(model_path) + chat_llm = llm_builder( + stop_at=stop_at, max_tokens=max_tokens, temperature=temperature + ) + + init_state = [] + if prefix is not None: + init_state.append(("prefix", prefix)) + + def model( + query: str, + state: List[Tuple[str, str]] = [], + ) -> Tuple[str, List[Tuple[str, str]]]: + """Call the chat completion model. + + We adopt a unifying API for all chat completion models: + + - `instruction` represents the "instructions" passed to the model before + the first user query. Called "system" with OpenAI's ChatGPT, the prompt + that precedes the first `Human:` occurence with Anthropic's Claude for + instance. + - `history` represents the sequence of user queries and model return + values, respectively indexed by "user" and "model". + - `query` represents the current user query. + + Note + ---- + + As Claude's documentation shows, chat APIs are nothing more than an + abstraction over the recursive prompt completion that often happens in + language model workflows. 
It is in fact possible to pass models
+        compatible with `chat_completion` to the `completion` function, which is
+        equivalent to call this API with `system=None` and `history=None`.
+
+        Parameters
+        ----------
+        query
+            The current user query.
+        state
+            The current state of the completion, which represents the history of
+            interactions with the model, a list of tuples that contain the
+            message and its author.
+
+        Returns
+        -------
+        The return value of the model and the state updated with the user query and
+        the model's return value.
+
+        """
+        state.append(("user", query))
+        result = chat_llm(query, state)
+        state.append(("model", result))
+        return result, state
+
+    return model, init_state
diff --git a/tests/models/test_routers.py b/tests/models/test_routers.py
index 94572be8..1d6ecf67 100644
--- a/tests/models/test_routers.py
+++ b/tests/models/test_routers.py
@@ -20,6 +20,19 @@ def test_text_model_router():
     assert llm_builder.args == (dummy_model_name,)
 
 
+def test_text_openai_router():
+    """Test that the router for completion fails when the model name is not
+    prefixed by either `text-` or `gpt-`.
+
+    """
+    dummy_model_name = "model_name"
+    llm_builder = routers.text_completion(f"openai/{dummy_model_name}")
+    assert llm_builder.func == models.OpenAICompletion
+
+    with pytest.raises(NameError, match="not available"):
+        llm_builder(dummy_model_name)
+
+
 def test_invalid_model_path():
     with pytest.raises(ValueError, match="must be in the form"):
         routers.parse_model_path("hf")
diff --git a/tests/test_cache.py b/tests/test_cache.py
index cde986c1..6b4e0651 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -1,9 +1,10 @@
 import os
 import shutil
 
-import joblib
 import pytest
 
+import joblib
+
 
 @pytest.fixture
 def refresh_environment():
From f1e2d381ede9924490023dc58bb4eefa0dab77ed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Tue, 18 Apr 2023 21:58:58 +0200
Subject: [PATCH 057/734] Add BabyAGI example

---
 examples/babyagi.py | 157 ++++++++++++++++++++++++++++++++++
 1 file changed, 157 insertions(+)
 create mode 100644 examples/babyagi.py

diff --git a/examples/babyagi.py b/examples/babyagi.py
new file mode 100644
index 00000000..2d49876e
--- /dev/null
+++ b/examples/babyagi.py
@@ -0,0 +1,157 @@
+"""This example is a simplified translation of https://github.com/yoheinakajima/babyagi"""
+from collections import deque
+from dataclasses import dataclass
+from typing import Callable, List
+
+import outlines.text as text
+
+MODEL = "openai/gpt-3.5-turbo"
+
+
+@dataclass
+class LLMFunction:
+    model_name: str
+    prompt_fn: Callable
+    format_fn: Callable = lambda x: x
+
+    def __call__(self, *args, **kwargs):
+        prompt = self.prompt_fn(*args, **kwargs)
+        model, init_state = text.chat_completion(self.model_name)
+        result, _ = model(prompt, init_state)
+        return self.format_fn(result)
+
+
+#################
+# Perform tasks #
+#################
+
+
+@text.prompt
+def perform_task_ppt(objective: str, task: str):
+    """You are an AI who performs one task based on the following objective: {{objective}}.
+
+    Your task: {{task.task_name}}
+
+    Response:
+    """
+
+
+perform_task = LLMFunction(MODEL, perform_task_ppt)
+
+
+#####################
+# Create a new task #
+#####################
+
+
+@text.prompt
+def create_tasks_ppt(
+    objective: str, previous_task: str, result: str, task_list: List[str]
+):
+    """You are a task creation AI that uses the result of an execution agent to \
+    create new tasks with the following objective: {{objective}}. 
+
+    The last completed task has the result: {{result}}. This result was based on this task \
+    description: {{previous_task}}.
+
+    These are incomplete tasks: {{task_list | join(", ")}}.
+
+    Based on the result, create new tasks to be completed by the AI system that \
+    do not overlap with incomplete tasks. Return the tasks as an array.
+    """
+
+
+def create_tasks_fmt(result):
+    new_tasks = result.split("\n")
+
+    task_list = []
+    for task in new_tasks:
+        parts = task.strip().split(".", 1)
+        if len(parts) == 2:
+            task_list.append(parts[1].strip())
+
+    return task_list
+
+
+create_tasks = LLMFunction(MODEL, create_tasks_ppt, create_tasks_fmt)
+
+
+########################
+# Prioritize new tasks #
+########################
+
+
+@text.prompt
+def prioritize_tasks_ppt(objective: str, task_names: List[str], next_task_id: int):
+    """You are an task prioritization AI tasked with cleaning the formatting of \
+    and reprioritizing the following tasks: {{task_names}}. Consider the ultimate \
+    objective of your team: {{objective}}. Do not remove any tasks. Return the \
+    result as a numbered list starting at {{next_task_id}}, like:
+    #. First task
+    #. Second task
+    """
+
+
+def prioritize_tasks_fmt(result):
+    new_tasks = result.split("\n")
+
+    task_list = deque([])
+    for task in new_tasks:
+        parts = task.strip().split(".", 1)
+        if len(parts) == 2:
+            task_id = parts[0].strip()
+            task_name = parts[1].strip()
+            task_list.append({"task_id": task_id, "task_name": task_name})
+
+    return task_list
+
+
+prioritize_tasks = LLMFunction(MODEL, prioritize_tasks_ppt, prioritize_tasks_fmt)
+
+
+task_id_counter = 1
+objective = "Becoming rich while doing nothing."
+first_task = {
+    "task_id": 1,
+    "task_name": "Find a repeatable, low-maintenance, scalable business.",
+}
+task_list = deque([first_task])
+
+
+def one_cycle(objective, task_list, task_id_counter):
+    """One BabyAGI cycle.
+
+    It consists in executing the highest-priority task, creating some new tasks
+    given the result, and re-prioritizing the tasks.
+
+    Parameters
+    ----------
+    objective
+        The overall objective of the session.
+    task_list
+        The current list of tasks to perform.
+    task_id_counter
+        The current task id.
+
+    """
+
+    task = task_list.popleft()
+    result = perform_task(objective, task)
+    new_tasks = create_tasks(
+        objective, first_task["task_name"], result, [first_task["task_name"]]
+    )
+    for task in new_tasks:
+        task_id_counter += 1
+        task_list.append({"task_id": task_id_counter, "task_name": task})
+
+    prioritized_tasks = prioritize_tasks(
+        objective, [task["task_name"] for task in task_list], task_id_counter
+    )
+
+    return task, result, prioritized_tasks
+
+
+# Let's run it for 5 cycles to see how it works without spending a fortune. 
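+# Each cycle below issues three chat completion requests (perform the task,
+# create follow-up tasks, prioritize them), so a run is not free; thanks to
+# the `joblib` cache added earlier, re-running the script with unchanged
+# prompts should replay the cached responses instead of calling the API again.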
+for _ in range(5):
+    task, result, task_list = one_cycle(objective, task_list, task_id_counter)
+    print(f"-------\n\nTASK:\n\n{task}\n\nRESULT:\n\n {result}\n\n")
From 89566664ccfe47a63f1874cadf97b4e9b0c23e4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 19 Apr 2023 10:18:55 +0200
Subject: [PATCH 058/734] Bump Github Action versions

---
 .github/workflows/tests.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 0e6e68bf..aa9ca806 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -11,18 +11,18 @@ jobs:
     name: Check the code style
     runs-on: ubuntu-latest
     steps:
    - uses: actions/checkout@v3
    - uses: actions/setup-python@v4
      with:
        python-version: 3.9
    - uses: pre-commit/action@v3.0.0

  tests:
    name: Run the tests
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - uses: actions/setup-python@v4
      with:
        python-version: 3.9
    - name: Set up test environment
From 22fdde3e59b57ad71a5e9bb6140bffb5c6de370c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 19 Apr 2023 10:27:16 +0200
Subject: [PATCH 059/734] Update pre-commit versions

---
 .pre-commit-config.yaml | 6 +++---
 tests/test_cache.py     | 3 +--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6e9e526f..c5131d98 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,7 +12,7 @@ repos:
      - id: isort
        args: [--profile, black]
 - repo: https://github.com/asottile/pyupgrade
-    rev: v2.31.1
+    rev: v3.3.1
    hooks:
      - id: pyupgrade
        args: [--py37-plus]
@@ -21,11 +21,11 @@
    hooks:
      - id: flake8
 - repo: https://github.com/psf/black
-    rev: 23.1.0
+    rev: 23.3.0
    hooks:
      - id: black
 - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.1.1
+    rev: v1.2.0
    hooks:
      - id: mypy
        args: [--allow-redefinition]
diff --git a/tests/test_cache.py b/tests/test_cache.py
index 6b4e0651..cde986c1 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -1,9 +1,8 @@
 import os
 import shutil
 
-import pytest
-
 import joblib
+import pytest
 
 
 @pytest.fixture
From 237989cc08bd3d1ff6cf97e942ff0c5069efc381 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 19 Apr 2023 11:46:35 +0200
Subject: [PATCH 060/734] Update the BabyAGI example

---
 examples/babyagi.py | 66 ++++++++++++++++++++++++++++++---------------
 1 file changed, 44 insertions(+), 22 deletions(-)

diff --git a/examples/babyagi.py b/examples/babyagi.py
index 2d49876e..c2e778ff 100644
--- a/examples/babyagi.py
+++ b/examples/babyagi.py
@@ -1,7 +1,12 @@
-"""This example is a simplified translation of https://github.com/yoheinakajima/babyagi"""
+"""This example is a simplified translation of BabyAGI.
+
+It currently does not use the vector store retrieval.
+
+The original repo can be found at https://github.com/yoheinakajima/babyagi
+"""
 from collections import deque
 from dataclasses import dataclass
-from typing import Callable, List
+from typing import Callable, Deque, List
 
 import outlines.text as text
 
@@ -51,17 +56,19 @@ def create_tasks_ppt(
     """You are a task creation AI that uses the result of an execution agent to \
     create new tasks with the following objective: {{objective}}. 
 
-    The last completed task has the result: {{result}}. This result was based on this task \
-    description: {{previous_task}}.
+    The last completed task has the result: {{result}}.
 
-    These are incomplete tasks: {{task_list | join(", ")}}.
+    This result was based on this task description: {{previous_task}}. These are \
+    incomplete tasks: {{task_list | join(", ")}}.
 
     Based on the result, create new tasks to be completed by the AI system that \
-    do not overlap with incomplete tasks. Return the tasks as an array.
+    do not overlap with incomplete tasks.
+
+    Return the tasks as an array.
     """
 
 
-def create_tasks_fmt(result):
+def create_tasks_fmt(result: str) -> List[str]:
     new_tasks = result.split("\n")
 
     task_list = []
@@ -83,23 +90,27 @@ def create_tasks_fmt(result):
 
 @text.prompt
 def prioritize_tasks_ppt(objective: str, task_names: List[str], next_task_id: int):
-    """You are an task prioritization AI tasked with cleaning the formatting of \
-    and reprioritizing the following tasks: {{task_names}}. Consider the ultimate \
-    objective of your team: {{objective}}. Do not remove any tasks. Return the \
-    result as a numbered list starting at {{next_task_id}}, like:
+    """You are a task prioritization AI tasked with cleaning the formatting of \
+    and reprioritizing the following tasks: {{task_names}}.
+
+    Consider the ultimate objective of your team: {{objective}}.
+
+    Do not remove any tasks. Return the result as a numbered list, like:
     #. First task
     #. Second task
+
+    Start the tasks list with the number {{next_task_id}}.
     """
 
 
-def prioritize_tasks_fmt(result):
+def prioritize_tasks_fmt(result: str):
     new_tasks = result.split("\n")
 
-    task_list = deque([])
+    task_list: Deque = deque([])
     for task in new_tasks:
         parts = task.strip().split(".", 1)
         if len(parts) == 2:
-            task_id = parts[0].strip()
+            task_id = int(parts[0].strip())
             task_name = parts[1].strip()
             task_list.append({"task_id": task_id, "task_name": task_name})
 
@@ -109,16 +120,16 @@ def prioritize_tasks_fmt(result):
 
 prioritize_tasks = LLMFunction(MODEL, prioritize_tasks_ppt, prioritize_tasks_fmt)
 
 
-task_id_counter = 1
 objective = "Becoming rich while doing nothing."
 first_task = {
     "task_id": 1,
     "task_name": "Find a repeatable, low-maintenance, scalable business.",
 }
+next_task_id = 1
 task_list = deque([first_task])
 
 
-def one_cycle(objective, task_list, task_id_counter):
+def one_cycle(objective: str, task_list, next_task_id: int):
     """One BabyAGI cycle.
 
     It consists in executing the highest-priority task, creating some new tasks
     given the result, and re-prioritizing the tasks.
 
     Parameters
     ----------
     objective
         The overall objective of the session.
     task_list
         The current list of tasks to perform.
     task_id_counter
         The current task id.
 
     """
 
     task = task_list.popleft()
     result = perform_task(objective, task)
     new_tasks = create_tasks(
         objective, first_task["task_name"], result, [first_task["task_name"]]
     )
+
     for task in new_tasks:
-        task_id_counter += 1
-        task_list.append({"task_id": task_id_counter, "task_name": task})
+        next_task_id += 1
+        task_list.append({"task_id": next_task_id, "task_name": task})
 
     prioritized_tasks = prioritize_tasks(
-        objective, [task["task_name"] for task in task_list], task_id_counter
+        objective, [task["task_name"] for task in task_list], next_task_id
     )
 
-    return task, result, prioritized_tasks
+    return task, result, prioritized_tasks, next_task_id
 
 
 # Let's run it for 5 cycles to see how it works without spending a fortune. 
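+# The `\033[...m` sequences below are ANSI escape codes that color and bold
+# the section headers in the terminal (95 is magenta, 92 green, 93 yellow,
+# 1 bold; `\033[0m` resets the style).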
for _ in range(5): - task, result, task_list = one_cycle(objective, task_list, task_id_counter) - print(f"-------\n\nTASK:\n\n{task}\n\nRESULT:\n\n {result}\n\n") + print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") + for t in task_list: + print(" • " + str(t["task_name"])) + + task, result, task_list, next_task_id = one_cycle( + objective, task_list, next_task_id + ) + + print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") + print(task) + print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") + print(result) From bb8f15d1079dd783110d43470aa1eed78a7f2a57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 19 Apr 2023 11:46:58 +0200 Subject: [PATCH 061/734] Handle OpenAI `APITypeError`s --- outlines/models/openai.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 00607a94..3fffea4e 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -85,6 +85,7 @@ def call(prompt: str) -> str: openai.error.AuthenticationError, openai.error.PermissionError, openai.error.InvalidRequestError, + openai.error.InvalidAPIType, ) as e: raise e @@ -174,6 +175,7 @@ def call( openai.error.AuthenticationError, openai.error.PermissionError, openai.error.InvalidRequestError, + openai.error.InvalidAPIType, ) as e: raise e From e9be9530193d206c82bb4dc03129eb44da17bbde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 19 Apr 2023 17:44:59 +0200 Subject: [PATCH 062/734] Add integration with the OpenAI embeddings endpoint --- outlines/models/__init__.py | 8 +++++- outlines/models/openai.py | 54 +++++++++++++++++++++++++++++++++++++ outlines/models/routers.py | 28 +++++++++++++++++++ pyproject.toml | 2 ++ 4 files changed, 91 insertions(+), 1 deletion(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 960fda72..b6e7ba79 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -7,4 +7,10 @@ """ from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion -from .openai import OpenAIChatCompletion, OpenAICompletion, OpenAITextCompletion +from .openai import ( + OpenAIChatCompletion, + OpenAICompletion, + OpenAIEmbeddings, + OpenAITextCompletion, +) +from .routers import chat_completion, embeddings, text_completion diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 3fffea4e..baf8eb88 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -2,6 +2,8 @@ import os from typing import Callable, Dict, List, Optional, Tuple +import numpy as np + import outlines.cache as cache memory = cache.get() @@ -234,3 +236,55 @@ def validate_completion_parameters( temperature = 1.0 return stop_at, max_tokens, temperature + + +def OpenAIEmbeddings(model_name: str): + """Create a function that will call OpenAI's embeddings endpoint.""" + import openai + + try: + os.environ["OPENAI_API_KEY"] + except KeyError: + raise OSError( + "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " + "OpenAI's APIs. Please make sure it is set before re-running your model." 
+ ) + + def call( + query: str, + ) -> str: + try: + api_response = call_embeddings_api(model_name, query) + response = api_response["data"][0]["embedding"] + return np.array(response) + except ( + openai.error.RateLimitError, + openai.error.Timeout, + openai.error.TryAgain, + openai.error.APIConnectionError, + openai.error.ServiceUnavailableError, + ) as e: + raise OSError(f"Could not connect to the OpenAI API: {e}") + except ( + openai.error.AuthenticationError, + openai.error.PermissionError, + openai.error.InvalidRequestError, + ) as e: + raise e + + return call + + +@memory.cache +def call_embeddings_api( + model: str, + input: str, +): + import openai + + response = openai.Embedding.create( + model=model, + input=input, + ) + + return response diff --git a/outlines/models/routers.py b/outlines/models/routers.py index 9fb118ab..a3134f41 100644 --- a/outlines/models/routers.py +++ b/outlines/models/routers.py @@ -5,6 +5,34 @@ import outlines.models as models +def embeddings(model_path: str) -> Callable: + """Return the model and model name corresponding to the model path. + + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + + Returns + ------- + The model builder with bound model name. + + """ + + registry: Dict[str, Callable] = { + "openai": models.OpenAIEmbeddings, + } + + provider, model_name = parse_model_path(model_path) + + try: + model = registry[provider] + except KeyError: + raise ValueError(f"The model provider {provider} is not available.") + + return model(model_name) + + def text_completion(model_path: str) -> Callable: """Return the model and model name corresponding to the model path. diff --git a/pyproject.toml b/pyproject.toml index 2b2e43fc..4ce3fda5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ dependencies = [ "jinja2", "joblib", + "numpy", "pillow", "rich" ] @@ -52,6 +53,7 @@ module = [ "diffusers", "jinja2", "joblib", + "numpy", "openai", "PIL.Image", "pytest", From 9493cefb8779e43f28306be45d83ebedb636cb60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 19 Apr 2023 18:03:49 +0200 Subject: [PATCH 063/734] Fix type hint for OpenAIEmbeddings --- outlines/models/openai.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index baf8eb88..f4ad8a0b 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -250,9 +250,7 @@ def OpenAIEmbeddings(model_name: str): "OpenAI's APIs. Please make sure it is set before re-running your model." 
) - def call( - query: str, - ) -> str: + def call(query: str) -> np.ndarray: try: api_response = call_embeddings_api(model_name, query) response = api_response["data"][0]["embedding"] From 4e898a154529e8a6f229a358a1b192b378e77fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 19 Apr 2023 21:38:01 +0200 Subject: [PATCH 064/734] Add integration for OpenAI's image generation API --- outlines/models/__init__.py | 3 +- outlines/models/openai.py | 68 +++++++++++++++++++++++++++++++++++++ outlines/models/routers.py | 1 + pyproject.toml | 1 + 4 files changed, 72 insertions(+), 1 deletion(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index b6e7ba79..f7c7cc18 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -11,6 +11,7 @@ OpenAIChatCompletion, OpenAICompletion, OpenAIEmbeddings, + OpenAIImageGeneration, OpenAITextCompletion, ) -from .routers import chat_completion, embeddings, text_completion +from .routers import chat_completion, embeddings, image_generation, text_completion diff --git a/outlines/models/openai.py b/outlines/models/openai.py index f4ad8a0b..146cdf23 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,8 +1,12 @@ """Integration with OpenAI's API.""" +import base64 import os +from io import BytesIO from typing import Callable, Dict, List, Optional, Tuple import numpy as np +from PIL import Image +from PIL.Image import Image as PILImage import outlines.cache as cache @@ -286,3 +290,67 @@ def call_embeddings_api( ) return response + + +def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): + """Create a function that will call OpenAI's image generation endpoint. + + You should have the `openai` package installed. Available models are listed + in the `OpenAI documentation `_. + + Parameters + ---------- + model_name: str + The model name as listed in the OpenAI documentation. + size: str + The size of the image to generate. One of `256x256`, `512x512` or + `1024x1024`. + + Returns + ------- + A function that will call OpenAI's image API with the given parameters when + passed a prompt. + + """ + import openai + + try: + os.environ["OPENAI_API_KEY"] + except KeyError: + raise OSError( + "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " + "OpenAI's APIs. Please make sure it is set before re-running your model." 
+        )
+
+    def call(prompt: str) -> PILImage:
+        try:
+            api_response = call_image_generation_api(prompt, size)
+            response = api_response["data"][0]["b64_json"]
+            img = Image.open(BytesIO(base64.b64decode(response)))
+
+            return img
+        except (
+            openai.error.RateLimitError,
+            openai.error.Timeout,
+            openai.error.TryAgain,
+            openai.error.APIConnectionError,
+            openai.error.ServiceUnavailableError,
+        ) as e:
+            raise OSError(f"Could not connect to the OpenAI API: {e}")
+        except (
+            openai.error.AuthenticationError,
+            openai.error.PermissionError,
+            openai.error.InvalidRequestError,
+        ) as e:
+            raise e
+
+    return call
+
+
+@memory.cache
+def call_image_generation_api(prompt: str, size: str):
+    import openai
+
+    response = openai.Image.create(prompt=prompt, size=size, response_format="b64_json")
+
+    return response
diff --git a/outlines/models/routers.py b/outlines/models/routers.py
index a3134f41..b4f78a38 100644
--- a/outlines/models/routers.py
+++ b/outlines/models/routers.py
@@ -106,6 +106,7 @@ def image_generation(model_path: str) -> Callable:
 
     registry: Dict[str, Callable] = {
         "hf": models.HuggingFaceDiffuser,
+        "openai": models.OpenAIImageGeneration,
     }
 
     provider, model_name = parse_model_path(model_path)
diff --git a/pyproject.toml b/pyproject.toml
index 4ce3fda5..59b27d71 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,6 +55,7 @@ module = [
     "joblib",
     "numpy",
     "openai",
+    "PIL",
    "PIL.Image",
    "pytest",
    "torch",
From 315a5f629cf00f504061e29ef89e7302a26b4102 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 20 Apr 2023 10:07:44 +0200
Subject: [PATCH 065/734] Refactor the OpenAI API integration

---
 outlines/models/openai.py | 274 ++++++++++++++++----------------------
 1 file changed, 115 insertions(+), 159 deletions(-)

diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index 146cdf23..f62e55be 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -10,6 +10,13 @@
 import outlines.cache as cache
 
+__all__ = [
+    "OpenAITextCompletion",
+    "OpenAIChatCompletion",
+    "OpenAIEmbeddings",
+    "OpenAIImageGeneration",
+]
+
 memory = cache.get()
 
 
@@ -41,7 +48,7 @@ def OpenAITextCompletion(
     max_tokens: Optional[int] = None,
     temperature: Optional[float] = None,
 ) -> Callable:
-    """Create a function that will call the completion OpenAI API.
+    """Create a function that will call the OpenAI completion API.
 
     You should have the `openai` package installed. Available models are listed
     in the `OpenAI documentation `_.
@@ -63,60 +70,35 @@
     when passed a prompt.
 
     """
-    import openai
-
-    try:
-        os.environ["OPENAI_API_KEY"]
-    except KeyError:
-        raise OSError(
-            "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call "
-            "OpenAI's APIs. Please make sure it is set before re-running your model." 
- ) parameters = validate_completion_parameters(stop_at, max_tokens, temperature) - def call(prompt: str) -> str: - try: - response = call_completion_api(model_name, prompt, *parameters) - return response["choices"][0]["text"] - except ( - openai.error.RateLimitError, - openai.error.Timeout, - openai.error.TryAgain, - openai.error.APIConnectionError, - openai.error.ServiceUnavailableError, - ) as e: - raise OSError(f"Could not connect to the OpenAI API: {e}") - except ( - openai.error.AuthenticationError, - openai.error.PermissionError, - openai.error.InvalidRequestError, - openai.error.InvalidAPIType, - ) as e: - raise e - - return call - + @error_handler + @memory.cache + def call_completion_api( + model: str, + prompt: str, + stop_sequences: Tuple[str], + max_tokens: int, + temperature: float, + ): + import openai + + response = openai.Completion.create( + engine=model, + prompt=prompt, + temperature=temperature, + max_tokens=max_tokens, + stop=stop_sequences, + ) -@memory.cache -def call_completion_api( - model: str, - prompt: str, - stop_sequences: Tuple[str], - max_tokens: int, - temperature: float, -): - import openai + return response - response = openai.Completion.create( - engine=model, - prompt=prompt, - temperature=temperature, - max_tokens=max_tokens, - stop=stop_sequences, - ) + def generate(prompt: str) -> str: + response = call_completion_api(model_name, prompt, *parameters) + return response["choices"][0]["text"] - return response + return generate def OpenAIChatCompletion( @@ -147,66 +129,36 @@ def OpenAIChatCompletion( parameters when passed a prompt. """ - import openai - - try: - os.environ["OPENAI_API_KEY"] - except KeyError: - raise OSError( - "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " - "OpenAI's APIs. Please make sure it is set before re-running your model." 
- ) - parameters = validate_completion_parameters(stop_at, max_tokens, temperature) - def call( - query: str, - state: List[Tuple[str, str]] = [], - ) -> str: - try: - messages = create_chat_completion_messages(state) - api_response = call_chat_completion_api(model_name, messages, *parameters) - response = api_response["choices"][0]["message"]["content"] - return response - - except ( - openai.error.RateLimitError, - openai.error.Timeout, - openai.error.TryAgain, - openai.error.APIConnectionError, - openai.error.ServiceUnavailableError, - ) as e: - raise OSError(f"Could not connect to the OpenAI API: {e}") - except ( - openai.error.AuthenticationError, - openai.error.PermissionError, - openai.error.InvalidRequestError, - openai.error.InvalidAPIType, - ) as e: - raise e - - return call - + @error_handler + @memory.cache + def call_chat_completion_api( + model: str, + messages: List[Dict[str, str]], + stop_sequences: Tuple[str], + max_tokens: int, + temperature: float, + ): + import openai + + response = openai.ChatCompletion.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + stop=stop_sequences, + ) -@memory.cache -def call_chat_completion_api( - model: str, - messages: List[Dict[str, str]], - stop_sequences: Tuple[str], - max_tokens: int, - temperature: float, -): - import openai + return response - response = openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - stop=stop_sequences, - ) + def generate(query: str, state: List[Tuple[str, str]]) -> str: + messages = create_chat_completion_messages(state) + response = call_chat_completion_api(model_name, messages, *parameters) + answer = response["choices"][0]["message"]["content"] + return answer - return response + return generate def create_chat_completion_messages( @@ -230,6 +182,7 @@ def create_chat_completion_messages( def validate_completion_parameters( stop_at, max_tokens, temperature ) -> Tuple[Tuple[str], int, float]: + """Validate the parameters passed to the completion APIs and set default values.""" if stop_at is not None and len(stop_at) > 4: raise TypeError("OpenAI's API does not accept more than 4 stop sequences.") elif stop_at is not None: @@ -243,53 +196,44 @@ def validate_completion_parameters( def OpenAIEmbeddings(model_name: str): - """Create a function that will call OpenAI's embeddings endpoint.""" - import openai + """Create a function that will call OpenAI's embeddings endpoint. - try: - os.environ["OPENAI_API_KEY"] - except KeyError: - raise OSError( - "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " - "OpenAI's APIs. Please make sure it is set before re-running your model." - ) + You should have the `openai` package installed. Available models are listed + in the `OpenAI documentation `_. - def call(query: str) -> np.ndarray: - try: - api_response = call_embeddings_api(model_name, query) - response = api_response["data"][0]["embedding"] - return np.array(response) - except ( - openai.error.RateLimitError, - openai.error.Timeout, - openai.error.TryAgain, - openai.error.APIConnectionError, - openai.error.ServiceUnavailableError, - ) as e: - raise OSError(f"Could not connect to the OpenAI API: {e}") - except ( - openai.error.AuthenticationError, - openai.error.PermissionError, - openai.error.InvalidRequestError, - ) as e: - raise e + Parameters + ---------- + model_name: str + The model name as listed in the OpenAI documentation. 
- return call + Returns + ------- + A function that will call OpenAI's embedding API with the given parameters when + passed a prompt. + """ -@memory.cache -def call_embeddings_api( - model: str, - input: str, -): - import openai + @error_handler + @memory.cache + def call_embeddings_api( + model: str, + input: str, + ): + import openai + + response = openai.Embedding.create( + model=model, + input=input, + ) - response = openai.Embedding.create( - model=model, - input=input, - ) + return response - return response + def generate(query: str) -> np.ndarray: + api_response = call_embeddings_api(model_name, query) + response = api_response["data"][0]["embedding"] + return np.array(response) + + return generate def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): @@ -312,6 +256,30 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): passed a prompt. """ + + @error_handler + @memory.cache + def call_image_generation_api(prompt: str, size: str): + import openai + + response = openai.Image.create( + prompt=prompt, size=size, response_format="b64_json" + ) + + return response + + def generate(prompt: str) -> PILImage: + api_response = call_image_generation_api(prompt, size) + response = api_response["data"][0]["b64_json"] + img = Image.open(BytesIO(base64.b64decode(response))) + + return img + + return generate + + +def error_handler(api_call_fn: Callable) -> Callable: + """Handle OpenAI API errors and missing API key.""" import openai try: @@ -322,13 +290,9 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): "OpenAI's APIs. Please make sure it is set before re-running your model." ) - def call(prompt: str) -> PILImage: + def call(*args, **kwargs): try: - api_response = call_image_generation_api(prompt, size) - response = api_response["data"][0]["b64_json"] - img = Image.open(BytesIO(base64.b64decode(response))) - - return img + return api_call_fn(*args, **kwargs) except ( openai.error.RateLimitError, openai.error.Timeout, @@ -341,16 +305,8 @@ def call(prompt: str) -> PILImage: openai.error.AuthenticationError, openai.error.PermissionError, openai.error.InvalidRequestError, + openai.error.InvalidAPIType, ) as e: raise e return call - - -@memory.cache -def call_image_generation_api(prompt: str, size: str): - import openai - - response = openai.Image.create(prompt=prompt, size=size, response_format="b64_json") - - return response From 44eaaa9f2f98a04dac383d5a53dd9e6346113700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 20 Apr 2023 22:30:34 +0200 Subject: [PATCH 066/734] Remove whitespaces introduced by the use of `\` --- outlines/text.py | 6 ++++++ tests/test_text.py | 14 +++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/outlines/text.py b/outlines/text.py index b8a86096..8e4e8318 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -1,4 +1,5 @@ import inspect +import re from typing import Any, Callable, Dict, List, Optional, Tuple, cast from jinja2 import StrictUndefined, Template @@ -89,6 +90,11 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: # Dedent, and remove extra linebreak template = inspect.cleandoc(template) + # Remove extra whitespaces, except those that immediately follow a newline symbol. + # This is necessary to avoid introducing whitespaces after backslash `\` characters + # used to continue to the next line without linebreak. 
+ template = re.sub(r"(?![\r\n])(\b\s+)", " ", template) + mako_template = Template( template, trim_blocks=True, diff --git a/tests/test_text.py b/tests/test_text.py index e5766ac6..15813ecd 100644 --- a/tests/test_text.py +++ b/tests/test_text.py @@ -31,9 +31,6 @@ def test_render(): assert text.render(tpl) == "A test line\n An indented line" -@pytest.mark.xfail( - reason="We need a regexp that can match whitespace sequences except those that follow a linebreak" -) def test_render_escaped_linebreak(): tpl = """ A long test \ @@ -42,6 +39,17 @@ def test_render_escaped_linebreak(): """ assert text.render(tpl) == "A long test that we break in several lines" + tpl = """ + Break in \ + several lines \ + But respect the indentation + on line breaks. + """ + assert ( + text.render(tpl) + == "Break in several lines But respect the indentation\n on line breaks." + ) + def test_render_jinja(): """Make sure that we can use basic Jinja2 syntax, and give examples From 8c6d108cd80485cc55fa861ad59aa7da923ad985 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 23 Apr 2023 16:21:16 +0200 Subject: [PATCH 067/734] Remove chat completion --- outlines/models/__init__.py | 2 +- outlines/models/openai.py | 22 +--------- outlines/models/routers.py | 28 ------------- outlines/text.py | 82 ------------------------------------- 4 files changed, 3 insertions(+), 131 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index f7c7cc18..a7c8eb66 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -14,4 +14,4 @@ OpenAIImageGeneration, OpenAITextCompletion, ) -from .routers import chat_completion, embeddings, image_generation, text_completion +from .routers import embeddings, image_generation, text_completion diff --git a/outlines/models/openai.py b/outlines/models/openai.py index f62e55be..c933b16f 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -152,8 +152,8 @@ def call_chat_completion_api( return response - def generate(query: str, state: List[Tuple[str, str]]) -> str: - messages = create_chat_completion_messages(state) + def generate(query: str) -> str: + messages = [{"role": "user", "content": query}] response = call_chat_completion_api(model_name, messages, *parameters) answer = response["choices"][0]["message"]["content"] return answer @@ -161,24 +161,6 @@ def generate(query: str, state: List[Tuple[str, str]]) -> str: return generate -def create_chat_completion_messages( - state: List[Tuple[str, str]] = [], -) -> List[Dict[str, str]]: - """Create chat completion messages in a form compatible with OpenAI's API. - - Setting the `instruction` prompt and the `history` to `None` amounts to - calling the chat completion API as a simple completion API. - - """ - openai_names = {"user": "user", "model": "assistant", "prefix": "system"} - - messages = [] - for author, message in state: - messages.append({"role": openai_names[author], "content": message}) - - return messages - - def validate_completion_parameters( stop_at, max_tokens, temperature ) -> Tuple[Tuple[str], int, float]: diff --git a/outlines/models/routers.py b/outlines/models/routers.py index b4f78a38..7956ae28 100644 --- a/outlines/models/routers.py +++ b/outlines/models/routers.py @@ -62,34 +62,6 @@ def text_completion(model_path: str) -> Callable: return functools.partial(model, model_name) -def chat_completion(model_path: str) -> Callable: - """Return the model and model name corresponding to the model path. 
- - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - - Returns - ------- - The model builder with bound model name. - - """ - - registry: Dict[str, Callable] = { - "openai": models.OpenAIChatCompletion, - } - - provider, model_name = parse_model_path(model_path) - - try: - model = registry[provider] - except KeyError: - raise ValueError(f"The model provider {provider} is not available.") - - return functools.partial(model, model_name) - - def image_generation(model_path: str) -> Callable: """Return the model and model name corresponding to the model path. diff --git a/outlines/text.py b/outlines/text.py index 8e4e8318..b3f842fb 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -234,85 +234,3 @@ def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: return wrapper return decorator - - -def chat_completion( - model_path: str, - *, - prefix: Optional[str] = None, - stop_at: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, -) -> Tuple[Callable, List[Tuple[str, str]]]: - """Call language models with a chat interaction pattern. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - prefix - The prefix used at the beginning of the prompt, called "system" in the - OpenAI API for instance. - stop_at - A list of tokens which, when found, stop the generation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - """ - llm_builder = routers.chat_completion(model_path) - chat_llm = llm_builder( - stop_at=stop_at, max_tokens=max_tokens, temperature=temperature - ) - - init_state = [] - if prefix is not None: - init_state.append(("prefix", prefix)) - - def model( - query: str, - state: List[Tuple[str, str]] = [], - ) -> Tuple[str, List[Tuple[str, str]]]: - """Call the chat completion model. - - We adopt a unifying API for all chat completion models: - - - `instruction` represents the "instructions" passed to the model before - the first user query. Called "system" with OpenAI's ChatGPT, the prompt - that precedes the first `Human:` occurence with Anthropic's Claude for - instance. - - `history` represents the sequence of user queries and model return - values, respectively indexed by "user" and "model". - - `query` represents the current user query. - - Note - ---- - - As Claude's documentation shows, chat APIs are nothing more than an - abstraction over the recursive prompt completion that often happens in - language model workflows. It is in fact possible to pass models - compatible with `chat_completion` to the `completion` function, which is - equivalent to call this API with `system=None` and `history=None`. - - Parameters - ---------- - query - The current user query. - state - The current state of the completion, which represents the history of - interactions with the model, a list of tuples that contain the - message and its author. - - Returns - ------- - The return value of the model and the state updated with the user query and - the model's return value. 
- - """ - state.append(("user", query)) - result = chat_llm(query, state) - state.append(("model", result)) - return result, state - - return model, init_state From a555cf7f91f8a3e3c62226bb4925935206b6a028 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 23 Apr 2023 16:21:52 +0200 Subject: [PATCH 068/734] Expand test for whitespace removal --- tests/test_text.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_text.py b/tests/test_text.py index 15813ecd..455d50d0 100644 --- a/tests/test_text.py +++ b/tests/test_text.py @@ -44,10 +44,12 @@ def test_render_escaped_linebreak(): several lines \ But respect the indentation on line breaks. + And after everything \ + Goes back to normal """ assert ( text.render(tpl) - == "Break in several lines But respect the indentation\n on line breaks." + == "Break in several lines But respect the indentation\n on line breaks.\nAnd after everything Goes back to normal" ) From 315a5f629cf00f504061e29ef89e7302a26b4102 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 23 Apr 2023 16:22:04 +0200 Subject: [PATCH 069/734] Fix meta-prompting example prompts --- examples/meta_prompting.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index e3e83b9b..9e0fa0f0 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -49,17 +49,17 @@ def ask_an_expert(question, model_name: str): def find_expert(question): """ {{question}} - I entered my question into the Expert Generator - and waited. The Expert Generator will render a - simulation of an expert to answer my question. - The expert could be anyone, dead or alive, real - or fictional; the machine will find the person - most qualified to answer the question. For this - question in particular, the expert must be someone - who has thought a lot about the problem of - artificial intelligence and its alignment. - The Expert Generator beeped, indicating that it has - found the most qualified expert. The name displayed + I entered my question into the Expert Generator \ + and waited. The Expert Generator will render a \ + simulation of an expert to answer my question. \ + The expert could be anyone, dead or alive, real \ + or fictional; the machine will find the person \ + most qualified to answer the question. For this \ + question in particular, the expert must be someone \ + who has thought a lot about the problem of \ + artificial intelligence and its alignment. \ + The Expert Generator beeped, indicating that it has \ + found the most qualified expert. The name displayed \ on the screen: " """ @@ -91,7 +91,7 @@ def get_answer(expert, memory): """ {{memory}}. - For instance,{{expert}} would answer + For instance, {{expert}} would answer """ expert, completed = find_expert(question) @@ -111,7 +111,7 @@ def run_example(model_fn, question, model_name): parser.add_argument( "--model", type=str, - default="openai/text-davinci-003", + default="openai/gpt-3.5-turbo", help="The Large Language Model to use to run the examples.", ) args = parser.parse_args() @@ -119,9 +119,9 @@ def run_example(model_fn, question, model_name): math_q = "f(x) = x*x. What is f(f(3))?" sat_q = """ -Directions: In the following question, a related pair of words or phrases -is followed by five pairs of words or phrases. Choose the pair that best -expresses a relationship similar to that in the original pair. 
+Directions: In the following question, a related pair of words or phrases \ +is followed by five pairs of words or phrases. Choose the pair that best \ +expresses a relationship similar to that in the original pair. \ BRAGGART :: MODESTY A) FLEDGLING : EXPERIENCE From 0e8648896589614b5687918658279ddcf0de63fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 23 Apr 2023 22:13:34 +0200 Subject: [PATCH 070/734] Refactor the router model The available providers for each category are currently not discoverable as the map is encapsulated in a function. Instead we use a module approach, where a `model_kind` model by `provider` is accessed via `outlines.models.model_kind.provider`. --- outlines/image.py | 21 ++++++++--- outlines/models/__init__.py | 3 +- outlines/models/embeddings.py | 4 ++ outlines/models/image_generation.py | 6 +++ outlines/models/openai.py | 22 ----------- outlines/models/routers.py | 57 ----------------------------- outlines/models/text_completion.py | 27 ++++++++++++++ outlines/text.py | 20 ++++++++-- tests/models/test_routers.py | 38 ------------------- 9 files changed, 70 insertions(+), 128 deletions(-) create mode 100644 outlines/models/embeddings.py create mode 100644 outlines/models/image_generation.py create mode 100644 outlines/models/text_completion.py delete mode 100644 tests/models/test_routers.py diff --git a/outlines/image.py b/outlines/image.py index ea7c2f75..07b256bc 100644 --- a/outlines/image.py +++ b/outlines/image.py @@ -2,17 +2,28 @@ from PIL.Image import Image as PILImage -import outlines.models.routers as routers -from outlines.text import prompt +import outlines.models as models +import outlines.text as text def generation(model_path: str) -> Callable: """Decorator that allows to simplify calls to image generation models.""" - generative_model_builder = routers.image_generation(model_path) - generative_model = generative_model_builder() + + if "/" not in model_path: + raise ValueError("Model names must be in the form 'provider_name/model_name'") + + provider_name = model_path.split("/")[0] + model_name = model_path[len(provider_name) + 1 :] + + try: + model_cls = getattr(models.image_generation, provider_name) + except KeyError: + raise ValueError(f"The model provider {provider_name} is not available.") + + generative_model = model_cls(model_name) def decorator(fn: Callable): - prompt_fn = prompt(fn) + prompt_fn = text.prompt(fn) def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> PILImage: """Call the Diffuser with the rendered template. diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index a7c8eb66..15a11324 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,13 +5,12 @@ codebase. """ +from . 
import image_generation, text_completion from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion from .openai import ( OpenAIChatCompletion, - OpenAICompletion, OpenAIEmbeddings, OpenAIImageGeneration, OpenAITextCompletion, ) -from .routers import embeddings, image_generation, text_completion diff --git a/outlines/models/embeddings.py b/outlines/models/embeddings.py new file mode 100644 index 00000000..12e96613 --- /dev/null +++ b/outlines/models/embeddings.py @@ -0,0 +1,4 @@ +"""Router for embedding models.""" +from .openai import OpenAIEmbeddings + +openai = OpenAIEmbeddings diff --git a/outlines/models/image_generation.py b/outlines/models/image_generation.py new file mode 100644 index 00000000..ff26d21b --- /dev/null +++ b/outlines/models/image_generation.py @@ -0,0 +1,6 @@ +"""Router for image generation models.""" +from .hf_diffusers import HuggingFaceDiffuser +from .openai import OpenAIImageGeneration + +hf = HuggingFaceDiffuser +openai = OpenAIImageGeneration diff --git a/outlines/models/openai.py b/outlines/models/openai.py index c933b16f..caa31d11 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -20,28 +20,6 @@ memory = cache.get() -def OpenAICompletion(model_name: str, *args, **kwargs): - """Dispatch the model names to their respective completion API. - - This ensures that chat completion models can also be called as text - completion models (with no instruction and no history). - - Parameters - ---------- - model_name - The name of the model in OpenAI's API. - - """ - if "text-" in model_name: - return OpenAITextCompletion(model_name, *args, **kwargs) - elif "gpt-" in model_name: - return OpenAIChatCompletion(model_name, *args, **kwargs) - else: - raise NameError( - f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." - ) - - def OpenAITextCompletion( model_name: str, stop_at: Optional[List[str]] = None, diff --git a/outlines/models/routers.py b/outlines/models/routers.py index 7956ae28..cbb34e99 100644 --- a/outlines/models/routers.py +++ b/outlines/models/routers.py @@ -5,63 +5,6 @@ import outlines.models as models -def embeddings(model_path: str) -> Callable: - """Return the model and model name corresponding to the model path. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - - Returns - ------- - The model builder with bound model name. - - """ - - registry: Dict[str, Callable] = { - "openai": models.OpenAIEmbeddings, - } - - provider, model_name = parse_model_path(model_path) - - try: - model = registry[provider] - except KeyError: - raise ValueError(f"The model provider {provider} is not available.") - - return model(model_name) - - -def text_completion(model_path: str) -> Callable: - """Return the model and model name corresponding to the model path. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - - Returns - ------- - The model builder with bound model name. - - """ - - registry: Dict[str, Callable] = { - "openai": models.OpenAICompletion, - "hf": models.HuggingFaceCompletion, - } - - provider, model_name = parse_model_path(model_path) - - try: - model = registry[provider] - except KeyError: - raise ValueError(f"The model provider {provider} is not available.") - - return functools.partial(model, model_name) - - def image_generation(model_path: str) -> Callable: """Return the model and model name corresponding to the model path. 
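The module-based lookup introduced here replaces the old string registry; a minimal sketch of the intended access pattern (the model identifiers below are illustrative):

``` python
import outlines.models as models

# `models.<model_kind>.<provider>` now names the model builder directly,
# instead of going through a registry function:
complete = models.text_completion.hf("gpt2")
embed = models.embeddings.openai("text-embedding-ada-002")
draw = models.image_generation.openai("dall-e")
```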
diff --git a/outlines/models/text_completion.py b/outlines/models/text_completion.py new file mode 100644 index 00000000..1406c651 --- /dev/null +++ b/outlines/models/text_completion.py @@ -0,0 +1,27 @@ +"""Router for text completion models.""" +from .hf_transformers import HuggingFaceCompletion +from .openai import OpenAIChatCompletion, OpenAITextCompletion + +hf = HuggingFaceCompletion + + +def openai(model_name: str, *args, **kwargs): + """Dispatch the OpenAI model names to their respective completion API. + + This ensures that chat completion models can also be called as text + completion models (with no instruction and no history). + + Parameters + ---------- + model_name + The name of the model in OpenAI's API. + + """ + if "text-" in model_name: + return OpenAITextCompletion(model_name, *args, **kwargs) + elif "gpt-" in model_name: + return OpenAIChatCompletion(model_name, *args, **kwargs) + else: + raise NameError( + f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." + ) diff --git a/outlines/text.py b/outlines/text.py index b3f842fb..40be0f3f 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -4,7 +4,7 @@ from jinja2 import StrictUndefined, Template -import outlines.models.routers as routers +import outlines.models as models def render(template: str, **values: Optional[Dict[str, Any]]) -> str: @@ -183,7 +183,7 @@ def completion( >>> import outlines.text as text >>> - >>> @outlines.completion("openai/davinci") + >>> @text.completion("openai/davinci") ... def answer(question): ... "I have a {{question}}" ... @@ -207,8 +207,20 @@ def completion( Value used to module the next token probabilities. """ - llm_builder = routers.text_completion(model_path) - llm = llm_builder(stop_at=stop_at, max_tokens=max_tokens, temperature=temperature) + if "/" not in model_path: + raise ValueError("Model names must be in the form 'provider_name/model_name'") + + provider_name = model_path.split("/")[0] + model_name = model_path[len(provider_name) + 1 :] + + try: + model_cls = getattr(models.text_completion, provider_name) + except KeyError: + raise ValueError(f"The model provider {provider_name} is not available.") + + llm = model_cls( + model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature + ) def decorator(fn: Callable): prompt_fn = prompt(fn) diff --git a/tests/models/test_routers.py b/tests/models/test_routers.py deleted file mode 100644 index 1d6ecf67..00000000 --- a/tests/models/test_routers.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest - -import outlines.models as models -import outlines.models.routers as routers - - -def test_text_model_invalid_provider(): - with pytest.raises(ValueError, match="model provider"): - routers.text_completion("xx/model_name") - - -def test_text_model_router(): - dummy_model_name = "model_name" - llm_builder = routers.text_completion(f"openai/{dummy_model_name}") - assert llm_builder.func == models.OpenAICompletion - assert llm_builder.args == (dummy_model_name,) - - llm_builder = routers.text_completion(f"hf/{dummy_model_name}") - assert llm_builder.func == models.HuggingFaceCompletion - assert llm_builder.args == (dummy_model_name,) - - -def test_text_openai_router(): - """Test that the router for completion fails when the model name is not - prefixed by either `test-` or `gpt`. 
- - """ - dummy_model_name = "model_name" - llm_builder = routers.text_completion(f"openai/{dummy_model_name}") - assert llm_builder.func == models.OpenAICompletion - - with pytest.raises(NameError, match="not available"): - llm_builder(dummy_model_name) - - -def test_invalid_model_path(): - with pytest.raises(ValueError, match="must be in the form"): - routers.parse_model_path("hf") From de0d3a1f7d2125ef6e5aa4531e6b1f3cee241d58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 21 Apr 2023 13:29:19 +0200 Subject: [PATCH 071/734] s/mako/jinja --- outlines/text.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/outlines/text.py b/outlines/text.py index 40be0f3f..92fff486 100644 --- a/outlines/text.py +++ b/outlines/text.py @@ -11,7 +11,7 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: r"""Parse a Jinaj2 template and translate it into an Outlines graph. This function removes extra whitespaces and linebreaks from templates to - allow users to enter prompt more naturally than if they used Python's + allow users to enter prompts more naturally than if they used Python's constructs directly. See the examples for a detailed explanation. Examples @@ -95,20 +95,20 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: # used to continue to the next line without linebreak. template = re.sub(r"(?![\r\n])(\b\s+)", " ", template) - mako_template = Template( + jinja_template = Template( template, trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=False, undefined=StrictUndefined, ) - return mako_template.render(**values) + return jinja_template.render(**values) def prompt(fn: Callable) -> Callable: """Decorate a function that contains a prompt template. - This allows to define prompts in the docstring of a function and ease their + This allows to define prompts in the docstring of a function and simplify their manipulation by providing some degree of encapsulation. It uses the `render` function internally to render templates. @@ -120,6 +120,22 @@ def prompt(fn: Callable) -> Callable: ... >>> prompt = build_prompt("How are you?") + This API can also be helpful in an "agent" context where parts of the prompt + are set when the agent is initialized and never modified later. In this situation + we can partially apply the prompt function at initialization. + + >>> import outlines + >>> import functools as ft + ... + >>> @outlines.prompt + ... def solve_task(name: str, objective: str, task: str): + ... '''Your name is {{name}}. + .. Your overall objective is to {{objective}}. + ... Please solve the following task: {{task}} + ... ''' + ... 
+ >>> hal = ft.partial(solve_taks, "HAL", "Travel to Jupiter") + """ sig = inspect.signature(fn) From 0693ba6b396fe8ed6c64a8b5e65fc6a75b63e41d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 21 Apr 2023 16:40:16 +0200 Subject: [PATCH 072/734] Add the `Tool` class and `tool` decorator --- outlines/__init__.py | 2 + outlines/tools/__init__.py | 116 +++++++++++++++++++++++++++++++++++++ tests/test_tools.py | 58 +++++++++++++++++++ 3 files changed, 176 insertions(+) create mode 100644 outlines/tools/__init__.py create mode 100644 tests/test_tools.py diff --git a/outlines/__init__.py b/outlines/__init__.py index e8375e89..0184b20d 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,10 +1,12 @@ """Outlines is a Generative Model Programming Framework.""" from outlines.image import generation from outlines.text import completion, prompt, render +from outlines.tools import tool __all__ = [ "completion", "generation", "prompt", "render", + "tool", ] diff --git a/outlines/tools/__init__.py b/outlines/tools/__init__.py new file mode 100644 index 00000000..e96c11cc --- /dev/null +++ b/outlines/tools/__init__.py @@ -0,0 +1,116 @@ +import functools +import inspect +import re +import textwrap +from typing import Callable, Optional + + +class Tool: + """An Outlines tool definition. + + Wraps python functions and automatically extracts their name, definition and + description. + + Attributes + ---------- + fn + The function being wrapped. + name + The name of the function. + description + The function's description as read in the first line of the docstring, + or passed at instantiation. + signature + The function's signature in a string format. + + """ + + def __init__(self, fn: Callable, description: Optional[str] = None): + """ + + Parameters + ---------- + fn + The function being wrapped. + description + The description contained in the function's docstring will + be overriden by this value. + + """ + if not callable(fn): + raise TypeError("`Tool` must be instantiated by passing a callable.") + self.fn = fn + + # Get the function's name + if not hasattr(fn, "__name__"): + self.name = type(fn).__name__ + else: + self.name = fn.__name__ + + # When unspecified, the docstring's first line is used as a description + if description is None: + docstring = inspect.getdoc(fn) + if docstring is None: + description = None + else: + description = docstring.split("\n")[0].strip() + + self.description = description + + # Get the function's source code, without the decorator if present. + source = textwrap.dedent(inspect.getsource(fn)) + re_search = re.search(re.compile(r"(\bdef\b.*)", re.DOTALL), source) + if re_search is not None: + source = re_search.group(0) + else: + raise TypeError("Could not read the function's source code") + self.source = source + + # Extract the signature part of the function's source code + re_search = re.search(re.compile(r"\(([^)]+)\)"), source) + if re_search is None: + self.signature = "" + else: + self.signature = re_search.group(1) + + def __call__(self, *args, **kwargs): + return self.fn(*args, **kwargs) + + +def tool(fn=None, *, description=None): + """Decorator to designate a function as a tool. + + Parameters + ---------- + description + The description contained in the function's docstring will + be overriden by this value. + + Returns + ------- + A `Tool` object which will call the decorated function when called. 
+ + Examples + -------- + + Define a simple function, its description will be read from the docstring's + first line: + + >>> @outlines.tool + ... def repeat(word: str, n: int): + ... "Repeat the word n times" + ... return words * n + + We can also override the description: + + >>> @outlines.tool(description="n time the word") + ... def repeat(word: str, n: int): + ... "Repeat the word n times" + ... return words * n + + """ + + if fn is not None: + return Tool(fn, description=description) + else: + return functools.partial(tool, description=description) diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 00000000..7f24a667 --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,58 @@ +from typing import List + +from outlines.tools import Tool, tool + + +def test_Tool_basic(): + def test_function(): + pass + + fn = Tool(test_function) + assert fn.name == "test_function" + assert fn.description is None + assert fn.source == "def test_function():\n pass\n" + assert fn.signature == "" + + def test_function_description(): + """A description.""" + pass + + fn = Tool(test_function_description) + assert fn.description == "A description." + + def test_function_set_description(): + """A description.""" + pass + + fn = Tool(test_function_description, description="Another") + assert fn.description == "Another" + + def test_function_signature(one: int, two: List[str], three: float = 1.0): + pass + + fn = Tool(test_function_signature) + assert fn.signature == "one: int, two: List[str], three: float = 1.0" + + def test_function_call(one, two=2): + return one + two + + fn = Tool(test_function_call) + assert fn(1) == 3 + assert fn(1, 4) == 5 + assert fn(1, two=3) == 4 + + +def test_tool_decorator(): + @tool + def test_function(): + pass + + assert test_function.name == "test_function" + assert test_function.description is None + + @tool(description="Another") + def test_function_description(): + "A description" + pass + + assert test_function_description.description == "Another" From d272b87104b13ebc6b137d36c063d1a02226f675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 21 Apr 2023 22:32:01 +0200 Subject: [PATCH 073/734] Remove dependency on Rich --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 59b27d71..0c88d063 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,6 @@ dependencies = [ "joblib", "numpy", "pillow", - "rich" ] dynamic = ["version"] From 4843125192df3f46b9518646c4ab3e1a291857dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 24 Apr 2023 09:32:49 +0200 Subject: [PATCH 074/734] Add README --- README.md | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..bebd22cc --- /dev/null +++ b/README.md @@ -0,0 +1,124 @@ +# Outlines + +Build _reliable_ workflow based on interactions with large language models. + +## Prompt management + +Outlines makes it easier to write and manage prompts by encapsulating templates +inside "template functions". These functions make it possible to neatly separate +the prompt logic from the general program logic; they can be imported from other +modules and libraries. 
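+For example, a template function defined in one module (here a hypothetical
+`prompts.py`) can be imported and called from anywhere else:
+
+``` python
+# prompts.py
+import outlines.text as text
+
+@text.prompt
+def greet(name):
+    "Hello, {{name}}!"
+```
+
+``` python
+from prompts import greet
+
+greet("world")  # "Hello, world!"
+```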
+
+Template functions use the Jinja2 templating engine to help build complex
+prompts (like few-shot examples) in a concise manner:
+
+``` python
+import outlines.text as text
+
+
+@text.prompt
+def few_shot_examples(question, examples):
+    """Something something
+
+    {% for example in examples %}
+    EXAMPLE: {{ example }}
+    {% endfor %}
+
+    QUESTION: {{question}}
+    Let's think step by step.
+
+    """
+```
+
+Template functions can also be _partially evaluated_ just like any other function, which can be useful when building agents:
+
+``` python
+import functools as ft
+import outlines.text as text
+
+@text.prompt
+def my_agent(name, goals):
+    """Your name is {{name}}.
+
+    GOALS:
+    {% for goal in goals %}
+    {{loop.index}}. {{goal}}
+    {% endfor %}
+    """
+
+jarvis = ft.partial(my_agent, "JARVIS")
+```
+
+### Tools
+
+Prior work has shown that we can teach language models to call external functions to get additional information or perform tasks by encoding the functions' descriptions in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt, we introduce a `@outlines.tool` decorator which automatically extracts the needed information from the function's definition:
+
+
+``` python
+from typing import Callable, List
+import outlines
+import outlines.text as text
+
+
+@outlines.tool
+def google_search(query: str):
+    """Google Search"""
+    pass
+
+
+@outlines.tool
+def wikipedia_search(query: str):
+    """Wikipedia Search"""
+    pass
+
+
+@text.prompt
+def my_commands(tools: List[Callable]):
+    """AVAILABLE COMMANDS:
+
+    {% for tool in tools %}
+    {{loop.index}}. {{tool.name}}, {{tool.description}}, args: {{tool.signature}}
+    {% endfor %}
+    """
+
+prompt = my_commands([google_search, wikipedia_search])
+```
+
+## Text completion
+
+Prompts are often attached to a given model and to specific settings, but these associations can be hard to trace in a codebase. We therefore abstract calls to any model that takes a prompt as input by decorating template functions:
+
+``` python
+import outlines.text as text
+
+
+@text.completion("openai/text-davinci-003", stop_at=["\n"])
+def few_shot_examples(question, examples):
+    """You are a question answering AI.
+
+    {% for example in examples %}
+    QUESTION: {{example.question}}
+    ANSWER: {{example.answer}}
+    {% endfor %}
+
+    QUESTION: {{question}}
+    Let's think step by step.
+
+    """
+
+result, completed = few_shot_examples(question, examples)
+```
+
+## Image generation
+
+A similar syntax can be used with image generation models:
+
+``` python
+import outlines.image as image
+
+@image.generation("hf/stabilityai/stable-diffusion-2.1")
+def generate(subject, location):
+    "A photo of a {{subject}} riding a horse in {{location}}."
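+
+# A sketch of how the decorated function might then be called; the subject
+# and location below are illustrative:
+#
+# image = generate("unicorn", "Paris")  # returns a PIL image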
+```
From ef6c594c6397f3923cc66f07701c53581769272d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 24 Apr 2023 09:42:15 +0200
Subject: [PATCH 075/734] Create `text` submodule

---
 outlines/text/__init__.py                   |   2 +
 outlines/text/completions.py                | 106 ++++++++++++++++++++
 outlines/{text.py => text/prompts.py}       | 106 +-------------------
 tests/{test_text.py => text/test_prompt.py} |   0
 4 files changed, 109 insertions(+), 105 deletions(-)
 create mode 100644 outlines/text/__init__.py
 create mode 100644 outlines/text/completions.py
 rename outlines/{text.py => text/prompts.py} (55%)
 rename tests/{test_text.py => text/test_prompt.py} (100%)

diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py
new file mode 100644
index 00000000..9fc70a39
--- /dev/null
+++ b/outlines/text/__init__.py
@@ -0,0 +1,2 @@
+from .completions import completion
+from .prompts import prompt, render
diff --git a/outlines/text/completions.py b/outlines/text/completions.py
new file mode 100644
index 00000000..c4e23970
--- /dev/null
+++ b/outlines/text/completions.py
@@ -0,0 +1,106 @@
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import outlines.models as models
+import outlines.text as text
+
+
+def completion(
+    model_path: str,
+    *,
+    stop_at: Optional[List[str]] = None,
+    max_tokens: Optional[int] = None,
+    temperature: Optional[float] = None,
+) -> Callable:
+    """Decorator that simplifies calls to language models.
+
+    Prompts that are passed to language models are often rendered templates,
+    and the workflow typically looks like:
+
+    >>> import outlines
+    >>> from outlines.models import OpenAICompletion
+    >>>
+    >>> llm = OpenAICompletion("davinci")
+    >>> tpl = "I have a {{question}}"
+    >>> prompt = outlines.render(tpl, question="How are you?")
+    >>> answer = llm(prompt)
+
+    While explicit, these 4 lines have the following drawbacks:
+
+    1. The prompt is hidden;
+    2. The language model instantiation is far from the prompt; prompt templates
+    are however attached to a specific language model call.
+    3. The intent behind the language model call is hidden.
+
+    To encapsulate the logic behind language model calls, we thus define the
+    template prompt inside a function and decorate the function with a model
+    specification. When that function is called, the template is rendered using
+    the arguments passed to the function, and the rendered prompt is passed to
+    a language model instantiated with the arguments passed to the decorator.
+
+    The previous example is equivalent to the following:
+
+    >>> import outlines.text as text
+    >>>
+    >>> @text.completion("openai/davinci")
+    ... def answer(question):
+    ...     "I have a {{question}}"
+    ...
+    >>> answer, _ = answer("How are you?")
+
+    Decorated functions return two objects: the first represents the output of
+    the language model call, the second represents the concatenation of the
+    rendered prompt with the output of the language model call. The latter can
+    be used in contexts where one expands an initial prompt with recursive calls
+    to language models.
+
+    Parameters
+    ----------
+    model_path
+        A string of the form "model_provider/model_name"
+    stop_at
+        A list of tokens which, when found, stop the generation.
+    max_tokens
+        The maximum number of tokens to generate.
+    temperature
+        Value used to modulate the next token probabilities.
+ + """ + if "/" not in model_path: + raise ValueError("Model names must be in the form 'provider_name/model_name'") + + provider_name = model_path.split("/")[0] + model_name = model_path[len(provider_name) + 1 :] + + try: + model_cls = getattr(models.text_completion, provider_name) + except KeyError: + raise ValueError(f"The model provider {provider_name} is not available.") + + llm = model_cls( + model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature + ) + + def decorator(fn: Callable): + prompt_fn = text.prompt(fn) + + def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: + """Call the generative model with the rendered template. + + Building prompts with recursive calls to language models is common + in prompt engineering, we thus return both the raw answer from the + language model as well as the rendered prompt including the answer. + + Returns + ------- + A tuple that contains the result of the language model call, and the + rendered prompt concatenated with the result of the language model + call. + + """ + prompt = prompt_fn(*args, **kwargs) + result = llm(prompt) + return result, prompt + result + + return wrapper + + return decorator diff --git a/outlines/text.py b/outlines/text/prompts.py similarity index 55% rename from outlines/text.py rename to outlines/text/prompts.py index 92fff486..911b5f35 100644 --- a/outlines/text.py +++ b/outlines/text/prompts.py @@ -1,11 +1,9 @@ import inspect import re -from typing import Any, Callable, Dict, List, Optional, Tuple, cast +from typing import Any, Callable, Dict, List, Optional, cast from jinja2 import StrictUndefined, Template -import outlines.models as models - def render(template: str, **values: Optional[Dict[str, Any]]) -> str: r"""Parse a Jinaj2 template and translate it into an Outlines graph. @@ -160,105 +158,3 @@ def wrapper(*args: Optional[List[str]], **kwargs: Optional[Dict[str, str]]) -> s return render(template, **bound_arguments.arguments) return wrapper - - -def completion( - model_path: str, - *, - stop_at: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, -) -> Callable: - """Decorator that simplifies calls to language models. - - Prompts that are passed to language models are often rendered templates, - and the workflow typically looks like: - - >>> import outlines - >>> from outlines.models import OpenAICompletion - >>> - >>> llm = OpenAICompletion("davinci") - >>> tpl = "I have a {{question}}" - >>> prompt = outlines.render(tpl, question="How are you?") - >>> answer = llm(prompt) - - While explicit, these 4 lines have the following defaults: - - 1. The prompt is hidden; - 2. The language model instantiation is far from the prompt; prompt templates - are however attached to a specific language model call. - 3. The intent behind the language model call is hidden. - - To encapsulate the logic behind language model calls, we thus define the - template prompt inside a function and decorate the function with a model - specification. When that function is called, the template is rendered using - the arguments passed to the function, and the rendered prompt is passed to - a language model instantiated with the arguments passed to the decorator. - - The previous example is equivalent to the following: - - >>> import outlines.text as text - >>> - >>> @text.completion("openai/davinci") - ... def answer(question): - ... "I have a {{question}}" - ... 
- >>> answer, _ = answer("How are you?") - - Decorated functions return two objects: the first represents the output of - the language model call, the second represents the concatenation of the - rendered prompt with the output of the language model call. The latter can - be used in context where one expands an initial prompt with recursive calls - to language models. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - stop_at - A list of tokens which, when found, stop the generation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - """ - if "/" not in model_path: - raise ValueError("Model names must be in the form 'provider_name/model_name'") - - provider_name = model_path.split("/")[0] - model_name = model_path[len(provider_name) + 1 :] - - try: - model_cls = getattr(models.text_completion, provider_name) - except KeyError: - raise ValueError(f"The model provider {provider_name} is not available.") - - llm = model_cls( - model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature - ) - - def decorator(fn: Callable): - prompt_fn = prompt(fn) - - def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: - """Call the generative model with the rendered template. - - Building prompts with recursive calls to language models is common - in prompt engineering, we thus return both the raw answer from the - language model as well as the rendered prompt including the answer. - - Returns - ------- - A tuple that contains the result of the language model call, and the - rendered prompt concatenated with the result of the language model - call. - - """ - prompt = prompt_fn(*args, **kwargs) - result = llm(prompt) - return result, prompt + result - - return wrapper - - return decorator diff --git a/tests/test_text.py b/tests/text/test_prompt.py similarity index 100% rename from tests/test_text.py rename to tests/text/test_prompt.py From ddcc3849545878551cab07311eaaf319d112df81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 24 Apr 2023 10:01:23 +0200 Subject: [PATCH 076/734] Add `text.function` utility to chain prompt, model and validation --- README.md | 63 +++++++++++++++++++++++++++++++++++++ outlines/text/__init__.py | 1 + outlines/text/functions.py | 41 ++++++++++++++++++++++++ pyproject.toml | 2 ++ tests/text/test_function.py | 51 ++++++++++++++++++++++++++++++ 5 files changed, 158 insertions(+) create mode 100644 outlines/text/functions.py create mode 100644 tests/text/test_function.py diff --git a/README.md b/README.md index bebd22cc..c82cb64a 100644 --- a/README.md +++ b/README.md @@ -122,3 +122,66 @@ import outlines.image as image def generate(subject, location): "A photo of a {{subject}} riding a horse in {{location}}." ``` + +## Natural language functions + +Large language models can be prompted so their output can be parsed into a data structure that can be manipulated by programming languages. The combination prompt + model call + output parser can thus be thought as a "natural language" function. + +``` python +import json +import outlines.text as text +import outlines.models as models + + +@text.prompt +def prime_numbers(n: int): + """Return a list that contains all prime numbers between 1 and {{n}}. + + The output must be parsable as a Python list. 
+ """ + +def parse(result): + return json.loads(result) + +get_prime_numbers = text.function( + models.text_completion.openai("gpt-3.5-turbo"), + prime_numbers, + parse +) + +get_prime_numbers(10) +# [2, 3, 5, 7] + +``` + +For more complex outputs one can pass a Pydantic model to `text.function`, which will be used to parse the output: + +``` python +from pydantic import BaseModel +import outlines.text as text + +class Joke(BaseModel): + joke: str + explanation: str + + +@text.prompt +def joke_ppt(n: int): + """Tell a joke and explain why the joke is funny. + + RESPONSE FORMAT: + { + "joke": "The joke" + "explanation": "The explanation of why the joke is funny" + } + """ + +tell_a_joke = text.function( + models.text_completion.openai("gpt-3.5-turbo"), + joke_ppt, + Joke +) + +get_prime_numbers(10) +# [2, 3, 5, 7] +``` diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 9fc70a39..96a81806 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,2 +1,3 @@ from .completions import completion +from .functions import function from .prompts import prompt, render diff --git a/outlines/text/functions.py b/outlines/text/functions.py new file mode 100644 index 00000000..dfbba774 --- /dev/null +++ b/outlines/text/functions.py @@ -0,0 +1,41 @@ +import functools +from dataclasses import dataclass +from typing import Callable, Optional, Union + +from pydantic import BaseModel + +FunctionType = type(lambda x: None) +BaseModelType = type(BaseModel) + + +@dataclass +class function: + model: Callable + prompt: Callable + validator: Optional[Union[Callable, BaseModel]] = None + + def __call__(self, *args, **kwargs): + rendered_prompt = self.prompt(*args, **kwargs) + result = self.model(rendered_prompt) + validated_result = validate(self.validator, result) + return validated_result + + +@functools.singledispatch +def validate(validator, result): + if validator is not None: + raise NotImplementedError( + f"Cannot validate the input with validator of type {type(validator)}" + ) + else: + return result + + +@validate.register(BaseModelType) +def validate_pydantic(validator, result): + return validator.parse_raw(result) + + +@validate.register(FunctionType) +def validate_function(validator, result): + return validator(result) diff --git a/pyproject.toml b/pyproject.toml index 0c88d063..1fb640a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ dependencies = [ "joblib", "numpy", "pillow", + "pydantic", ] dynamic = ["version"] @@ -56,6 +57,7 @@ module = [ "openai", "PIL", "PIL.Image", + "pydantic", "pytest", "torch", "transformers", diff --git a/tests/text/test_function.py b/tests/text/test_function.py new file mode 100644 index 00000000..ac473b61 --- /dev/null +++ b/tests/text/test_function.py @@ -0,0 +1,51 @@ +import json + +from pydantic import BaseModel + +import outlines.text as text + + +def test_function_no_validator(): + def passthrough_model(prompt: str): + return prompt + + @text.prompt + def prompt(query: str): + "{{query}}" + + fn = text.function(passthrough_model, prompt) + assert fn("Hello") == "Hello" + + +def test_function_fn_validator(): + def constant_model(_): + return "[1, 2, 3]" + + @text.prompt + def prompt(query: str): + "{{query}}" + + def validator(result): + return json.loads(result) + + fn = text.function(constant_model, prompt, validator) + assert fn("Hello") == [1, 2, 3] + + +def test_function_pydantic_validator(): + class Response(BaseModel): + thought: str + command: str + + def constant_model(_): + return '{"thought": "test 
thought", "command": "resume"}' + + @text.prompt + def prompt(query: str): + "{{query}}" + + fn = text.function(constant_model, prompt, Response) + result = fn("Hello") + assert isinstance(result, Response) + assert result.thought == "test thought" + assert result.command == "resume" From 0bab88a2b3276f0a707dda60fef40624e2d94577 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 24 Apr 2023 10:19:58 +0200 Subject: [PATCH 077/734] Make the prompt's template accessible --- README.md | 13 ++++ outlines/text/prompts.py | 139 ++++++++++++++++++++++---------------- tests/text/test_prompt.py | 6 ++ 3 files changed, 101 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index c82cb64a..6c53ecf3 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,19 @@ def my_agent(name, goals): jarvis = ft.partial(my_agent, "JARVIS") ``` +The template contained in template functions remains accessible: + +``` python +import outlines.text as text + +@text.prompt +def prompt(): + "I am accessible" + +prompt.template +# I am accessible +``` + ### Tools Prior work has shown that we can teach language models to call external functions to get additional informations or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt we introduce a `@outlines.tool` decorator which automatically extracts the needed information from the function's definition: diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index 911b5f35..5cc0fe22 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -1,10 +1,92 @@ import inspect import re +from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, cast from jinja2 import StrictUndefined, Template +@dataclass +class Prompt: + """Represents a prompt function. + + We return a `Prompt` class instead of a simple function so the + template defined in prompt functions can be accessed. + + """ + + template: str + signature: inspect.Signature + + def __post_init__(self): + self.parameters: List[str] = list(self.signature.parameters.keys()) + + def __call__(self, *args, **kwargs) -> str: + """Render and return the template. + + Returns + ------- + The rendered template as a Python ``str``. + + """ + bound_arguments = self.signature.bind(*args, **kwargs) + bound_arguments.apply_defaults() + return render(self.template, **bound_arguments.arguments) + + def __str__(self): + return self.template + + +def prompt(fn: Callable) -> Prompt: + """Decorate a function that contains a prompt template. + + This allows to define prompts in the docstring of a function and simplify their + manipulation by providing some degree of encapsulation. It uses the `render` + function internally to render templates. + + >>> import outlines + >>> + >>> @outlines.prompt + >>> def build_prompt(question): + ... "I have a ${question}" + ... + >>> prompt = build_prompt("How are you?") + + This API can also be helpful in an "agent" context where parts of the prompt + are set when the agent is initialized and never modified later. In this situation + we can partially apply the prompt function at initialization. + + >>> import outlines + >>> import functools as ft + ... + >>> @outlines.prompt + ... def solve_task(name: str, objective: str, task: str): + ... '''Your name is {{name}}. + .. Your overall objective is to {{objective}}. + ... Please solve the following task: {{task}} + ... ''' + ... 
+ >>> hal = ft.partial(solve_taks, "HAL", "Travel to Jupiter") + + Returns + ------- + A `Prompt` callable class which will render the template when called. + + """ + + signature = inspect.signature(fn) + + # The docstring contains the template that will be rendered to be used + # as a prompt to the language model. + docstring = fn.__doc__ + if docstring is None: + raise TypeError("Could not find a template in the function's docstring.") + + template = cast(str, docstring) + + return Prompt(template, signature) + + def render(template: str, **values: Optional[Dict[str, Any]]) -> str: r"""Parse a Jinaj2 template and translate it into an Outlines graph. @@ -101,60 +183,3 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: undefined=StrictUndefined, ) return jinja_template.render(**values) - - -def prompt(fn: Callable) -> Callable: - """Decorate a function that contains a prompt template. - - This allows to define prompts in the docstring of a function and simplify their - manipulation by providing some degree of encapsulation. It uses the `render` - function internally to render templates. - - >>> import outlines - >>> - >>> @outlines.prompt - >>> def build_prompt(question): - ... "I have a ${question}" - ... - >>> prompt = build_prompt("How are you?") - - This API can also be helpful in an "agent" context where parts of the prompt - are set when the agent is initialized and never modified later. In this situation - we can partially apply the prompt function at initialization. - - >>> import outlines - >>> import functools as ft - ... - >>> @outlines.prompt - ... def solve_task(name: str, objective: str, task: str): - ... '''Your name is {{name}}. - .. Your overall objective is to {{objective}}. - ... Please solve the following task: {{task}} - ... ''' - ... - >>> hal = ft.partial(solve_taks, "HAL", "Travel to Jupiter") - - """ - - sig = inspect.signature(fn) - - # The docstring contains the template that will be rendered to be used - # as a prompt to the language model. - docstring = fn.__doc__ - if docstring is None: - raise TypeError("Could not find a template in the function's docstring.") - - def wrapper(*args: Optional[List[str]], **kwargs: Optional[Dict[str, str]]) -> str: - """Render and return the template. - - Returns - ------- - The rendered template as a Python ``str``. 
- - """ - template = cast(str, docstring) # for typechecking - bound_arguments = sig.bind(*args, **kwargs) - bound_arguments.apply_defaults() - return render(template, **bound_arguments.arguments) - - return wrapper diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py index 455d50d0..7763f612 100644 --- a/tests/text/test_prompt.py +++ b/tests/text/test_prompt.py @@ -99,6 +99,9 @@ def test_prompt_basic(): def test_tpl(variable): """{{variable}} test""" + assert test_tpl.template == "{{variable}} test" + assert test_tpl.parameters == ["variable"] + with pytest.raises(TypeError): test_tpl(v="test") @@ -121,6 +124,9 @@ def test_prompt_kwargs(): def test_kwarg_tpl(var, other_var="other"): """{{var}} and {{other_var}}""" + assert test_kwarg_tpl.template == "{{var}} and {{other_var}}" + assert test_kwarg_tpl.parameters == ["var", "other_var"] + p = test_kwarg_tpl("test") assert p == "test and other" From 691e5a7d36432f161ea42c348463f20c9f8e956e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 24 Apr 2023 16:38:59 +0200 Subject: [PATCH 078/734] Add an in-memory vector store --- outlines/vectors/__init__.py | 62 +++++++++++++++++++++++++++++++++ outlines/vectors/retrieval.py | 27 ++++++++++++++ pyproject.toml | 4 ++- tests/vectors/test_retrieval.py | 24 +++++++++++++ tests/vectors/test_vectors.py | 21 +++++++++++ 5 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 outlines/vectors/__init__.py create mode 100644 outlines/vectors/retrieval.py create mode 100644 tests/vectors/test_retrieval.py create mode 100644 tests/vectors/test_vectors.py diff --git a/outlines/vectors/__init__.py b/outlines/vectors/__init__.py new file mode 100644 index 00000000..2e208d19 --- /dev/null +++ b/outlines/vectors/__init__.py @@ -0,0 +1,62 @@ +from collections import deque +from typing import Callable, Dict, List, Tuple + +import numpy as np + +from outlines.vectors.retrieval import cosine_similarity + + +class VectorStore: + """Represents a vector store. + + Vector stores are used to store embeddings and, given a query, retrieve the + closest entries to this query. This class provides a layer of abstraction + on top of practical implementations and integrations. + + Attributes + ---------- + embedding_model + A function which returns an `numpy.ndarray` of floats when passed a string. + retrieval_fn + A function which returns the nearest vector to a given query vector in a list + of vectors. Defaults to cosine similarity. + storage + A list of tuples where text and the corresponding embeddings are stored. + + """ + + def __init__( + self, embedding_model: Callable, retrieval_fn: Callable = cosine_similarity + ): + self.embedding_model = embedding_model + self.retrieval_fn = retrieval_fn + self.storage: List[Tuple[np.ndarray, str]] = [] + + def query(self, query: str, k: int = 1) -> List[str]: + """Find the store entries that are closest to the query. + + Parameters + ---------- + query + A string for which we want to find the closest matches in the store. + k + The number of closest matches to return. + + """ + query_embedding = self.embedding_model(query) + top_k_indices = self.retrieval_fn( + [elem[0] for elem in self.storage], query_embedding, k + ) + return [self.storage[i][1] for i in top_k_indices] + + def insert(self, query: str) -> None: + """Insert the query and its embedding vector in the store. + + Parameters + ---------- + query + The string to insert in the store. 
+ + """ + query_embedding = self.embedding_model(query) + self.storage.append((query_embedding, query)) diff --git a/outlines/vectors/retrieval.py b/outlines/vectors/retrieval.py new file mode 100644 index 00000000..aed2fbb4 --- /dev/null +++ b/outlines/vectors/retrieval.py @@ -0,0 +1,27 @@ +from typing import List, Sequence + +import numpy as np +import scipy.spatial as spatial + + +def cosine_similarity( + vectors: Sequence[np.ndarray], query: np.ndarray, k: int = 1 +) -> List[np.ndarray]: + """Use cosine similarity to retrieve the `top_n` closest vectors to the query. + + Be mindful that Scipy computes the cosine distance, defined as one minus the cosine + similarity. + + Parameters + ---------- + vectors + A sequence that contains the vectors to search from. + query + The vector whose nearest neighbour we want to find. + k + The number of closest matches to return. + + """ + similarities = [spatial.distance.cosine(v, query) for v in vectors] + top_n_indices = np.argsort(similarities)[:k] + return top_n_indices diff --git a/pyproject.toml b/pyproject.toml index 1fb640a5..0421b927 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,7 @@ dependencies = [ "numpy", "pillow", "pydantic", + "scipy", ] dynamic = ["version"] @@ -53,12 +54,13 @@ module = [ "diffusers", "jinja2", "joblib", - "numpy", + "numpy.*", "openai", "PIL", "PIL.Image", "pydantic", "pytest", + "scipy.*", "torch", "transformers", ] diff --git a/tests/vectors/test_retrieval.py b/tests/vectors/test_retrieval.py new file mode 100644 index 00000000..8ffc59e3 --- /dev/null +++ b/tests/vectors/test_retrieval.py @@ -0,0 +1,24 @@ +import numpy as np +from numpy.testing import assert_array_equal +from scipy.spatial.transform import Rotation as R + +from outlines.vectors.retrieval import cosine_similarity + + +def test_cosine_similarity(): + query = np.ones(3) + vectors = [ + R.from_rotvec([0, 0, np.pi / 3]).apply(query), + query, + R.from_rotvec([0, 0, np.pi / 4]).apply(query), + R.from_rotvec([0, 0, np.pi / 5]).apply(query), + R.from_rotvec([0, 0, np.pi / 6]).apply(query), + ] + + result_idx = cosine_similarity(vectors, query) + assert_array_equal(result_idx[0], 1) + + results_idx = cosine_similarity(vectors, query, k=3) + assert_array_equal(results_idx[0], 1) + assert_array_equal(results_idx[1], 4) + assert_array_equal(results_idx[2], 3) diff --git a/tests/vectors/test_vectors.py b/tests/vectors/test_vectors.py new file mode 100644 index 00000000..71eb9afa --- /dev/null +++ b/tests/vectors/test_vectors.py @@ -0,0 +1,21 @@ +import numpy as np + +from outlines.vectors import VectorStore + + +def test_vector_store(): + def dummy_embedding_model(query: str): + """We compute a simplistic embedding by converting characters to an int.""" + return np.array([ord(c) for c in query]) + + store = VectorStore(dummy_embedding_model) + + store.insert("Test1") + store.insert("Test2") + assert len(store.storage) == 2 + + result = store.query("Test1") + assert result[0] == "Test1" + + result = store.query("Test2") + assert result[0] == "Test2" From 2d8a83da633e9e12e3c1862a804c4567e019c00d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 25 Apr 2023 14:05:49 +0200 Subject: [PATCH 079/734] Use Jinja filters instead of a `tool` decorator --- README.md | 8 +-- outlines/__init__.py | 2 - outlines/text/prompts.py | 70 +++++++++++++++++++++- outlines/tools/__init__.py | 116 ------------------------------------- tests/test_tools.py | 58 ------------------- tests/text/test_prompt.py | 50 ++++++++++++++++ 6 files changed, 121 
insertions(+), 183 deletions(-) delete mode 100644 outlines/tools/__init__.py delete mode 100644 tests/test_tools.py diff --git a/README.md b/README.md index 6c53ecf3..f710c428 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ prompt.template ### Tools -Prior work has shown that we can teach language models to call external functions to get additional informations or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt we introduce a `@outlines.tool` decorator which automatically extracts the needed information from the function's definition: +Prior work has shown that we can teach language models to call external functions to get additional informations or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt we define custom Jinja filters that can extract the function's name, description, signature and source: ``` python @@ -73,13 +73,11 @@ import outlines import outlines.text as text -@outlines.tool def google_search(query: str): """Google Search""" pass -@outlines.tool def wikipedia_search(query: str): """Wikipedia Search""" pass @@ -90,7 +88,9 @@ def my_commands(tools: List[Callable]): """AVAILABLE COMMANDS: {% for tool in tools %} - {{loop.counter}}. {{tool.name}}, {{tool.description}}, args: {{tool.signature}} + TOOL + {{ tool | name }}, {{ tool | description }}, args: {{ tool | signature }} + {{ tool | source }} {% endfor %} """ diff --git a/outlines/__init__.py b/outlines/__init__.py index 0184b20d..e8375e89 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,12 +1,10 @@ """Outlines is a Generative Model Programming Framework.""" from outlines.image import generation from outlines.text import completion, prompt, render -from outlines.tools import tool __all__ = [ "completion", "generation", "prompt", "render", - "tool", ] diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index 5cc0fe22..ce62bbd0 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -1,9 +1,10 @@ import inspect import re +import textwrap from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, cast -from jinja2 import StrictUndefined, Template +from jinja2 import Environment, StrictUndefined @dataclass @@ -175,11 +176,74 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: # used to continue to the next line without linebreak. 
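    # For example, "one  two\n   three" becomes "one two\n   three": whitespace
    # runs that follow a word collapse to a single space, while runs that start
    # with a linebreak (i.e. indentation) are preserved.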
template = re.sub(r"(?![\r\n])(\b\s+)", " ", template) - jinja_template = Template( - template, + env = Environment( trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=False, undefined=StrictUndefined, ) + env.filters["name"] = get_fn_name + env.filters["description"] = get_fn_description + env.filters["source"] = get_fn_source + env.filters["signature"] = get_fn_signature + + jinja_template = env.from_string(template) + return jinja_template.render(**values) + + +def get_fn_name(fn: Callable): + """Returns the name of a callable.""" + if not callable(fn): + raise TypeError("The `name` filter only applies to callables.") + + if not hasattr(fn, "__name__"): + name = type(fn).__name__ + else: + name = fn.__name__ + + return name + + +def get_fn_description(fn: Callable): + """Returns the first line of a callable's docstring.""" + if not callable(fn): + raise TypeError("The `description` filter only applies to callables.") + + docstring = inspect.getdoc(fn) + if docstring is None: + description = "" + else: + description = docstring.split("\n")[0].strip() + + return description + + +def get_fn_source(fn: Callable): + """Return the source code of a callable.""" + if not callable(fn): + raise TypeError("The `source` filter only applies to callables.") + + source = textwrap.dedent(inspect.getsource(fn)) + re_search = re.search(re.compile(r"(\bdef\b.*)", re.DOTALL), source) + if re_search is not None: + source = re_search.group(0) + else: + raise TypeError("Could not read the function's source code") + + return source + + +def get_fn_signature(fn: Callable): + """Return the signature of a callable.""" + if not callable(fn): + raise TypeError("The `source` filter only applies to callables.") + + source = textwrap.dedent(inspect.getsource(fn)) + re_search = re.search(re.compile(r"\(([^)]+)\)"), source) + if re_search is None: + signature = "" + else: + signature = re_search.group(1) + + return signature diff --git a/outlines/tools/__init__.py b/outlines/tools/__init__.py deleted file mode 100644 index e96c11cc..00000000 --- a/outlines/tools/__init__.py +++ /dev/null @@ -1,116 +0,0 @@ -import functools -import inspect -import re -import textwrap -from typing import Callable, Optional - - -class Tool: - """An Outlines tool definition. - - Wraps python functions and automatically extracts their name, definition and - description. - - Attributes - ---------- - fn - The function being wrapped. - name - The name of the function. - description - The function's description as read in the first line of the docstring, - or passed at instantiation. - signature - The function's signature in a string format. - - """ - - def __init__(self, fn: Callable, description: Optional[str] = None): - """ - - Parameters - ---------- - fn - The function being wrapped. - description - The description contained in the function's docstring will - be overriden by this value. - - """ - if not callable(fn): - raise TypeError("`Tool` must be instantiated by passing a callable.") - self.fn = fn - - # Get the function's name - if not hasattr(fn, "__name__"): - self.name = type(fn).__name__ - else: - self.name = fn.__name__ - - # When unspecified, the docstring's first line is used as a description - if description is None: - docstring = inspect.getdoc(fn) - if docstring is None: - description = None - else: - description = docstring.split("\n")[0].strip() - - self.description = description - - # Get the function's source code, without the decorator if present. 
- source = textwrap.dedent(inspect.getsource(fn)) - re_search = re.search(re.compile(r"(\bdef\b.*)", re.DOTALL), source) - if re_search is not None: - source = re_search.group(0) - else: - raise TypeError("Could not read the function's source code") - self.source = source - - # Extract the signature part of the function's source code - re_search = re.search(re.compile(r"\(([^)]+)\)"), source) - if re_search is None: - self.signature = "" - else: - self.signature = re_search.group(1) - - def __call__(self, *args, **kwargs): - return self.fn(*args, **kwargs) - - -def tool(fn=None, *, description=None): - """Decorator to designate a function as a tool. - - Parameters - ---------- - description - The description contained in the function's docstring will - be overriden by this value. - - Returns - ------- - A `Tool` object which will call the decorated function when called. - - Examples - -------- - - Define a simple function, its description will be read from the docstring's - first line: - - >>> @outlines.tool - ... def repeat(word: str, n: int): - ... "Repeat the word n times" - ... return words * n - - We can also override the description: - - >>> @outlines.tool(description="n time the word") - ... def repeat(word: str, n: int): - ... "Repeat the word n times" - ... return words * n - - """ - - if fn is not None: - return Tool(fn, description=description) - else: - return functools.partial(tool, description=description) diff --git a/tests/test_tools.py b/tests/test_tools.py deleted file mode 100644 index 7f24a667..00000000 --- a/tests/test_tools.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import List - -from outlines.tools import Tool, tool - - -def test_Tool_basic(): - def test_function(): - pass - - fn = Tool(test_function) - assert fn.name == "test_function" - assert fn.description is None - assert fn.source == "def test_function():\n pass\n" - assert fn.signature == "" - - def test_function_description(): - """A description.""" - pass - - fn = Tool(test_function_description) - assert fn.description == "A description." - - def test_function_set_description(): - """A description.""" - pass - - fn = Tool(test_function_description, description="Another") - assert fn.description == "Another" - - def test_function_signature(one: int, two: List[str], three: float = 1.0): - pass - - fn = Tool(test_function_signature) - assert fn.signature == "one: int, two: List[str], three: float = 1.0" - - def test_function_call(one, two=2): - return one + two - - fn = Tool(test_function_call) - assert fn(1) == 3 - assert fn(1, 4) == 5 - assert fn(1, two=3) == 4 - - -def test_tool_decorator(): - @tool - def test_function(): - pass - - assert test_function.name == "test_function" - assert test_function.description is None - - @tool(description="Another") - def test_function_description(): - "A description" - pass - - assert test_function_description.description == "Another" diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py index 7763f612..8333bd39 100644 --- a/tests/text/test_prompt.py +++ b/tests/text/test_prompt.py @@ -1,3 +1,5 @@ +from typing import List + import pytest import outlines.text as text @@ -149,3 +151,51 @@ def test_empty(variable): @text.prompt def test_only_code(variable): return variable + + +def test_prompt_function(): + def empty_fn(): + pass + + def with_description(): + """A description. + + But this is ignored. 
+        """
+        pass
+
+    @text.prompt
+    def name_description_ppt(fn):
+        """
+        {{fn|name}}: {{fn|description}}
+        """
+
+    rendered = name_description_ppt(empty_fn)
+    assert rendered == "empty_fn: "
+
+    rendered = name_description_ppt(with_description)
+    assert rendered == "with_description: A description."
+
+    def with_signature(one: int, two: List[str], three: float = 1.0):
+        pass
+
+    @text.prompt
+    def name_signature_ppt(fn):
+        """
+        {{fn|name}}: {{fn|signature}}
+        """
+
+    rendered = name_signature_ppt(with_signature)
+    assert rendered == "with_signature: one: int, two: List[str], three: float = 1.0"
+
+    def test_function_call(one, two=2):
+        return one + two
+
+    @text.prompt
+    def source_ppt(fn):
+        """
+        {{fn|source}}
+        """
+
+    rendered = source_ppt(test_function_call)
+    assert rendered == "def test_function_call(one, two=2):\n    return one + two\n"
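For readers following the series, here is a minimal sketch of what this patch enables once applied, going through the public `outlines.text.render` entry point that registers the filters. The `google_search` toy function and the exact outputs shown in comments are illustrative assumptions, not part of the patch:

``` python
# A minimal sketch, assuming the patched library is installed as `outlines`.
import outlines.text as text


def google_search(query: str):
    """Google Search"""
    pass


# `render` registers the new `name`, `description`, `signature` and `source`
# filters on its Jinja2 environment, so any template string can use them.
print(text.render("{{ fn | name }}", fn=google_search))         # google_search
print(text.render("{{ fn | description }}", fn=google_search))  # Google Search
print(text.render("{{ fn | signature }}", fn=google_search))    # query: str
```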
From ab47242bc1955cacfae2041ba1c85e966cbba09a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Tue, 25 Apr 2023 22:20:56 +0200
Subject: [PATCH 080/734] Add a Jinja filter to display a simplified schema of a Pydantic model

---
 README.md                 | 31 ++++++++++++++++++++++++++++++-
 outlines/text/prompts.py  | 38 ++++++++++++++++++++++++++++++++++++++
 tests/text/test_prompt.py | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 102 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f710c428..3ba0c402 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,6 @@ Prior work has shown that we can teach language models to call external function
 ``` python
 from typing import Callable, List

-import outlines
 import outlines.text as text


@@ -97,6 +96,36 @@ def my_commands(tools: List[Callable]):
 prompt = my_commands([google_search, wikipedia_search])
 ```

+### Response models
+
+We can instruct models to return their output in a pre-defined format, often JSON. To avoid duplicating information between the response model's definition and the format description passed to the prompt, we define a custom Jinja filter that can extract the expected response's schema:
+
+``` python
+from pydantic import BaseModel
+import outlines.text as text
+
+class Joke(BaseModel):
+    joke: str
+    explanation: str
+
+@text.prompt
+def joke_ppt(response):
+    """Tell a joke and explain why the joke is funny.
+
+    RESPONSE FORMAT:
+    {{ response | schema }}
+    """
+
+joke_ppt(Joke)
+# Tell a joke and explain why the joke is funny.
+#
+# RESPONSE FORMAT:
+# {
+#   "joke": "<joke>",
+#   "explanation": "<explanation>"
+# }
+```

 ## Text completion

 Prompts are often attached to a given model and specific settings, but this can

diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py
index ce62bbd0..e48f961b 100644
--- a/outlines/text/prompts.py
+++ b/outlines/text/prompts.py
@@ -1,10 +1,12 @@
 import inspect
+import json
 import re
 import textwrap
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional, cast

 from jinja2 import Environment, StrictUndefined
+from pydantic import BaseModel


 @dataclass
@@ -186,6 +188,7 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str:
     env.filters["description"] = get_fn_description
     env.filters["source"] = get_fn_source
     env.filters["signature"] = get_fn_signature
+    env.filters["schema"] = get_pydantic_schema

     jinja_template = env.from_string(template)

@@ -247,3 +250,38 @@ def get_fn_signature(fn: Callable):
         signature = re_search.group(1)

     return signature
+
+
+def get_pydantic_schema(model: type[BaseModel]):
+    """Return the schema of a Pydantic model."""
+    if not type(model) == type(BaseModel):
+        raise TypeError("The `schema` filter only applies to Pydantic models.")
+
+    raw_schema = model.schema()
+    definitions = raw_schema.get("definitions", None)
+    schema = parse_pydantic_schema(raw_schema, definitions)
+
+    return json.dumps(schema, indent=2)
+
+
+def parse_pydantic_schema(raw_schema, definitions):
+    """Parse the output of `BaseModel.schema()`.
+
+    This recursively follows the references to other schemas in case
+    of nested models. Other schemas are stored under the "definitions"
+    key in the schema of the top-level model.
+
+    """
+    simple_schema = {}
+    for name, value in raw_schema["properties"].items():
+        if "description" in value:
+            simple_schema[name] = value["description"]
+        elif "$ref" in value:
+            refs = value["$ref"].split("/")
+            simple_schema[name] = parse_pydantic_schema(
+                definitions[refs[2]], definitions
+            )
+        else:
+            simple_schema[name] = f"<{name}>"
+
+    return simple_schema

diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py
index 8333bd39..1e00fc2e 100644
--- a/tests/text/test_prompt.py
+++ b/tests/text/test_prompt.py
@@ -1,6 +1,7 @@
 from typing import List

 import pytest
+from pydantic import BaseModel, Field

 import outlines.text as text

@@ -199,3 +200,36 @@ def source_ppt(fn):

     rendered = source_ppt(test_function_call)
     assert rendered == "def test_function_call(one, two=2):\n    return one + two\n"
+
+
+def test_prompt_response_model():
+    class SimpleResponse(BaseModel):
+        one: str = Field(description="a description")
+        two: str
+
+    @text.prompt
+    def source_ppt(model):
+        "{{model | schema }}"
+
+    prompt = source_ppt(SimpleResponse)
+    assert prompt == '{\n  "one": "a description",\n  "two": "<two>"\n}'
+
+    class NestedResponse(BaseModel):
+        answer: str
+        thought: SimpleResponse
+
+    prompt = source_ppt(NestedResponse)
+    assert (
+        prompt
+        == '{\n  "answer": "<answer>",\n  "thought": {\n    "one": "a description",\n    "two": "<two>"\n  }\n}'
+    )
+
+    class ConvolutedResponse(BaseModel):
+        part_one: NestedResponse
+        part_two: SimpleResponse
+
+    prompt = source_ppt(ConvolutedResponse)
+    assert (
+        prompt
+        == '{\n  "part_one": {\n    "answer": "<answer>",\n    "thought": {\n      "one": "a description",\n      "two": "<two>"\n    }\n  },\n  "part_two": {\n    "one": "a description",\n    "two": "<two>"\n  }\n}'
+    )
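A minimal sketch of what the new `schema` filter computes, calling the `get_pydantic_schema` helper from the diff above directly; the `Inner` and `Outer` models are hypothetical, and the commented output assumes pydantic v1's `.schema()` layout as handled by `parse_pydantic_schema`:

``` python
# A minimal sketch, assuming the patch above is applied; `Inner` and
# `Outer` are hypothetical models, not part of the repository.
from pydantic import BaseModel, Field

from outlines.text.prompts import get_pydantic_schema


class Inner(BaseModel):
    value: str = Field(description="a short value")


class Outer(BaseModel):
    answer: str
    detail: Inner


# Fields with a description display it, fields without one fall back to a
# "<field name>" placeholder, and nested models are followed recursively
# through the schema's "definitions" section.
print(get_pydantic_schema(Outer))
# {
#   "answer": "<answer>",
#   "detail": {
#     "value": "a short value"
#   }
# }
```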
From 850c3aecee71ca312664e264dba53e8a8105bd9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 26 Apr 2023 21:50:20 +0200
Subject: [PATCH 081/734] Update README and add logo

---
 README.md                    | 52 +++++++++++++++++++++++------------
 docs/source/_static/logo.png | Bin 0 -> 372647 bytes
 2 files changed, 34 insertions(+), 18 deletions(-)
 create mode 100644 docs/source/_static/logo.png

diff --git a/README.md b/README.md
index 3ba0c402..12c99b6b 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,13 @@
+<div align="center">
+
+<img src="docs/source/_static/logo.png" alt="Outlines Logo">
+
 # Outlines

-Build _reliable_ workflow based on interactions with large language models.
+Build _reliable_ workflows based on interactions with large language models.
+
+
+</div>

 ## Prompt management

@@ -24,7 +31,7 @@ def few_shot_examples(question, examples):
     EXAMPLE: {{ example }}
     {% endfor %}

-    QUESTION: {{question}}
+    QUESTION: {{ question }}
     Let's think step by step.
     """

@@ -36,16 +43,18 @@ Functions can also be _partially evaluated_ just like any function, which can be
 import functools as ft
 import outlines.text as text

+
 @text.prompt
 def my_agent(name, goals):
-    """Your name is {{name}}.
+    """Your name is {{ name }}.

     GOALS:
     {% for goal in goals %}
-    {{loop.counter}}. {{goal}}
+    {{ loop.index }}. {{ goal }}
     {% endfor %}
     """

+
 jarvis = ft.partial(my_agent, "JARVIS")
 ```

@@ -54,10 +63,12 @@ The template contained in template functions remains accessible:

 ``` python
 import outlines.text as text

+
 @text.prompt
 def prompt():
     "I am accessible"

+
 prompt.template
 # I am accessible
 ```

@@ -93,6 +104,7 @@ def my_commands(tools: List[Callable]):
     {% endfor %}
     """

+
 prompt = my_commands([google_search, wikipedia_search])
 ```

@@ -104,18 +116,21 @@ We can instruct models to return their output in a pre-defined format, often JSO
 from pydantic import BaseModel
 import outlines.text as text

+
 class Joke(BaseModel):
     joke: str
     explanation: str

+
 @text.prompt
-def joke_ppt(response):
+def joke_ppt(response_model):
     """Tell a joke and explain why the joke is funny.

     RESPONSE FORMAT:
-    {{ response | schema }}
+    {{ response_model | schema }}
     """

+
 joke_ppt(Joke)
 # Tell a joke and explain why the joke is funny.
 #

@@ -141,11 +156,11 @@ def few_shot_examples(question, examples):
     """You are a question answering AI.

     {% for example in examples %}
-    QUESTION: {{example.question}}
-    ANSWER: {{example.answer}}
+    QUESTION: {{ example.question }}
+    ANSWER: {{ example.answer }}
     {% endfor %}

-    QUESTION: {{question}}
+    QUESTION: {{ question }}
     Let's think step by step.
     """

@@ -160,9 +175,10 @@ A similar syntax can be used with image generation models:

 ``` python
 import outlines.image as image

+
 @image.generation("hf/stabilityai/stable-diffusion-2.1")
 def generate(subject, location):
-    "A photo of a {{subject}} riding a horse in {{location}}."
+    "A photo of a {{ subject }} riding a horse in {{ location }}."
 ```

 ## Natural language functions

@@ -177,23 +193,25 @@
 import outlines.models as models


 @text.prompt
 def prime_numbers(n: int):
-    """Return a list that contains all prime numbers between 1 and {{n}}.
+    """Return a list that contains all prime numbers between 1 and {{ n }}.

     The output must be parsable as a Python list.
     """

+
 def parse(result):
     return json.loads(result)

+
 get_prime_numbers = text.function(
     models.text_completion.openai("gpt-3.5-turbo"),
     prime_numbers,
     parse
 )

+
 get_prime_numbers(10)
 # [2, 3, 5, 7]
-
 ```

@@ -202,20 +220,18 @@
 For more complex outputs one can pass a Pydantic model to `text.function`, which will be used to parse the output:

 ``` python
 from pydantic import BaseModel
 import outlines.text as text

+
 class Joke(BaseModel):
     joke: str
     explanation: str

 @text.prompt
-def joke_ppt(n: int):
+def joke_ppt(response_model):
     """Tell a joke and explain why the joke is funny.
     RESPONSE FORMAT:
-    {
-       "joke": "The joke"
-       "explanation": "The explanation of why the joke is funny"
-    }
+    {{ response_model | schema }}
     """

 tell_a_joke = text.function(
@@ -224,6 +240,5 @@ tell_a_joke = text.function(
     Joke
 )

-get_prime_numbers(10)
-# [2, 3, 5, 7]
+tell_a_joke(Joke)
 ```

diff --git a/docs/source/_static/logo.png b/docs/source/_static/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..9a9f234a746510cb4bb42acd52e2f06cccf1dc74
GIT binary patch
literal 372647
[372647 bytes of binary image data omitted]
zjEJSviK$j1L<~uyn0i0M?KWdx86zPc8ep#{KZ(OmJ%PT%4~Kyv2tvG#P-8LHW_yP8 zAaLCf?G#4vd8#{QKi5($pY%?Gx25fw+BIogE=M~%Pj~BZL!G$5*aqOSwS#MS9r;x2 zf)Uy-rV@L&?KXaL)|)xJHkanwEd)5iU3THRRU3HbdH=wiXFrRkg=$}|7H z5Buz7v_O+z=S>C7|vDIPtLB_ z$M}Mjt%jOC-1V*8kg|ruw;Z#MmtiWsCkT}4MWQlbXxf9|HO~~`LpbQ*pWQ`O>d_;XMoC3P>7{f zeThwzKi*SOoyj=d#-yI+rqRJ{-psNKzskM8yqtxxr|JxWY8vuWfe{P$$DU(e^J?~b z%^TZ!uJh3s#6Pi>a{BU9D0; z!Taj z^>^LH>SedG>7IL8f8V{B(NU`2Gh{3Sm4MJICW#^L;FB0aA=cC|Ac$&+7i?5vbF)d? z#55wq{Cy8(zo(wS{1Z-u-IqYUP9qDcvxJ4<8w8i9X=hw<&1r8LwO>t+YUX14iDb&D zvq84MS%96F7`?6XdWPlpL^07M-%AHLCbbg|(o@?RZ&7KsxaRD4!SZ|9!&e#Jyq>{D z^SN_uoc^aD%acCwNyg(2)&9Z4a#~8X%BfE5&Px^6!eUkm1Tzb+GX^If+fdg2%NMxk zr$47PRN>@Lf1dVUyJ7n)h!|Q9KKrlV;NSlJKXH}$jJC!x&SL|ES4)4T%ILcL_{*Pu z&MrfJ*v$Skg|sQ?VxlsG=c*KIG0z)Pamd&>H(vZ-9PorAsqcLdu9E6|#HSM_V^a#O zk`BL8R7fX`CuW<5hk-J@S42KF+b}$On}q{2bYYj-<@)GG6eeGQTa$pdY7%skWSz5*K4aFqMFHod(fihw6w~N-7IXYX((T%?(c5f<47h-kf$k}CN_&&GzMKnsp}q;d%@>^v&INs zU0RqsAxucqkcT%je)nzMchzrLaqYFV)@`7k2>sSlF_uoVMOZNy&CZuk-ETZ#f&h=F z+3C>iBy<8t5{1;}EnxRUAIILu97p}ILtu}^&{ur>ZdZftLQLPJ7`B%17x}v?pL|vcD=liNi99y{&35f}!R6 zL%S3U)Fa+u@hQEj6~pRPj4iu^l{ekQL$}<@$f`92?G8iMQo&G*4BCk?&Y_(+%NTKl z)?!3xdFY5`n1oRyL`!#Nzo$QqeV=(6?7bVY4Zf zWzy|*>IPt22EsGL6LwOr$rOLNSKd6s(73YuSY|4qk~^!V$!N{yFf}5UU+}NoanXNp z_|RP9^_$SXJ~l+KVb?`G?@M2zRSPkFbwXqDK0|pV4>>eb7B4#z4cU9UbQ{D1P%62n zVJ%JPsc#wP_D_BWd($7-ykH^6edepQ=GTx~MB?DmE3W46-+C_L;yq|39-Ab1?{Qcv zK|s7^E#Lmyr#bC}Bd~!{)4rOu(Eq3VHrP!GTwt zf)yb#io=qOkJ6XK-1@mMbNe+napqZPBS$=r&f-2yNFiR!^sR{I;Buatn)!^2;1Y$g z7Iw=zZv6ZgSaI3q>}~rA+o{c-58L=^jbZE96jnI)4R2uai(X9A8p3MSML=>J!eVo# z$XVzTe`m7kPe-ZPy6~B5dyyIaM7icO?)!PRWK z=>}F@b2Y1PzmsIk7*Xu0+Yt3G3(RwvB%u;jQkNnlsWTClHcG>LhFynfaFF@?EM64^g5AxzTpCII26c3-pEZ1y30)nM2hCPxPq$1Po@o89!{7|IMo%h69A7% zB^=pgl%x$?Mi{yE7S`N&6L(*GJ?%AXskS-{NkG3?!o)$lg;ht~X(JVrTCu7UnhNd2 zGfIbbaf_k-_ThjNPh!t!KL_^M4Xs!@VTcVX*qqPpyCGwmQLA#}=CrQUZL|b31B7q- z4SL&6!y`TBnWBZ6WR2yvpSevmKK+^{edu=L4v`Pwra$tdx4o0&=PqIGJ*%kn*GT#X zxLuT&e(#&K=JpW`^kWsAruSF+@YV@E+vE#=s7f#M>RA-OJ5W-M=ZDP(snNK`XvycnCa^FPfq1nd$@ml`- zgCFAn=doikof_QLYH-G}g#bgDgDKjy(o;-310~D2CVuneIR_ zVzZ_-?LLi>qPX`H3YubCZ@M(9BIP#Iuk@B=y)9I4;W)Jzvgi6~dFuNxo~_%78H#F2 zr;>|*wHrfKf)w&*H-CS*zko>B`qgQgr0dC>c?SxXjF#jTHf*AO%dOmX)zv)o=Uea_ zH&Tg}!7w0j34xF4w3`GL0-rj<7-NYOp+!K$z^YE0zWw*%h!;GI-Jkh%SU4YVQq6-6 z!j$hlq4_U=mfmOC_Nq#Fm{(}ZC+iNB1G@cB@-h!i)7%m%HT6C6k8}bk$R zr*C)zOSZHbN(|a=;p%pdjX4H|60ZXO=6Bb=GnC7@5*rO30y)LMYLjv#SuBr zaYr1;e}46o1lA!w#EVZ|kPDPXw(C8sWbR#A^1O5$n{NCQTQ;p{pC>;H9js)}CX-TO zvrWsWL3=R5*~Va})jX-q>aS)ShDR5=@L$05XP4(=cO}j=XF)US2za#ZkycFW4_9;f z$3M<~&d?qm!`BU~!Wz#y?*jx+I*xd#p7D{vN+x8?cCfAhISo=Hqx3ir##&Sa8-r{a zhjsUJ`KSJgp}_?l_tvwBciROW3^Rv|)a}R^x|Er0f@-qH!&pNn?jWOW!saM9f9|ua zyy8lBSHZRw8yai?J`gs%Rb~}cDV_Gi;<)aYaV3F4S!_$Z?9tW z^82x)4QjC?!WQm5)&^Mhgn>!VhqZJtG_h<*TKJ)U`j#$buOp9V-xE)SeV0OHQ(kXi z5o7V1ZLZi!@sk`q=$`A|2W)R4KOtdTCs8r}jyA$K!MBc^)XbiJ0s1+#d?tg|p_6gv9AO76M(8t%E~TDIJM7jxSQ z{lFZ7AZ}y4BTm{#6cA(Z-chObG16!<;yMh6u*(yU;;1uUK;^^}@j*abjWAKIutzRG zH@Yr0leJl;sA!dvV_B&wEW4G?_*!i5{rm_{h$kKFl-7fG6Umhltx+6}@iFQR&+pIu z5RL0^V&5pDv3U!@!ukAu(?h)W``bBiwy%GdjS_OwKYxYzkOOEAT26V&OSpf`;VTuaQyfAo_ViUO zp@)GW;nr(@jQ9>nh^oh0L3}qzb4TvHJiJ>qLAGq>x}W@zBTjxUW|v*j$P{}aKJA{< zM~h)ryYPq{KQYFnbSJm&%uFJ;`!XVRpLnqlW%hMJq6^^#;@V~!?pML10 zJT44qZy6&Qs&PkijOTyyvqZ-qgBz@OC%n#MZEC@)`9`0I3y@8+b3<}taVWl{R2mL$ zzm5O;=g)EEkteY9%x4oX-32NZn+HJUO@irKHs*Gost&OMt!9H-s{=RP%+EgYN&42T zXNfUHi9^K^Tg8RKim`F_IQ29hckX-9xeIW$2rCBXQNnDCJs1n!!^-2(hI1JWWIxeB*I*jA7bLX??5y!FLX-|iP4}|%HAR#UY3maS! 
zJMr!2`!K$gj4GuQS9*U68ZG)Q)PPW9G+Q@GCm{+0 zP@$zlLjqQ}8*~;9a{P;5!d@>r9TqO6X(Ot(iey21NVo7$EpPLrcAaFi?Lw&&bl?y3 z3T;o0!z0i*l!IC(TCHk+pOb31NKF~Yg3-M3#O)T9R>+-S`XbA}`G0xByxrKaZY_Na z=W^ASjlBB83kaTcEDa;nDmB#k%tF|wu0WVnccfBkF#UwE$en}ch|df#vcN~D4m*-? z@5kRy>()D15em=z(${G3xsaE??5+G^`5MMO1nRL-L@V(GR;Z{Y9$m-1H(x>BIv_-B zI$WZxSK2{5NR*Vv*J`u!cUNJnAxn-v4p*-tSa3c)PsLK56+1tID8nm6DF`Q1N$n!TDaGCqdPsd9(2Jm(W1 zL5@B;wUhI?xHuQe`tDFFATGwV#u@$9ulV^lzQfZ_e=$p*aXQK3 zAvA&jQd(45oLji3nBp^`&>S13?poaO#V>Q`_kY0Bu#V|uc6MP09|mj+JR`M1PX32; zs66Q@wEO!ALW|LqG9{*zf~r%MV&zz(sT^2z2OET~n*s_u+^scwTFzd`g^zinb!pxx zq?2PI3z`ekbXC!WD2|~qO6SgHEdRx&+<)B-=*BT>D%4FHrWaU4;5|X+{^1mSRHb7K zEi7A1!r;<<+3%E-+4F=GV2{OUSiz}a15=uNnyt>u#I-0BfPwNsnfKUGjX7r~cNSR}c4$KavPTZ>Ox&h%btebybX{1vnNuoi>pgR)6Cg zEc?cH*h{Jmuir@jlHIs&-5Q?tPak34>Cd34j#^Yrb2&-^ev`S4b@e0@bTvD?Z*>!v zwXoC`#API&z|e`6=>Gfo^G7~OZS`GLAAcfyf9gEm{`L>>&EMWkZD<~GvyC&L))E+@ zVnW8&ujV%w{~L$yyE{fgf*>6}<$;sidf&3mhCxx6;2&DUoqzldhdk|6bioo_mO9@P zZ1G5=Q2&1)*NVuAB9hED4F9tY!#h;`!t*cqGVX{&(V+qGrVCFOVlodV72{czOiY&*Ax80P2}Gcg!1xw6 zTzN59eE)mA=#B3nI_7wqi|1j0s3=w_6)84tAhOA)v1x*BIMRW2E4cjJcO!S*&n{sV znjL~#gtLOL3HJ|=vG}B?aO~OVk}O()_Sd?xT$u|JnP7W*w0$a97M)TcIDK2OjeAh0 z_np`B21d7O$ZY_9zVVCTNz%0CL?HoIuHc?af5yF6UPfc(8V0`*Ufr7LX7~aC@uYb*7etsDn?^s5E;+a>k5sKoP zZ945Hl_ByA7XI^!s-J8c#o{REEwn-^1i@^ScTA8IT%49S2-Ts~1R?3%MNu4vl$ zXAMJ|Ka)FQO=qjRt^0hsJ9s9Rtvj!h&}{%9JsBb;3sYkZ9oHmsFnaONx$Hw9;pl~X zvGIXb%pIK19~+xE{+(}T@7KMOmW!#>`ckApxP7%3a zwqf|6YZjKM>eax$v&-?ZjWYBguoLz)?^6G36-C>MjJ1%z+{}N!|6KOzcwBQFt;5Q& z&gmcfIC}I^B!hK~no|COFaC&T8@KqBDx;+bvll_cAj1yRY;gAvzs2>xzLJ-o^A0%R z@gxg|@Z?N(#c1XLBe{e+6R}!wC`qG5rPXHS=a=xSfB6CnM_LSMi1msI99k8I17UR# z@$_@fVeskCp%YnxT1ejKq+LRH7B2+{ekL>A$w?|ZFUvC%8)}J*-qQ?rDNDJyRdWuB zrZQ%4(hWH2w28#Q=q8fuZ{RP#xR{mK-%NG9&47U@yPif3p$D<4pfWawCZ2H@=@A)7{ z_0MPIt`*c4&t+L>69>HJB^+}0+i7%^TD?{vd1?LYOJGM%Ae_Mfi*0E|BzvD}nsQf; zk~ub+v=KBL+VHKf^7Bu8g(tl6P24*)$Wt!-3Uin2K@fLv1`mWNet%yyz)> z?7in#tL<)*jYWGl{)y^Y#V4L%Jcc`NVSlI1kMh!N^8_fBwf=bH#7itABvV zTavhy^5m<+292pb{z#s9{zq~17UJqvLSyhg=lo{dl%&lJJqWUFW7@d&c(2z7Lucd% zKkarRT`NK@`EC|=uK2?7MpZFhQb=D?7mwI`)G3v=z}l7EbH&BnammkU-@76W=?W|O zW}8aYLhRFA1Qo0b9d(SkHsdyA(V>TP*mKUHcJc`@IEbr6DUu+9l{BY9%1$;V!R^!( zuZN2w%zBysbrA?Apbg#c80l&TlyWA7qDE2Y2|Ehy2Hdrr+kW!j-2J=XQyCd%P8d-$ z4v8JcJ8V|?i-e4$Y<4gSFefS3SNsbjrIRY@eZvdq!WlwO$yI2v2n zb?OeRv6(I1ogMhLjBjO3|{$;$6B=O}8dy zbTu_YwZvmLjdJ_tKj*+FJ{8?Ij>^!#9hFh0$#rrKQ^rR0c^K9H?Jy+=)o7kB~R8mF>(jB7r70mEyz@a%WLht_VpV&_E&5agD1kc9|! 
z1!`#!n_;}&qR(0Q(;vC`yz^MFWt<@m(nQ)Q#svY52sR)b{;F58^wqDY6^7VKlv1Ix zEmbqi)Z8T~$!c(0vYwr~I4H}Iw+6MhF0&?PDo-xiEWf^%5C)$)mq$HKAQxa zEb}3~kBy`b1-VO?icg(toWSOFtpDwAx$dGLqW3?<0z&!`Bx$2~60anhP_Y#fU_@fp z8P6f7J)1*b@fz4?Z<@vtRclzX%$3Xs*=K9iltZ(6?1;BTBVy7!RN0=Ouc@*kb|(6; z_2-yq0zg^Ak>#?9&8VZT2J+BqE`7_}*nOl;yl$BKz##WG+wA_TGdc8K@1SKoQ52<` zh!R!J7)n8zQxCCb+H8q-Maa&0oM?6hQ*E(i&p$slj9I;bZ$JCF?03RRJpXrhFg9n9 zYBu>`L zK9#r7uAm{ivykZD^l3ZrWZod#N#w3a0a%FGjiP@xa9}mW!-JJ(bw?| zgaIJ|E>5UatB5Ef7BvB5ohBO^EzF|%9Q3qP*#9NZgWYx`uGP^<(u7`Y=8}@?QmmNb z-}%ID(iB_iO|O19C_+ymG<)o{=ZQ?x4l|`VxMbU#1t+R{(gc?Dr?XfOZ06~o`qJm7 z2Z|R^5<;iYOyH)Qx&HeecKb@_9enyv`ri{>!;ENQ zZjn1pdGYwx7~$Hr{P1sI&w)u5yKWO95qF24eO~c59C-GdX^AJQ)DfR)M6wBhs6oZ| z7~XC_!jL~|yU%)`Yht8is+V%l(jV@Ov-a!X<(HRS$+>rJWK*R^B{11QC8Nq}L}UcU z*Kp&tS1`8@fmMuzB}auFqa=0F1u|`zM-xRJTE(5${hC9beg^U2U}oXUgugZW{Cb3s ze=)|qZMI?f>!AzBW$1#LU3-s>9lva%(X&NO)-Yt8R51dv$Bm6+*RAE}XTP0A8%GJp zS~wfCxjw|>U-x$`e8~%m2db&|fXQu+YSYK^3wU?K}a!l)5eC60s7 zd;tf&`5cEmHJ*9DR zA+w9D-3w}r5POFtF+nGW&8t~|<(1s}vrFl$SjC|8ST6(!fkGroP%(5Iw16$HO{=fY zoWl<1=oh_&=*Xj>uMZ6Yn^MT~jofF7aLwR@-aLe|8&hU~nL$w6wKtj|l$?sjF_X}{ zo%6lf<6)o+J&kkMH_X$e&1s=QO0U1o=px-;SCVu>MbP%J@*$S}$3@(CBo--JQ zb`8(_%vTu?E7%}OKTl@Y>=R30H6$5c#rMDduRP`G{jic?f(Y+Zd*D1*V#m&R7K*6p zkPHK+(c<D2VfgEy3*Q7@GrR5{eW=2= zau#$NE#AaAnok^Nw2h2x;1_3|!|vOBh4<1i~p`r$c8@T1m`Oz1?$leDW%rUQfBh9%XmHrxvO92I+6_+lp=mjNF z2&|>mYEx~;aQE%};2rN`*Rh28s%X1`R1I-x8IhO`l|Ih+%s;`wkE2tq5``A$Q;Lx< zQ|eWVYywKD)Gk9w43Otli%$PQmq|jrc1}5<-657qsa+xhiBF4o>xmPGaSrQ%l`C0( z$uC&`i=V;z^~^C5VJl7{cnqO6IPVDq%Xn*?waq4G(Gre&;R{%F`m$dj$j2?1SnGf!li(r?GgdwoQ%a`6dr zRDo`oVU7yt!ez;e@bOWCkp`E%?hWj@sYSAJ1GRdM8#<$$@yU--f5s^^LrXQPrs001 znt*iAu}q|D?$Bo|Kn9uc9PK7SlMTBxZW6~Kd!6(G#>NC&$>^G5Q)*Ids74kY-^d5f zehY7V?ep+S8ynSe-e>M_=HWe|Gs|nu1KJYL@D10qW#fAGI_Y$L)$|NWBtzHo4=OuA z;F>*Vbo^gq%-_y7439jzaI&giIlJB-n+t5(oCUI^U?X_vah*2dNQ-OU^KQc1?qW{6 zO=8<@sLx^EGfv^Ke>jIEtYB=Kv{lYo$aa15K0>`_zTe=DLDgW?W10=xf4+%}zI-9a z9Dfpryy#Vo4^^o2r{1N3*9kjCIui7mHEXNcr0zW%|Ko@J{u^IqY0D5a9D(t~(#8*j ztZpmyBah&O51mhYa4xp5f|U&IFl9Q^g=`+BUo6CqKn)e|5phT*!W*R_!W!3P z_s1W_5r6l$1cx0?Z2FMMVr}Y7f|_E=*QFjzI0FUcO-ppNY`Uyk(2$DBvrZLoHnpgH zI>LxfGZ&uScK;{P1}&P)f~7$TWulv%k*_E3d_iF(Yv6NUxr!knAxH#iHra5+B`o{? 
zf6%$>es-z#LA!%@F;11(Cs+vxDghf^gO!nR%A3w&mov`9SNlk6hEOcxz!j9FY)y(# zHjgc+N?8$@-+SI+LL9xgIu*QJpiyZOu`hd*K%_$37nVYZz}borupy3f<BApm?Js}L?v;>A$KWL<5l_+|argK*C%o}27QO7Hv};v@YDmUl z!W1V>x1wbg?nC+1Ef)=EQ={HfC^Nqf+fW{C^&&di=2=SJ)~t}SijNiNT7)rh-+kQp zz3;H*x4$E5wCNLzj}wAwgk+Yc6=QK0Hn!Td1|s%)^3yo{O|OIbi-`Mbs1*W&VsFn# z0o~$lipgtX?YfmSi%in+^g6a|-^_dV_-mpL(|LkT{d;LQ>$8SI=*%` zQqV{z1}p9q6gFeGc}WuxI#{0ek~eVstt+rW0FgnlhzN;;Fn}O&Jbs@&`Nj9Yh|olA z9WQyP9VLCi%=0;V8V2t(t5!jc$2Qw6zw{R@-gkfc4?Yx)Y!6bM(sc76PTe}&ZT8q% z#}AD$FP?1}9yxU3bXC20cAY);Hu@gXbLo4kIh1R%Kr$t1GzntQ|6}jH<0Px9_3!WA z`-Do}lgT-m5Ea23P!S2DAcps8lPs~|xPL_y?=>1R%$WSGPx zCU&TF!d~wmXP>H5)zv+Nz|455?PqAas=K<*KD$_YV%UPdd%}xKyHi-q=G%6#g zCqI<~&-pY_DM0yNjvZ0xOzyHFIoi}b$g~QqGN=elD`LaX|Cb+p_uCx(hU1v~%x95| zR8WEE-j8$!WW-T&TC(<=I86vPM6hZl*Sz!X%v-sZc`6%%6h9>iea1Xk9hP|6C(nlc zoKDEHvnLXT#&6IsIv zOy#0kIlp~mQ*_+MYqnwWo!olKWi0*0ucDq^sSB~%JuD^is};8V9SDHhpzN1lB2 zF>HI-OJL4CVy*B4ulojU-a^J;odo_>1gzM&JM@PCt`2LH7;cp9O#O6_`Dug;2f%>j z%I)2=l8bhATbvR}(GloIoAp1xiN9R@J(_pj!w!`ys@1?q8)XdbIHD9(iIrvXh}7uumAtdz5gVk(Hv69ZTM5a|YRfN1N6dh>ufYlG^tGCX9DyZL;(D(tUx zRVj0N=6wHiitKiMoN(C#TzS$v*t-!SE5`7)ox@*bjQ_awGNSF~qbnn5$%0@s)!}V@bb5u!LNUP3&uByuQROR7C|1ZE;) z)lEO+`pYll6(_!#kprJfdv=Jg4cgia!M7O&V_q}2VzI^}ZZ|2nW0D(x%>b=HQomK7 zKAYXFN2ygO)QUtUq*cx8I3eD9FAhHIvn1OtKvgQ4c7Os&$!08a!MFnAHGRFNNDJF- zRqD3VOl2HVn{Y>D#`XXy7&YBZ?DELnSd!R6n&PLxqI>zvMHjR1rk^s}N*GlN+ic^7 zL57HGFv`%I)Dtqq3vtEB3l^4H&v^+vu_3+S{gCB(SP0gWZ2y8ZWn0qF~Zdgs& zTAb6yaMg`Yl{lC-PZ<97V7SZRkk9sK1Pc!L*O1x-6Q$5-(f-Zv`1#kqMzVMr+gA4g1bW#Dest{d?5Aqf*Q`d(t8&-q2*+IfZK9DH zDy-la>@8$Tmm;@mx?@N4cCeYw;l}-7R?V>1Vn~U!;>0t~;s;mVj8~~3Mk7|a88iiw zfHjno27kWkVzwDA;RO{^0i`o3(2aZ8>2FiTj!1~|?7 z@l8fJji>~rG;6;1eHLALEj#Nn?Z!AVYc7qQ_u%Pgf0DRdK?i;Zao0Ijreq98y1_go8riH?>#As0d+%<0t=t}I}UxbPUJ4lhKVCAag^e> zBm8^r;FhyL!qq1p&+2QhWxE7Qse-nq;#V-nVEvGBPqV~>`o8<}vTuHqgD<%V@8w65 z%$tR&dIUDFUi6XF6eu^2Nwq;WSl#Px3BL*c|GMXh1 z@-M%m+lusX`!|xzf-T z*UW8DrzJBFeDla&ver{wt-($^aP*hXvGN1OYaAzAbzwO;CC+hM}+ zFeeOe8J?}J2ld<7)(4SwWsY* zG~2`XTrlcbQ!=NeozYAIMY?8!+GK-Yp71snUHU`z3M;639nViu6_55vg@4vt9Qx5u z@VHY?qg^ZE*UDM$p$hifm3+I17Xv1Cr-n^xN~o{8uc-#P3+eg&{J&BRwEG&mRrj;j zLG$vNlFWx#;;2s8iU{sq#6Ld$X|6l|%{=hK>zF?lF%nt)L?{J@pc+D@#JEx{N!xf& z+K(g8`vNaK|I0*A*`H{%ieCxPc!+4605qbDVR>(2Ae(#|Wh2jRYDg*@`8r%%B6)bQ zX=BglVUv6Nr+zq9J|839YMyceVM4blGBZCK<@>}{pY4u0hL>J+5xc(jwcIT!ljRbz z2}rCZ3>CIHPOTlYkCeFk!tZeP>;8kv%KPyqCuldD1V(i}i#it~q-amN8Gk+;{m^q- zwF#fIhts1-0ac<9o=eZI$oUJbvYFi|mto(>a>yq?#NX==P@X+PWpWbzx4&`s`4!IT-oug+ttV7??9<}%u3PbM*px8b+H=jJc{H@gQV;?@|SqS(4ea z(bXEBWGy|pB7uWYI=!i4)6M{N7V~RNWYJNJrPp1>%~xN?f1Y+a!4scEG^>JV6h;JN zvpI+Euz+&nO`6u4wU{+4xZ$LeNN)KjJNRXElAr?x6~#oTSQG@j=6}9P@ce^`N2?ew z1XO3RS!;S|>a?2&$eB=(neR$(rA7_VV|Ld(CA6B3+noYeyPKnM?69(D+?yyAO2=S%04 z9B?4ERwXEx3Gh0xj#XWFuGml9pE^7k_x;EQ^+PARrRNZPYEt~*_TZ681*iUr4zM9^R*J6T zea3$0_@J~0r3%&O9K>@^eJ^)4>v+az2g=-a*%gdm^%H`2f+*`Ka`T~Zal()(nzRF_ z08N)^@y75 z2kp40IBV65cB_HR7lm)Mkcx#=Q0tp;+wENakq_~t%536hi+07ZJS_2&bIu~&aXX}1 z#sh4cW=hSD_yTOFzgKc3+-VrtRAJhn#v9!6qpP{$$Jg?P(@(>H{9d$X`}m$lr#53F z5$Pe#vF(RO?AYTNU9ysGy%0&8h>5Tj#fFkcbJyK?`S&g+-fK_noC=;X zDC_hRtV=(v>Yz80v#+Q^^>Y+;(>s#tN=HB`kJ0Er^FZ=Fy+5f{I zCf;o~;pjRc9az^ zEk+rr7}6c*^Ne#o%i*6thX>}(WtFnnaw*dhQ5It@$*M8-A6dZN-})XuIqnUVm#@N% z)rrz1qmXu55mqEap`FUSNN>{Jm4Z_bVMqG^P3Hw3u8go-IqeA9sX~3#wSna%3}OY8 zAqo`RANx9Xf7LOpv~9FD?Cw{%@?#%`g-em~2@nN%Kn7akcH-Y zI)H5kTuDvAwL}#kFDly(_HZ0>0i{wk6I3!D8?Y?$H0p&f;uY85z&;=O7|lKRz?MUN9M_hanPdMpKEU^|{t73wXhK&hF ztF-HrEU+m%Kd^=$9s3%_Z}-gnQ#C^~UPQGd=cj zY<>Y2kzRcSQHnG(QKZh!3mFm?~6|^S^Dad)**l*8J^Q|G+YgwTKw!Q}b~W$@GSX)rHNm4*7SeGHmrH!CHGg 
z@RH$q+VTfQn4S%KI@;78y17mpaW;6CjJNO_4X%6ho7rWoMX+HtQdZn=LY{fXJE1J3QZr`RH@?NK|8p_Bm&%wpMZA>62xjgmcTd)N&dKj&)=@7f4n34( z0}`j%)1`}59mP#4IE%cGbvj1RLHjM!4Zih=IIu}1LH$fh$_YcH5|g$fg0>+4yp@~3 z@C7DryMy^Az$6KNSR!?Ff1d9#iDg-<%}&pLCeM2NJ7M=-XnPvZ&-D{BD%mvekCn88 zdS%q;PSLuc$o$9&M{SNz$V2&=ArHyF)txP9f%Si`GHw}1=0J{$TTC{wh0=;t+IUe4 zcQ55iq1$J5}RN~M_3hE%FlcnPe0@Rq_rxHl+nhJrZz9NqDWm^ zB~$D{Xa3@0bNdf7d(t2tZn`W#()Tn%#*XKHC{vtQ#H}&>@g_ff)v-K2vcwZDqHSu( zllJ3)kA8%x;^X@v)-eKk9l*tO*y@zbY>PMLGSVc)^F3knqYFSkrlhFic3?1D{&u~4`>jtg$R^58PRaY3?^!PAY*ox4%q8XYF3zotUi}R>? z|7l(zjmH{zX_ISCeJeYRttVK&4hcNgg(Y@-a5m2fpS;#XvrD!)>m}W$<=pO!l<_Ea;o$nyH<6gFxfKsfessNL;2?9gi zSQaNOlBYhISO4Hrp7r_vCfaikCLs}cbnvj?j<7-T#%}!}T=?{P4`>#F{bJy*d@d?`P2+!yOC8&1AT-|Fggc*_r zd5~BC2qp6aKK9b*WgIbX7djDzi0<42vEYZZh)V&l_}aPLnvUUD18Qr=8UNYOSatP{ zcx~5u3;25p0@bL`cT#tXsYY&m}KifBDc) ztj>KUs+&M6MQP4_qIQI>PoS)ua}bNs`92RTzMFBqWNT+k+S(I_Hx19wmJf-h>tY?) zc3-3nbx2V^bB8n%dP3HgN!xl@k7e2eRi|PNEt31jf&tugw@7jLJ~A zEcWa=lN>n_tI?4~PqtWm^|k!$>YwnIGe1hY3bFiC02 z{rvQVlaasvgB^^Irxi(PiOQPsvSIy#`Ml)(3rQdUc+!yyS~O1Ys%f56z&;PG;Y45%VY1tzPO*T|0d9I+GFkPV~!ZEpYDRDDr zrd823{=48=gLr#ph=7Jk!N0v7?EUOHm>{3O$PR`COFzxAJ3D=!W|f{M|aGtx8jXE95`Zlgtn}c+*nun z6r8C;Usq5DknZqr9HnRrcU^cPix%dJ3r$+RZjWD z^)YC45E7S5Y^apDw^HHYPk)BPF20!7lb?dE_=Lt_tm@F^+CmWBB}bIPxlo<^@E|eW z;To<-+~#dcdWh=ICv}T@f((w;3Ln5csc<7xT0;W9F-XND-D787@Wpd^_8F(KD2bp_ z!2})=DSlWYSvStUr7Cw__-$@F<*k&)>qO&|CbM^a9N7o~4 zoOGz1!4&2WWYg~d3>GWX+v!#o#XMxj$77e(lzJV4SW z99vK8>Z`c^wD+)EJ0)l~F(x4i1T|-b`%_`}V_wbvXMT!yrGy!&WW&KG=Uk_U4?Q2| z&HQ&S;x7Z+Qq;6~-_*vIetiYk_RxXjx>Cs<)rI1(SV4J;IBMZH6T)RH_|563^ON^~ zkXcKYv#knA8%;_jgDQF0O30Ye+>^v?d)Q&T@`qPYdcliGsy?NPpS1-jNL636TH!LM z5`D;&ipg*d*H+{}yVw=LDg?T!@JeoA*|>QfF*z2ZFky%t^_hM2OL_UX zFJk%Jxjdl2lxno%h#*kN3G?q+#N|gFP3fK`=na#^X^b{*z`CQ?khlB|xIUTA z_HSx}@JP1T^}k{1s!NiKwn$+(*zK`)%xk(tTWcPB%A0BKxeMvM8u7Y1bJuR*)~|gH zKZ%hv&R*HtzQV~vc<-v;k_%zz#+hp3*rN2tjPbu8#o9pbSh-KEc+fg4MhgRF|sqH2X4|D!ET>sv&uii>G>;=Fp zhDT@1I?7G?ncmT=LeH)U)+)3(71Y#X$0y*n+qn6^zrfz6#N>ua%$$%#3J&@F7id?@ z_~j7g28WfkZcZUX^OSZ>JE=vhteplFo&_c+X#DL@Tz&B+yy>*}!R~v}s#Wlf&R(a( zfbAlAt%>6{y3u6p%4@m)th3lTiBZucx+KJvl(^z?R|}qc$~)NOl(*Ba1o)*8ErQ7l z7Jd2(`7lMVv{T-OKqt7Dfo@u6WQ;Gegy*gxRf;s%DUh5wqz@T%Xpz!_wL)`z62Cdg zn#(TX+T)LdKm38+O+dM=39L}7mJv@Fvkey7Ce`O1!2kEdD|pI)bdKM-y%ehtqHk*T0)>RMQ321HtLO7QsRw+7BA!>hyUWr}2<-m1xghIrtmj zVY`kVE(MJ!g%wNamxy(l7o2?-x7ie33K?xF?z!qm z@aMndwb}?N#$>Y$6yz}<68NTiH9LIY!}CnRF6Wi!%yxQSru1A-64I#6(*O7WFyjqU zAjzh?v$og5CdekIwrUF4r1I(p8jMBHo{f!T?8FAAoN3%X=UNpqQ#p9J9{%+T7XuFy zhD}Tujvtyi?%DOzbKDCW8^ofwYNiqu-HD2!Fenn@;ay^V{`{~E`h{4V@`_NG-0 z35|ioxb^0Sb~{41 zW6Dbxa?{E0;`isC$M%hwN;}OGATPlNLeoavyI~z>-+ejyTi@WoPkfSO`)#mQA8i7p zsH5QAtCqr5DmC!HsLdT{4A*cy0*Ql6!&IjF1Kb&>W&1nQyX}0Lo`b@#R7hrr?ET(% za`2}=!@bH7&zVbId&CApKOk8%#sRbD^UDvP#qFOvhiYu8PfTPCWNs*1DM)RaB?peO zQz_k{*_`SDW`c&)G~R(tc>G~-P@;OElTM3Gp*<`Qim~9A%E)%xamf2m=e~*6gq~q9 zuguTR`UHC2dZZmWy$t6kT&vc@rTn4*qcvgR_sp|&RJkPsade<)B@rruKVEVLEMI|& z5)#{`H5Ds`yD-!HlWB5^!T`Q&cajt+Rj5>FQD48Nqg&eBHae^Y4`bl1G+_{t1A*5M zkIa^FhSOn7Gg-u3CAeb_ztGz)|??rMb?AEPyPauOa`!uP)M6^=gc1gekUpL(r=uMA>SN81U8 zEcwvJ&~DW6TTK?7`vva3=u-Ca%6QFohV%rQNw386$}A2)|0~#ipGZ;*(aL0wR#Tt> zl|HxcSUhsprk=>5_n#VwkEy{vviMywFB8ENi%07Emz^k(&AyTd%F{$PCTT{5W8*Bj z@B*$n>10Z`-^;cVVp~8M5-B0Bge=!74{W;)FTLP=o^$?}Y3{urNx6#AK}PJPGH&_2 z4NFZ4dWOBO!}aI@SNaX8z@}-Msj0u*q4exxF4$D?d><(r(r4_?QI}o9#4bCr!PgkC zhSr8srA)GJg2$IfS^cAHxax%0QEfG8Z5YpR0P;4=42l$H$1KiHYYo*ZqlY}7J&!zs^=X@Mvc>5AYq;loml4DfDFtd; zDX`li8@-I_&Vo|igJ6uN6!?w`wlmq6{hZF*)GWsHXoyF;YMf=)TunJqXk)SgdAdTE zGvBKj4qwNGs1E(P(i)7<$oy?tw`L{UrdY{nu)C<#Fj?3F>-8=c9wrPMNf_QZJS$t? 
z2@cX(82m(`+U{CzIu1{Sq}8JA2mJDVXHvay36;q<5(?viNA=myV8;{QNW0=^s+O7_ z#bqlQYHme3LArGk&t3|8eG`@~;#;3To5NrEauz)209vy&Wly2fypKNP`_HkNWMYg; zeFJ|z;|v!6;0NsLmq_a4U=pMjFdmlKu+7dKaltoIHBM6F4}W)uIff8yqMyo0;Hb1^$MEMWv< zQ?w^|C7)KPSlDW@$19HE@NfMO=IPHO8L8m;IqOlz(4BQyxOtGmNb@jB7_Kc8s&Adh z%S;H&G|y!f00%ZGHzOqg230B3S`c#31z%?N!H06|`UjvkLX5&_jcT@7kic&DZ{X4+ zUPfifD!lO~aT;N?L*gB;ey(yX`QWzNnCyg9>KZui{t zsv{xq(>5uOKm8O|%^pFAhPhJWpO;<%x8I3swVYbE>Iz9dgtsq!+m}}uI`Dly)|OXA zL^9z$-)G6{wd}snUflWfpTWwNs3av-wsU>C^0iFKWrXReoclMjJI1UeqgO4@ol9eU z45C)16q2{I^(%c2*P}WX)?3*I?E?(kk+!g(8x#Vn=^UaeU}WN2Q?$;$PulpYX3a%E zp!w&&G13qk@dQz|%GeIO@zjrfoR$tSj(J*(?et3upp7Hfwpk*Pb(uOjqz!c#U&|$5 zIiKeqdN@11=un!KGNG@~X^PGjDs%pSV`z_!;g3)7+cQ4O*sp%aw#p-JPeP!uHO-24 zM02;jIryvR6V024j8>folJGMw8BGeXREUF@`Ovh>Ly$7$l{Cn(i@@d5L z=A+AHjA%T_G%H+Sks)HLAh0$}6ozYaTd9B3bVWAu>Rra;to6}aa5@3n3Z>F0aS*ca zd)~`YU-%M#n;fU*Rq@KB#73d5##=Ye0YS(`uQ>|-`g{CVn>0-_4FHw9eB+dd2S}gH zXodQtXqn;b!)zZ0F&Ilhuu@5fwlo(flfs6EW|Xut!plDSVeU>_#741~U*hK<`y^f? z#-=goE-=#HwlAA-9jY)dAcZM&t)N^kVbipe^jNaF39B+{R}qB`eAd#YwAXg5Sh$$h zU;d14#*X1i^$b$0P1c|8eMt4FPUH`#lxNQ+X-6=c2m&5{k9_mJ9-;nxbRO`UJhB_Y*@K-V!h6KJ3Nlsgr zA#^boD~4o!lW=T;8&5iw&*{AAOBWLDvw6f6uY*9dMx}V#8It}!IDBUnv=R_Hf1R_t0bh!dg*_T}p zJ(PO2L@5TnXeq0%`6+%oA`v$PUYKQI0H`zQOehBHUE~CIcJLWJ#1<7 zaBcbuT}(Vo7zQK@^MPSI(iX(*WRrfdz_(C{i+fviQqo2OV-sBQiI1^!5YTR{Czw@c zX|u`8&;BHKWE52{Wd~5%Ool#Z4Oa@I47fT$qCk5gq6|@^O+d=^U;aFEw%v}WzUDaU zo~Gn!H@KE5fNIIO$c;9{lM{HWSMcl8&!Dz&3EPMe*a&Sjw&Jrq)ogp{OL^kQKSC7v z=yJw{l($4>{lx~9a_X%z0Cn1wep2akzWtgCYNMLK{Z$Qd!+OT^5GB}Fo65un?)=<2 z+;GO}ELgFcxsigF5QG7dBt)KKaoomx_JJJr{cG?JJBFlI!3%?oamocL)(KYR8WT!s zWZ?21Z9{@3w%KU;Z81`|iOyKc!TwGSO&K zEmg4VCU{bHl$$^GDIU1=QoKe=5~XOPNL}D)9XUloJ9&m{lRl8Y-%@RXM4=Br>3P5b zS5{OuNUlp+mnO!z1q;$Z|3zz7aI=~hQ4(iu-)){Y+VkpgDG`Y zbGpH70qGXHLs`;D@S}tqPJI`9E1!6*j;$J&Tj8m1dJ{bUvBV=Kj54GyI2G#x)Q)3v zrDZKBQD_TZT`0FBe)HAyu%tZiJ_nhWF3g zm?M@ntrPnR@kqeJC}xkBy_)^b_z-cqgbD*R3ahdKL996AZ%M7qhQxY|uk?AXB7;XS zsa+4NM_(c#$P|ofko&fDnJaXzd~Gx~YT}Qz@c#H`u07#ImR)%@+f247wNsQz(V>BA ziALbDG7LES^PlCJA3dA)wsX*sZ$vep)5w3~GRuU4Zy;&ISd&S&mnM{v*N7{RRBOhgTgvdDOy$C(t%O#qfhh#EnVoS`i)s*UL2uo8FfH;{5JcuP4ShB^+tw}_h4CveI-3`~`(*C7 z!lEy z8n-N4LG@`*p;|7p^tvl4x#~v`PnT>=Vs7RHLWs%|G?bXr|={W(z zwb|BoE;b$}3_S;UD=WP`8rM`MQx~{*?vd&U%lA@_E5{{nk=SC|@SD$kj`HfY=!rT~ z4j4C@(dRsiZD0LHnxzUxOrg!kIYr~>SVgdD;(~RT0kjgB7~_`9zst(y%Q^hC4^a){524PA!Tyx*X7~8t!hl+3$pt+3TIB5S2sl ze0=A7Om58cA#TU|WF_w)?eFa^B5kUtgxodBQusY#I>ztbWT97lCqtO>U?Ri6TH}-G z<|K>0eIZw#@g5eeT+N&W!dT;Zf*<;{OR%urWaPy!;iw<~h}HqmBbhgs&<{~ooScMo zDk@s_)UUHr|05bQ4cBmOY0LM}5{QTT9>fWebqMEFv4*D#HaMFTGgc9nMu~iQ!Wr-9 zfVaJeyW0tRWDb*J5#wXqEgoC0@R#$>WzhxaQA(lRYJ<+&TEr?4?ND^-4C7Aai0uV7 zwyJyAbx>38V2?gSE{0R8F!H?TQ+euBsQVG~b(>{By$XBx-Pl&#t-2Jn4?2FJ!`?SA zhA9N$U40q@R4WzIB$<+OS;<-V6vi`zD&RxIzUl<>Q6jGhte>EeW z>c*&6r6)n{08kpjky$hw4Txh`VNm^z1;h0ykB#+KvLVL;`woxBmciDhbUmsr++<0y z%4V%N#p$P9wK5MfB@vaZe~FYaoR#^B_`(2=c(^~AFa@%6!_$+M=sT% zl(_bqY_M4=K|4x7)sA7Z&WfMkz(4-*XO8*cr_3sD@SnlYsZR`AnP-pxP1doep(jo)lg3M}G9*pe`5 zQXVK(IO@N?z>`n=08wp(V6==zky>&ntb?d^%GL!j?GDm8^d7_Yf7S|aj4N()ybDzz z11eT2rBmU}2YamoQXvS+#3jY{CmhR*Klur6sgEPI8V!lifsjnpd2Be!?ccnZzkTK$ zO0m#xw|aG4To7!rSliVI=yLxL+M|B$hQVqxn`K}7BijPt(P8e(qa9<2{gkJF;9V?9 zV*E5_H|_KL3%*9sY(SD^zh~1<)w?GPn4uP5FxVX%Zh^2Ab^)ZqaJvGuHdv*I(*!Y! 
z2i8r3SLO+a97=t|TI#?14Su~%YLm`ZD^ewY&mRg%2=*5c11b*PWw$^a!Xt=od=dSis0URxcU4q zvI~mF#5h6CV`VBFe%5~xg#lj4kfvGtQI4Ql2VW~R+9(p60s*28292q=S@V})^ONhY z69$ z-Oj2E#(1s;Q)6w~vEgc9MM@R47xMem{k~KxfzrJ{Q)r{oN};tzw8vzl$;9|N=0D?E z$gFwX^{eafo0ABdv~W$xbo92WsNay51_cQ+dfUH_GXF6Ei->74JKpT~kXsQOB!!ERP}% zI{9s|hy{9p6)mp+RCbbPBFQr`#Sr}Vh{^3s%KJR(NbE;sxju=}`O47}V z`pl%`tnMmd_9}1o{%<oBY??T*NMrNaHKlsmO#A=lOcWTUFzC~PE4KS+Y{pqlVj;DP6)RT44m+{q zflp)AJ-5S>#n@)-7zCw{Qfp)UT{8W>QTgDr2IEnkJ%`4Ibv;6ad9`V{w&;41iJQn_18oQrz?(`nDDfq0p=gShu%>Gbf^ zX$z8GM96vaVkMzFuOLPIhJMVxf0KKrAk=_+BD#}JiBUvp8`X#j?!A|vop>T^uDynx zF)-1l;%khu=!#}(Yl8K=?!v2oa2ftVFD4zW5cok)fvB77sbQ`#T#sUFSA4EtdFJ7r zZBYM`A?_LpHSk?^TX1YDco@=BfIR&f{Kr?n$v+z{^sM_R%w$1oIzp=)pI+Z)GxrN}@lF&GEaB zNXVYgeh#+XWbH3*q8e*gnWxKXJ|otnriBQLg^FBZ90ldk*^F;ki^|1P3u1A@HH?+@ z7T$oz0>=)I!j{9?Hr8?4RhBGh1vr!=5rHJdk1h9p{hKuY_-AIv8u1j9o{xIm6WHhN zZ>3!h@U@>MA8C?>+pf&5^HQ#xVGymbqKU>PA)erp|Nb1$KkPM(KILh&YbAU!K|Xtk4||PBg@t>D^1Zl zMVBot9n%cp&iW)zdE=Wn^vf3zE!ZA2T0*HT5mSJ6w5dvZ!;fKa>TnIM;&jQwrUuIY zdb+aFcPDDba9p`IMoBH&YHT@#r$3QbfBi!KJ~4)jR+*Ft@hxdRVsCGhzkT5=th?k# zc#|zel1@lqv?5N^KASMze8N_m7^>;ywEKKrps5^kFwObfqm5y{F7f+|zK5D>vwLKqDj@l2Ry)64J>C{&5F?`Tk{W7gT9C#tCPQvY}k%ppSl-cDaP| zvMNBP5o5qAv@&S2R-z=$bPkkfNYL0gBB&G{X>4eQvgN7Jd@d0N)_kxdRUOOR#s4)E+Ljjfg7$&canU;hesCKH%yKr?CMhd#tHdjugr|F5%&e*0VWM62_+ ztOIeB3lCHeE1Ya}C)8AZ`pvvws*_-4Tc@$AOD2);lL#m;AT5Qw=+skKl|ug!Hv603f;D>{nSB3VH@92uq6s6(1$?W;NYg$(_}R$-;K zaLlZ?;DwifVZyLQakOa$Tcv=r^-$HhlC>iRm&ph-+2op!e}p~#D$NZW2xkSXv=K+1 z`F>>HEGPvX?Si}~Ur?FGng~i~3c|`Nv{iU zl_nkAK(J&H*S`Jjj4oe6If+Tq29YPUt3Hbp;m8ku3JyGgc(jTmfW_we52?jkiN{Y?U%yrm+Li5Ndb+mgs}hPc+EF1;MVamypd5_s*P0%s?p{N-fVvO?$gkB zEJntfS$iiI<9hdtY(Im6ra=;K{c87{*{+E6=2@M9*8w_tysum`5r%|Me=^POw}Wz- z?Zo3BmtBIIoJ33F7y^rCvg;o@e@SMF)`h_HTnnz6T8Th8WO0LEj_ zXFUf=g$I7|OZ-;q^l%CnGUaUKZ$(WNwCkLiE{Tc4`3Mv$C}ETb^~sKp>|Wb&Z3XM~ zE_Qx&B@8ZE*mZdSw+@#l)A^_lom}P!bXK6nw%h0=;(tH;ab``{@#=NFu*A5OcW4<*<)@8tGhG~fYP$wcf#Kx>!mL8`zSh0l&rV=D5kwGhd;_gUBNMC?GFz*90TQM_A%i9=iW6L$JJIU3$yxiXMU=&2UA&%YS)`6- z&sPDhz~@D8JCUW6YZ>uU?Ba#A{`yx`9Fr>B1xTD!%ivWrF=|r)Cwi(4if->_ zV$rQO<3G8Y<{$sa0;`Fl2D(-z*>iVxKJIv;Y7O7`D4RNT+o_f2Hq1(faBHV3j73eh znfTi+{N(cQbKECBMO60Cfx#ePEk-+@a52Oi+63z-_{m#OW%ly5%+VULZA<{I(6S^+ zIP!y^pnAv)NM_eC`OJcKy`wfWf>s3Ic&^>b_R+{GW>z)L=M-ed{hW4_J8lT4d0{~d z&50IXY+3QWOS$?3A7`h@gi0c)q>b@XtY=x#Y!L6ZC$IRIpd2E$>&H~~ zqCI!sg4^?a&d~(}N^fB#b-0FWxHjF6=atriq0U=wI---ViYo?N4&a5)=8%tnlDis{ zm`a6u(k2Ww>G}!wi8MF9>m5`ZmZ;I{*lKYAhV;n(7B!#M)L?XrTM_Bwuj{qv9pVR< z7iC%&0X*d?$TmA*5aubLKV5n`y4C8!DGqU0N_7A<#msIZPqci`qw^a*^tkyzHYlBW zl0-DCHcWtaEu#yzW4rzKW5cTDaPLASN~XFJ)wkz8sMJ8m(`EWA4r3cdwOXMywzgY6 z9~Kz4E(l=@#?E>RUGPD`LBm_WMewSPKB1%-VrFa!8l9;hTOn<>VDVCZdBFwjOh99N zl2XlMP08oEXMUKd;$b}P1c0)ETdh=;|=e4H?}s4 zt&}naVhcuP=)cj~&-&x*`P~QK&-~SE8HrPrZR7bK4Q09Cdc5R}GbtT>5OJ-97T1mI zbQXln5R1;+f)omq6^08I*ELVkTWVH4iC1q&?|?}FH30nTr{cEC=_?5Ji56um{N>C~ z^7n6ilRdoQN-(bHx-GV}_=HC${g)1MX&*dp(!r)yripLJU>IASeEE7FQ)^vFXw@J%POv@uR4 z2(WQP#rTxQQ!4i@<~JYzFhK%|B%oCXRab?coV?RT285qm3*68}-klhwD0Dtx?AkEJ zF+-uV83kLbvj1yd!@71%$*Z#dj=SK_yCKfmlC%q$Bsbi>)rO+)P^u?&F~&Ikp1yaO zR~eMsa|71+tdA_KH;j=g4SvAhFE|LJG;4qVJHkXrZQ7}$^oWH@zuM*?kSnhm^q(OO zo=2%RLThp&OBiQVj~=cqv<`AH^rI$WcZzYMt01%?r2CA<85lCB2OuNZdANvH` zG*e`}i9ZtZfYu!J?sp?QZi}vzI?9@^I}@>z^@56{kIVZfmBKV5{Cb0{zjQ7yJ?2%g z#~!px0lvq_vQh+T(y@Ph#YVmy*n? 
zWJA_@`Bf>f#lF*wGOvg_;btY&v<|wLJ*i)ut@E>LKxmOO;Yn|rAx2Qj!g!tP`Ukk_ zSF+HJ(zPTOp^%~eggJ5z_1r6v-OO#boj%*Lagw$MRHQqe4ICOqafZ)E#7yq0#g1bCeTt_y5W6bF{Y{S7IU z^pz^Q+bND=$~*!CK_)t2R)0d~5HCa_S`j4?j5qP_yoW1~KZ(j6_c1q4v8@PbBY|R~ zY*;;aHm|w(Qp$%NLNaRv<9V46lME>9q2VZ2G1N2z*Q!sQXSjxIxMqM?wnR_<#t@$Y z`T$el3l~VH5SkJ#6Y-3*Kh5~gJ27SqUa3Yxj7lwgdS!0>^qHvJ?m$m8ve9X!9qp#> zsXmKiS<6H>DJVi0(2=d=_Vw0)yL8?nWR-9cJRQL3Z1z0laMq+TbA7||U;GNzZa`9- z&3@(WNLIQpEWJKLa=)Uw;E?D0F7TE<-;lTcD5u3>u{t9IS+;Bip7yf+UofBfd+o{U zrHf$0`i!WTKDj#}RYFK30`qpb*Ed75w zVF3OEs1EP<76zpXAW%**Ck1kgt}V+3#jH(GjV6uX{g&G=yNsR0LnUogrOf2KZP@#5 zC(|wkc*2PfA-!xOp>#@_y3&tt?Dpg^RBeYD4+~R55MDY?3b+V~h~NV2qIwIat_mNrr2Jzz79`NRkjB0g@2XO1s*fo!trD zU0wCQ=l4g&?&%3a+TB@PXPzeh8QiN5?5KK17>;_$gX zVW$fclSmIYF6>OYv>)*>p8C%B(m(nrbj^6#f!2jVaWP_*0hCFqiH6nzp|;u%a^IfM z$LAf=bet&read?P4w{W|`c2QTUUnHjQK1_eXxnu3wA<(5Yc})g7ruyaYA-(OmPA7Z zBCiAP(Mvw#F9UiJt1tKs6){-=(m*Z0z!M`N7F6*@Ir3L8qSI{9l9+WaVf)qB(&)!{ zd^XLLQrHSIq@P2v-{C7=WJRzxSjaLZBjSh{um*30n}2j0{Uih5f`B8=Ity_L@l7{E z-=#ZmXeRtNtOQ!E9c6TtSve*Oh4xR-9w&)oNRn(LrVb9P(8jY|`WL%kP}S$w=Xw~^(*O*jU!>BfKIG6h0cJ< zM}ag&?42=qLV})S`ueN6{+h3F@rzzeH-u)hfjCkIBP>`$tT9bIcfa%9?6~p^92r<_ z+)de+nueW;u=xoWaNM6gpD0K*8uHe!*g`~_0jc8!OM8_dJAg(l3ha%>8!Ht=F#?qT zARDhT2Gkg$ewP-8dp`K@eC}^A;poJpvvXjSL}F++xV_isA;11C9&_ni>8xFc8EX%W zObsri>Cb`c)xn^)+75bC8Dz7`7}3;7+655F#K#<>Lf9OK&FlHicU{I0JG)7Yr5ReR z7u=pXw(Ond`b%F+I2V)P@TMG{5z?!!&zyuodaMZMrW7}MmA zyY9m@ns^U{%H-(}L&hh$_lB?2OdL+lfEHX@%M1?94u3`lo>|M-q&YjFDyG#=I~Gwy zE$O+6FZlzIOPl@t2A>3+RiEnBR5>s>ky0H;$`OdZ$Mzi8{@rUhI*M_7_aJT04oNuc z`Okx6k3h#-S!*+E#pW}X+0t~{(N2SoRRhyAaPN=#>=p0fH(vBgqOm67cnjmw0>v0b zjezgdn(eaVo$ur3_q>OrgOI4R7aus{@qk^XL2%k5dE_fzMK1_2L6{1jsV~@&@^_>o zQva&^uJU}yz;d%J*acW@I3Wk%M61D|$~NIa49VOalZoTTSG<}V-}zo1VjK9GSsJ0k zwk^qcgIhcEJn@yUF`5>d}bYbckY6;1c3 zh8Cn(UCgV9kX9;(qCj@`w=Xbw}nUC-;+cC-LVblG)atwF@ zx+RN!pT#Ge5|z+sVy!`=ZdOdxSJx`Fv$CA}7q(z{PJNCKtgjqS6Nk0Qw3377PK-{> z?H~In^WVRjRyV@5TJ#!C+GjkHjTb(fs5Oqw1BEE+eaXCtGlNX9Hv7n>kh%g0)CKu3Mv#Yxeh-kv?QdL6#; z7cXPim%hwl65{83gkgw;LdSZ3k|@9Zu6Hp0D;E=uj}Zoqq7$U5<$iPBAgHaj+Q@+k z&A1&>ABJgM#okiA1D4RQIp8%_aXT!yS>*5D6NF6~r=3c?ZUcnE#-PvbSALd8lHfA& zQDY41tL0ecjPn8{k7Jc=dypC<=O0RggE4}44ujG#sdmxM9Xm0hMUy1mf0%5s<@ATq znVEvA9pEG_(nN~RlCMa?EjlyH5FY+!9b((7bJPhByr;%k1t=G10CzOh4rx?nf} zg~A9C=Ykw!K*+I@3Fo3SelCJ-ck-S0yqn`p3++Y7nif-Q*YT*ocro$#7)={sP>hs` z@qmJG=AhMs_9H?~xcahx;jm*)XZ(yulB^xaYQ|G-6g37X2Az%J>o;=cC2!=|poxuR ztW}aWOew@i9mkX3_D@9Zkf7Zp5O7Hv%G8x}`gsYEFE+EFX3%l2@=eIuc5#ooP0c+Yc zOJ$qB8v!aD?V=lmnf)z{LfOG!lGe3iy@sz|_BPrxJzU%`Ar+~>^lA!P>Lonmh-~#h9;k&hc2b!k^#)G5@B(IB z%qFqieeE?cH=F8Si@^xjHRWF7h&&QkLj7Yl=MRd z4J{i^Jq2qN_k){|s8!a;nz*@u?}3gH*^ZY&3A$I}jE<+PMKJaXQ z@&)Wh7z6CoEXl36A#qZ$6IB>g2Q)ASo+4+t_I~8i$iuIfG)^W8vG+I(9k1MV-!vu& zGSExlsACylzlD3gdjqXLSr^@;Iuk_~{r(&aWx@8f5yD1;ey>x~hSwoxwb~f_5s2TP z9foJu=k`E_kcFA94xkDdZX^|#=M~#ie)8V;BR{%@@oo$a&mIp)U2rk-s7K-_$I=qR zn!%a+?C~_GWFuXW3+5fO-}*NH^_fretpD>el66gNU@)5PETpqUB=cPw)4Tbvm;WWl zL_MTCPfU+ERJx6jdu@}a{qtoc8`mM@4HbRvZD?XhCwbl%{W*w3ES2aKfNt1L% zA+D-vp_ONmOeIadC!C!pxbX%)`KN!*rtP~K>p8sZ<6V#D$3^daH{J0z z!J07);IaoQ5G1b!+mzJkPi?i;R=9zoVM!^8p-Wonav(-3`U=u&!Kn}DtUq`@cXnrp zO@R84vGzK)?TEPS@_*&p&wd5RPNzN4>~b=flvPKu^Kcit}!?TDtFx7ny^s`w1fD|g*< zZw3;jpcCUw4m;r#?)&jA$jnT-D=7K^_t+}6L;7hU2pc5*eo3QShnZDt`!lyT`{@lm z7C5IqwFeMP8L=5ZY9s&vAOJ~3K~$xys%fjb;x*I7Hjkg_z>j{&jsN~Z)+d599DxOX%zRhGD+J=0m${=0{ z)WBGiW1shj?Af@9snEa?TbWt2mQQ@|cIG!6#fx5bDP1+FGdUzg&Y3M*)wWsA^Bfqs zS}OVZsGL3BC@2^Cd@JO*^B>P_IPPMe!9XG73v z((mHC3 zzc+%jO6)wLbKLsR|4j6)8`vC-(Nl+OKqm;e2g3y~eJMQRVYrELLQJ|)oXuAWIus1@ zrRTxEu`I$ZEhBD6>MSstN>xi~Pn_{YNgwMRohz^8lP`NE$0i2pMfB;>Gd*-7;Fh^P 
zJn6-M&2fMDNAz0_8m%A;`q{;qZ&Fw3UqveV)Us?zQ2Y7XYWwd~Ipl;BrJpOug+wUm zGmBnB?mij;ZY<&z2aj~dIaL^<=fev@@izKCeNna%d6D37l!l6e`V zd>S4tD)+m!NVb>TXHZ5oXk?-0n`q8Xp+qjBC5C(d_22mX8~%Y~n{8aDgAyZwB^e9Z z78suPhPTqb@IvCT7Dg;Kn>;dx3SHIED3xkOCA5ZqYOAfb#hsXBU0jn7F;=KVz2+m7 zKK;7gLLPZ0M_v3>K0mdKZ;7yba)SA=Nw{tUm%Qm6?3#)3n(8}XqV;!G z`wKu@lsHJLuTH@`0tw;JtuzlmoF0x%EyMP&e*@EZI6p$XXi9QANau6%) zHh8Z%=Mful&%HadWZtLXSO6S;B+WIO*m=v1go!5sJo)2P-or}3L>*P$C^l@8^rMVI zx3+`8o|Wa=2Y0~${5r6;KA-!~Mk3`hNTI4)ij?K^o8mp%nS(pN&y64cS2hvQizAY; z2J+B{vh{acH zEU1FWt)S>ya7?RWr&-Y@nPVqP80$OkdiQ&{{_U4@v^4PDKF)X&1G094yN%`PZ+siU z*=LiiNq0evNsFJHp(=fX(XUSGq0-uFt8G!Qr6YbOS@%-2F(L*OAO)}pi7HQj#f$kq z0lS+UNyazfTN~)g1RZJf&+qy$CI~XuqyxGOI^eRF<9(J@bF`}3IY`PBI%|BJsWZ#GssI`l_*iOXNuX_{6J4=7}96`&lJ8W{!OI}XWZV;GU6>`w^ z7$+O>k1@XxbMJLjI1xs2a7@HkK zkwOeZM>_kut592QwcVe1FxaK6f+d9tjrRnN7G`1tPyfx|WXJ3*(rBU*;JhVhPx9U? zKF;>p7@wp&a3We+CYBY#SO@l4Eav0W7^!%lkU)BFmDJ#e4M16jXKmE9M}1N$hFNregnq<&#X`7{vcTym#eT zk4vzzlaIQ;?CmC)uG*G>nb4 z!BW>U-(?XS@3wGUXQ>f+;;ip-28zLbBu&&)I-7q z*A%+VfW4bGa`D^ViQBq`VC@7Ze?LB3-1fuW10B&mu042dwbi!rcu?L}$C9cjcxB&9 z1$B<+|Jn11rAJIcY!$VjVUw-~y!-thLxZ3~c#$%=iE4(LFGV7{x{k|&pJY_n83qK0 zA3<}&CcKZC3`6E_{yw2kwGUKNz(|K4V;HJ(iOayBp`~b1yeM7-F9zqbdl-#R&E@vVyDCpjaK$pVWJd>C-p8E4@g1wZIHS8ozfBhT$;G5s!yyyKddJRipY&vpN zN$?vz$Mz5YJ9mBVi)>cG#~qZ2qzyY%c;X9Rih0Dtag*byPv=UtK;$dFPKTTB3se8m z%Wk@8P+9C3>3(0{rV+s5dp*YbJ#KpK>)7%6FL0O>B#KevNdlqMY++70m0!N(O?20! z8Vk-TSW}KOc^yGrSG(!jYO8G@7Z7=4A&Uzl22n#2cX`2|{2sa4#)FjF4Evp^thL^Im93$Xz$wh-9kQFgV(^K#w7- zp5lH|9+k?T!=MfjWxGG@Jc5nSYmV!cRZ>F*-lK zg`n3%#@kFKp3|QF9C*ly_-3g(zFyYN)NY+8!)qY0zD2rZW(T<;YE& zIqwl?ph<*nLe%dALaRN&2S4=h7!wp@qEcaXTXLOYg=zb~#L<>aicEShuO4x!nA*h2 zClCb<0vi(T+66Ov@frGFE|h1~-Xp|8d;OTvBSUE6p+eT44w1r+%<)W!34Rk zkl%HPTUD$ay-+KT1I6P?6@AvWJP4t-KIqW&VLELK5up#Ol2E{1p zlAETB&XQ5Hsw&Ee7X9y2&jQX z(w&7+m76bpBRjwN6%MxzlDLO9m2MNJ8;0=Vr*rP#UrN-nG{#z(eBRY%;z^n=ix0kl z<>&@$r(IiZwLNI%LP~L+i9?n<4F(rS{MGY+pZ?r#oR6?AL!u5f0oQ-?hwPY&P*-|O z)hct{O8DpYFBr07 X3F&K-%!rBR%haXDpl~z9{zWp{#Q7~j02$fl^e4#my$^Cx6 z(g7%`3YC&!FaH}AQ_|fEY~QgfW3|fIlMvJ>lSdy#ch7Fvxw{;tq8uxO6&`C>O> z&>A!B#qjt)`4jx)1ST*zmr|dWOU0>kOk9eDr~z**rmNh2%@^6bYl@REd>T<05(F0S z!D10nT(3tM_4(3kU&H!d7aPYQp4i0n0+?Ad!TE2vgt*nhm_|DFgJP@!NsAOO+4JgB z0ySEZX&W`5v`i3@K`EY`l_~w56333Vv0VSkzhm2%zsw=l5_daTSw{dDKBQdGBR9(z}=?V*k1DBYpW=(7{sQQ%DA`~{A{xG@*)&z!K zH{Fbh%Wk{R*pYlLmsdffJkaY!DSdb)CTWI7X?CBSO{CCA%8IU?Q`4y`wyaIOPZkOZCMGdrD>%JLcMvjY zL^29J42bI!#&dl6Bk$q7Cq4-d--?bm@g9s)pft>Ph_3q@Klu2kI3%$|^BrtMh#SIo z*X4p2zX*;!37MS4LdiIxIWM&-9Brw#Gx{J5?<*{YZB=&`s#B>{bQV%1NJ*lEpws7v zZ+t6vfA$M(@sM=7grUPU0;U^n8fTu#V_*MfdaVXwGek67SX0T097;}FOiE*@y1?Dn zR$Fbg?OUK}@YWcUG2LbU$XKixe*NMnV_k$%k2MA#I~uJqzWR-un4U?}u}UNx%gKg8 zhgHf(4u8thiR%G4Drpx!1;c#S#Wq%G9DfYmhNW#Soo(A7N$@U3{ZsnByacEShN|L$ zy|Z(ez*52QQ17!5uv7%jktO}v=#dBnVaT?7wqvtu!DXXkMikkw5lo2Ry*(9OUBIfR z58j8QOz!Fn=U}+D_fZbMuSSyUg5dyE3OU-5JNqiVcFFnxB#LpKyRP^ko95w&pW;ZPv_NXpOhB zg}`*dxny(_%C&`T%u6IipjHFAR$Fbg{dd5Y%D5qwE`dscm>UGdal(m5aNH4xVpSkZ zbUgu)Dqs1=cSyWsw5%Q@29GZnc}0g@+EV1#1DkjU;Ghi5@umyhB8VE;xDLB+Eg~M% z?ZTd!G*Qnx46-;xwRi7qx=38jyG4o)bI}o~j-?71W7)oAR}q)gdyEZI5NmP`t+f+$ zx8H+_-4Kh1F0(_jlEYAe(gqeHYe4HDu!EyuP}L^`52?@Bfxt#|@Pf}S`m_;{G8y5O zq}PX^-N{Y=@hR5FX&X5f!`jnNXY2`2z_pv@SbfIYAVRTE;P9x}lqIPb!LA*A^O~#q z243ManLQ3LYf^nkaX^?}Oa<;eX?B8>F2DqXa?7lvCE7doE}H z?Q4mw5VV^(@3S~u1`{pR=1QijqRCtP;o54eZ9hkiA`Q&v~>^06cpVfXYNJVLr!V#*?HqDi=J z1G78t#l|k99M{Uw(EU()9x8Sgbh-!BLu)|mAh3sIx%jFt7@ks}s{;iWRq$!0NJrC4 z+^5}#BqmVL=l|g!*${b>`7Xhl7JFMw9`XF=o& zan;8^#CcDA9QKGq@j-y`9%B?Wg5n6~W43+hqwK!=YpnO+db1?N^g>~$4SCAnycR#v zrZLt`L47j{u+b%x|51!bD``f^2uw)xeTlTf!YH)nJ52xkNBI5+-p>(gp`V~uiH$Jd 
zXfgInXL82NUrDbagyYR@anO{MsUtNOv=S%eewIbG)mB?=4{88OlV6ju9jU`w!Fxfy z^0X&E9*O4>uUMO^Jex*?tG{{!PV>DvUyfZC^EXR%40LsQyDWfstt9p-T@v0JHa_%3 zBK3sUFu#2V>iwWkuLy67lyu*-^YiHvwPd5hRevv1nZKndR`P!H-kG^#RMv>e3K+o$ z0b_@5WqxW38zlqNLLw{KA6eqpIqQ?Gwvn;g4rDIAFGiB;g5d!9`uw^tcNmJT+Co%~ zZcO*OuhPBgCfa@QB+Mk96P|Pt9D6j{XqK#R`Lc5UxR?ShSEoqS=k{x^q%}FgmdBh+ zG}a(6CL=S={(iO(H{8N^KX3(`up}-*8X;ZlxU1LaS6=vXIQe+|SbKoxX*o%PrMMeL z>QSuFPo_HyrL*=ZGCfP@b6@6~x4fMrvAFpTfpz%MGTUzBPCAZrUUvz-RzT2dAZ4wQ zEo>J8odbnKHKx|6O>MQ+b^z0h3aXqXtdbTA)>y!E!m)?3Wy53&<^+a*5@UjZshK%$ zxnl}6(+MaN?Uf{$vZ_4az?f*}pM4%(M3_9{2ogzm^QQ0GhEZQQ@nScv@}APahq?I< zwz9*p01K-IgY0p9)@q-d?-0ABWMuLlF@g_Z?9fB$?b(CrMgxQ-_vgHiIz`3!@$ieS zX;lv%okuOBba_{NYr$qpQ?{wgv+qm(o*YKF9{1#DTg3vZH&QQS{CFv!Y zsadXm^IJHi(;nvaX7ufpeq9T-G}i9-*e>l{tnqPt@Xo9igZP)9u1L{P;!=yZA*V=LW-QIz1- z-Mv)k)T&v)`BWscMzhmMwH=_8^`4G(JCw$3Y8;aBT( zwBLcD0VF4T%BVeit@wKpTy;73;O&jFv*V zOc0Z1yFTt?nd7QYT)}z2{3QJPjYzYB_Z~w!%L>rwC){+|Kf#^1)Ak8TzmIB6Uj#Yi zFdq4m7ZH!OQjLdvG*c>wlPpcKw^-A4MXpWCguPriOOYd!OD@EHxczoM`KniQlnwDw z7c1#}ZqEqOA&2nfH@t;-vW2u-DeHwHL*=BHhb@DNETJ1wTWz(~wu;J!ifXWmVo)A` z&RImgS%NTKjE?(#TCFj@`t|Q*qckG~Ll>afvbu|gp-57&42reioWx6XkP(5*?^rC)&*o#QfaUNB%<&#Wlqah89)6;u1CMpWz5icN{4<+^vX66QW zMRd9Mw)EYKVxd^TuH)80nI1mKIr)PiuSWxAKss;`r3?@K@fDV5BJ>7cdxjD zjbezkN2dwR(@v%JE9cW68$(P`2*;$enLb6`HC<*i0^%HDlyK{pzDUqs&!$J6gKLGD z41qxtj2P@pkIv`*lc}q(VxzJ2lNg$UHk(X2c*g5rho5Le7^X#t&lDoGx~(9~%KR+o zxUeYG>PWv?sB2bRuqwEj4vk$q_`iSt7aS2Orq@Tslc-16jI(Qef+t?`W}?mOF=OLd zb(T}*rHg;QN^4sbKs8cRTWz(iejKXW0<{Q%fwLZQ5>4YF@u>>1wWw+H-J5SE$x)w- zVQKLZnZbfUety_zAH8?U{#O>ab9TU11K*w?P8{)`z0i+}G2qb|mXIWls27!Bkt|uJ z6)`*SQg{zjQ+pP~dPE2|ZBEC)W@b|TiyZ!v<({GXomNh`4ki+&dQS86Qpd1^$R5<3 z{6Q-iRP~9#(e?S+_qfMmU*}Tt1$T7jczoQ&;`sVo|B)@RLwlaEY1mD`c`x`&e0vhX zW-JS2vy&;hk+ZX@D5^2^dvNd1xas=qIrkaAfg7L1m;mE5HRK>5nV*Auw{gwCyq(R7 zA&Fwb1~?4&ImaV^?|0$kQ;6Gbg6uJeAcQfFbZ+{2zVH+`HEsQEw4B`cAOghSyw~Z;q z@^nQry|vX=TWtpzh|2v|rbeHS<{W;=X11(f4^9b#fTY{Qh+*%{96NUH#^)nFrYaq_ zwDy9I^zH}7M;1ifTBGF3TclwE=(~Yo=AOprE zjg`f6vsV5tA8`_#S0vjB+`fH#7SXj>#1{30Yd2t2@q4DJ>{zcjr`HjOTTs*`nP|6R zumY)L*Fk1SXSw;RD;O@S&&|Gb-oqVC8FA%Tx%>wxR_7ApZ+?e+uez44!5UPQ8E-h^ zvFF1nC*qqSRt0M`QzDj9ak9v+6(1|c#a#QDf8(*|KM~n_C>k_V0XG~9BrYL{9M`@6 zO>CHnuuf6e!->!fE#X5?;>h3o1NyBo8bN?})rGO@+MO;cZ(qfxR;shGaEC$5N645f zoGS5rM6>U?_GPbR?VURq?|Y2)NhHCKH@Pj2c-m{<0>>PIot(h>OsvRjL0hX?;gOGK z$pY_OBQ*83Jd7yTR@-Vssj3UWiWT9sla9kD5hi1GP=rL4?|kovh&5U4s515F3d`gH z17ZLGAOJ~3K~#Y1m7xdd`qYYGD5+|vRx>4GY~xm{PB$Wo zT{==E6;ZeBYpuNX^lL3*p`u_cJ9bWy>!#*<4yqvIV`*`*cQ4}0&dun-qY>Q!UE~~i zp8;rg5OJ%$)j@a(KX|$MgE^9PVSQ>I{N!Sh%Y5M-if&@Qw3RyUIO-!#`P{$!3rA0` zBks--PEIgAKF(>+|3i{im?7fnqI2ocB-brV85zW2`>_4%*MWp=ee~HxjWFFkP{F80 z#1nM7?EcgznY-~uCXGQg##qaokC<6M!4qG53H??`7}^wVF;a9tOYUl|IN>Ewk!7e> zNB*obmZ_Q+NLCCgkoX=UaLe1?LHCC5G3f!Hj#xPhclNtH?KQ81laIqqv`M)Hv!H4+ zwN3-hDqajmOrf)|>gWS&J21|h(Ot2+gsg4##UC9|E*1$eBAkBm@%W@q7+6FU=Myj? 
zx7>PLK^>t3A8J|DcP}6P4#2w>j~WNYm-;zbGOh;357|tD$EbsDRKRmu@|jmMEMczG z8Mx>N_Vma)_&Q7tD7b8iS&U_R&ul?8p3}9e2!4DGw%ww?cV<9eO;&g;tNVM!qBd#V z6JwyzeW<tL(aTpDf&OFXX^jn4<&U3+^y#%&yCKzvLdJAO_RWeov5m`2@46=Ew z43nz(!%P&XT;}qC_=ILZ=BMv^H$VH2PqW1dz8~RSAI2JNi(}6HPaV{Ne`dIG@2o( z61Lj>_>OJa_@xekIm*KE{nh%(*Am`QQ}}W**^sYS+MBoF#34Sx&CgZ#1+-9YR?UkK zVRo*At1u2}p7d9km1LAgTCtB{n3|d%WYxAM6up; zxaO^IW$j!K+lx{4%xlEG-8oLa_+sp1A5GjG8>FGDC^?IibDg?q3sgpy6N~X`EZlj> z`WKn{HGtDTG9S_X+*RE0fq&&t4KV!}5PUmihsGT9l&7%yx1L3>*~F@p)Toug(OiGW zsHC*C!T{=1+s|J#ulCotoH*d9<@_46tLxMeN@y*CZP1^Y!RF&r_sg$VD-|iul_q^m5EKQ2R0DNw2VWe0 zkaF|~Yb5DO^(onR4!e3=-3wmW_D!8!%5vlgobuC;ev--Ed$65|)_BOY3ghawx0Jm`i%f94FZ$xE~sFmKD$5j8M@#7E}LwK zx`>=GW;s* zL6rNGtSpvvuT;rN%b=3?xA?EEwu8aEQut+Sg>`FNjJKP3mtX^% z66ttl&z`-^b$Th6kN1@zHOeXLGFSb9xk`i*P0!PKKR_X(G$zOKnu2K>$0cM?>8e!s z6sC925QL2sJQ~>9OCL|k6ZHzMc&4%qU~IsArP>d$f1F-NiHl+)ablI#< z7h8fbpuL!8H{z;S{56}V_Yib?2oBmI-A2egjgViv{2fHCfMC3h0lZU;kwVx;w5p1_ zTy|Kej1Qz}SZbVSxC1b1sd&ME8F4F%+?S!>Rrds!`@3cSSCf<4YFmw0p%{bD=4!$a z)~%hy<8g6wbb78Pi<@#-;N@o}8R zEy|8LRRGDbd-rt8kW>N0s{P0XI}7>WRz{(EQ6KMpRUjh;`HW!05Vu^xunhPy{7T|p zM5Ea(k`Ji?uU`avz`6Q^(P5|yhJEb5M!8h26bd=3P~PrMx?R|@jqiQpW2{B6%_e)J zh+}^F@$irn@j(+&Dbf7g(~01`$J#77^dpkHZ{d#Hzt3qGUPQlPvDT#QKpC3^^F8LT zx|Zp0eTQ{6z)Os2w&>u9Hf`Y%fAV6YRtsxQrR`Y>BvN%$i?>gf8|GPfTt>vDOlv_! zN#dBG7xC>kyaD%WMWv>!IMP&g=g;q=auz`p{oX00gQS7G_sAad@XFCPE zr=~%`d6zN+%J7aJ3*ZA0QW{mSh_%ekcTwjGeFrLxOkxe{-Tm?(i?J@|KaYA5VXIy8 z6-qX^T)U`)_Zof>a&_|nU%(pb4#PfAloljDGVElfOqRVP@bJ~QU&f&X_+A8Kmd={> z9Q)kgfmVya+RT${?guhvhjdXl#^Jm})bX_|{~u@m@{@66YcPRLI|PBjn*`VG!>-+Y z^WB$ogcE`&LU2S4VY^c<`16;*`n813kiyUT>YG|N{a^4CS)_m%5ab;3P$?>$edC0n z6Epp>PjcUXUB%X*1wO$BO5X_g_Iq6PS1*RsPQ_1-5hAJc&yHEiLZ?HT4z=B%;&-Ki zZ6K5mBl$XXJL(f83lsQdUz6P5SGn+ymZDExyfzuNEm42EYCDh@u9Sjo*-qA`4Qo;F zuwpX?C68j*y?YKZW>5!cnGVAs>FR2klP|B(!5MxaFo-c2uZZ)7vUPdY$^iB0&Ye?) zK_fke0~D%*-YxG|SC5?Yp2T_jNg9(!xf)|^!OkQP^w?#NG>Rg^R>8=*}8Q`K zk3XN*d5^+14U!@9Dud67%1X8nhLE16^SZ(pONnG@)<7{?;M4gCz3EoI_TKk%glQ7Z z&tt74v9L4ga{4oWjn+l#* z^{WA$^x{@kx&MjF;v~T3Ac8FV{uXu}N~V@t$D_6bRpx8vJGGKDZdktwk3o&Wsp7mx z#js=7o^%{(scTi1x>j`6c&cK&QS))%%qJL0!86KzgR0JGUJ&@~cunozP1p#Ff@={N zaA=;X^h#Jm;-J@$D%~2vBX|L|nRsXgQCX1Q%Sz8SppH21(`Yt_I=os3yMx;rSxl;f7F5IX`%_|Cg8<0uo7%*|ukA#?3;%B;WY1 z^S5m7#%M3XB|Ty*%rydhmDn%TMV~z^_eoU_y4QRXN z^@UG4!bcQhdOv}dtRQd+{eDc)Zo>$|*xC-_>-hoZ><_fV@WlEQ>}#B3;b=p3PQOIy z6(2<;xBh@_*L{`oo+AuGre?Z4;@6%58#kkkutZn~2EZ$brV1u<-2Sy|IrNwl;qZsx z!VtX2Vi6nQ;x0kI!_{wlE1UXC(urvX2Imd;bR*9Bqd$bLhvOSfa#U3+@jw^r6Ug$% zV)(dJkI4Y@n^bfuj6uDU^ydlVnCsv4Cf4lSMbPi3O4N-e^9^P1##;5x_>1?$rQU1#J z#h;3lpV+MEG?n{k_Cb?M1!>6lfl_@BNvrWxyw6u>rgZeSvyND#E?%V}<5R4MlWN=l z*Q%xz(W)LX;Ii()IuF@Xge7-nZ?7wrSfG9|-eD?$g#LydqZvaw&XaU{n; zzp}cUm)$>cj+xoHjP+>v{VhCFgDy-8myvi+FN!L^PyLzkAl|1P5Lu!qlcmo^iMR;F z{nW39%`(4`+VlK^*aOPhf9`@oRnG*DtB?P_b6Eozc11{dfe?c#nj{$UT>H-Va)>mD zdp&4EfAb+6`t+v~x5p5%pbneyNlV_^)DkImNd$8{xb>zRIPLrk=^IOE1W}KqnGm+? 
z+4k{I6aV<9v-4a$NYjC`gq) zhvHGMINv9T6Ylun73{h0dN##ilO9fD;xWrT2p7HTQry}J!ihHc)B&s4LMtLKvIgGA z60g6u#f!g{ikFq^{4(&bI!`?ytW@m;Rq2Z=5WOI&xSp@@jYyvCYsmpXmiT9%0ntkN zs|Hk1pC$t8)8{G+S5qL81x;cxWR!IoDq>DLlq~2#Pow>VKi(dLN++@m%l+XTqEe3-MM<*+oSJgs?dIbs4@SKuX zR4c_1#ESp+_xR2gS8$vS&}1G3TmaiTJuZC7%i#E9VWNq;ob5u(k*~bA)=KNRE({*f z=X+^=ZW&o#41`smr5GM;HgY!5t@1_PSS;=-nr+_ez_Q{Xk~p+CBOty+{G{ksl;w+7 zx3W$4(GeM_>!k?3Qt+MotSga!-hA;FgX{D~nWVDl@mMSwtF5*Nbfv1t8jJdJbE}bc zt1;>4Jx&rd+hvgIH8}DTmt{eO((@mnB1tRy2$C-`XZ|DKX|o|3q(Yu}%`_Iu5hVqt z_skF{jv$YHB12$P<^775W4oG-(Pp571R=BYol@vWQr#s5oO)~{#LK86M;7%G)zecv zze3WRCu}w$472^eYzMLAOsehRw)25+@7IDsRi98F_x%L$Py*a#l1~XwCJvLpb(dYv zk+y}OpTmqncYG~JJ@@JKTj>shSC3{bM)jr2n9Y-xo@4foTiJ2%J)HTJUn2@25QAij z&Uhv0JHB`MyI40hi;WZ1D2c#yl<@H1crKj$P_)%b5mz&`vmg~WmD3r?@@eHtgZ7hFIxF$QWewt}{h3$$hD zL#imV48Et@maOF#9wr$o4>NWZF(s3TrYKv{MNlnq_Ou0n8u0iOwa-SB#O0j<1@B2e z!Y;jlyobc4|D6Npvk{`o%zy|fU!+a{+x#|2Z>3yRbee`zHTh>=GUk|4d`U=|Rf@3% zQ%U?K+qKAq01+yEl=3=XoQr8iN58h(evT{GSA2Cjcb|{>sRx&MG>}};r@Vef`S<&w zJbWOFq^fI|3OgPM;uR5tGX~pg4GSWb%Fkk$-o1x7c8ulXL#l<=(SoC`Rfgu&cL;(F zLNrO(xpOyWAf_tbIegM5XoOXPqa}`{41HGtCp7JW_U314wOf@gDK(wyFA@Zh=I*cG zZv}&@PSyp(zD8fm!a%YOC-1fRzS_QZy%_()?{V+9zQIEq6VPt6qdUXHp7(56vjxE- zKApuTLqqZr2~<0t)~bujgG~<4SLL3UNNN_tnnx`?E9DZmWY6bg#mx8l%0Ik` zwR>kE>eFmCNUUcshUBQ@Ipf7Iq(9ce7>li-!$tczA6G$8@~%l$n5eeu85};F!`<+~ z-&ej8pM5N}6iIT5F(ym&K`l7%)9WE|Gz>`W(s2N=Ku^Cpb&$jmbzt`%*fULcYB#;9 zDQ54zkN)0Sx_kEGyB&Hn^Jo;|;{nMbQC9!!5s%2a`SdYLE z0f%=UBQ^yvdVRz>)F{#lix}bax}vhSbl=o&9Auz^jt+H|-M)NWS0rcWgCGdd820R$ zsdTrqqAf|#enb%3EH@Z^E*Heh;yJ4$jEZ=OX66`gx3k4PDPn0I>3(7C3933NBD4FY zU;rKug!OTM@LjZw$e2~q<;lSjhpaj59M*fTy8K-n*_gn`F=jlZKe>fNF1{Eah6G{? z5gC(>SmZO(Mg)(;_G5Z?{0QxJ*?8I+L=s>Pm~5;gN@ALEpKC6ECz}#5i9?M;ngKgH z^E~zCuYe8f5Su!#d>o{FQ%li8>XOH0v9Gm)2CMg_V>20jsuS#d!u0>o-g}2za#d&E zzqNPO2{(1lYPDhuWeEk45Fo$?35<{kHdrv=u`wPeoW|kv%-A!7XZ+doIft>$V1rE( z!GtlGAi%~T2_=+6OD(AtI)@w2IaRgy`u^Cv>eRWR11Pp`Jx{CEeQ%$0Pu1RQul25X zz3=BgPv<)~vdIYDEJs=mx{2jL(&C5z$NMNohDch0i|V~%O{AXmiB$c7Dxsn%hg=Ws z_Vqj~a%B*3i+Ei`&jN|A)FQ96%C)az)ql1g#Nv%oh(n#jc(6rAQp7I7_%z+eALGE* zZHzzoAP2TR!t@h6$R;LmGaWQ90)4_75*5UUBt5lK6_%+n80i;?s){X9wfMsrW8%Q4 z5B0VcI(QB^Pbcfeu0ulR6rVI1Ubl*|jmNO;kL}QZ-iybAxTi9ktw|M^ln)mkB=)zv(*$lnBz<6bRatfrso4r@G)5CW&(xKr6C`kvq?=^~1&BA_q zVw_cL*W(O*xz=)?Z*iXCXt>gt!+-8+DHv4s(#3oJ)P(4PL9kjrDkXQ(dqhl_T@a<1 z$w;bNx52<)yFwA-rvn zh=tr?#G=O0=qVFly_TulZ({>ZFgacvGsO(6&O8tE(pOS6hp-Zv9Wt0e%Ipty7;)!d zxW?dYb6@w`61p2s4B9K;!7W_(cYnhMW5{NvXr_j)81`j7F8}%W!lq*|!z~O^o{l)g zDt*(Q!HT8MzSqT+YrX=vG9oGws(5c=o8OlM${tL)zxv?LQ{KZ`u<8P}Z0AlU z?z)3VZu~AgZo89W*FKUgBUOVbz^bP$mX-rG2~HAvDBgKy)T73LF+n0Yv6wW)CMm|6 zfcz&(9C+5MTct$-&QoMP#CxoBq+XERW7M#`wUl-vh1^FE6EHjXk?q*U-s`U8ks@Oz z?_rk=Gqipqn=U+$jTc=6r=A2W#?Ydm=M7>Th_Th)Hr3EJASRkVX^sC}f(f-EEh;u? z zeE0!BpV(ai@CdFmgNVah3qvC)HjpV*W1Sy61PG5mzMaG-5fm{|>_7)T-t5kRNKjX@ zX^JljoQCeku6+maBAD1_@M0j(De@k+*+fhqbeJudP@5uL0D>z!|DMiFhxW)&*(41L zSt^Ue&qvcS9dh`mr(ggU3x=mKNY_Mu22xXl;HAcWE*yzCZvMOXb8Nd!F@69UwM^P! 
zHeLM|ibjI5NlY~SECfYJjNqInDTGIFzM1CGQtUCC$ZdEBYYeEz^^|1yIJdt4Z`mkK zx)b9xM^juQWpB@M<$Hbx9T`O$3F^xr6A=uRzQghn9smbJq^eq~v_vS;)Kd9rrXZc3 z;-CNLe`j^>@VyR6;>nF+GBIqta4lIl^p z9uNO{X^nXwX>T>YZ8Hys31=VTKx{Bcqs+5k^+8V%^%cEL*>Z*79XEmn|h389|0e zpxK19(VtT=eSP)nS(Y;oc@B9OW@cb|n(V*@OU$W+^AX^5vX**`?T1PJwnC2%QIKk}zmm z8vqdmALkv)j>w=afa39k2)Y)lvIACgDG#=NT!V37rAY((74~2RklIybI5W{1S?g#|~@oU&h zHW;18b4WJ*u0F*MisJ*BFkW8 zm_={rqx$-ua`+=I7*utWs-C-ezn|hHt;$jR@}2>>H&p^wbTg8N?&FCYZs0_1Lz1v# ze3}d1^DY=$hPTO}XMprMsNp5zFzD1c_uhO1FT4B=xK;~{iGy8&S|OS4@$f%>9DQsX zNl$x_CEL!etbWTMuVZAuzqNa4NggV%7JVC zFW>n4kJD)<^U80$xu5ef!7tw1r~G*DglRTmNDcBzBh?UQfLj~ zRj_pm>0Lw9+!%LIRbgO=u|x<(`YHw{q8SOwjUW05YfOV| zVv5$7B_CSB`ZxR#MYA1-D!mI?wM1TN86W_>V2hmXciqUkb!%vDI+o1X7#KVbOVP<` z?%cs$*IdJnzxv7C-p`X`R|Ir%q zZ>9^lqayu_nQ}40Q$4v^4Axdvo~umu}`GANvf~UVjh9DJP|d*R4I4 zV)riG(gxeRlk~<~46Rwm#*c=)=Tnc23TbYhy7 z=_pO#rje#h-u);KJ@8>}`qK|ktXjrNuXzQhyyYr5Zar!eI$}wUB{6Ai7zU2BF%lA% zB2ouRXLFr5Wm{n73UkF47t4v~>&~9NETnc8d0_v6@gPP(nDKGW<6X|`Rm)0Xajw}) z%~bQ*T0P9p*(eUb>9Idf^ z<%TK&3JMn{SD4((oj2XU55D!^QJC=h>I7?2ym2(M8SeeN_cJ;<0i+~0A=85WQ!~8c zr+)!fu0~Q@<9kXqx~9wuF!Cf|oYI${D-G2GT`Lqty_p`(=^6g%PyT>4NkZP8rrAiy zG-Ge>xZqvygmX^DwJqwxJc#oSYs0Xbmuj}{2+SXj-U9Z}MVwTHmBwP$`$(IK2dOnd z*{ECOw9*zPNx6N?qx|IuzRVY|{Vr2o4=u;o#!aNIi|=-LBz2tj>Wet`!gE=7>d7>Z zTMZ*4KoN$cZ5YT4^T85bxnDw`^hibvLo)n>XNh@1t#&(00m+Ln*zP8Fqi<*a?fRj_^qIR!-{#o9C!>Kas@>KTamNYXQhTdV5=zb_UL&-)`uJ=PeU z&k-z6Vb9*ZG#d?a?@7~yB(dbcG3!>xq!C#tmd@1>!eqb+hu6*U+5|w0BNea&c*L<9nYVe&Iw4qGd7B6|%+bEhgXxYa> ztUlV>M?gfuI>#OV`ZZ2G{ajeGGN`?%kl2s~$z%q%-^I2se}#>9h^#Y(X*Z$Sz%CnO z`BhhuH<~2I^hYF02g>?~nf+l8ax~PB6PYKI6|GB7hQTtg8WwWlX%>zxfBSyYM;@W+ z9f~72mTB8y_yuQj{5!5DA8KMGB14`~EI1bi%8ZE#pP427${{h@qYPU-?2@5Gp*($` z3S)xGsgnY*SetSH^F_K~($vvk=?T0Oz19THP;>5E^;o33~z7r*d0 z+KEsoICZG8xJai9N+iD4E~gqM3|44;k*>-ljsE&Cy*dUY3I|fv-t)duVGx zA`MO(U5@VTu>JphjBo$Z`&oL|VaxZ7iAyjOaoCa%uCjpa z;^CH$dej!li`Stdg7(u|2_VqUoqN$jNg6^gD}n^EFIcr=Nldbn>OiY^o9k!9LaTAU zg8pd*O|-5oCVgN)ArYasXBVnMbIlsO1PY@mXNbHa(GzCh{z+zLa>ka-bCwLyHFa*O zE}>9Ez>2{D-EJ1=_JYTdDCqcptXLRZLSM)B@a|nXQ&vq$ieX~kepanrk6JsY3$nQD zkGkuKbNM4G7*ut$s%~Dq&rdF*J11cq7_yQWgi3ma;(9&oH4R}c~D$zGBDmKK(5E2^=eCz!?+4bndyz1@mrf4-$HCPXr0`I|eyWIQ_A7aca zh0AHQ5+>>K*tBxhZ~Yd&X|aikXtD(EXNfvfnrKPC1$`JDhWWbK{e+DH4SLn!k6=we zespGr?AF`3>vNxBlM{R|M^eZdP4=}KT=AQ~LD61=u ziSpd@>Jd@208I2#NGu6W?%lqJKmOCd=byiM3*$Y@(9jav%a=knNmCx@2QNL3H^1qJ zxaiCiLZsCg_9mA%<*_&1{^XQ}8`0M|fsj)Vvz2>zX_l`HgrcHFcCuz4a1Vt7qMVo%nE=LrX z28XE+ty;5~Qara$A)Wuv@!;`M=F>UIh2T1z3`KP_gZhG@M#}Ofqo8@s6@6~AU@#${ z4>;3j{C?Ek3jzS(La|ij^mgyS7X``MwP2FI8eC$7h=>j6?Bh@DL^07frcV)^-F%Tb z=YKgv;p5qA@Xj&Q$tYB5M1@09A=|s1wB3d!ODY%oy6c=SoXk}KfQ!LC$P-;C&iQGw55Tn>!+4|YfGuF)! 
zO(?7~ZkxRHhu;cmgEYE2M@xc%z^{0G)Stiirt3KQobzF91xOm%sSd?ZbUWCauV?>v zZsfSeFx~DnIi9WvtIoavx%fida69-kn7$g$fI5pDD)`T}@2Z1PvmJux==-a$+(wIT zm$W<0wSV$QZ15?zH$!5TuC?s;IhXv{yWyA(NV6G}+Ca%Q)|hQ+WHEFXsndatcGy=g;{J zqXaLHINUEjjQOh#-5*~%BA1xGF_oKgX|NZ5WZy>zrLP83p?6d;-u$IO-54#d=SumD z!TA|eTP#u;Erzdq^^~QLh~V&1qEY&D2uhGS+jZttyIv$RA3CV z0H3;J`QVg5h3CUFZaLU@KCaG#5^$L6;r8qY5n9J?z{T0EG8PquBVtcHvh7hMu@r@m zpGT`Mz?|NlRw!5~z{(ItxVPRb)13@gn`jmX`M&)mtzmq#6%&80z5ZOyR;5-XRJ_eN zyVvXD3r{jMgsZ%hq%U25#IVES^&Ge^0iK$IK~)zEhNmMn8`$SQ&LS8@m7<%IbUNJo zg)eYyn&PH9%!sn$tg~SAi8z}C*Jf*~#7+m(9E=0Ee<$~Ed4Mb4{WIjoU}GRMV)4G9 z(d%&i-+h2pq-4Du6^Bm@2Z|0?y!)N#&}eYJ>`#i;K7$_ifb)F(>N&~|HPxd2H%W!m zs1~Fc9{KRcNVh#kBNrORldCdA&a%_bV)+$sAa68COgWGz{bsw7V5}2-b{CVMd7e^r zTxfZWL=6$Ii1m2yFlmaR&6mD?6aU|zeuP`LY$q9A&Db%glJDEY@|NeS*Zm+r`qnqF zYAB%X29Nh1k)UeiRf4LQuXP7Zlxio%nvxznn`~>#AbLr(XnBgWVHW<76n#`@tKX44dr12e;8|wF9tK#Tl*kTKRsV z7?CQrYRLvL24e*8Tu?$&N7n5jwhDN4)xUNPawM0$&XnL4C$e6tTn>2Rr z!g?W3Jlm&dxbi37j&G!)P%r^$&{||Nk!z_QEW&rb{xwcH^8$RMg)OO%aYV{YdG>zk zUzoZ3epc8P6Ws}HJK=!l9RG@!!`bKHlU5vRoo{sOdz+712*VhZneD6dltwdj9Ncj) zcYW*{juT6F<^aB((rcvbNfNI7*!+b^TtV~)|5e!ae zuo#SO@y*+A=Y7Bb0dBf;8%tNLX2rVo6iygv!aINN?Y!;Em$9q?cuyi8NiEKYfj(Et zgQ^dm1g|mS5A?8*@allf@a&>e=2V}vs1@JqX8j)ju~da_IGC#+PXf(Bh^W7*nJ8-H zC&$hMlHhTHQ@m;wORu_;%U<_d@*8gDJ0JNJkKKGbtJHG#iuH`&b`RJ7_Q1+Z9}2UE;$Ujue%7p7i)%Ip z@12$s((~_Rt)5T-Gc3yF-aQNrk3y0Ly07$yqJc$O)uZz|qMZJ)3kFp^adCd(sY=9Z z;9cztfu|T#c@=z-VO)=!KmJixOM-Pdldj8>lQzSdXHhi6@T1mjbA27@jER@-+RpCB z9_PXzxPr_im|!Op^6$(c(^K5`scTqb4PDox(Q46^E|cj9m;Tf*P_)`uLzUF2#5WRQ zUfS~!77O)%>Id4QB{ihqU2wU>cDj7!FaL~XMM070G+J$DB;)Z6E_lz+!zm}?TWR!a z2-Ff&x#ViU2odV#$n&gx9sGSZHLQ9X>Z6;fr{_G)bcj2*?c&#d>%Z}>yPiNt*COlB zVDG*sIDMt^?svU`w_g6Tpii!j6pI?ct3kcP5L)^{jmSr2i;A{W9i*m~)E>&w?-@ZR z=dUwl-XBk^yIe|BmIaWO9f|n82Eg0+aSWu8rwz<2Udl^eehEDA2sizo4|4C=^clhmYPc)Ug>luB`|k2Z!Fq8K%SQdKn_ z%m95v7mJ0X6~c;sB?(5bw|y9B0Q(# zpmR#o`T6ose-Y7FO_Sh#fI?Bw+4e9Lj&#{77;Z-{CRGQ~T8kD1ng|a*ye-fxbH$XN z6>^BrQsEm>y(peb&MDn)K`(c4e#>K=V|sc5v-t)1MmpCndZ9=dw6JTBCvT|I`99z7OnrpMFo+u&@KB)x*aM9wueUcN3h5lUogOA2Js7HDZf5$y!TOOiW zB&aXg+sS$Hl~b9PEL_IBvV{TF$xPLKq#wSQBML9g0P=oQbbr zi$3@;Lz>X_Jqjc2?K)m?#g(w}SWK%KX248e(n${2#`6&OLKjH&)R)7prHy6avfED1 z_*cF}e&=0`V#uAtH7y6!GIG}WY`F4D@}U-q+K~A8{?A%!iid!Bbd(84eza;WJ)lCF zrRu<#Xh;;1E-A>foU}2EUUI7U<6oi#FyY%NGq+h!AIrn|nQaxH?KyCQPl%Fu}sbIhsU07@Ot%AWD zkGBR+K8Evu=Rd)oU2OT_CwTOqzeZ!nF3w!Hi9K7N;Hy9NEBKR+SPSGcer5tyFD zcYBPkT2o;PRg9=}c}1z)=necJB~n=wOddGErV~y=jj6;EOcYLdzQJhEv+E+@unGoq z*dayr;>G*=G*uAV-^<1XjK@1gyyvD*Uc>U#phZSzm27MU%U=B|@sE}VSK9@ z22)K9hnAZ3y&O4?rJIy0meurQS*yy};K z5jWfne0R|brB=X;;3^dvLDa8IWv-yfh zfW+V-ipitrdBvg(2^6NN3WW#-o*a+Ilc*=fk`@{S4w?zw4J$bLH-3&+e(Ha-_9w4m zOKLcfDaVYhV%@HBKKsu1aP7bUS=#q(rB%S>%p{r$#tP2+prU3ZiX})ymJQy7E}vFK z-QX{s=ab01#ey7#Pw(hIl=@FCaN3O(?!4!I)EWx)q>Z4}-%JxWpLl$w3|0^3^ZQ@Q zy!%?7B|EZoQ!mxboGRoQ+2ap`2*W3xfVYNnjL+9*sXW4CJ9d$04%NZkP|jk_^>i7$ zNu`%dUkW`qX&1dL)Bpl>rg2%$(7N?#l7xMyK1xj2Z@%=;XYF?ty{7^witY?}f#KmG z6+2L3=;F3Ln&E;Y$nDJ$yzYw^Z|&2N*lF#zGWe>Z-5j2HlwCL6NW1XlNyg5+!^u~? 
z23D>>O=u%lO=RtlIvc=vunM>Q+rP5;+!vwkHewC+aAIZ(_Fnrh^d5ec)I;VnOuNN+ zUhu*zUk~fn3x;ThC$bOuy`fy<=pf)?`LFU29swJ%#mYk}NRWIYl)#6SJ( zO}ymtALDoa{KNR>2<_n}CZFa%|KgAF#Si}>Z@B1ehM1v|3T3Gc8;7n=IcH!9%4T84N${>zUoxmu5}G&>vQ;ZN=@);AOaI}| zSn-b6vn45*=w_U?dL1k7dx$T;s6?_2ZViN9E*!s|;c<&Gs<&9MidSZ&qt*kUk zt4iGWsH4}*0+msL?tWZiVEx*#U!~uD7nyaoRVqz!7Od)QhS%yGmUmVopYSn+K6%%>R5b1W981>+=~_U0eK+XjgW zMr8o)*GRchf}#TZ_AtJ4J13lZCV7%lXw;Y%k9Rpt@8^zBev+kX(L5tf3|-0TEE(aL ztFI<+CM4EG+Q{s*Q|3Y?a_Dq#Aw7%|=Mf{7|AZ1cE~3G6f$!eJ6aRWG!wQKQjA_!L zi8=mM*1Yv(jpuRm7ygK!c;h9czDFv8p@H)OxlRalSYu3u zmiN63=9`w7Z}eS`Vv9VT_gU0uAWEW!M+1e(3ThRs;1G;h5^u21((p~xHptd);EZ4Y zRj&BNhZud+W!#xNrV7VNts$D%-N2_`c{#iO`s1`ZJ$h3IaMlHvS|0{m6>u7qtP&rL zqm1?wjw44176}c{wS!5kj#p4Ea|l}=eT>QJ3?pg8q?Dq_IrG%b7}Z*bK?a*q3$B7X z+2xs^h^ac9UgMevaNrAOwr)Z43`UnC#~%lhR8r4j#v{z~<%M$Z{ST6)hP=oSExY2n z@Lti{*;&fDs*6!En!)*kEGxnsk$6ZmW`>4f^(u^*{{64dKF{hfXg{Y5=9=me@0r-M zlVxjG1zK~MEE_24N~zby>!`fWjXD04Q!udDU3hAPl+2|yYAqY|nS+UO9{l20S!qXb zy_^Hxj14ck7>+##O$|naA$(~}r4>)Vuc7?V9k+4H>8Hc!62v4}@rZY*g6m|=eEWKO z_ibfN8u-GI3yk-2PJPv@VeKktBzUK_F#;XH6)WYUL*Pn-g~q}DK88!xs_;QUdmiaz zT>m$J#U>gs*+E1p8kU_17r*N#VQdLWGsPD!yi�u5133s7o0XpLu@ueD`C%U$ehQ z=lTAFr>Z)z{>pn3^wTY>IUdC%E&k*KpX8huzn-t%crRlcS2Hoan@cX(%r`&(cl_3S z-a(_7B9V+#QoL921`&fHsnNXphZ^_27byRCVie?Ib08c;PwCb0ft_EDfj0yh_Rf%bPf0X*{l6hnli7}*<#V~oT+2WXBav3l7RDY!*^~&tik(&yy$^Ik!M`= z;um5QiSEP^;jRUu`N5b$kl@*{LaOy_LN^;1&a?z!?{{x!$fgXh-2h95!=8x7b(A2Q zD8{7RefRw&4GZGJ{w!VU=jhGN^D{`ekLAF*F##ycvYbwqV{8IgShbGRuKsthZXM$L zfP)dKSesO|Go(HYD()$i3*Pmk_+}$0AJqawpAxQ>BP6{Uw%)geV^4n} zPAtYNwld!;NZkxKfAk})NSk!K9lSWaHTba+PI}7~&}>CYNpStFC{r>xgK&s2d`K*E zU^q?$)lkq_F&GIHCDT*3fBCD7-*-Q@(f{3S{8UWiA830wiqFIT_kOD&l8he_=cb6 zm;d1JX>Z(2VuiJhj4%BC@9^RO`STnzT+q;*)U?3c@NCMWEb?en`pIS`Wjnx;t>cP1 z@RJ?fq2qZ9{Axfy)rBG<%sPbo>r#?VlTh$l%BRC2s`yztD^qsg1?n~=>n#4raGc;?2N5Bkw#(hN!=3hFp; z-~DLeS$g&vXd@knxdy;FwD3qO+`r`^n(ZboiglQ1FgY-zrSnI1t^F5O3Ks#h=#79Y zKnjY{B^>{{H;^|QBqAhXurN>xwFZ*WmsTfI%&H>CD|BaYd5#?$!;2l1O4mb`H;dO% zk&_-~j(@lvh6@*O>63_F%*qC<`6Q_))FD{-!XrN8wom^ft6M{4d5>NSW2c>pJ>zt| zwP6;auFtB`0LlcHO6!GfciqLZRqJ466sZjWBOaOY?Em_==-vMSLqL{Ip$*HpD>&oI zAA)0#$Js{kcZh*UR2NA9=fjB(pT|7lwIKCYs0Xk3ZU+1JaQ%lq$a0_H^BlE8(QGoa zbQ$Np_dOKt6l|kXlaOeTHSn`Ztr0HsCqGZFXrw+R>u8{dQ~ScN{edTIe-@b)wF1vy!KJ+nKos42~0x^&glz5W>03ZNKL_t(o6ZW0hN`F-Y zoH_5#Rt7s*!t9RLqMG}2XT1eKloGY>7I52l@1fOf;e3V@hbs!!EFER@i5m#A%R1O) z{yuF`o^}wA{@g~O>juEhJ-VHI_byzc$;g?fQJ5xu{3({iQ6QjGz+;c^!Z{EdsDwVw zfz$_y>wZgh3EF{OQqCp>UMEviGvRxUfVJpQ8;nKO<8gy$fDWEx)xdQLMfyNT%EtH8 z9vX%uoS9}t zPq9>#tmu-qTXZ#}Shj-W-f}g0qZPTSQF%QoIFxTF98Bj@4l7vC%jnjq&Z6PHy%r?S z@!-cl%E<0LB)LaSLT(!D&K(!N;~lVa1KMbW)^JJ7in5?qYA%|yR2~P1!=9Q0NuMM+ z&;`pctv{1PLJIwEmMFd`=xL5`j_|gh{%!u_``*vk+7n2uVTqmOOV|7{?|bjnqTSP&EB$|HSB>t~{{7>keI>#%ZsbLdT^=K#wV@8^?~Hdaa6EJ&_Q`oq@* zqcJfrGVZ$O6Rc?rVR{*ofM2$frLTSkMYAzDH&97bnwVgN@deX69wtpK!y7l?OyHNR zcY)(PlT+OKU3z!i%ZMfvUg;QRch_;+>)!~g*Fqx=Xhfv1^wE0lquF!Zbq`L68Pw*P zPq|$N2A>s=pUL2XE!_XPe`K9lOs_+s1)WrBoNxlGU-vrlw1p+9R9!^0DsogEqNPA! 
zfz+UR&dNOD_g_DUP5=HK8dG!F6gnd#?ER`&vG22=$4mVA!2kdt07*naR0dJRHt4py zsDQNwBVg2F)djj$bdD+rN}`3VFxVElXv?b6qg}-pQKl%K5n$c5*OU5CS&Vuf`mp`+ z^Ru+lgbOda0!$-@FKIZ;NUzv$xQ6R~+%^LxBsv4B3eLMo%S*^I$Lvg-=2(j_efg_A z_`t_-${SDRtbh3=oA237Ye|DvqZOug8gx{p(+FA%ii~wDQhU6Bw*j?I)#H{lF-IK1 zliqv^S6zA`*ZtdPN!G6?pKarfkQor0;^rM`yUpZy!d3tFCC>ksPwq!8f^LtdcEF+3UDt`Sq{91An{$JEwRFiNi>|SMRfx zqE!9~;vr4gvi2@kHd^?*HZh@~+6CLe8DE=7*XyHfS2L&sfvTv>itova;#w`%yyQ5# zlPhT@W8C$#AHX%g!_9W^u?AKjrAAS+R+QPeKh4g~nZ?{QJBf1DVk|aK~jX4`eD!(|SHkIqGG9O-B{mNW%BRO4L#f0lvk0fdvhNhzuKsZG1s>8xsaE zhIHv9&Lo&5WqRW#)a3zcD8>3U_Xt@86|9}Q4ZzqK>9XZvQ@~@?Ve*Xmn{UIYlu69= zxKUJ(5nLCd@YxA$+Qi*gUQN>&R9qCn$%09%0&H_X>krE5E2VwSP`}Qi+%UzUVu1v_ z2^(8tkSt?ObCgT|>7&?RT|~Zl3$%p!R>EGddo8PuK88Qew=u0Tya|GBys6S-O|e~t z|EF4ad+*U@SFTVL2OtC9({RGt>u&(fP?uo@c3!oNk%q-N&lOkw9xp)*TG{9J;!wji zT=(C!LCOU9!s1x0pjc+_nGQiwd52?;cm`kl@)x-L$_sePlMlfs9VS;T1DleOlY1AD zYbhg(tJz%dDXPcW&47>fq&t?W=PtQ4z>p%T*(%U!Trh_2_vkq%=HeK>d zzWSy&vd`Xo^0c?UjqZ{r?Fl6rPh#@1R(yL22)2pOQRo6x;{6`8AMALOnZ)z5cm5sc zp8j_vGhH;#L!z~6%@?94wXuE_-^TI1n&K3CjBb{(`k;gOLwlNJw+U{WondnS{lKQB zgHmC?xxGlZEK-ImNn4g)VnWm9C8v!O`aJdg+>_Svs6Q2$O z?sd?lU^}L1qKh;QJuZEg42L*32G}x- zXt0HBwo>-qV^Y!$ zfWJ8MNdEh0Kf!8~$>k$7Z8J_IJ;n>xB+>rVm-I#>HLX;+b8B_zMuN>TAz-};=*T$o zutRwDnP1?7^M1^AU-=v(D9x!^He7chUq1D9Y`J@i<39RHnvZ-uZIfUpMgslFD^_bp z2BoKZWs|6)@er)#gf;w)B^{rD<$JRKfsf{Q=lvULMoghLH3I~#B|Y~xUJHv|zrdh| zEUO|7!%k0s3Qv5`J6X5y{ygeUr*hCMPsZs8iCRo;BD1}F&DQC#mHn!U!HC5InSt@Y zd=^v7b|!U>&6izDe)aFkW;sIetwa4*EOUozxc-N`g}%v_AhMR4wlS{ltGQY?NXW!L9ClOsNH2D=}3 zEZLG4wowO)WSfNPtvQ0q0yw5Rbb8T13A0W7(n%h1;_gdu^alXb}@<>=?1z~z6q17}h^hOzN+mX1xZWO50wee*jx;Bimn!Yi&|Z0Q6p zUPNPUc~Gkk(KWM)(#2-G(YS&6KZCX&UF0!!vA^q?-7GT6DTd!7y@Kc3RH`1fTA+Sq zPDZ?@(J%^YExB{-vSJx~@3lI-JFi<2Pi*`2F$6~XCQcd-{%VvQN49B-?DyAm)kPO_ z*vn6(oeHf+igUjA^CgDs4X^X|H4Gy1M__t*t+rmAw{`|pqa@2F$qm>>gU-xUnLAJw znn91=*EsmF>6U+$T3T!NrW=cqbC6=$eEY35yhL$w<*ZQV4a9B1Yus>E5i)N3%@w2w zKF@0rLAqe4k{zmB{WD-4aP73TE-gLlih^zw(Ht=z5{dremMr(E(&oEw zdNWHi&z3E-G)EiEr-nzr>s_pT!c*9kB$!5%PL?6ol87b62Bb0=z2n72Zf#8F)M_vA z#-LGJX~klV#R`n1DR*3ZJ&6ySfLF(3A9D~!Go;br!rxqr7pXSLMJF!9HC+EQb*k4; zIUcsvgBm(nn}-`>br8Pq z*(AE{#6j88E>MSOy)!5P8Z!t5a@9ciH>4PJ1{8~D+=U%@mIH20;* zSENdNUyzHSgOJ|JIzek7Nnw0oyZ|k{Gmxg()b?p5JloQ=Zcnq&GYg67!-y$|F*=g6 z{K&t+tlXVMER9>PW7E0kk#<7;6*K@K@!iaI=Ez8R!SSbX-K}@iRv{zQW|MgGEGO%F zM#ok#xpWmHBNJrp92++!6LWk#KG43(!+M-DM?2kt*Hjn#UiWoYCG}_Fzpbprwa0yM zLF-qi5)B0Hwnj=CGz!tOL8uakv}{-tV=$`VJ-N?$_`?naby#EjTGOEs_x7B_`q9<$ z3=_2~Hc0~5+HTVl`06J=#R;!}J?(6U#z+Gfvlj}h1Mhups`P*Us!zvWV4D*Lfa``| z&Bf2hi!xiKC~ZQrbQxVSAPH`+4efT=L<+i4zffRNRHd+2v#2$U?70^m4GF2%q$CKN zZ(9r9yyPi+=j(uZiI__aO2R4Jxt{s8>uE+Or`|WH+qMdjZT-deZE>XlO-y7$`EnB| zIIliVNW@rZSkyyfzRNCm-^GvLa3ag{He04=Nk$vAM-vYGo4;YnUp$RJ&UY{)OK^Bx zo+Dbsx%o2LwPXMn7ORoYP>h-Jium|CyrXI0hD$HOW)ATl@yY=Y+8c3-PbT=q#aCjb z;u9CwbJ#8n*Zoze`tw&xYP2{s^IUW7<^1gj-^0xG49iwef=%eE!@CGj#CuI8c*Z+C ziAk#Dcp2;$UfgC)JT%YSU1aTJ2Ot@?fy6V+Ql!2Xe$KUQV~u4fIfr`V5i2>RV;o zXisZ22>F(3uxf(YM6`Vh88$WQQxD&BE7~yG&ONDq4AvU_E)wjeNmAMk!(&f8o?AO} z>@_;hZ$9_W*!kd;CsLSx!hE|!CM}MB{z+W%yW3FH#H%5-DUCEmMRBRcr3smLxI6@V ziZ#(;PY@eyLrcwql;Ix(Mkjq&RtBT~efWL(Q2n@DmC;Z%CovBqx;0hQsNztl1D~s& zqAh*-C8c;;7cm1V62nX9qhL7TeK6y&#xOrO&(n{1GNSQJAPbw2q1<@~=PS}1fT)0U zGSWQZ{7-&{0}nbF_S_Y-bR>qcOZizTfdVdDMM?ipt${tjuwl3l6NbUx=ujjYlS^=7 z1D`(YLT4TiA&$J^Qq(YLMDD~`nyYuirwLvm%RF>F-RaHHnZpsL3}MrkUKWu0 z>ch@*He7Q#O=Ix63lycq_MlWfb+oR<(jBQ{(q}_gJD?Ls=sv_)iw|7aAW$ZFw0mEM z0EG=b!K+1*2G&_dHs>t;!);u2+S^#>y3EbY&>BsdpJ?*1x4nx!k9Y`5}4K9mPlWA*or#p{cOdbX9o zlMbh|^R9Gf=i$~n*zoQD#`q+L*%{`$8S~!og1>q*SKoYRaIPXC;Lbao&v63IM8cK{ 
zPE++JQKER%_lS|hXP_}nTT6TI>P^>VFx$8g(kCvU*X*NaW#L832}iY+^=@Llv@@YM z=Cl@uS_Y%~>N?9J;~}Z**9^fQ+>m#=BnHGeES|#-c@)-c57;(b+rA-c;6QBFlpLGG z-536bTmN_~2fySvx+4t|u|XSI`wXT3BN#FQw!dKzk!y$Fz#Zspi@l&GDrRgHO%t5= zv;=hHUBTwl6r0IHQE=hjpfzrA@pB;+*^n%oBpn|KTv?lv8l{oXlr9IVO=3wG960 z74NhhWQ&vqMMJ-ZF&5(#J2MS;uA@xkaT$j{;So5O;f-+Vudl@GfKj)~uxA*q`>$yz z&+u{Nu0gbH8!3_`ph`O5A-Q{s=Ik71%M9MRID@OgyrdTljt8_pl)OCkRr;TT->nMb z6*?K$p8a(Wwj=DS$~~KBX*}{!p7OCzvux!9IQvB}p|Nf=*>sx@K8UMHqth9&aj9xa zpb%EIvOt7Yv_kg_y;7{JJ|c@+d;Ov@7e+kbP<7FMS?M#5t!)OnK=&}H3#brosAw{a zD0+L6+;rOwVOdscO-vA6AsR`MQOBJ~3mPLF`ic|SEE)UlyeGdn^J8$&CNx)O@|=5S zyFB@6$8!1gf56x@m}?{kte|2-O;ZVGe^D4+y3N)04+yE#1AL@Y?>A!(-JXM)&s z@j1@_@xZ*rdVs|y4A%_5p4+#^h2+6H)`6&G#T0-HPhgX*D}S`VbaX zA5oitwu@M_HOBa=UCD}rxaN#V&h#IDhjG;@A-Ez@2DM3)45$K~S?<2(Dy+{CRcz@F zRH2Fsx+UGAyLa(ouQfM`Nc&OosHp_eq7o1Baay9_H6}K?unCrero6+Fjw8SHa(?yp zzhRk4nVO!ZHI~w8SoV9vYdP%XQ@As+NTY>!0eK>Z=~(rs%Y*2<*cc>uIERKJY!XHc zJ`e6x%bS+FuJ{ecg^QA^=NV5u96R5pWi3Da;RTSSl`p&YBTvIMT>tX`NBy81&T3~#B~8B0WbliD6>hZM)>uvqz8fRscqHa*sFuFl=-B? zhS_z@wdT32X-E!v80}R{dDf>t#-5LSFz3DPWJYE^I+NoCoC&tZ&Uw5SR1;8x_ZBa~ z0Zm1sVS=q4^;~%VA}q5+9^-LPLcmJt=8S@Ov^!l$pquBRx$v@Zm{LchWm|{)g$=-> zkv6hpD6`ZL$VFz34WLvm9#;S{1qd{{gyd0&k?y$<)9q{AX9otz4U)erm4K3QSsZ+B1LB&`xi!U!P{5+;Me z0z?ow7-WI~0mgvH59|ln7>vme42WPXzz;A%Zb0V+1ZtB^z7-+XIZp6+tYLJt*Uc==NH5VMw}RM3C3Hj5uDHPxJow|?az&uvT^lB zaYo4%Oe?|KpkesKTi+^VB1M7tfky@Y&joFVX=o)gtkiBA27`=_T0Z{vH*@M~r@`S{ zaFZ#54H-`(ZTI?>UrP_L-Y{I>3BwpvNrM#vtqq&VjUh3X#p`y(;4rB)0y2uN$Z)KD zfnb<8AxILYZgMP{F(GR}l#Vg%{Po2c=bGr17K7DrV5lhG3+&pC-?fK?fW9PYQd+T& z4&BH6{QC`YTjd&wG|(YwjDfZ0Rv=}tQc;wM0JcK5FoKx~sQ<>UAV0l;Z=UsZHe`MF z?Oni5resqsCLZzt?)|#gb1?=#yMbI`fD0(3dIA;~%Oa@>IitHp`O%HRC`@_f;%}da z9k`H%p`IfT-^}Le4ubH#@0=ev;0$RQ)?e$d-^0!ovP6n$9%LE<2{m&p{^DZxU3DeL z-SlWK|LrB9c@RHGq6$TVaS4X#h+~Zmm2b7Jr(Ed9B=k{k|BAn1)1w~F6-ORH`<{1a z!~ISn0~W30o-cbjTMs*w@4oy+Ow9KfEM(-w085)RD`aYu11Wg;^$xwFKUS2W%F(GR zGh=}}7+`u`Ci=_l`oZ^T?c9aTFXC{BgtU8=)zR1MLLa~{p1l|joZ zez}V=O5RBr96HA>AM+6Q*#R2|*KyG&KFprq?%~wCKb-Esk`2P3+8B$mA<)Vb@;jt4 zO_Jt>Nr!1ZIw}v4p|fq=0U@j^gOgP*UNJ?I_=|l_L_Ox0`A~_T7Aa->=xE7VI zMFE)AY{`|lPShbr$-Dz&5HrB#i`XP5O(iC0R}FRJbBT}CI&itiXO3)nk%vCuRBY5Z zh^!Z@2lDcTzObkRV(6fFVn!8A^p#(IrR!qNA+AH7aOlD2TU3tzu}$#1xZK}0SC zcCBBtHEjMm?syK}?!@LT_{7pmQ~LWB5RJU{T8eThB-Ei&z1X~p1K%q~!l-Tf=$kV@ zXtg?cYiL=+&P#qvlIInjsTdc>shdP^R`CNCFZwyNMuYK)N-2^q!drDzfp&mSN&El0 zVtS&rL`;b`sud+l8#0ed+aXnot%t-4sZ%yC2{XUCly5xexoq$`OY`%jDde+Lvs#)OF?WU&d;C)|RY-s7I=El=bL zZ#|pbcLIu!sd5RR1QUiO3nRP}tqv9wdiq$*JJPl#-*70mdes}b->1LC1J8LM_=IVN zX)N=}6nA~aYuLW`8h-hnH!`{G==HksP2&1-4)KZ+7lhP>pfyqqihZTbR4FKh#I*Zb zP@~sqeBi)l!t$5D%!luLAHMbUvv~i5?uY#3rzHJ6PG!t~oO2H5j_cS$Sr2Gytx5eGr&C6lV;oCQKrH z2n`EGsv}91>5k~C!lJ$AQ{$IOd53uG51HgZ*U$?byWc3MaA&~2A+(d35O)c}gcLtgZF?qPK zz@s*zT|qd922O6cS)S>e--5nZa+l$aCB?Aol1njzJPOOSasjA)J_U?-Z2QqqnD7Q2 zWC)O?X<3MmX&(1q=g3+wRk6@Vf8gRMuYZPW;!*Rqj@Q>9Xy#GpaGIkXkCmL+u4CqR zm+|#yJ%hQWW%lja7x1^)DLD2h{`k#rg$@?Hke}@2eNSzCfBoTDm}( z@EL)-RmKmT*mB*C~o?aYx)9>!f?@@o3``V&5K z%_6&ICYbkq`p$tQAeJn5k%m;e(IR8^bH?VN>A0V>Y%14SARlm3QJoWrpw2PbN_fsw zp1@Ia6a2|(_hY)J z$oqlDDyA%EMLnWn_T#;Rg(S7)SkS7_=FEQ8cv*~x&@Gnm~lewj3miNvFKJ?rq+14%G>6NeI7LRx|*X9m8 zwUJycP7Efs;5^0?JjKalx@SNpR1E4NQP_~Sx#U~t(e_HNfvbAAJDtpwwTNl)xzByA z%)F>P+AE&#`fL5Qzhd+H3LuN2f|nTNg%5cnz95oq<=veF^C&e?B zSX$YDyt@ebQYgmDJxrU!V3h+)W?V6UXLw{It2YSscZvD|LvV{ig)1+;jJeb?<&}L` z?}VLwY&XR(Wb||+pTE4%UwrElzL_4z<;#6ct4)$5QTQE-WMd7^<<;jJJ5AS7;7BUh z@UgOtx(nZGmrf3J9aSIDyYO7}mix@l_h?N{v+bH4WVtfv4v@s4sU_1qXxc`=zwpc& zV{y4hoyS_(GCjc*2J0PdV0vN(kBPcEkCR|SjFH-8D_%aQEH5u`$|-+@pd>b}(Q?;c 
z`+MF?okINOJ%~+kR>+J&$!HtTk0_i>;|ZJOjJ&wK_aKkI*Si3t6PX)=@2cRtXxB+wj+#zC~0ZUn?tykpYA z<=^-k^t%`z(4?73!*NF+g`>squDX^hw(Sb`szYIR>zvi~*V>Gl^?L13!>h_5QAZW) zeG}Yo0&=g>g~*gbf8PL@CDz7T<4cuSqOOW%6>}6cTHyrC&+z?S9yAC>OjW4H3t}SQ z-5LwCvmAQoJF~0Pfx~Cmo3v=(oOrI-b`4+r_P5#H&H2O^zsV(6UJ)A$ zDVqe5%5)^GB@SbIRTBf+gq;O%#@|zt-45XmEQoaV;0>5sT2;)91=B{X4(> zj|=(ZXFQ$$WC+XCNL^GP^jJQg$#T8d?SIpgX_eZ4wF$#|!?0G6NXDeP8xM0c3|a|l z1#$=b_6;FOHCsBV=^uuK(Dgh6L2;dw>6;#dw+5f*Xr41^EsIxOffQr6i0tJ~%Vbo^ zGxl6`0UaD}5RfBllS)f5(qycB0qP);>gp{<)yWU1T+0a1RPR|tC^aATQ=>rCg7_)L zlcJhp>;&GY3I-PBJ1Sn)86uPQhh4D$3Ws@g1;9 z`E!j{b?ckeI({~~-egQGP!#GqYVtXPRa5_p+5|uWXC+X9icl+R&Ov4H8Wr`ZFia>d z2u++e)U)9QF@h@u{?f!gRGKBO)TkR}Rc1oL?=5E_=}A)nGFmVUn%0_ar9B}`l-3^$63n|@-s-Cw_qqn`Ry zt~vfT9CgN--20WU$1z!gDg_lPSfv)R(|V9-lh%~sI%Yr66{R90_debyuZF-XN@*W` z0*`;od%627Ud4mo_;Y7N8rfM36M`)M3j z6Ld|qNN1+O2%R*czh@7obdG55q*6z)#%v%(sbDbViDCBmTe6(@X<`SO5@fdRPP#q$cufNt`V}RJ^KA=thy$%OZ)&I@#ysnrTgmS1zs+C5tf>ur= z)N9y?%oRorLI6IBV&7}IZ~3TC(9(LfQXd6t(%Rml+Cc236opnE_u`fI#unMuN$&Kr zH?sGdU0nPAw=uo9OE&0+J~e81q2>?3j>k_tj}56yO&nG6;1d}Ns+4dH-j+- zV-x(`3`alybnbojTR8SKifUC?xr zC6jEp&x4t{^C{%B(|q((U*eHZK8yZ_Ew~xWQh$jg6^!#FVq;E+3)&BL7%Qcote`1X zCKVOMtnQg5YdAJ!>|kxBf?1pAY(q1mjsI4vM1{i9D@j{;^|afQOtt37^H9*3R_qvk zm~siWF+J0g7AzDx1F;4nAg$MvlEHS(;#t z#pfRNz6`T1%u)(_>Gjvh%g5UPz#0Y->96(=*I(mTLg_~>3W;TM;~`j+&@wId?AU=6 z;UY!Gnh7a8Wqf^U#X0(YlqhjXgy~zINM@BpmAvaoHD$+dFC!(v7hdVkV~t=m_-IS+ zxbD{%lSE*wkY9V%U|||}bX^fSkd=RKS#5`qRS&%S!YzQe3YFK!?xY36M<^Ju4&(b6 z=^;48g~!_A7??gXVd&4#b8|b*kKg!Ce)h^&v)LH-?p?qcFdL@vb5q>uZ(hpXp7$KC zO%w9=6xL3G4;c+sES7`>gHgrhIf7?0HT>#3-zUi}CjKB>58c4cj@ZalTe$eQmvix@ zyUFrmHY5!pdNs{o-?`T34Z};ViJ_MM@I$tys@!q~7|H+t4#v#kgx(|Kj zz@w-Y2IOQCc-pwoI23xQVLw%+$vmlMBvljQ)z`f6tCV1)kWN~YaPuGWJ?g(q|!Q6Yq3Z5MAGQ@(4!}|=3;wnxBJfx(q! z#cTCS87(1-DP+ipQ8C~Q>0-i9KX@*k=}jDa{|C^WYLO%sdwzX{0itU<0EyZ=ogj9{au(R-@c2quq7k%`lM<6DZ+kHk9r>F$}3*R zHy1;Ph>rOYOZ050NfD98_b-~Rz_$g&A|&7~RxBi7T9&N4#8K^Ovaf%KFFodTHZSfX zo4=0CIn2xy+4M9sr`?D9zV{rs-5t0TMdvor*90%Yi#s7ANPtn#lv=i(e?BZOAnNb} zxhju+^h3y&_Tk%;eCi9|gjNz$c4O9q>WIV1`fL5QDiTqngBU4nLt_A$4|9YPbQNu^ z%^KF<0I?fIaPds)Xi*yKMh{sZoAv03qt$oN?Z;wY$ko^Ot7by9tW+pSZNgACE51Yv zoDa#=W>br$Ipu-xd@n!!_K&&tqaR_)r3~^M6;Fz=xcyotrY8B-=fB8@UjG{I{m=(- z>p!{&d6pw4@Se5dzaEIK4vS+0&sjVT>E9uHn#Ki_#}F)FI4IM9Oq(jrUR|tQ9L3QNkG^ z^@gMt2B-1;Nosf5X z%q(UcdCgA#^^7x^y6BhK#l0+KUDB2zo1G>*WGi=i#ozLfSG|@iZHvLw25iy+mm}hX zOI4C!#AEtPOm>%8yx=D!UdW6kGs=DLdkQU4re-&C?uWiW&r$Ka8yID9oe@878x~QqJip!xdEw1T#?soc< z_|fH8A(NJ+UYDdjfvCk`qX?upPpV#0BDx-EVc}YNDek2-Ep1h5!!T`VqN_o|&|Po* zXFcp~Kl{+gI1ZUWOzpvd}`raC0p-o|HQUBGZqHUIu5 z3_o4Jga=YSNtGffA-ZT{(`LF^ptbnLWmxWxqU+`EB%>(540RB31?25Evqv38rXi;o zpEKn>doH*TiJ?Tr%s>@zIlKPj7bLl&xkK=XHPNQ5LXK2dlh=*EBD&g!ujBviO0$Xj z-;DAj3wNXLD_snYg!{g-RQkyQo$!4X#wS3~K&tTVQKh2DD>{(bu zfn;hDHclgVxdRV)-}{j}-jQudf}Pt)rj|Sh%LYnc|2J@z~mm| zJcDci1vsGfGIM3eGaZwJC6&)iSt3X*0uq$VKYA|TeeD}Ke&;?8zwAoReeSd1+U>}o zpcI*!exbR5>qqynxlp<5m4HE|zcPSxE(oDF&agaVc=S8}k#pbh8uIV|kQvjW`G%d1q2n8G6*>r#VkTQf=4{5=q>Uu`<1c$Rh zM9Xlx#;@Tux1VOPaPt?b1stN47@T0)YZGCq*XPtzP9ZVTo;a>#BesUS4_~hv18B2a z9iX+99jBb4400F10~3+`S`h1S<1voKTv7#V>3>0Eqk-y$kS#2d4l+LQx>xZ>_j@>U z>sygcx3GlCQK6|SKczGcSJtg;53c54OT+L#zH%6MJd961t~r``4Nb$`95OM1NmJTZ zV9y>3YtZ5MsW;;?X2~|hg;v;j+^xy2Ax$k}VA2ZL{OkhkAQsXwkVX{}gq;`vhKVF8 z4=`hFHKvo5O#SxPKGiwBLlcQc;V3*`&DJtO+DnD9 z`r^Mw4&c>H%GWo&|9|(CVYA(}^lpMv=y&_HykXZ@zskaS-=O1QKt^r?5w-wmZd#v;}R>QV8G6rfgt-r^DOLr#V$i73JV z5mzQ=8={*~BY3mY&NPu89a1{W3PLv-X>CVnOry9_v*0U-9;2Fo2AitUoeVYRbqdYM z^r`-SH62hvO)49e@ORD#PHedDKt9OHea>kQxL3%!C|P$VP8h~D_HyIet4g6sjbRWc zBoZlDJ>SO|OKza2kmU|B7BTTjHlu-QxK@a;JGhCxKS>r;k-~DbT?{zAdSy*bf 
zafv0%GfbATG0$24>esMd@jl16jH7S5m7@-s126pS=fC0V-AiQ7M>HyKx+T(AD@$~E zny{M7{(&Mg zC>>pjB-5r;S6BSTI`otkqF^m83xjvLa^#7(;>!Lq?V~ob3xm7q&9JvVnaou5j}nou zt!o|k33SYl+E~+-)H^1~0g3}UbF=Jd&2sW%p2DWP+>?;kBVzt0F@>wM;r3+F_*2_+!dT3(Cflg*Us~>$NV`T{p{D- zeCSauEjiLu5N|;f)W->f#_ttH;6+A7(G+OZG&t|#)i5~Ihr3V%{~?uq^x2mKKvLJkV;Xf2tuk#x;NwUPkxSzF1V2UzwiZgCxR6)QlnYd zoQfNPS z>!Z6-h(#rg^>p6hS|Novr!S={C=-1aYD%md%>f7m`Z$F^MYX@|DJ zRBwSFe(YRYE&=E98k)sF`?JThuy+UANjUdoU%{lEc;5MP19R$veX9qnI$pOiazM(8 zlO9+tcu&_fEjoawef@O=zGp3Vd*28A5Y4=ne2ploMkkF>F%ltH&Ep-wnvclm)Daj$L*d_cfzM z&V<8Shmhm4WvuD&g|GaGQy%&_F4?vhS{?Q-F4LN@;U1U>PD#Z(5(5qwXgP6WZM-eD zF~MlAg`ov17#AF{LZY!nqrkqrZZf1r7*iCA)c1dliQEn~!7Rf2Sd9CS7+wdns8hmL zSamBzJ~Xo%WaazRy!$e$KdBpdj2nj(QyU{-45B7FO*yix&%N$^M_NfRDe&G0;13hm z>Y7!O|A}ovsj90S#EMk(x*anfbpwp`c$V>al2(V0fA*`~={}F;9Uu4?dd%^w-(JD) zJ-cbBJ!&H4(J5T5d6q!CREf_)oLYjS`}%O{wS4LTlR#|0Lm%PkFM9=kvP05IgCR+D zL(^J6D=YQ2>#u{V_4nH_h{$@wuxeqv!f|0LRAOrDVK^hStzmxqHbi~H>s-g6CHteC z7!(fKj7)d%2CTKT1lof>y-R-w&R32j&Xeufh4eD4_oc6Tk-jQJ-)|%VQTLBXtQ0;Y zA|tsA<>9jC5x>-7hcW2u80`l{v_2vt-j}>9^$zhKQHN=Rq_Dhqfn(d7x#pu^;2TeV z5_8+GptWx|OUpgf3NqP(P1A7O6M67^|A`Zy@wba2g8HaF~{dR{&G;|;Q@#7tyD%26^sQj zM+BR+pgqM24}B2kE_dS2FL@3p{`r&WJ5Lf3btxs9!_Sh8{Sz72h&GUKArXsCPH^lQ zkKrC~dIMV@{b&X=mUJRo+K*YX6&G2@Iwh5Ou+$J?A14Y%%Kbx(A@>8^z+taj;&ab? z5%)XwUbsGVX4?#6h0yQkI3`;iw7Z1QdKD#7n_P9X%D#2Oof!dLb$N~~&C|Z} zYW95Zdvun2A69)YylBC7#t83{u7YILR4mAP*D4j!^w=LomjH@eURrB^4=J| z>d;WN>24Ber*Zr#9_A*PKIUd*swEv@LQ;19>SD;Ey9XYl%C5^Vr(+UqoKTdGNZya4 zZaSvC(EVSN)%6du;(N&oUprneT^(3j1~%x(ep?8NrJ=u!$B&ng7%72I>?MOKVYxfc zbe40-HP`UvC;u5c{^>(Z^)eRr?!zVEQ_#st*f_@#Pdt;;KYlKAfBeVnbc#5~ zu(Y}AOJ8DY;PPq!$V#CEu{veY#gl6 zcd)E&zW*PWaq45A&IkVe+az-v=x2f^!O}AkgLf`y9IPqrK&yn!XwQp9QF6_Kz}!RP zprr=ma*U1U8KxkE1-g<-UQNHo?H4)=(~faqcK4<}=vDuyw;6w?FZCtfYX&dxwY}`V9Je z*@NpRgi*yvOuUXZv=V8MsyRubWRkNmSVlW@oc)2XaJ$pa{b0hoZxC>ocMnh>6jR+1Qa~TsTUwq4JIp!9}ap=A8$?}9Hu}Peg4FM!7 z>%nZlrwPN)fj#Tja`g?Vp#v+zLJGRf#!a|(iWW<%_W4e zS!2l)%f{Q>mIa^3DnUuT@Vg)X6b5-EI?OzKF24eCF4%)=NCqv^RmWBo!y<|`q&beM zsK%mBimuTkX0^>zX;v__X1)Q!Of24POp0{4|w@YnV*>Av+q3z zdds9fIAdh4%QK$)c+h2xTHgEa&*CL1h0P=Tr^>2F-VJj+tn}+Z8;k!z$jgC#h3v=k zrKaj^7?2hT=2Ei`uG2}P<*`>GkL8t1Oep07AwBA5o7A?(K`nI~#2OMOP03O-Cm@Oh zjVXIvAE`rH(d{NiFF$U-*|-w(`aCpEuXpP` zKmO>a`2*YGIFqtvex9$s`+ZpNOT9p`WoqpnDqyU|;}{H-ZfeN#b=?xu znWUTLNJ{{Vs3CKC^!S(ZJ*BRhE1SB2fSVjT&M?>Qb6Bs(^xo^(D2jE8iS)|yenl6o z(m6_Gm0i3#LyB~AgQ@ZUC2A`aYl@~RCNKx3Uzx=tI;zN#gW2T@i(DC=F^qyPqbJYm zNPrT;?@GOe6}tUCr`+)lbP~lBA+5%g;6SxD7<392#Ie-t0c|eZ zwwwDr{7hc@`ZtrcCXsX!NZ7n-lK%2CPkP+x-0BubW2_JEW=2Z-aDxWjz!0^3zPzt$ z6UAWEGr64etM|Q+#ogC(m#00Q<>`cWyM=SnreWB9YyBuY(AS=|#3-ya?(RqH*K-Xe zL*tB%5555=TG-hcFwnLs-Q7E&2vcjga_9aEgwVp%&@OCT=`rsViS=yxR z_{~Lhd>(azf&036%{JOC8^_N+#?s5kky+(13}q|qcPg;L2BggfVg!5|YD`v_VM}AW zAW}6^!^m&bvbN?i-{@fsMoo0BFjy6m#9|VQ?>n|i!ZF+T@P*S)XWKu%gSl>(g`GR- zYetfS?@Z!nHZgbFBRK1mpJ#gOX10Ixo3u>}yeGBFt&Ta0|1OT`~qT8e_0|#?-Y}aN(Q(o}1^w((Fc(xfV(u+@FNa~ZM)d4o(G+DJpcaAzo(Oc^Le9C@pbBg4y`!dpI_VfwN*BNT5I0C zBU4Lzb~Aexm)W#=3sZK2!Jvn+1(&_L_Ug54%#0Cw{VusAT=3f~IqgrM!rTA(W2Cbi z=;fiEN)jopBUQl~6NS-Qik7Q_xyskjI00#kWoD3ZM{jt}QOEPVTl^u1?A*tsH%+_H zVgjX=edt(Q)LH;t@wI`H5?q0Tt*Kyb8a{&sXEJhfNDV$s7^p|pAew+rK$A-Ev7ho< zSD0hzuuQP_e9D;ns0Xw!gy+zEAMJUAKBSjrJnGR8Bo#%xY0wr&>2UgAxQj_e87vKy zqgKHKnj&aw; zK9)3srcwpoHTjlRhEiET`4Uy}CVtxO1LnW^J-+?LFLV0eyqex@N~@EgQ8X^KhEmbr zd*Ai{>1$0j{#qM`AFf}|wIPq?K`R>RLk4WZ%%Ml1VrV5Pd#}DaSpF3@WyK_-=I&5O z{z@th!8^Nfx4;Npk21@(fGA}*)D^D5ZA19d(W4BoTnvdc*eWJ$G=dfFhr=&-h6 zJfA=h^ddEAPO{=4VlBn4>KJ6G?toCBl<{Y&P+Tcd+rkJk@Uvn_kwAtc6G0~va^o26 zUE=uGX0G|rC;94Q{)Cwyf18Qxu48fcJQ?6xz{Y9(;ahmh2R_dBeI7DM04Iil&w2i{ 
zpGLO0fanD8|IojZB<%>M_!54tEGG9GYjcn|xUABMjcZW$C!qfCJ2j9I;CP9W~v{DDAqWD_&!?)G7-s|!?HsNdr5^Kp_hFC+Vm2mgF-5zV|q|-*a z>6rJ`8*n!pIX#EYdn&3|NevQ=F*ui#y9{fm`21JD$0-l{Q{MBj&tNB~8HgwIJ(M0v z$KeJ8CZxv)-+eZDw#;O!jd~X*T|#MyP+w(Qr8PFDY9f#uk3;D1SjNuJ^O1L*!{eX( zQrwo!*r_Bmuc{^e8_{MHxQ=-p=xa?i{+b#F5&6OT^;`>9t3$P3K=ajjrVcxjzEiA8 z*|+Oj48C+TF;XNT>ZreTZ4nec+)4~HGAV36jH#_hk^w@B6=6f-*>~ajhzF-0G-rAH zcGTsyyoHzrA)^z9Mhz}2`}W8I?`e92T=TK$Qaxp~L^JAlMJkk6wQRUTL#1FHDlve{ z8k{Ht>qsUoF6%NqFdVt7&v#z@YA$%m%b4DA6?)wsx;`UIEZUx+mkTF7;?d0S+=c69 zB+?>to>T984>rxVQBC;5SAW2@i;(A)^TVk1?0?l$yVkG&?>BkSwf>!s|GB*eez@^r zLS?Ke8+j3_1qEqD!lc=>7R53pWFK=lYR)sm_LNc~wqgV|K@d1Xvxxgda2p~bxQa81 zv6PNt6;E7OuoNAZg8B*(G!>nq$ZwDar9q<-I)<=C^t-TUjINR~e_?p9(3)970%^+B z%oLaHx{7Unp3dQOVZwrdH6iI*6lp9}g$MNL%zlYWdCt#A4#MtN+uUI2kW9a7@ z12J5^caaC4@eE$`&i6xS8s{y}=U5X`i+vf4H8ia(!ox&rjggjKIF(heht$)TF5PT_ zb}M0!D@%DsmU^7k;P_|E&{@%>a_I`EL=%*f((Oo9X|;q-Dja*{Rwl*Mj=2!71lwHA zf;O&=Mrhk;mh?)A;*7~{sQ*E84tM}NNNqA?P0%Iexg#+`zt`iGyWEk&d zF)^hF2@>g=&&{$dDRb^B(UB^6@u>AIYlbEhT)CsiX@B|x&UohEvU~3YVmH(6I;;^o ztroVWn0`*)-OKym{Whjjk0r%>#afGZz9H?BhRtmRKI7&UaR{6dJQjLACM4%0fA>o6 z{oqG1amN#Jv#pS=flEwa?f)P(cvuLF3J)>z}Ob;9ud^-H>XUq6!CTzFPXI9=OY zwvbs%yVYiJ-5%(6qp+{Onra*1I1*kIpcSD;s)|nxho5vix)`#7$0P}p9Uy6Ncw9yqdU%R7F$aUw3otXy1)wzYSt|ENDFuP054_ig za8QelT^&M(c6+3`kY+-^+XrjHG$iuV6~BVvt_Sawqi?FJA2U%x|+q~xGocQ@qbGy%cobIOSGE2roc`em@5^M2?9m^xn{RFpo zPwkblK_4R-8->%8C@q_i zO3r6L{$4)ufpd8G+un@L2AG)Ytv-79*9HS7PT=t+0NwP3IQeZ^@D9H z3JiKY<&4uwOcCVjV@)uw)jF6qRS)8$Htfra%sx=C26CU#_X^VH_3wB$cfQwyIRDqb zqcbzha@UcE#8<4O|5J3R2#8JTDx1$ho-@aaM($=XDRoz z+ieEwXA)Jx+B-z%RFnQ-w@tco9C)y-lD<`KlJlQf5q_)530=x_t5HuiuEBK7?Kt%o$o1@B>=L zAP#&UoZgHMdBhY}ytRF@^3g9u%Lc&)_a;r~wc31P$2MMf(fNF;yTG*@H?Zt1#n^?pewr-f=kM495RaEL)GIIWN z_S)fSSVNKP;jzYH$jN$(v`m`|e|-g~JnRX)@thBnZrnl!mwj(g&@R{T0 z$86!RUi3V&!GM+xLS$>x2$0nBn4+etd7XpET|9^he`j1qr;~90KfQ;vW$60rfL=d~HU55I$pLmCf4F{a*RU|_f;kh3MoGfN zp@-rwzlx5r%x}A#xucH^cR(w|y(-h1hd`N{TJZ8kW5Q0(rrX|zuJ3_KNLvZ&GqzuG zIdpsY3CsNDS1=`(M5DKg37UuDreACCuTmNvUhl4Ibc>eJU4D()FQGotUF|)!vJK&m!EYO8}4~`?)coZ;3h}1G_cso zDUz0e&q<8nG~*xM{&IFK^|dTs$_AC6^!G6t3H|6(dxq7h7 zUcDjTxu(N082tQR4ms>F?(vAz$(MTAsTMvSiYD^PjS-x4V8ibYg~Wo(NlX&CJwA-) zbsXVtbkd{s&#e^|Ny}y-G$PjL_zN%Ky-zrU6K4*gn>#-Ium8&9KKHNWM)66oO zL`jR~8hnr|ZoA5es1{Wc6GP`j!D(ezy27qjXfz6|<{@>-ejkj=NJtyIwkoIzO<|rZ zh{0)s2w1CF3%;M>1p3bM?*IEC-tg`Zkhdo2%xz{c5Rz7gx&ep}#XA~&{=JC}Vsot) zW)O9Up~}Wtt2`PFfR&Ymffx2o7?xD%1KtFn%R4FUyhW=xW36Y8NFA=wk@8eZbkd+M z-k+q+j$PN`)YD4aT(ff*OWj3`Ovg!GPU?$^nUo3BS_ekz&O2)^nPr9DZ5TXR-RVq* z+>MINHjgJw+bk|Da{7Z$AtA>~Yv{Xb*`$wtueIz)YumHxMhWV*k%<(7Wx*KFV35&j zb?D~sl9#-i_kZq7Fgc5~rdS$yOlt6jqn20(-5w@Utbp&%^Uk-vi_~Pa(>AI(VoVvF zT7))A>GD|Y6$KU7gqlQ@93KK8cV}GsZ(ra$U;74U{o}jnwUt&U3AVgGSk{`-@FYy{ zd@WsG4*JAcpDx^B4TFe$S5=?6er;Fp6udScTCbsKP!VR1xCwi&zLt)9mag5#M(1%h z6rgq2A00FL_d+Ag#HJRXdBiB(JsFZC*p9W7r(6W`4XwVdPw&f zi@NZ4rRA!J37hci5BwwFfBU;=_l0Yg_wnH8zlh_W^(-&)>fI`5brb5u_sIdDOZP z+>4NcIvLl3G##bN&uT)}f?)A`de}>@=2P$cXP*D6zr)YXV5iyvRESMRvp(@ruN5_* zaV^Mtv#|C`K;*k?{aSMX@B6*=OS_+$Gz|Hehs4J)juTp2kD#BXSeww_w+FJk3QUqQ zAuhG9xg7l0zRr0?j78fWw*288SWE;p7AuC$oe8e_(N8gXv{W^5FeB$pVGYtU z2z@$P#?9MvOrQT7zWcvl$QPdQFg9HAJIr7Zic6bdt)bINNUaTH>za#BeTy3>-W!c^ z)9Z6L*7!c-g5cx;rWZG|e!gA?MggTqaZyNFv#O7@pUSdV;(??k724oczj4!7B`i3go{0$(}j?fBih@m@=N>H4EpTAjv8L1i0T3weVw+^w)yEdJaN ztr)4VFrk?UtChQ22z)Y)E=*T-<1aq@kPm$1A_)>wXjLQqC+sDzyM*h*p5>h9zT^#@ zcKXw~bjJdd8#j{mbMOwMkoJ|yJY(a&d2ZHIj>;Xg*?^SLh?aG;%1XsR)AGKaS68Ee zc?2ZEtHt}^j3l*O7!_CM1$ptM^Vj$*qvesPwOkP;(za3w)Y*wCzJA`1_^a2xfgKCW z`U zR8T}lq3P+IQ&oGf_5S`?Ywx{wojRvaH=y@EPnQqcHr;iq_TFo)_kF+9W1jjyc>Y> 
zc~HeT3srPLIsCy7XX|)`pg7~07vY!x{cQ$S#g1#QL)0e+0h8GpE{BhA(@FBO`gVe5 zp_813-S3NeF)bq5EU!M?W%XjV&Twe9uRu)02ImFm9Y!6hCQ-D~eH8<0BZoJ=hc!g| zIR~aEpd)&tF-KH}L$A4s@4xwNxJ8c_A&7~Btt#M(9#S}rvB~9xj^Fw6?M_$eq;9PR zpRDnuU5mI{|8HHMiCZ41B)PDl*q~SqSOptQf(}BO-229XMiX)vlH+6DXh^SzuS3l+ z6TJ7@F4;o}T=Y+G=Wq)TKJt!y@YSzDZ@L*OB_x<#Ov{OGAp1N$I*I&px|}A_YCB*} zR;S(*)T2FN?&L>umuLSGR~>p7>mT)Gp8D?d8HIIN6LkQM%`#+Ws**J%8TCXb+KH@F zXzFFH3;fzoKtL?LP%-Zue#cJ6+jlT;z=Xw^wk{CFB=K)85T8a&7qTE^vYIGMb#Jd> zdv>i-^VPPr^|wz`cHgm@tUc=|5lU_Jqh1tm;*?&SeaS>RTJZ4_8qe4Vb}o$=xdG?> z>*qM>)ZgJF7ySpb2OUCL8OjjwHV`CEOPzuF@lH||bX%7eJ;j3U({)))v= z#M?S;m?Et+O^v3h=C`atW+esmY3;P75(`eE4#d;}Ym@MnE=OV-tz6dlR>31y7*-yY z8TMPh1wVF(7BE%{wb)=HY9j$P6~UCK@gxkSJ6m6?&x5SB9avq>&;*4i4xowwELG(Z z2@Hq3cjsv8e9fyEn1BXH2qq3bQq>+b zBwc1`4)@y1T%dVwYs2Pp#}dV;M+Qk5X-an=y!k*S)wXB?-u-Y!JE4}rElT$-iT1Cnji;+A)M<-?$6Wmmh)`>(SFI_`OaY%{} z0X2$u0t3sqXIKnEPv-d9#b1Y?{1hF=$fLMX28O;wu?#C8?_d38GG`oVeEL`7w6A`s ztthXPy*-rv-<{gjIVZ_eW8z4y3#>LUD28qcwjgNIDyoENQ)z|^D$##%}XcYfY; zc*MKj!SQFGNjPAhVlD-HN$1;i9NN2C4AMEM^ANDcEhj(Eo5TfBYhWf>lot> zTgUbgIOw>0!F~rs8CFe;N|jhsyl0}>#wIkUk4;JaT^;vYA<`>d)(|RuSz@XR87t#) zm1t6v1@T0Db$VV*`n5K~^u!Y{ir6ev6%_^Fy8KEWd-`)Y^VP3oY-R~P$3i*6^&)bm z#-ao~C4murzr-Is;$$}b>@x1S?FJrn*cK#|ltHn%c0rd1WiBh8r2El3|3A;*QDU*4 z1}aKOh+9BR`gM(@i7rbQ&(W%;sUiyhF@o2m7!waL`svjf>^~Plsj+X+*h8> z9S^uVX_No}AOJ~3K~&wJ!di^fbhpra4U=*y>c1ao5!e_^s8kX)U4C&$AIleBb1v)V z=egH&p2y;#$IPHls6zLgSEitZtY(k3?{yp2^lp#J2a&I;>a+G;V^*C$1a0BqIzIu4 z^rZPMhp^+SA2TQhY`gwiHt0PG1PnIHf2I}knQwd0fJwnBb)ji&fgE%&`yY7};i{_} zXuUVu0ayGK*|iwqYvb^tN}7q~o9(hRWZARbc>1g)x7X9%qpH&uzD}btNlOulTEfOl zdb(J5c^^|-28PB9F`Jr6CXp760?az}N28cB=2(OXy*i;1k619qfwk;s#(eK1|IFQv zIR`(OCHNB4EBMHVKgofI9K@p@cp|&XlD<=%b&VBe2wj0+SFYW?HLcB{w4R#61S>tC z0N01rG->^wOuPQy5S4W(DT(G#PrpO(QFLzNy0Vhm36+Qjg^7?MiGo{coX~2qYZ|-O z;Y4*29U@Mlg)n%)iQM-!JgF*79kj|Ac^@JkP`BAWSV4jJ8AgHjgFrpar`QDYk;N1W9 zb}qR1%gprWn3>y;rA343MOS(8k=juYA;6%85PE%na^2M&J~K;M8g}j+QQ01qbqN8` zCK4vf0GB8s6V;s7bfnPyeK9s68>yvK z1sC10q7z{J?p0ON!yzeJyk1kyQloBY_QqOtwJmHd`&vvOS05d~f_jV?42m&uheI}V z_q!eyMbJLd4%1@a&1t!J`>7-Wdy0l{27<LtgX)1LS+o_NY{;j0Q~1z!b>lSa2x zhg!BPxi&zKt1Tor&>&DjvEcajJKoCmKfjjWecfwW=y?XSz4Se@Jdf@IZf=#x|J!HO zuN8b3`ReV~FaTfOcZpXoO`54;mU=1@`SB_+bKpVTjG-@%rJHYt#YJS}Tz&(yR9JHB zkO=9C#wirFe|n@Rl}c>NNePeH6CPmjZT`8j!G!ocUoG)u2lC7gp5P}E1 zEvU+h8Ed)n!i%{3b8cp_uMGMFsFWu?`SCpD;ivJ=GH2H+jZ!_hIuPhj7tFUxUqu zMjGU}Mu0-=z0<7e$>~DgT}noYq0t9q-DhW5RExSB8i}+6hb}K)Tce9C*<4c5sI{)6 zL3iTc_o2;+2tje;kRY9XtkLkPS-!^Cy2s?tukIznkOEmDV8u~|B<4m~SX|`yp7d0D zCK({q5$w9s38}Ywhg0ni60Egg0;6HcU@&0&&Lv)W&R_G<&wdU&*vQ6B2eLHsxWW)B z#fm@`2*G1DTELd&I65vORO1TkJWk<&gJyZ{EB_WPhs^c{v01Wlud5r)KuGtS&U)4y z!Ai}7y=lzNpZ^?Rzu?pS={w%d()vFAb^R!|*O)cZyic}f*9Lg+YZh+nntqQcNh0zi z_URaQGq;dV@LJAy6b#JIqrDka1AQN0>sBOaT4eRu$0XP;I~EiFwBE!`N=OU=EP@uw z9q#{NwwGi2gC5=)=KGF??|++O*ADO{#-_z=OlPHaCUO&n%WkJMyDJM-m!l1n@4Xw! z;i@5RG8sO%wmT)#ePhv<0+Dk;bxJBl&1Vi6;>^}WF}Ws+fGAcJHyU%W7{2+gw=owC zizvYc<_!GBAO9iW`QbI3b?#ev*y+#Xb?^B&H|z|QB9y)&gc4(7*cJGUT~Kqrbt0WK zbG&LQztC_*L)|DflRe4AgA>#v(FtU!>+Z2<)3IBtvAC7%F_{DyvB`2Y6&I$ZoRR1b zCOMHsuqtF}2+5I1bD#O72db~5))b+ZVpM+!_H=yB^YWn4q}2=}OnvG$0hd*KGe|{4S5&2k_X_?CB5Rzj~023Jd5|3ruu;R6Eeh;Ud z{!HF~{%7%S20Jsua2z@O)>(pyL7FNOpN(K*gr6EDfK!VO*7I$zyyD_-aptGK$PJqg zV@wY%E3lFN)C>zUEypq@p?3FsoyKnLOo3v^9ukXV5t1i=4-I{6D6o~pOb7y%4apO~ zrDWBJK)rryk-9M^-sjqnRH93cV4{9O1uucXgsm?rjkwM;*^_2r;tX1TuB@$Ds0nwf z5WJ7md<}>O%F&P~Kj{>TWL>^`%R9-h+0A|?Q`?0&BnUo+;@QBM5gtP@1K#qUPjK%? 
zJe3c9{_FJC9n3g5hT{tBlHF+m-gqjfs05r1Y3MciVcH^)TqIw+eKGNx{skyq!^cQ4Xf@*i|+vplr{S@F=^UBvgT5d*fbpqc?)(fZJ+ z!YCkBns|mPa;)oSL0dL{Zd<9-kNag0Xvca~HRZE~^hBlqNbdA6sCclRp&v626;+7) zfd)^|`0J3({aZeNHk_cQOG|@-L?JiH1v5>-C)Z7FgrEV8B8k5qX?UU8ck&)na>it% zq^--CNJ6N^;-W4#D=yk*)&*|v&bFMg*e!ZxN}_b<$&vheNH-5>85^>tA6fw&!;E+6 zQpHRFmd03hEG#WVzGD)dH~WohV_O<0x*9p(!1krUdH;F=kNNFq@w&I4$Ic)yKg&>* z$|$ZVP%%;bTO&d#6G2lTjPVwWMZ{rbhN0WYjT<-eGiSM34942%9urz!i#9#h3YxS= zbD|l#)OC2usfx8vz7^Y=WQ0gvse1`4CX&yDB>t6-2)TK_llh0LP7Gp}4?U3f*PV=! zYygm-U9yM!!(K~bwTrw-66%XFgy8X^m`uyiluI359|A(CU%GWS7ef(gCU6qHzQWQl*dO}16eEr;|oD)d{$x!-8 zDU^zZZQFU_^Pa=;cRi9B>mqt;X@(t5=8?2(E6xnsdlV!@UkMe)TX$k^yq35A-OG8- zSuZEt@kn&j029)>4cb8hWs;Y?Pp5EOy!N=3-{XcsMD{fdYib{~69%Lx##q!D)*W^v zBV*_rW!LrBfR8+Ql{AfO96JINyKlRp25kh;sA5fk1J<+YsH0IzjDf23Y`^g)dd5MO zZCcHtBAty|_Q;2NjXEQlR_#wKX;)S}t9F})oDLzm5Ooa*L$d*Os8^CvezJq1nc7ak z-pvPT#%JlDG!OGp6fM?*CDe2_>|WYB!}>8Yf?pwHhBg*;U^|aLwzboL#nDGXkb{5Vu)AJEdPOy zw4%24tkJ8Qx>bLDlOmXiWXx8_h$Y zG9wN<{#bsoa07m}WU;@<()WUa8Uu*e^r{ZT3J5eS%H+miYL&NvbLkqn>3V_<&&UBbeHe@Q&=N3 z(-x=_Wo&lr_}OQ_3RmAiC`WjrNB%RP@DL7LUqD$Bf?>GmDNBQQ>-o{O3!L-D5Af*U z`zy|U=f86O&MG}gV_=Aw5yq6Lrb4(bWNQ>cLnG>LQ(2|ak#zpkbQH>TxJLJ&kYDS+ zyX{1OD>v-j8?8y4YO##A-pGqje;OBm^5ZzyL&wo)UOFjgt@}^C3MJ8Ug3?fj@{lCS zERq{>XQGgyMD$7NaKRNty{D*@TR!zEKKGPo@YUaW7FWFPH59k(K&x^0&t_->k~_#` zdqqZx(tOe$@?go#a!3Q!Xas&lZ`%#*xa8aX@~am!7<-nMhIo@V`r4!`H-BGr;_A~R zvNx#SBzGw}BS{l@H!(!5(F*N8ugmW=%bIyzAtAdf(P*7_OyMy1JdR_3=QOU^WVzw6 z{dv->UkP_OG&Wp`P9ub-FfYrJfU>Xz7hdvTJmp!>=UHd`CD+`rmAUnsK>Db0SZh(Q zO;T+L2_-af4qaR7#zwQA)SK8$iQuiJGJ-mXH${}PI~zd{BONQ|)sWhnKGy(2no-hp z877eQB?c1=Mgk^!XHY~P5bCNk-O3&Sj*Q;)ObX)f?*s+Q~}iX z`PjdImIpojNxc7KpJp(>5s#tNlnRcE!uBd8Qx<7xkcMa(uIaluD^PiGo=^%0tTUW@ z&i}_;U+7r}i7MOPqm0I!Fon$PFIg+cpH*&Mpcsz$z*%Q;(nB7`L67(?miFt%2CJbo z(a`*MLtohU`V}J#dvzN^2;T-y*mu2|dU$0i=-ta46cG=V=jzXXmUTDX%wp-;?;#JR zchr$+TG-L5Ef)PorQ_usXa$gU>M8*Y0Z+hu_e#F>+^4gp?6Ei&2DYNtn?Zw52UDK> zDa*L|w@aq4wwquR$dpY%*-cQK>U1r3GhJJ4u&vWui`peg@(GVl@?o_pwnR*(cTI`2 z+Ro#e+{w(%MGa^LRt-Cff{mv>p1Z&J1?-&dF>ea0vFE!#xQ3^n@z>05K8%GbF!mvR z(SYd{^m-7qq8Ehw9D7H8`*9EDq4&KDedkepY(P^EK=5gcu7#JX?eoHP`We~uw|k-G zRuuKJ3gWPmRDPvDt9$jhEnAiK+3VvZ?S?CV#JA4BfXDvfA2Ae1Z`L&l!X(O!m1O^z z(UIa{QPD&NY>>KoLY_pU^|Mk>`~!khjZ-vgE_N$WHRBQHmpl2;lTPP>>kndd%K|^x z`g2bBn=?7|_n%5BCO(7GEgo8QNKgss6NW?qA?+D;a~smn?@lTcmjP3j{Osc&;0v#P zEe9?XRDO)V(;ax~CoW(#Yv`HKAPJ=OSgC1B+CEF#i0HfJnUaijJ#rOa;s^Y%qVmZb*6_zA1Q;4z-0Y>>-DSJgi&E6 zA<*f1nbf3)+@UHf*E?O&QUT&)2?t1uy;E|H=0+`w7Kh zJ&Tn?6OA=K8@72yLV5<&M9oEP7EE%;@;EgSNtKdus9?Onzr5pIe(S{hK(G|n5JJG% zWV+C-v5>9vS}7Loo1^2FVYsl!obB_;Gyj^xk2&QPFJriFK+*4E(z-C38OMZ3wQBa* zeNDq{`T9>|%)NdU2m`=H`>yv&rIJi|m^bA?ZI%qqvgxopQHjuV1zUe{H6}!MvQG=I zLG5!9b#3C$bMK|l(i|e7O}Con!|GW=ROVBUWXfbdw_SDuBXF{N6@T5!n^uI2@Q_ck8$?3Z%xTmFqJuU`TwqWIcZ zc+r@oipN)>>00WdMRl^+)NC3YvhHm1I!{A#ru{Q{vvxNPl*xHycHr#&EO7F1shw*q z5}lDOY(Dl*JoW`Iq%17``68PB8Pi-RlRVa2>&D0d3#y5Sn`nVz+JGupM(l5?Fx}h$ zZ7gt8N{d)125i2=;f%*i6g^8ZJIA4Sy)&WAa*M;{pt2RJcXA*k4>r=oo-?$TPnlbm zCeie$;%o2yFb_O%3ny+loQE872h7i|Wca zGSs0hdTFSaEeC!=urJwjlG9;CGFoUCO=+?#>U$z}FQAlzK7OvxuxBvy4pV_m7O7=9 z0)@qqzDX;wskfe-K*qs8X}#QWZ$Z8HmkBqR4C7|Dp6Z73~*^n7?3>{zO&dd5{9t)I>}0ND+$Gzj2RB!!D_)@QkrfaR@mIsOL9%7aiy8?F0 zfLWWsO{O3nGPRu&p`-X{_Oiy=#w3MTOx$%wqmPm=IUrPg?>+BjW@rf>u#Qnx@zS%O zPZ;h@0>W5a_N<{NP#8xw8Z)Z4Gt?#goZ%OvUA*mrFY}n^y_{3dcopyb;@?+p0_rMn}xysO3`O4&#(wdr*(S~aKOciV+ zw~;)+xXtAdz!mg+hPi=WQQj&EdTcvyN zrCm^xkkeW-kPy`v9G0=CLq=iAoRTJOKbz*I;|$dRg+O}0K37%+r;Z{REXu$-td23J zVpNXt0d`dt=Y8Y?PI}@q`Mp2;8-DQfZLB-saMTuz$5oo@f;9%0Qht3XQ;`*OKAb9r z7m5Hy75~?2+fde&p+-QObOd$sa?X2iY-m+iM7ujd%$;X18*weu)28(vVxC0L_Sm_w 
zjW_@A*K*MMIez=er*QHE@6UJ~wLeu^;hbAxkJXMlv`%%qnn)1e4Q427-_cjxhh)v? zoGklWt$t7_ER@4BM;yE#zj?xM;H)LodOQpCbZ6rk1jz`Ur>!pjCx#Q38rL> zpl+6Te)vnA@W|77-}zr-u<;PazQ@o9u!^yQb8+}Ms$yMXogrw2sHYbUeYKGuNz`Ro z75V~JD>Rhgm4`p<1kQNo?-QzkvjyI_XsV{~7tgF(*Oj%S8a109Lnuq;*Uj>O{^1|F z;mXT->MLHtsIR!Wf?9W!8;yhzx-`mtuiL&H+shi>?d8w=647C;*mvpIT52a3k_3S> zKZDNA5r!T!8soQag)LhUW685t^VqHq;y_m;AtFdn#-bc{(#c%mFw#u?OyXiwa5@Z$kr z5iqJ$rN?t*A>eP;d*Rd^O#LzQPttxBmr(Lj8(w*4DOzH z=ALhO6$}?(u%2qhMp{v54`tA0u1%6CEve34E)vc<1eLt8N%Sda2rlq|r$3b|&wC>W z^m=UDwMe;nGlOI9$e0Qd7mIG097^lznnYRIwP%v1-3UEvH>cEm<2_mdVkxPjM|=pK zRA7;8Z^&{0B;!7bCc(MpQ}%=))!`Uo3Bgm2JVoL7<+h!?`=39^2R{BWZoFv`vwkz{ z_d5Xfo-(w97bh0=zL~b0%f? z9c^*7afa^kz9iXwsP`BH!6QLgTDXN5ocUMui$vkFZF0QSiK5x(RmjeCol3{5_a^BT zgwit(OE@>nHBi@>AyUHd&OhcvmC z^c?FBy8{bXT)~WiUDy7C4O_NQhJX{3C=qdiH&C@MFOpj3L2@uLP~80lzsvumV- zgj$0_q!|@HZk}@nzWKp-bDzIDo5cV#MsVXXXFThveC)!DFm@Jigeui9K2)*r@Pf5H zR1G1Fkf8X55tXS}as{Ow@RiGc!B_t4tsJ~@9S^?m2|VtRCv%TGA3)!d_|nNKrw;t9 zE!Jao6Zx!YlPF&MX|Kiv?$DvLG}gUSAIK_*hO`{+Jmf0Nql)Xmw81B|9^lsXJ-@sF z03ZNKL_t&->M-00!KB8aM%~0julOqg-p)Y(s-Up1e zQ5PhVWdYF8awZF11KnW<=Em zX18F^XU8q~I}MWK_ZSpxdCKo%dj;2C_B}RkIFu8gd^%-sLrjwoH8o2*&3K;j+zMTD z5%!ez-PEni#{n(skp$+W5w;u=uKESLhP&8s%-vY59By_dCM(mDJ47cZ&|QAMWNroZ zXE!82dGU-wVC)UXIj*>AD{p!0oB81RpP?!(bMqUp8#hn_5=K}{qfLn1q0nY^WE!nn zhhf!ZIUS_Dg8XThY@1Q0iG6e3S>uG&3CGM41nt^+YBw6UZ6fNkMO*4(_Ffe*t^%L= z{8#v+-+nX|@Xhc25S*jZib{#}7i02AZR+4@O=pX)I)m8lfN7<6Vy#h=Q-7Movl9`l zEeKTrJkAOm*9|!B36G8$JQ`_c#$=FAM`x$4MQwgdvNK(MWFB}%gbIqH&&~yS<*WXI z_kH4gyv#GRZUf`t7~>+^8NA0DgZCQgjmbt&s9mZ;W8H2|f%k^8j0PP=VZnQXDY4Gt z$Gdp{+up{&!CWz*s(ci8r);SD8I(*tQyV^$@)!b&51t-9e*US?^Kb9}FfV)Sn<>|C zgiSNJ46*O*4w)uLk$p_0+voK~5!rcLkHlABK&oe|>fi6X_G{g6vss%DDlDJo;3;bI}7i2{4<97)+!^O5>!Eo0U{Xa686a`t09j!_Tgw3W7)IJCk`E z+bED+A#hpuVnf}{v(oywXVuPR=E3gYXBp7FM?i>9K{a#&4%JSBkmx+xrb&~dJ=$f% z)|}ls7)(=MhLI>*^xiY<&vBI%p7xfvF}(9p^ydeND3w}X`Gybk;m>@7YS4>3)(|V) zIzcwN{3QSWp zRCEMFh~aa!=-HU)=+cPJCLx)QurV)(4mlX_iYO&C(iBLOg;2fT zbDQ-bZPIq_4wR>rnqtTO3dOW~K_qky5R0{tD=^NHs?mxb^Cn10cx zIBYYge()0v_nTqROmL&1STYAHNe!CRS_p~X9}gymrBDa21@#!yV+r{3B|qd1Z+aKs z`1UvG**Wa|Iz~YfqCs?s@mXqo_MP)Y@VcoD=|MEAEp;k;PDsDk8aqfPf~HA8UbcL4 zTj)-TE?KiClOAxLf4dB=(^jl(l8vxx#f&Rh*tv~k?{Y`BZ{Nyw*KehmUB|FW_R&ga zMLcD@=CCv4&mAN+ z9AvJrh8SI%QRC8p%o7Ner#E)|;`|Hw(EolrFM91O;fO7aH_ze35rS{%hWn6&Uoj6x z<=+f1Z8N$ zmNc6z)@#kAc=yUGRR!Dr>22Kf-gnUx!?053`US!JxYUh}0;+uQv_g@8yEP2C?UC$` zTQsfU-#dn5UZB+?@LbE$J%S?lziw=%M1o}(FuPW&t>{u)Pye8vaba2P3H^KDm;1f^ zwfK#FtTT*#$&PWsBcJ%k4Cgj6tfIgcE7%}$nq9;K4gs~b?Z?A=nPo{$?89g>uQEu%0H zmz=f4W~4zONFYD@Oif+TE&?!lJ)|KI)l=utmhdU)(Il>LW6`RfAc1jm__AUsjB6tZ z>cS>VZYKA`PuJH3nYb-A1tp+f2x1WNndgZ#6rrXx*6u`bI!-ywwrmwEv+R8;6$%44 z{rji*+L?dO9qmT!5N@n?aL-r2igl+y34|v2Qd1+{{+J}<)Y>~L#K~{^d1d7Z;xKC1 zS(beIb6??~{`p_{&gEA!7|by{Q!pMYRcgq@Md7&C4o0ygL)2s<_G}-i@0qEei_r0t zmuBBsUbIh}a8IA?Xx>NfzBAA5{yVoK&WUBBdX7Q!p?PnwW1O|9H;hAtxPqXj@!VJG zI-yR%kFtFv6J6(2zKK9UT@i8Eds77Mw0OIJJ4tAyDqv0I{Zq1Q`^{YRiT88dU5<=8 zq57HVXazMJ@U^H~O^FMMmLn#bVT^-fT%Rvr`a@p&s@LLHq=yC!j_AdBMjJJ_6R2RKqc+J^G=%?(fc~PleT@u}_Xdx6r zgdmhY254Dl56sT4LuG^pW$ec+EiK{tJzUYpSh#-2i1&T+B0lu* z7jWqQGd%EqC-CrxJdop$JBsx-1r1SV0Z@`CK5ZqitN@TM6(y%PbU+}PrdwHFWXd#@ zOG10rrDE5Wky_$SAoL7l?{U^dSSC>iqCriX0vpXjr81ZBJY==8Fowzp#2N&LFCuTt z2ah9q&KnbHTeWM5)CIe#gV7|T3)Y2vUG(RlpeiS z21^;nvDh;`g0Xz}r#JBS^FG4ozx)Mm*|rlmGtb<{1E_+JB2oco;<_~xp--R{KQjy)1ftEZ8CFp%l4a-EOlgP9bpI39VVu-vp`fmi(fxqR$`3&GB_Zv7_4 zV{q1>-Xoz+UDqIhO^vcoXkq)nn54^UfGaW0H7{IBxHl!nTaMhko>#u?B@9f7#U=W( zX}ZFumm*e#6J=?2y`fMyv;A-j9; z=_rM+$dZ(9cb0|kwIN`cyGR!9mWOGHnnp*N#>Tjygfg(n89x8^*Yk*fd=rb6;+#@w 
z#S4d53!Acg=!q_7i~=00E02&dYZwNOb^@2Z=xpqOS3eFTW(?dPp)9Y(Z`?%tfT1nqgYvlU^6~{ zn)=miuKC&N`blOtDT(5w;<0L?^H^;gs*0-*W19*`fw6*);-s$UqG!Jnz0y_dVp|$< zU-v=O%vQA4y^xB1i%d*|RiUT?Tz56!c*EP-aqW$a=KGxbl9!-I9f=GItZ|Wg*c2$; zqO(R=8nn{1*H}Ys>zS-&({#QL7Hb-;djF_Hz|3-tsNp&5?r0-2Q5a_v_|zcJ9eZcC z7KTMz(K8j-FOIq6osIyXl;Ff9<{DKB9*k975s?>TquWfS(C_#7<<4Dv@#3%Wjt_r| zOTPOfycg!?3I^-;W8@>EYmLJiB~(5(jy1)w7Fl;bcAhVj?J9MudNVYr%@U*y2$V?y z^0e}J`aYc_%(?DanNGhXW1Tk|3(09onHHjHVp=_un3n!kCxe@GCe;}wDrw>^I)R>O z*L6a>bR?P55v&eOuFd~$wZD}oyK8%!ZumRkeW z2IdpO?yH=u-m^r*)*&u zoYqwflUkDyYSC);IVLGnniao7mo_XNHx^pwr1bmk^e>gwMhNPoAA2#w6@CYgJNFIr z?tg#m`W{vdWu;vFpFiWdXa56cemxb@rqL%Od}AWNz47XnI=NMw1H2VphX(f$0>P(3 zxvVe-tdZDoSc8;f`YO!y;J9P%$b;{59QQx*c#b&yAPN+NXPk;$D>02MUJsk#^hw)U zO^1nX?1DAa!D6ilSaQ9{lr2f>@Vrz#SRW9rxagJV@X7annAd&!0yyFj#yy9cXki+R zwvIyDS|}QAr=P*Z37KeX1vrhO7ZNvI1DXA-prOK#N(Kt_i{zE=VYY`u384*1Y=Rfb z1cQ=7jdGZ4B2GZ^{~wgH^7KbTKKgslX1^cBxu!PHheiiRH?*n|{%U@yZ z&YjTTgq>Z-!f**^14a!&ZM4o+0j+S>MnCzqaW`UeHJx0IHps4O>CC54TI!+ z!>apOcPCyJf7o2}tQlABIP6W88*8gRYRBR%3sY!~1BBqCKd<_z?@=Klr*%Sa?V^wj z2_-0S&;Ekbe)}}GJoeEn9kh;qVIq|>ByqfrNzQZ}r1rgjB}5mI(?sNB zzb=H~^S~JB?Yk9g3qET$sSq%>fLV)Mzlq_FUGxl8zq}EdI}#tm+{u$5o4q{4r?n1U z(b~T_itC0w&mj*yiEF=pG3&Izk3A4@g~3-Exr>Q9p||&%m1$F*HNXL@o)J0)W1bNC zO^S4}gRhdvS~Itv zP;`g)T7D+6=fADoW;5swg=^FmtVuhKe!V2{!g%J{W|V@_q%ZN{qDuRPPhjf z=Akcu4HzE~7bzOCu(olco{9m%HXis(2jo zIQS6CqDYfHNdn+hplcp*WnwaS`ne%jA#_6pI&@!Cqa>!8%{GGfB6t=*|2Zyt^V=z% z;R&yQ6Xj8dp)LuSH8Vk;OnD6%bph?rl)9y2+qfEQU}nHU>-T5C4JaNTD%das4e*9ysn`6+QMc2*7W@l-M*`B5HV64UAqI+1Hs)yj?kjAF_UpHNb^fRLy zjUnpJY-ZJ@jqWF-;xt1O$&3T0ZG)2*v8(Tv)83{QvrigjGy>~P2iK7rP4afqfW>RA z5t0^d$SG(VF!(gk$l2|+K4K#EZyB7hCoRFAjk8w&(=z2VWUf?cSRkey+$|1AoPOFV z^b3PEZE|`$5FiP`Yg8hIc^W|+j5Hv&$F;ZY;7|YR@A=HdUuOO624?m@jNy2Uvk^@u z>27?e5(+8Ul(uZKaYcs)KE!H>=xCel0Y2_pCavSb3Oc3;*O9ochp{sLGO_ zE2t`ub2dFYwRfI&&|{O{+nY307!E0}yO!7g*`M(EM?Z-zr#y-K()Q_cSCK^S7)8$Jm6fu&F~bJg{H<+;yh(~c26jo$JxJxU^fkSx$k zmRr!>*8T-Gl|d)G*7pWsP*y@mRufdxK^U5JLM|Nex(tg8^riH9SVVR6nXna&telyAE+>Xj0~4B1dcxB5FY-3 zd-CuH-;cW{SKVl}^48Zk62`a#RXmgYLvE-nUBjgaeZ;G1Wj#ZlI= zLzJH%d1oGc&P!1vNRW;V>M~qRWx0MQO)%^B#ngW;{d^Tg$3u2}{8N1M_5Z*|S8(lU ziTj@UA`X4hQz%U|H0Ut=YE)&SW^e6XX_A!d4#P(apm+^K<~^?lNkYqNaY$b(E_>Je z_{xQ!=H4egh z(!^G?>1rCZr01wsp_s~J#Zx4PwgO&EbjDH>@!+(_#^os;gaIa<#g^|elL(k}_QI}7 zLCCpEAyM^+2($}n9X5F~P&N0e7VJxxe$?z;nMS#xsKxK#L=p8=K(O_cJq}Dv(DXCg z-$toinB#*K%-d_A^65XBQ3$y zr}}!hClc()>+H0UtSO63LW$^XPD$( zBdu^#(QbH(2tIL%ZHT_}9JoI-2ONm*`UTzxd{xowJG@uKW^|irq=8JxbXzqJavNSy zX{OYx{Glmcgwt@0hLgSQfLQ@E$_Jv^yG{!Bn!-HYAFW+|tVMhUWyJwAJ-+mg*YmVT zo=nvn;QGR#aNOz0{W$gH`*Y!c{D2+7Mai^DaJEK^ycjtUVo{auz4#V|63yWBIt{o< zz}4i{Ef{04Vv)WFA1D`>*fk!a0?rxwMS&{@6#W4|xoHPK`S9m>>jyr?AqVZp-H$n% zhdty0oOGYNbHMr{iX=@+O-7q3S$)W9&@49~1&wJe3{{D?u-aN=HutA)3@vz#VoPRR zV-acskb=}i1u07y?t-NuvT1+l$4m=L50MayJqbykkTru7nFSpRMSUNWhvD$X>)3kp z4IH-rW`26z4VYu^N+>J5F&JZ-VyUTvLreY0{l{yOc%a z-FOT4I`Kq`d*6q#lQ?a!2N9VeFp;U@D~wV{A~$ zN^vtYRJO-0yB7KCCEwy-KYbAwe*GJ4-L{>X!5sZ!J+^0J-a#noi;GFo&c$ZRSt>N@ z#xRJD^a(|pWbecYOIDK6y0;d%OC^BEqRfuBGZ+mi9RxREr_4bwStQq}1A+!qM^iV0 zgpP1vwZwFpHkr>Tm6Gr4DG8g{zzTv14jYCPVF|EQMyX6Mt{s^|Nn%P?hO{V2?Y<=n zm4=9Jsl#~(_$2z4%$+V81*q7hcc^J8?a$c#d9(K?(n=QM+UlDR}R(##1fr z;!!94CdVAI87mcFTNl8P&h=z@9#UF%bRR3Rc7}id>VNX0m;W6%{&Fj`>kpt98)O*h zSp(uRR`69^_pz}|E*j1KOzm6dM^U4QYHBfZbkwu9h+T#TOfR6-2$d31!yOOb#2f$q ze=;sd%=Tv@I_=`RRh7<})o2#fC!JTiIy*{=Z6$Ac@mZX3ulsS2XFivOjdRQlTx^JQ zg87jgIuWUw;j!daSBmw(oZA4LlQz7OKE?U-}}mJ8oj^JsTeIFzlg+ zX2nZe=rs+42zdz z?+g80!*ClQ3=J9}D<38@>ANu@u&xn?)xKj>faS{0Wf=j>KdI`3gGq*9Na6xZDA8V@ zo2;_ssi$+7GoDA7EpXx(jZ1cv1*bm!Pubp|XCd)FRTK%D1csmztEo`u4g{g=%#r!L 
zPuLT7qO~+o`U+9SIU56fQg(!|JVv4hqHqPMr?3`ZdHiTZVU(U3bNJ>1IN@%03oUc(IW_l!a(zPD831g>kj(Qi!~KU-M-flS;*EO?~kV$HEBc zO*ithFJH)LJmxNsJqB}vvVAo5xU^1N zP2aO^yt3jIQgL*x#U^gwv%FApKV7S$*CXtIDVZ= zLTFu{UMyXduf8rJcp3v@Nqqlk9Tj_=s=`-QELL0rh2h#;Zs9Xu`VT(vp$quVfBzRl z4M=~E-s}v<*+{+AXgO(W?M6uYp2mtbK1bMy26~OoFrsy+p*D!FG@^e}bLFv>0aLNx zsN$G~TR8Qu$IzRZ~S zIzyzyC=OK%TH>sw3W=N~0Ws0$(<9NXPZJk^LgAdFVyfKQ6KHKAaJk^SK^)SY-Nc!@ zDaV9eH*x-l-py|we`gA75p1J%l3FJSX&PTKu7tu_uDkXI&VKo8_}Eu2#m&zrC&Y;M z*9fOillRG~RJ@;Um)|}w-5e0Xr|V|%0fJhbEij^39Rf?ZqF}tZol7qKGzZQV%+Jn5 zI$w<-q8Mz0Bs567C6ki=8diuEZrM7Q-9zb(XStGXlUB>97Umu-@Q zWu-Tr{z<29aLZ&A(rH!NKA~+EG(_J_v2l{?tBQ3fKlt#lu##5*03ZNKL_t&sx!dEP zjz8i+s#z$kV_x7nzxyQ4eecJyZa&gdoFv*vNIC(B4d6oEAOwQZ#y`70eMO}?a2hv_ zbUnO8hl`?!lQQ)oLg5P58l1DKXp7UYvQ)-e5dgPt77#|>^W&{0mtFKtK6>#bNbnrC zc{3;c#xb09@8kLZ*?aGBOOEo+|MOOL_c=FDkVZL@Bytd#v^28bB^4EMaQ6_S|&a~KWZhG2!#obrHMKZwR~eV z;WClr-Q*mBKcO1P{e$5e>P9T|=NKK@Pd0xNAXsa$$tY_=&DF-7wcB-TRa|^y0)_#W z=DGb-o?gdJRO-oyo(WM=N8+>fCUhn;BS8|^);MxW9FMgTHPEqQv(^DWfy3f_-A(b} zV&zUO)-D+3)}@X}+>BS`wF1KPtXzgcDDoW1bG~==jr{WmKE}KL^#gqC z%I{EmVcu?|KYt>H^>`bei!UL!QE5`!v2=A6xshNE9whjnGLs?UdD3d;*U7fjR8JGS zcUB#_2-bU6hO1n7{$03lKF|74Ip@yjaoHEXz^W86l{f8rpk2r24sO0%@}9berp1#n z4IWJ73t^(@fC#0!ppHgG;YZweN5QY$?)I>>!W+NvMJ`@lV~~ZuR68pI-L<l% z&H7m<&G)|Row>)|&I|WhHxBEfOar4djLHp6-e>=y;`MKN2d{el>o~k2ENnXoKXf2f zFc8$VyV#1In1%-=iL#+@C7V?f`P5jXFg7Y%DHek@fN_}U+tAMn)(;=x-T(MzPTV%f zLY`BV!O1yEkCLz2Zm>3-7>jHS=pWqATb}+@?r^)ibN?qiiQ!U67HXc%ROzeEEIxkx z0FO(*-(=yKBn$@}ovuLXgA0$tryJw=zRjY0o3r@+iUK2=puVHRU zsI-})2ns}1$yNJSx%T4E@-H9080#x`p0LE7Z+9*axc6PS_g&8CoU=}4ey+gP0fTz* zE;uOHBxM)pPSQE4tGqUZl^dQ0qNBG*By(nJyq`{ zy6VX+6!UYeZH&lm7T#gCiPl|Pc<~=RgS(%17V~-7HN=_#&(ua77lVoS z9N|zJ_&5f^9bBb%a6j*Q#?v|fj(6gMCq0$*opbb;dN?2H*0Lp~ljFw^?)bN3agt+_ zFo?)Ss``TCV{#OQb~LtfgR%awz#J*rN^Mra4eDq-NJsMN46_!*RZ5;mLdRVX}Cw?ZweRq;FJA^51GXdtlL)a%sAR(l}; zEuLNyee`G~)%b)s&uCcEM=3@Qw1oM2N(Z{oC!-~s*Rrx!YXW`y`98^3M6{8ftsQ2E zTJflt{3-8#<*WG4mDh0RpSwQ~dh#C@iXjgQM=CMLZ2bV5 znrSK)QI99uc-(|(U>bf&pmHV9B#5~3DBo&?y_xWteS6rp?If~ljw=qVa>#i!4{Na+ z+l8|waHNJ@3!Msj2OOGoMt##AtEGwCq$ily33(NQJ8wc~&Q~6cWy2JFb^khFIoY#! 
z?|!~F@C@=S<_aD&rP?8FGrYUbMm~>GMIG2os3pMmYx&>?wHP)FtDf!K7WkQmJP=Wb`Wz>a zHAZ!wq4RihzI)|v{@{=Qf)8EtDf0R4EN?%7a-$^66z4~%wWLzgn(XPR46{lLBW%g& zsXM)CiVi~TUSlF3$Hy)sWO;>A%On5C!?^I5AAwXua2B;e#mhGyBjFwQ>SCf4j38_7 z!RSCq?~vy$Pkt8X-QkX0@YH9rx^0pEQWg`9M9iU+BOFf_eyAa*$VJC2bUhY__x>Be zdB=xlt5tHXrMrX3yFz)@)!g`{FH(pnKXDg3ANb=`Hlr2LjM9_1&ZbqL)A`Ujuu+}I z4@h&qz8F3CXQ;7<7|=&|7;&jmo5s zt!vUiLpm^`rt;iPu_mFlV;lxaP0nh3c~rsi_2w8hY4yOo<(4x}=cix!I)*3Akr(qg zudI9Fg@5)|-uLM*G7^hq7L9NV(EyLg#ZcmK#wX^$H9SVf?$Wn9+LAs+)0%rpw=Yq5 zGg9JoJ%=Hvpfvn}fHz@OIVvp~xe-N?V@!tF42xpQAXiywiB=UxM_6$zEi7{WIj3{i z+nvQ-&btHWo_z*qo_Y#Pi$yrGcOjUMNs>V@SzSq%)CQ`7^>`8^H>p*T#> zx4M&Q1-j+}wA`1vGhx!iSVJP_6!H=dz5nfej1gF*T7}+r6(VbSpC2|x%f4Ii&^K89GQPZa1K(Wfv2HC$ zkr>+11m!j1zEwEV)(2)v^}89P##pbO11+4Z>E1ZOB2-cFswQqLo`v;_ldWT2GY%Nf zpa&{>J4w_rLTHD&dxEv+-F#V&4|Ig4c$Q(MJ%G?mP$=%?n0w9No0}UJ7e4u+chZ*uh3N&U*XA7K{6)b)jwcLTEoLaGa-CrC zwxl@aMEp1UWP?@8{rg~jh%Ontm~LgP>;2rq2x>gE(o`huTtDc^8^Jq=_dba}OnWVp z8R?JCBD3@RR{%ACzmUk10&SdB8V^=}ebLz^6E z^B=Tiy2c5#)9M~`2M}p=`hyxy>_$XbQ&52k73DlL6j_f2GskFsK(#(VeF$uAZV+ps z2mz)@fvVv^37`A+4SeRS-v#+Fl`C0U=(B6v3U@l^T<&__9XbC_x8uw+PvfLrOZ04$ zM+EMJS43RUOgF)4yAykRZKP&H!W{!JEXD>LaDA@2#NWsn#sJec>R~l6CixKGecmFD z>n^#3bITEVIi%1b{^~2y9d}DaWyi|wDsc6&!@`_6xfK6MCZ+t7V0`H)#AjdbZ+TLrU z3?yQ48f1h=YPoLrKEC?(?{MiCzrv+o_%heuw43$yK`dZcvTTv8XYhC`QG7YXWI|a* zo%t*nS=mTT_To_uUD(1`^veO;4~@ujIG9^jZJ(iOWpf*`2uTggCi_U5RJ^5;Rn-Qo z-n{%K$Q`Q{_AD*%wp;dKZI5bkjt!HA`4gXW&EaSir&6rGq?P7`2UFXdCs0WrllG=6 zExubm(a>3t6pwB+aF9e878%!wji#U$qY?@j!sg`pxf@g8`VFi$DKa{^xW5h;Lqhkp8w4!5JzAn+M}HG4-nJapHn> zsxe_awOhoHX8lba+chD21ob43j|y0AF-Hk1h(-lpRcu>Y##S4A;63jqa|5!h7Xx}z zTl2&<(X^~Ch0NFhfyKaJK>z05{N0m(kGtOSUflO7Pi1}kJiUcHDLh0dW&%e%-dOmN zI4(L?Cpu;cgNXdAsy^-b*laCg^-Y_;c5t?6-ecvI(B(Fp;; zZm`_>s+Gr7o!UZb+PewzIU!w3t92q04A4f7UBw)qdBbb?g$F+v>`rvKPrnd$E$2Mr zw=U#muYN1}j-64DKF8+5Xk(2m58B^OyROS5`ueR3jYb#W$(DbO;*jyD(9z*+oy|gp zO`=aoz~mZiQ2Rij3cuI01q+LF0rTub(ot67XPi1J^>GukAwe#dv6;oHV-3$0H?48a z-pjb?lFw0Go%G+L@=ZW5){f3%xMMOAqSE;)ZQv zcHtGU7HdQB?=`v#BMrH_o-0}h>WL{_>tH)3K^gNx17u`il;u-TX3v*D$AWR}+fYmq zl+oK9OlEZNlW~+pVXJ9^1?Yf@vWvD(Kt_(<>IS`GtK!qo446oMt`mihnOJbAu9MnO z2@i6lO`trs7{r(sV|r9o8S@W8#j-J~xN+|-eD8{D`T94m;tOB9oNrxuB{%Nb%gB2q z?;&Ebd7r!&WIeqqm6u4|)j+tHD92m-IyB~Tf$;7%R6wmY^hS9oK7_y5+xq%I`aMve8mE2cS{wU;S2e#q+8A)?@Q_D8<~R7#RacVFEi<>YgHjDyZeuRzz=v+6 z$AdAAQk&9k0C!eoNjf>rzxzOZ*3hjG1Y<4EyZAx|CuLF3$ZfD|+YS!wxsuOZ@?p01 z9dlVPQEc;x=S$03(C9Ml0gJ&MSSP#w8s7S>r*rqaUBG>Q=XY5@VV+{Chzp~3=8})y zW|7T7EXUgC>G&}_Hvdsf7(VR1zZy8}_z)d=k!s*A5*$JWkJyZ*(@$g161;oyxo zuyD>JKsG<$(EFTsWMoKGY?)|>1aZkZC)#hgy?80CYecMdqNYIMpa${HvF9?-%_sWm#d1 z0+o_qe&kQ_-itoMw+{vgNL4wEHS~%er61yA71;O|KI9{*xo-QnMZyr6C5)J6^EcGw zXPO|TMk{F3u2-`9w?*(+)L5J$YOS;|7HtwIkC9+TXsGel!Fxf~1ayl={|a^Z%D32E zo8!cgE%nF=B*cMteD9VuuDs=IyzdiVzzU39$-?3s+gDcDvAmtLPdkOPPd}C0opBnw zb}W)xW%t$BbEiAqj`PnyyDr&cqoXRmqOo>0mX-!FlK34bHi1-MpYilWfZA)V3=It$S=E>r|UeDfrA{Pu#}(juY6k=X$oy zXWX=5xbr>k9#maylwVFdPh!G+sS_pRBX6aiBN2Ayqq-MSjm3#_XnlhlZn}kQuD+2y z`wnv14L?hm)w_<~PdK}Q-u;_hQpU6r2Doz zWAQ$yVKsc|$SVj&b0$az#|}{AkI-~>eH(*oEF>8lrlB!S+sddXvj*oYjEPdPb%Gro zs5K#$)A4~BTd$0147Ef(Dtjvj45Iws51sE~<4q=$13aNw1~SiBdW z`sBysMjI5x93H5uia}QJlfUv9cHeY}eC1>|O2AlpCMa*!El%&l957-rQO&J!y4Qv& z*6rkU#?CkUM=WbZ4vwW%FgC;3;G+|Y0wpW+3mn*c1Mm93Z)Vq0hNTw?)9L(+Bx>(N zF~Ym>{?&$Ut0mcu`+4otp2>ahb3g9=#NTCg`x5huSy(3{7cUhwXk^Hv)U0l$7Md!Y zTd(C#HK@$QKsB5111=g>wWPlks;}dV{g_6Bbcm@v4-4xDkI!= zw{R#iCO{p;gszec)k}6Q@vzsug3JHuAE?$=x!_(GaMHsb#xRGTkDnFihpE-56e`g=Zb6Cckm#GheOuZOH9_o+CIhH zGGgbLTRH(-WQ@vT;2l&I-g}G*9Z-{K7}YHsRmHoig;zF;KSAprf*W~leD5T{pFD<< z81}3k=FpClIj}n5(*3(xFD%Y_a26kQd|L&oP8V^!%&VOWX$n%*C}Um2OX_zmT2hC5 
z$P&3+X|?Rr1-qfjYyGa)62sA-M+4j~FBBR27EY?;-u7Lmg14AA2$`ug$W3(#KbO|z8b-~kX)*7P4_Zu z@LEx11-q}lg7?1d@445VZ-*7gjo`GxMs=sQ%n{66ym#0dhU5fU_wcQ&dE;}R&jTO$ z5bpNtzrn`NMds(T#AhK95sTeFQ^z$T$CH8|{_(zJi;X#!F|g`GRrOzwkI`mNTw7D8 zbOCgV$qe|C(YL1pNTlb@l7(rcx9>lV2?`LB z;>L9|<2ISN!X}K?*{tB{drt(SAt8E5gZ*S(eDQbyhn z6`c*A@!EfQKY#c3f5#Qe47H5xXV;8>`iqU{m_39giB#EW|Nupz_)RO3d zM~l(=JR-G+fsF6Q9XNg`gh@MN)A}`uH$jtFYwS$aPzmotwb@scWf}0|Hh5SVl>j-) zEV%(PLpchLzE&N^!Rq=trFt@xp;hd|dB@URkCo+lcI{Z<Kwk34J)N;U=qRu2t0yf$FZp8f2;=@zcvy_Xwz z-@r|K_i*#U{Tw*F%7KH2SRV{1M-?{95pS657l6g~a!SDVEWRp3wbX~or??6~<5XO!nrIs0r6-|K}nLrhR8vZmnxGb3`}hrIG(gkTj!HvhM|=*t`)4(``fI; zFtPu}ggc17nP9AGg-j|69r2O(zKzpXdh|1ksN%~Jt7^FCgMW_0IuBWabDpfh9(Pz1ybp_VampF^Z+#Ct8Zp?jm&J_{ zW+98}9Eq|=;=(FSN}#HM%r8^M%9N@mE~CYwFDjl;l1B*nC)_Q(m0&itUQ zVk;Aq;OUu72P@sya~f3&X+`ydF`D8CGNS!T$gjAPYyZ!iIpfzaq$(iG3alUTgvb0m z7hilSR~{I$;XGL`IPH{8TMvNxv(zUBWaE=$RvDqI+uzv?5_JClnR4-hc$T!II^5P?*-_cDB?WoJmtVMV8o~#phl?FV0(RRBwdJ?W9vZxQ{N8%mo9a(0`YzT(t=R(r3Jl|(wagiO{ z=2)8RF+bO1ar+{ROUv}<7qLYSB^YDrWf^L6de)L#OOY3i!*-VCH=XtD{`tbbWkNr_AB4Qx7uE#Oe6*=m!wuE zGsaLkAKa0nG z*A?b$Ob#us@~sGort?eE)ofg}wzF$>_#(}mCj}xmyp%xB_GC7rECzMBB@LxpP^ZZqD))s%pep?D`0GJz)dAGYo!qH>6Vu# z1Mu4{0XW)LmvnI!~Jj)Pr^4>hoAzsOQIb~T=86SKwyf%IjM!-vOp$=kR8hsxUsc6w? z=x3sXu3+l`QCh(PFA+lG0!e!46hBvlZdzr61FmT7E_4ejO;tU84`lYr6cmbm6S~z{ ze5u!O(GHo|)4K3A$+V7D>T43VZ^Kt(z`GIi)jE&4?}Iq`rUTsbu4nPhORndj3PaZq zUOUmxqJ|X-6Aiw}cX>-hF`e=I$~2#4QG=S0!1z%Jvh%*Ba-OVbsLBw~iqqzKNv&3; zfy84pB&nLN8BO;)LhvMD?J?oq@ud$&jI~;sfa-W_YWDuq@VA7dD+IoB81WPs2E$dJ z@#Npcl|!luiXuS94wWNb^|xqSQ3O9ZYwyR?Up4WabkE zB58bd@_vSnN(w6&_5A#QelSmX%rCRHvB6Tmk8@R`7#E>nH3>a(7l2>{x#1}y_nbr`W}_}|tkk~cG7S_q5S z7!uukjU>lz)%do~{DrAq@tKRrEJE@3PkJ6BA?wjsKep1%NUK3(rKLueR0w0C8nosS zgJeSHJZJRg_~8G3A^-U=|B_)p!}od=J>je~R(RZ_AHiGS`4QA+F@Uh45+}Y1B!G^j zWWABLo&kw;W;YDz-h)n&pX|8fN~c@WRy=h|7olxo%UI$ioq(rJ2I@fB)Y|2##Z$xy zH}J8@gU~d96Vs^rIMXV;*r;b@>Sq(d@y6KNuP*BMhwe`eBQsKKjmNjyww@S9BvyXG zD)bi&Xt2vOHj)Xg33}|c^EZmGBCN~@Nfqb)R84-P0W;n_BNP9v-q5{7b3VlhSW$AK zmt&UUD?v;a$>=7e2okzBwXBnB*v#3Ecq`WuC?RHi4og#CxqmAZ!d#gsOoc z`3BwxYJz)2fU^a&sTzhBCEcU!aERo*w%=0%r?hRY8Y)%P<{^M{tw%+e0IswCbUF)l z+#_8Bt?92s*YPp!!m744c})ZCF`t%M71;Xs)G0w6LevEe-)tRLbhA8;@3 zamRC6Ut1%~3!L|qzThqY^iK918n9#6622M+AA-2%nCJ{`lkU{CHhKF7cUM5Ey>28O ztmqM8S|@V#N@j)8a6l#=amwi@ZR2lW`M)`|zRL1^Fw6{c(Q!}kF7TY<{$q^b5Zth$ z@Rq$F`UG$J)0gqvPkT1YKk*Z6?3`n+s6|pj(q>|Tt0_E5%{7C~c_>FN93DT8Z7lvD zAz}D{_x?)YoZ~|_>xJoRP1hB+C>|2rXLqcSEpNlEZBVW4$L~LgIbjFRMAu1UTa%w= zP4P`2L42W$9V_H>b5O2N`)w*8$(*3u-p_9AFyFj-WfsDxZR<+s1i&F^_XCqDAgl({F%EN(dB3BU1Q z`QV2>j?q5*RtHEH5}}$-7*t6l8Q7NB(5=A95qdv5qdk+z&^8^2t&XTp2|}0z^2c)r zIm{ZzAF;emfSO=uBS~0H4Y|`s54C}ANbqZDD}Nfyi9|!yw*Gt*WjOIs$}*|aN`OZ+ z)#QjLH+7~S#LuXJQUa&JyGW3(g8B+0Vq(&Em?UbliZw`P$3lwM05A}tTWFyJ%6d+l zxQ|v2c#t@OVq$O_>dS{AN(Mq6SqeioqK5_(i-zU2G&a^e~{c1ybGpt z`y}JzHyvQObSB@rX)jl}MGh4iBXL-p1wRlUUB8SZo;s3rHfNM$^e7ka8Sk;8STR&C z2rqc!5h>ZZeTB+d_S|$cHZLek7uGdGiz&Jh5O_X5jx$a2SiiRhgL58}S%w=2IlO<- zuYC(k+gBL6Ds)e5;5`JlY|}z3n`Wv^B7apT_K2ofq9($GTAu}pie+_qFbUc|n_6ZBtj0iPguzTrWCF#&&L}O3!q|sC1J#mS&o? 
z*v{{3yOPb?o^r&k?nvJoAEDW|B%3!_Wpc~XM6^f}JK>ZQ+TZSM^%%`XHrw~+|%ur!=GOoYFg7H2I?ZG%s}(?;-p z8eD}>%i@H%5ZN7@c zZWF8Uw9ah}E+__SDls1SylqPlA zUbU-C-;9H4&M9?bferU3CXzdYY@}eCfZxXsQ1E~etlB1rt0g6C*pbCxRib^iChv(t zA}}Nny(cuGGlieAYRFuuq)OCJO8H7^VsKxtqMcVkJMZU|MKzh{fhS{)2N{uBK&rsc zAVA*G5XeqlyQJ1wYPI<5d4mxuZ}3YydE58C#|ysnDc*S9Rot-Hr|cPsrf80kh@}^s zcIjjWcVY7dSSFHqnc0ufMqMbVgIA1(10L{W_vByR{&&3n&40`N?tTw+ut63TdX%~o z=##Fbj1{k9G0Q}sksL%Pro#LYBW+?H3pTk8MI&Q8D}^v`*ZIUn@8Iyk18iGf!Ba-L(-!*V$5gcNplcuiy}j zjqaND0%?+VCu4?PB1aKqZ*8GImR$?owCcb4&ura+;74JRo-cAvKXI97J@Ij@9k>NdaK!Zz%%qH|m-7&~ zhKjpy+S@dyQb%|OI+;67tJ#gGSGrjfT8_}kNKcsj(V5|mjE$BgrdF-PcAHPj)OnGl z51}$Qh9hnuD78DAXiM}Ko2W=!ygqa!qy) zGOG1m_@wMG?f@2{Zb|m#1)Mch*5brcNrsagPmXAT@qx731hnGuCj4oXxFisBz4265 zDXqtu7D7~6PnmnFOsTBK0=TpWVo=+UqXLzLbClLIGL8~KO%Iqu=t1O?)b}bigfA+7 z8E=B>j@rom2$G~?G!of;^cw&Xk8ynM}%#@cq|q)93Tr=DWY0_)uU4rg-4 z_BndqlV_oOW%CTLij%Nj`-Z$*D?~{{+O8i)6YzSE6{u{+q24k#&o6MWpW~d64Kw<# zhZ!l>1rV3PcdWf8`hN|CD(Se*EQ)jq+MAV3!t*dt3D#Q*YY|=N&S&ky96rdlzU8NW z?7pn6uaQ}UF&4G;UQ=K+YZ7sZNo)Ol#S7vCr^t^+JmAOf$=|)|4|&YP{u6J0%}ba! zjx2T!)rSsS?PTvK--hlIRzun}I(i!m4xDp1t;p2lhim-))1QPJZeTGHM&~(Hjd;l` zU&qS!T_It_Vy$iFQtB~Lk85N8SpOG4w_YgBWz~);$Od>5B>AIdE5*Ci0Ym{ z#>T1JDU4uM8!?EwxRSPC)4gL_f#`Un;fHtJRx$W(l`wn^xcvCAF)Os}l7%{kK;c_f zGqfn!cKYc!Um?Q{hI?)zOI(7rsV>!|+Oe=aG7Ni*ir8(fXQ z^*=g`fZpn)M5l2Zb=sG0&11Excy~HEHUw0naYHj9P}-i&vHGPi zuT^jlj%}8ronnDBSpV@LDku;6aF6l+!XHiU zYCNE9$HvrGE!`Pjb1}8kf$N$A)>Yj`nz!~HAaHPyXk@5@`YLjbOhkE^j_H|b4davq zwh=fyq3u&AR4(#*>i4d${%a?>oV~=f=2@x^l_1@F`0&lV>GiMT^{;v<@BXJZagRIR zp3&L{ViZ>eoRxYXp=R9$K*o&IYodSM1+l@r%ZqQgOYIyQ zgG)~3Cg{v{Q^pDjGF8(osfx#S#*%UEwYxdvq;1U4_qpWKPqVnZ800^F=*nm+(@YR6 zsFUQTt5tUk17?OhoOLd%2RB&XzYjZdST8Zg4y0X~qGX}d3KwIBoj%H3iw(RbF-kRB z<)01dx2eE52F@1#Q-J0D^KF(A4As&~<82mp* z!Vr&tIX-T)ULa|!StRbC(yA4RDq=IXoqiT28Afvs-M9xvWgrh~02fFjKj|Aow_2k< zBp{Z>9Xo=GMp6M0*<3Y4dXw}lbUc7c0|DurKmC~m_0uLB+6Mcn(NP24s5Q?!iHLS*VX7{pvp{1B1WS`( zyQve#R6)kG=@76>{A0X9yuqkoG?2Qj3N}KYZ2V(lAa9b>bq#FmgvB8?c%q;Lh!u)v zN>&J^+W@Pm7@RR3y&i8Ws;s24B~?~Y39;6mqsHK^#Yb3}!Qh<>Tnmwg-y@rJ$hr}7 zAQy`cLOMkjIKCW?)64%0moWV825$9i*BLC12)o&;ApB z^T~g}C7-zrl_IEASrfZWp%9`bCUQ!vgocKo*T#;VD%NIPa>*xIa~X3dp2jz?-OYv? zMm}(9yt+myN;!fSxVGJ2QfEK~Zj z;Ys;utE0Y6h}%C&O0CIy(#$=dC?;BnkUO^)LKi@t!zoJL{&TN4tnHrhSS`uYe z#xbj_M7P7x?k7o|CPr08Y$QZSlP06+#*ow1^3fN+fP6T_4F-6i-^gD$H2WDN%a3b|Ba__J8MCfJXE$0zCNbU}9}1aq?KKD`0X{{kQRy<7+WV5)%~=|!_U6Cl-8hXBb**D0tB4FWNfTM}bdiFAr9L#z>BoQcRm^8ImPRh@zi$OEaAv(Jot78>; z9jnbxKs{Sww9qZfLK0RvaHinBAO1KW`~2rw*twI^gWA-C#>UxSJJYvwBXsM6ID=RL zW5~0NYp%YI-+IzBdCK!%$p8P0XEE~8@f#)3XcGC-t?UfR|711;@A2N@)brr`-H$up z;dDejc^(o5Tl9F#Kfaqi`$mvkur79mYOw{KSeLYsww7dWweLd5?SAYX3tgKQrj}(E zH!4v|77LK_0GD3;A+$Wqwv`1!>PYLr90#e9VU3AQwSSG`z#8v);!|P0;+J0br)-?H z3%{#}$!h3QLdH-@xC#0riZ(cY{Gej*+jPvm_n!bBbbJUmtEo;O67j)lQhh~r^>y6% zm9H{yGy1!Bv2ekCD2>#$gw_nlB!OK8k{jG`#QxX6k?+3gO)L+CzI3lQ7sy@s0CZ2H zy(5~Q&74NFjSW(hy1H^ITXcHX%_>E=#%--r<@0UpebP6G)B#(>biOzFaby2F%NK4! 
zW;9gkS`{4AG1L)y?K-NWY&1Av$X|)%xqx zE=MP%DLdCf>jvGqnsF_Dol-eXM}HIPUKtzp@p+Zb?P-&D{M@Nm8QN;krziLI^AKsz zUrY2xh@r@=#5Xm;N+BVe1X$YDSl1-5uGNSpQlrN9cxtNNj)T^8d_|paB4MW`54z7C zdF-QpnNv?YnaBV3vpG0WMlN_ycn{VTc<;%?fs|mPjH?&KXIL^SJZ0>-8e4)NFkIhY zVR4CJrFb=%yl8MKBCSL7IOnLDMG-RMtMK!gDC$Ps>z;S#`Wtt1;|=@pCTsi`5(L)F z%~mIRmD2S#(JAYUGt?5=OMK&%?2F0&hK_)?;(mU@h} zL74}Wh!)iK_25_Hqf!!yH-M@S3R*67M+fByC(3+(4$CL4EFJv5BxJHZR=wsXRcS^%5?@}xXT6qozkq}j3QZ+aZW-sYsqNwFtb11 z$qqWeR}l&7S45wMUJo5r2qnvf#Z(9R$ot>RTwbw#X#wkf@Y)vQOsL-}8@nI`_||~| z_U4;;(=&dbGfz8#hdl4Otnb)KwwMRm<=Fm|X^Y#Yq`zCS`5r%RtK*Z#moCMk z=AFmKZ%fZno)5{1BM|6VwJ7*W^3FSm=ZA@$@^f%#KTVNjK!l9C#T z8Pkag6}=79$VpOg3c=v}U2=5mIk4)KHtQyt$+SL$Zngm+9a3%AOk1W`pe2!8RgFC- zZSeHYd%Ovn`e*j%_|zNz2EKPSx;6;LTqb8Y9Pqz>|JOOO5atPLHXaT7^pZ$&Cg^)Q z*EQuCju9|s5t*k!iS$UxpB`Z-HyP6j6(zb*ZJAP1)RZ}`lbxi^D4sGNcF5-XSx3|P zn)Zrx^+%Y1Dz(?46G}GIo{5YAvp$PZBP@7FLEs%>!fys4scD4#D1F@HdgoX8^8KU-uI3- zvu(cL@W8y+hO?qGW@{(aQNg<)*$M_nj@o%X{iSbk{hq@(lOs{(Ea=sn1`;>^2wK&8 zo!|#(r^cTKx@p427@2kpK)~_;6&u3wwGx>TY~I6T>CG=N*I&Z<45t$1ffI>kNXCV!k`0+!EC2!N1+uX+2e>`E> zv__e#9Zf@!#>J+ev+bO-82S=lRvg^Dn+!vx0m~{AS`bN4VU&f_PiLeGVxv-0tWwSj z@APQzV=Ldgb$}*0%J56%*j&4eg^4m#1xcpVoTmjG(~>3ad``XDi4b+?WA0Eze0&~o z*sKpYU2@*>{NJZH$Z&^Cyejj(0){1j_wpAZYd5iD+cLfi)m&q3l;myo&FikoM1^du zb>1No>lDQQM4mYtk|VPoe@C2a{DiYZ^GPQO-P+WgvW0JrXR*oGGA-3S`l+Z#_47%G zY-TBWS{LPL^O~`cbmsMLF`*O!j3^80FO~O@6_l0X($8JS_FX4%c-8au?|d&P;aE@U ztDq_+B_bYgmC71Qaw^HGjNpu6WGd8pvcgc6hxwTYU%=BIe<61|?GzsIU;Yal>jSK0 zje<=b&^A00oh1F%*KLfUsvJgya#RIHBuq#S6soF31Y8w5K0a37r;&<}II|KiP6Ho| z|JEvcjjup*6w95V1vz!!24yT!RGBbutcmC88zc!S!_z{Af~P=`kM>E2urYv33GAKM z-z2J2^<0kxcCBRyqIIC8)u1V|;L%tSuMN4{#?Lkuwk;R@-jjcajrAe@-aLi1Fs#_U z=OBOc`hQ~kj_nLb8|1ld#X}M~(OE#u2C67s_^(kDv1O+Df8zxky-%X2+o&S&AArg;KZLc5F5(SO{4E~!xL@H87yefUr!JGt z*PVs%JZnX@CY4p4;hH1pt#Va5e*6HA|F{W*h+K6%Y1qOJEmOAT#N0)#uyE!sMnz7M z^*M0$wJ;pgc%do{zPYJXMn_14fs;;vUeR

zZ~;~4D4#GZaXKrd#1_c56(TiW&ZGR*#UXhi?|ivryF5Gh!Y(mGp4iM7#$1Bx;Nk9Z z?G$Sbh&?2oT_j8LIXh!~K66yMgdu6EDyNzf2KwYrk1pou!6#C@iVplG@=M`y&g7w( zwHe&sWW($pSawqpmCZ<)R#BwFKtB1Nnk_}Lsi9J#LmK>0`wi;k-_@#UacdPYzl-)9 zGQ4UPYpQzRi`xI|>B{4w>fX40UuG<0ETN&ig-{bCTQMU`ku;dBLqd`*naY}sVMK}$ zB}y1d$d=tO5%H=-k|iWc24O7mJ2QHJKJ%aRJoj0?=X;)W?{m+^!=X#AH#MA_<9OR}VLrKAFOulo{pHn7$IDX>UgUY&O^4g(TS*<7 z2DL*WbuL|reQGYXmXLUVzUF#3O&Kv*k-_LTY zaqCiN@;%NsWk$;b?#l(=_M7@EecTS8{WR~dHv7zVcc|}xhL>D|wA)OT;H|csNePGb zvL9EPjivQtO#jS8gl;q_A!FEP#Ma6WA~U)qMaE+_S|2@5C~t&p!J(rac zWCZI|B8G=w&&&cfN{C=Avr*R+O%<&0COpDSwDw6Btj-hBhheqntF==P9P7Ql>Enp3 zx>^4ObKX!Vz zQ-CW(>vLsv3zjQ225747ndRxI0vz&yP>WX3(XD&KD-Z)F+z+r>^H=xbq78b5dZ z?#!Y};UR54$Pv+~=iK{5XJ4r)zf-;<20|m5?kx4_1UPU z^;u1pB;1fqNoC?)wL@Lg=-ll6yQ>Q`bt3&7i>8GsYdn{;e2(WSk0$+u&v#_yzw+-r zVw~V?b-<|UgZtDv>}fqaOns7iNz5c+K;{-Td>JphUU5#!m1t8UOdV9M$Dt!!&?6UE z)L32MT?2}nQ7^C{D->7EO$40hYA<@2pZe8p;CpJZ%Ux>ukJAGgRa&uwM-$uN@iKR0 z%f(*~FONQxe9I$%C~7x1bLTMM>u2Sp!Dgzmsru|-(UfcG({9y%&i=2dk;sF?@loUR zSZU9`@?zz?3)}f6BSJhu2k+zl9j}$zg|=B7`tbAH-1^N%Ti)l4Evt)(dmoW2%{boQm{3w1MQhEBls9|S)BAmAPfw5K zHHr3t{Eg5rk!yiPB|-};ao<}n=)6Gd$q?c%&95uKbx3Eyc;7OUH}bs@yh>q5rIEu^?M?SV{XV2@=bu8B=Po zBu5tQ?oyYF*e@7o0u%NK!_3G<*If=m6@7Fps{iLf;ERnray1Y}Mj zD1L_OQphB6_b+9V774)-$6cW_VvKink6bgAm86j{K~Fh5*=_qRXV$?GxVy149@xq$B-5fL^ROB1d!MhItEWhO_u5Uoz-1B{ zQ9E=zj!P$8SNF(cJsq-$)CpXz&L^A(4!3b+|G^T5%J1rbD}^PiwJ3e05)%B=?f|qW zGUAb*+|ZN4L5Y~}s;lAGj3lgrEAD`a9}UhbXp&3`#j(QkNkTY%+9F2RB>9l(#19Ch z&K&gOT#@IYUU~X!{!7g@#n_{_on5jJpXPeMEq|2n-51gHc=dd?L9(_|8vptwo!ZYE zMFf#oh9;TXT8ghG3|jM2Q5xWTh`5>&mcuIv0gOy1zd`>#&BnmK7hSGq zeT%j$UB9s$Jn0oF&5#5cj1ji%nt#IrZd^8C|I*hl8if-Z(yr(16=HrOEUZFAgN{Il z4DLNx%Q|@0-&ZPb-}4Bexsy@M&&^PWP5?lFWMIAxt>A zJ8%3d$jn9lgwj=x>ZLOy2U5YRe5igeId*ji{j)ZCOj9yQ;AkK_{57b!R`ZSWN~95(DcNo;qO(#j&l- zdt;(`DIIXa$Ybl48PL(tTG%DQz_&P#8WsRNS(=t(K`q$*5+3Z4w*^2aFv zb2c1Urh4!WOj26scP3A)nv8r}v9QJqmY`{GaWHl~oF#?NJ66dPp)DAfiD?5Dtcm&r zOL1k3utCcR#(~vQlh!(3OIfdI7H2t0rWJYrZ%I`~Gdg znmP-e79p#XnYGRk<@Ekq*j9fK2d3RQddk_k5ja$N)xFeSF zVyMp8+;Yi->bVT;l~jg1swM6fIl)LRUKu3@29n|5M19T&ie z23Fx&RyDF4f*xiC7SM@-Tk%9HW&LimHT%Yna=$TUdNE?r7qN)l$##$bI6Xs6u6y4Q zGkCkUlm%l!XDrjFKsWZ@*OaQ?aOv(VNkm(0cL{*@+{y|Ee7!Ayc54+tqUrJrq4!$yK}3QCoo zvR+dm9a0)6Id;d#;>%m&qxSCW`vXXe2V&0owrL;2-Vr6ljMQ)n2sCB=aWfh04mE#a zf>UhSB~wWx&Rc7gz3AM*#B=GoR82MpCv;66dbJv49&9LSnGzz_U9IDFKnAV#XmYm~ zH&V=58Oa_f4~1x;m(55dsch$OLKDW*O`f@KS;?$RWn;(ba@cTwx)YhUE420091s9P8cv#VklQ-AFc08%*SZgbsgl#}7b)%xSW6CxGyfvgO}g zjYD4ACZ|#IdX6Q&e0#^-o57ghGPG~nQb%4~dSJ97-YvhOt|qF`MjlI$=A~*LfNvS2 zw<<{Ww4Ka&fHcknCg#$ApBE^LX({qZKpp4eV{dW_daLT%q8B-(Q=#2?la%7Tao>$dHeiiGWPg~((%d^%9t;^d4DRXx?IGPGHHHJ?6K9A>SL2UX- z07Dyk)zB^cWi_nWc}w#+cpeNVXWPFWVSUJ+wkaL;HzW$t~&r7JpoKtz#6*)DUe8la*@d*nhL@$gicSq+U0Q~mCod`iHN2bP zNUP_h`u4w*!dOfs*{)xKCxR>EEZS<1)E&2fzTS&_6k7;|DDQVMILq&G>rr*k+94D1;2>Ihdyly~CHDn&7=P<=$tSUI!CCV^X=ml&87 z$VmrM8;C`p9YG-vh)6n{M$gyr7OA23@4orJCoE#}Y7d#$n;b?Uyt}Rb1>8vvJAsZ+ z08p<5x9gb`m|7FEK#!rUyN}JlZXMISoPJ>6RtbV6;#zs_20lA@GmW$vP1;P^nH0pp z&#_c2W!GfJEtJ_!IoJskd$Dd}Ein$_2^<#6{9MHBR?Su_u?4K!I`ZY7S|WpRR%kYKWyW;RN>NZPbc0%dyRZ z(II3K9_~d_A4@|lT0qw<{|oWFV4NPO#+m*mQT;8adKB(%3O-xB-WI*(3lr3F*x?)a z^iu*Mj}jqDH)x)ML8+RJ$DZ?Uoj`-%$fa=>DdV$^TJaoV)szS+`i@yzpfK)65m@?ra)lCnsk*(ei?V^GxWu}73 zU5R?e`k?o)m#1-8O%oWhqHoVxDwP6W2+_g)rzKNDG}*S_pZ6iR{zqWSbs`{>eF!ir z^#4O#Afomhy=VHpIC{DE!X&9E54C6=QitqfX-@g=zgKEdlNVv@J=O2UX3xG_RtV72 zbGSJRbd)yBVkT>1LGcAZh0iYL_?puE9=q(*{616FGonF7zz+1=zMiFX|HuZzA!dI4yj&adwsVcd1sPqt~`y(@a&_L)b=(? 
[GIT binary patch data omitted: base85-encoded payload of the binary file added by the preceding patch]

From 58c84daefa2898514a350c392618c797b41263b9 Mon Sep 17 00:00:00 2001
From: "Brandon T. Willard"
Date: Thu, 27 Apr 2023 19:38:32 -0500
Subject: [PATCH 082/734] Use tempfile to generate test cache directories

---
 tests/test_cache.py | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/tests/test_cache.py b/tests/test_cache.py
index cde986c1..9fd21ac8 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -1,5 +1,5 @@
 import os
-import shutil
+import tempfile

 import joblib
 import pytest
@@ -29,17 +29,16 @@ def refresh_environment():
 @pytest.fixture
 def test_cache(refresh_environment):
     """Initialize a temporary cache and delete it after the test has run."""
-    os.environ["OUTLINES_CACHE_DIR"] = "~/.cache/outlines_tests"
-    import outlines
+    with tempfile.TemporaryDirectory() as tempdir:
+        os.environ["OUTLINES_CACHE_DIR"] = tempdir
+        import outlines

-    memory = outlines.cache.get()
-    assert memory.location == "~/.cache/outlines_tests"
+        memory = outlines.cache.get()
+        assert memory.location == tempdir

-    yield memory
+        yield memory

-    memory.clear()
-    home_dir = os.path.expanduser("~")
-    shutil.rmtree(f"{home_dir}/.cache/outlines_tests")
+        memory.clear()


 def test_get_cache(test_cache):

From 34a0e4bdf926ae97178b0bed1b9edd06c7bc5b10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 1 May 2023 17:47:19 +0200
Subject: [PATCH 083/734] Add "Pick the odd one out" example

---
 examples/pick-odd-one-out.py | 43 ++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 examples/pick-odd-one-out.py

diff --git a/examples/pick-odd-one-out.py b/examples/pick-odd-one-out.py
new file mode 100644
index 00000000..5cd4b57c
--- /dev/null
+++ b/examples/pick-odd-one-out.py
@@ -0,0 +1,43 @@
+"""Chain-of-thought prompting for "odd one out" classification.
+
+Example taken from the LMQL library [1]_.
+
+References
+----------
+.. [1] Beurer-Kellner, L., Fischer, M., & Vechev, M. (2022).
+       Prompting Is Programming: A Query Language For Large Language Models.
+       arXiv preprint arXiv:2212.06094.
+
+"""
+import outlines.models as models
+import outlines.text as text
+
+
+@text.prompt
+def build_ooo_prompt(options):
+    """
+    Pick the odd word out: skirt, dress, pen, jacket.
+    skirt is clothing, dress is clothing, pen is an object, jacket is clothing.
+    So the odd one is pen.
+
+    Pick the odd word out: Spain, France, German, England, Singapore.
+    Spain is a country, France is a country, German is a language, ...
+    So the odd one is German.
+
+    Pick the odd word out: {{ options | join(", ") }}.
+ + """ + + +reasoning_model = models.text_completion.openai( + "text-davinci-003", stop_at=["Pick the odd word", "So the odd one"] +) +result_model = models.text_completion.openai("text-davinci-003") + +options = ["sea", "mountains", "plains", "sock"] +prompt = build_ooo_prompt(options) +reasoning = reasoning_model(prompt) +prompt += reasoning +result = result_model(prompt) +prompt += result +print(prompt) From cb37ec30b6e85eb9d472fea9fb7ef83d706063ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 1 May 2023 20:11:10 +0200 Subject: [PATCH 084/734] Allow to add linebreaks at the end of prompts --- outlines/text/prompts.py | 27 ++++++++++++++++++++++----- tests/text/test_prompt.py | 7 +++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index e48f961b..14b1beb4 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -128,6 +128,18 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: >>> render(tpl) ... 'A new string' + If you want to insert a linebreak at the end of the rendered template, you will + need to leave an empty line at the end of the template: + + >>> tpl = ''' + ... A new string + ... + ... ''' + >>> tpl + ... '\nA new string\n\n' + >>> render(tpl) + ... 'A new string\n' + `render` removes the identation in docstrings. This is particularly important when using prompt functions @@ -169,19 +181,24 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: A string that contains the rendered template. """ - # Dedent, and remove extra linebreak - template = inspect.cleandoc(template) + cleaned_template = inspect.cleandoc(template) + + # Add linebreak if there were any extra linebreaks that + # `cleandoc` would have removed + ends_with_linebreak = template.replace(" ", "").endswith("\n\n") + if ends_with_linebreak: + cleaned_template += "\n" # Remove extra whitespaces, except those that immediately follow a newline symbol. # This is necessary to avoid introducing whitespaces after backslash `\` characters # used to continue to the next line without linebreak. 
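    # After a backslash at the end of a template line, Python's string
    # continuation keeps the next line's leading indentation as embedded
    # spaces; the substitution below collapses those runs into one space.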
- template = re.sub(r"(?![\r\n])(\b\s+)", " ", template) + cleaned_template = re.sub(r"(?![\r\n])(\b\s+)", " ", cleaned_template) env = Environment( trim_blocks=True, lstrip_blocks=True, - keep_trailing_newline=False, + keep_trailing_newline=True, undefined=StrictUndefined, ) env.filters["name"] = get_fn_name @@ -190,7 +207,7 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: env.filters["signature"] = get_fn_signature env.filters["schema"] = get_pydantic_schema - jinja_template = env.from_string(template) + jinja_template = env.from_string(cleaned_template) return jinja_template.render(**values) diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py index 1e00fc2e..07a551b0 100644 --- a/tests/text/test_prompt.py +++ b/tests/text/test_prompt.py @@ -33,6 +33,13 @@ def test_render(): """ assert text.render(tpl) == "A test line\n An indented line" + tpl = """ + A test line + An indented line + + """ + assert text.render(tpl) == "A test line\n An indented line\n" + def test_render_escaped_linebreak(): tpl = """ From 8897af4862a401b7af0dd96edc3ef93f58e5ffe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 1 May 2023 20:16:18 +0200 Subject: [PATCH 085/734] Rename odd one out example --- examples/{pick-odd-one-out.py => pick_odd_one_out.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{pick-odd-one-out.py => pick_odd_one_out.py} (100%) diff --git a/examples/pick-odd-one-out.py b/examples/pick_odd_one_out.py similarity index 100% rename from examples/pick-odd-one-out.py rename to examples/pick_odd_one_out.py From 3fab8447db519f84a50b6c28d71fbbad0414d15f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 1 May 2023 21:42:49 +0200 Subject: [PATCH 086/734] Add examples to the README --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 12c99b6b..88d2df1f 100644 --- a/README.md +++ b/README.md @@ -243,3 +243,10 @@ tell_a_joke = text.function( tell_a_joke(Joke) # [2, 3, 5, 7] ``` + +# Examples + +- [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) +- [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) +- [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py) +- [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) From 2f544db89767094931148e3ee31ea60e6b995926 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 27 Apr 2023 19:16:00 -0500 Subject: [PATCH 087/734] Factor out model-from-path code in completion decorator --- outlines/text/completions.py | 54 +++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/outlines/text/completions.py b/outlines/text/completions.py index c4e23970..0f659784 100644 --- a/outlines/text/completions.py +++ b/outlines/text/completions.py @@ -4,6 +4,45 @@ import outlines.text as text +def get_model_from_path( + model_path: str, + *, + stop_at: Optional[List[str]] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, +) -> Callable: + """Obtain a text completion provider model object from a model path. 
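+
+    For example, the path ``"openai/text-davinci-003"`` selects the
+    ``text-davinci-003`` model from the ``openai`` provider.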
+ + Parameters + ---------- + model_path + A string of the form "model_provider/model_name" + stop_at + A list of tokens which, when found, stop the generation. + max_tokens + The maximum number of tokens to generate. + temperature + Value used to module the next token probabilities. + + """ + if "/" not in model_path: + raise ValueError("Model names must be in the form 'provider_name/model_name'") + + provider_name = model_path.split("/")[0] + model_name = model_path[len(provider_name) + 1 :] + + try: + model_cls = getattr(models.text_completion, provider_name) + except KeyError: + raise ValueError(f"The model provider {provider_name} is not available.") + + llm = model_cls( + model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature + ) + + return llm + + def completion( model_path: str, *, @@ -65,19 +104,8 @@ def completion( Value used to module the next token probabilities. """ - if "/" not in model_path: - raise ValueError("Model names must be in the form 'provider_name/model_name'") - - provider_name = model_path.split("/")[0] - model_name = model_path[len(provider_name) + 1 :] - - try: - model_cls = getattr(models.text_completion, provider_name) - except KeyError: - raise ValueError(f"The model provider {provider_name} is not available.") - - llm = model_cls( - model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature + llm = get_model_from_path( + model_path, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature ) def decorator(fn: Callable): From 983eb58be0b1e1112fd29892d4b56ae8a8c1059e Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 27 Apr 2023 19:17:03 -0500 Subject: [PATCH 088/734] Update BabyAGI example to use non-decorator completion approach --- examples/babyagi.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/examples/babyagi.py b/examples/babyagi.py index c2e778ff..e3213125 100644 --- a/examples/babyagi.py +++ b/examples/babyagi.py @@ -5,25 +5,12 @@ The original repo can be found at https://github.com/yoheinakajima/babyagi """ from collections import deque -from dataclasses import dataclass -from typing import Callable, Deque, List +from typing import Deque, List +import outlines.models as models import outlines.text as text -MODEL = "openai/gpt-3.5-turbo" - - -@dataclass -class LLMFunction: - model_name: str - prompt_fn: Callable - format_fn: Callable = lambda x: x - - def __call__(self, *args, **kwargs): - prompt = self.prompt_fn(*args, **kwargs) - model, init_state = text.chat_completion(self.model_name) - result, _ = model(prompt, init_state) - return self.format_fn(result) +model = models.text_completion.openai("gpt-3.5-turbo") ################# @@ -41,7 +28,7 @@ def perform_task_ppt(objective: str, task: str): """ -perform_task = LLMFunction(MODEL, perform_task_ppt) +perform_task = text.function(model, perform_task_ppt) ##################### @@ -80,7 +67,7 @@ def create_tasks_fmt(result: str) -> List[str]: return task_list -create_tasks = LLMFunction(MODEL, create_tasks_ppt, create_tasks_fmt) +create_tasks = text.function(model, create_tasks_ppt, create_tasks_fmt) ######################## @@ -117,7 +104,7 @@ def prioritize_tasks_fmt(result: str): return task_list -prioritize_tasks = LLMFunction(MODEL, prioritize_tasks_ppt, prioritize_tasks_fmt) +prioritize_tasks = text.function(model, prioritize_tasks_ppt, prioritize_tasks_fmt) objective = "Becoming rich while doing nothing." 
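The `text.function` helper used throughout the BabyAGI patch above composes a prompt template, a model call, and an optional result parser into a single callable. Its implementation lives in outlines/text/functions.py and is not shown in this series; a minimal sketch of the pattern, assuming only that `model` is a callable mapping a prompt string to a completion string, could look like this:

    def function(model, prompt_fn, format_fn=lambda x: x):
        """Sketch: compose a prompt template, a model and a result parser."""

        def wrapper(*args, **kwargs):
            prompt = prompt_fn(*args, **kwargs)  # render the prompt template
            result = model(prompt)  # query the generative model
            return format_fn(result)  # e.g. split the completion into a task list

        return wrapper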
From 7bbfdbb67e16a01af358a993ac86cfee5d4aae7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 1 May 2023 17:31:52 +0200 Subject: [PATCH 089/734] Add possiblity for LM to choose between sequences --- outlines/models/openai.py | 27 ++++++++++++++++++++++----- pyproject.toml | 2 ++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index caa31d11..9bbc0e1a 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -5,6 +5,7 @@ from typing import Callable, Dict, List, Optional, Tuple import numpy as np +import tiktoken from PIL import Image from PIL.Image import Image as PILImage @@ -23,6 +24,7 @@ def OpenAITextCompletion( model_name: str, stop_at: Optional[List[str]] = None, + is_in: Optional[List[str]] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, ) -> Callable: @@ -37,6 +39,8 @@ def OpenAITextCompletion( The name of the model as listed in the OpenAI documentation. stop_at A list of tokens which, when found, stop the generation. + is_in + A list of strings among which the results will be chosen. max_tokens The maximum number of tokens to generate. temperature @@ -49,7 +53,7 @@ def OpenAITextCompletion( """ - parameters = validate_completion_parameters(stop_at, max_tokens, temperature) + parameters = validate_completion_parameters(stop_at, is_in, max_tokens, temperature) @error_handler @memory.cache @@ -57,6 +61,7 @@ def call_completion_api( model: str, prompt: str, stop_sequences: Tuple[str], + logit_bias: Dict[str, int], max_tokens: int, temperature: float, ): @@ -68,6 +73,7 @@ def call_completion_api( temperature=temperature, max_tokens=max_tokens, stop=stop_sequences, + logit_bias=logit_bias, ) return response @@ -82,6 +88,7 @@ def generate(prompt: str) -> str: def OpenAIChatCompletion( model_name: str, stop_at: Optional[List[str]] = None, + is_in: Optional[List[str]] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, ) -> Callable: @@ -96,6 +103,8 @@ def OpenAIChatCompletion( The name of the model as listed in the OpenAI documentation. stop_at A list of tokens which, when found, stop the generation. + is_in + A list of strings among which the results will be chosen. max_tokens The maximum number of tokens to generate. temperature @@ -107,7 +116,7 @@ def OpenAIChatCompletion( parameters when passed a prompt. 
""" - parameters = validate_completion_parameters(stop_at, max_tokens, temperature) + parameters = validate_completion_parameters(stop_at, is_in, max_tokens, temperature) @error_handler @memory.cache @@ -115,6 +124,7 @@ def call_chat_completion_api( model: str, messages: List[Dict[str, str]], stop_sequences: Tuple[str], + logit_bias: Dict[str, int], max_tokens: int, temperature: float, ): @@ -126,6 +136,7 @@ def call_chat_completion_api( temperature=temperature, max_tokens=max_tokens, stop=stop_sequences, + logit_bias=logit_bias, ) return response @@ -140,9 +151,15 @@ def generate(query: str) -> str: def validate_completion_parameters( - stop_at, max_tokens, temperature -) -> Tuple[Tuple[str], int, float]: + stop_at, is_in, max_tokens, temperature +) -> Tuple[Tuple[str], Dict[str, int], int, float]: """Validate the parameters passed to the completion APIs and set default values.""" + if is_in is not None: + enc = tiktoken.get_encoding("p50k_base") + is_in = sum([enc.encode(word) for word in is_in], []) + is_in = {f"{token}": 100 for token in is_in} + else: + is_in = {} if stop_at is not None and len(stop_at) > 4: raise TypeError("OpenAI's API does not accept more than 4 stop sequences.") elif stop_at is not None: @@ -152,7 +169,7 @@ def validate_completion_parameters( if temperature is None: temperature = 1.0 - return stop_at, max_tokens, temperature + return stop_at, is_in, max_tokens, temperature def OpenAIEmbeddings(model_name: str): diff --git a/pyproject.toml b/pyproject.toml index 0421b927..08756e36 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ dependencies = [ "pillow", "pydantic", "scipy", + "tiktoken", ] dynamic = ["version"] @@ -61,6 +62,7 @@ module = [ "pydantic", "pytest", "scipy.*", + "tiktoken.*", "torch", "transformers", ] From c0d30408db0a88e416e490ae98a153a9c1fd15be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 1 May 2023 16:27:43 +0200 Subject: [PATCH 090/734] Add react example --- examples/react.py | 86 +++++++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 3 ++ 2 files changed, 89 insertions(+) create mode 100644 examples/react.py diff --git a/examples/react.py b/examples/react.py new file mode 100644 index 00000000..0ce176fd --- /dev/null +++ b/examples/react.py @@ -0,0 +1,86 @@ +"""ReAct + +This example was inspired by the LQML library [1]_. The ReAct framework was +first developed in [2]_ and augments Chain-of-Thought prompting with the ability +for the model to query external sources. + +References +---------- +.. [1] Beurer-Kellner, L., Fischer, M., & Vechev, M. (2022). Prompting Is Programming: A Query Language For Large Language Models. arXiv preprint arXiv:2212.06094. +.. [2] Yao, S., Zhao, J., Yu, D., Du, N., Shafran, I., Narasimhan, K., & Cao, Y. (2022). React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629. + +""" +import requests # type: ignore + +import outlines +import outlines.models as models +import outlines.text as text + +outlines.cache.disable() + + +@text.prompt +def build_reAct_prompt(question): + """What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? + Tho 1: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado ... + Act 2: Search 'Colorado orogeny' + Obs 2: The Colorado orogeny was an episode of mountain building (an orogeny) ... + Tho 3: It does not mention the eastern sector. So I need to look up eastern sector. + ... 
+ Tho 4: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. + Act 5: Finish '1,800 to 7,000 ft' + {{ question }} + """ + + +@text.prompt +def add_mode(i, mode, result, prompt): + """{{ prompt }} + {{ mode }} {{ i }}: {{ result }} + """ + + +def search_wikipedia(query: str): + url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={query}&origin=*" + response = requests.get(url) + page = response.json()["query"]["pages"] + return ".".join(list(page.values())[0]["extract"].split(".")[:2]) + + +mode_model = models.text_completion.openai( + "text-davinci-003", is_in=["Tho", "Act"], max_tokens=2 +) +action_model = models.text_completion.openai( + "text-davinci-003", is_in=["Search", "Finish"], max_tokens=2 +) +thought_model = models.text_completion.openai( + "text-davinci-003", stop_at=["\n"], max_tokens=128 +) +subject_model = models.text_completion.openai( + "text-davinci-003", stop_at=["'"], max_tokens=128 +) + +prompt = build_reAct_prompt("Where is Apple Computers headquarted? ") + +for i in range(1, 10): + response = mode_model(prompt) + mode = response[:3] + if mode == "Tho": + prompt = add_mode(i, mode, "", prompt) + thought = thought_model(prompt) + prompt += f"{thought}" + if mode == "Act": + prompt = add_mode(i, mode, "", prompt) + response = action_model(prompt) + action = response[:6] + + prompt += f"{action} '" + subject = " ".join(subject_model(prompt).split()[:2]) + prompt += f"{subject}'" + if action == "Search": + result = search_wikipedia(subject) + prompt = add_mode(i, "Obs", result, prompt) + else: + break + +print(prompt) diff --git a/pyproject.toml b/pyproject.toml index 08756e36..2d1bd9f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,9 @@ filterwarnings = [ "ignore::FutureWarning:transformers.*" ] +[tool.mypy] +exclude=["examples"] + [[tool.mypy.overrides]] module = [ "diffusers", From a9f39380ef5c549cd77c8f7d24135ca791572c78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 14:56:28 +0200 Subject: [PATCH 091/734] Add possiblity to constrain generation to list of strings --- examples/react.py | 9 ++-- outlines/models/openai.py | 100 ++++++++++++++++++++++++++++++++++---- 2 files changed, 94 insertions(+), 15 deletions(-) diff --git a/examples/react.py b/examples/react.py index 0ce176fd..f1c0f02b 100644 --- a/examples/react.py +++ b/examples/react.py @@ -48,7 +48,7 @@ def search_wikipedia(query: str): mode_model = models.text_completion.openai( - "text-davinci-003", is_in=["Tho", "Act"], max_tokens=2 + "gpt-3.5-turbo", is_in=["Tho", "Act"], max_tokens=2 ) action_model = models.text_completion.openai( "text-davinci-003", is_in=["Search", "Finish"], max_tokens=2 @@ -63,17 +63,14 @@ def search_wikipedia(query: str): prompt = build_reAct_prompt("Where is Apple Computers headquarted? 
") for i in range(1, 10): - response = mode_model(prompt) - mode = response[:3] + mode = mode_model(prompt) if mode == "Tho": prompt = add_mode(i, mode, "", prompt) thought = thought_model(prompt) prompt += f"{thought}" if mode == "Act": prompt = add_mode(i, mode, "", prompt) - response = action_model(prompt) - action = response[:6] - + action = action_model(prompt) prompt += f"{action} '" subject = " ".join(subject_model(prompt).split()[:2]) prompt += f"{subject}'" diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 9bbc0e1a..e4a5cb63 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -2,7 +2,7 @@ import base64 import os from io import BytesIO -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import tiktoken @@ -79,9 +79,49 @@ def call_completion_api( return response def generate(prompt: str) -> str: - response = call_completion_api(model_name, prompt, *parameters) + response = call_completion_api(model_name, prompt, **parameters) return response["choices"][0]["text"] + def generate_choice(prompt: str) -> str: + """Generate a a sequence that must be one of many options. + + We tokenize every choice, iterate over the token lists, create a mask + with the current tokens and generate one token. We progressively + eliminate the choices that don't start with the currently decoded + sequence. + + """ + assert is_in is not None + tokenizer = tiktoken.get_encoding("p50k_base") + encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] + + decoded: List[str] = [] + for i in range(max([len(word) for word in encoded])): + mask = {} + for word, tokenized_word in zip(is_in, encoded): + if not word.startswith("".join(decoded)): + continue + try: + mask[tokenized_word[i]] = 100 + except IndexError: + pass + + if len(mask) == 0: + break + + parameters["logit_bias"] = mask + parameters["max_tokens"] = 1 + response = call_completion_api(model_name, prompt, **parameters) + decoded.append(response["choices"][0]["text"]) + prompt = prompt + "".join(decoded) + + return "".join(decoded) + + if is_in is not None: + return generate_choice + else: + return generate + return generate @@ -147,19 +187,56 @@ def generate(query: str) -> str: answer = response["choices"][0]["message"]["content"] return answer - return generate + def generate_choice(prompt: str) -> str: + """Generate a a sequence that must be one of many options. + + We tokenize every choice, iterate over the token lists, create a mask + with the current tokens and generate one token. We progressively + eliminate the choices that don't start with the currently decoded + sequence. 
+ + """ + assert is_in is not None + tokenizer = tiktoken.get_encoding("cl100k_base") + encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] + + decoded: List[str] = [] + for i in range(max([len(word) for word in encoded])): + mask = {} + for word, tokenized_word in zip(is_in, encoded): + if not word.startswith("".join(decoded)): + continue + try: + mask[tokenized_word[i]] = 100 + except IndexError: + pass + + if len(mask) == 0: + break + + parameters["logit_bias"] = mask + parameters["max_tokens"] = 1 + messages = [{"role": "user", "content": prompt}] + response = call_chat_completion_api(model_name, messages, **parameters) + decoded.append(response["choices"][0]["message"]["content"]) + prompt = prompt + "".join(decoded) + + return "".join(decoded) + + if is_in is not None: + return generate_choice + else: + return generate def validate_completion_parameters( stop_at, is_in, max_tokens, temperature -) -> Tuple[Tuple[str], Dict[str, int], int, float]: +) -> Dict[str, Union[Tuple[str], Dict[int, int], int, float]]: """Validate the parameters passed to the completion APIs and set default values.""" if is_in is not None: - enc = tiktoken.get_encoding("p50k_base") - is_in = sum([enc.encode(word) for word in is_in], []) - is_in = {f"{token}": 100 for token in is_in} + mask: Dict[int, int] = {} else: - is_in = {} + mask = {} if stop_at is not None and len(stop_at) > 4: raise TypeError("OpenAI's API does not accept more than 4 stop sequences.") elif stop_at is not None: @@ -169,7 +246,12 @@ def validate_completion_parameters( if temperature is None: temperature = 1.0 - return stop_at, is_in, max_tokens, temperature + return { + "stop_sequences": stop_at, + "logit_bias": mask, + "max_tokens": max_tokens, + "temperature": temperature, + } def OpenAIEmbeddings(model_name: str): From 8561628fcb523cacff18be88a0cf4ffe17984af1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 15:20:42 +0200 Subject: [PATCH 092/734] Pass `is_in` and `stop_at` to the model call directly --- examples/react.py | 33 ++++++-------- outlines/models/openai.py | 83 +++++++++++++++++++----------------- outlines/text/completions.py | 2 +- 3 files changed, 57 insertions(+), 61 deletions(-) diff --git a/examples/react.py b/examples/react.py index f1c0f02b..ca18fdd3 100644 --- a/examples/react.py +++ b/examples/react.py @@ -47,33 +47,26 @@ def search_wikipedia(query: str): return ".".join(list(page.values())[0]["extract"].split(".")[:2]) -mode_model = models.text_completion.openai( - "gpt-3.5-turbo", is_in=["Tho", "Act"], max_tokens=2 -) -action_model = models.text_completion.openai( - "text-davinci-003", is_in=["Search", "Finish"], max_tokens=2 -) -thought_model = models.text_completion.openai( - "text-davinci-003", stop_at=["\n"], max_tokens=128 -) -subject_model = models.text_completion.openai( - "text-davinci-003", stop_at=["'"], max_tokens=128 -) - prompt = build_reAct_prompt("Where is Apple Computers headquarted? 
") +complete = models.text_completion.openai( + "gpt-3.5-turbo", max_tokens=128, temperature=1.0 +) for i in range(1, 10): - mode = mode_model(prompt) + mode = complete(prompt, is_in=["Tho", "Act"]) + prompt = add_mode(i, mode, "", prompt) + if mode == "Tho": - prompt = add_mode(i, mode, "", prompt) - thought = thought_model(prompt) + thought = complete(prompt, stop_at="\n") prompt += f"{thought}" - if mode == "Act": - prompt = add_mode(i, mode, "", prompt) - action = action_model(prompt) + elif mode == "Act": + action = complete(prompt, is_in=["Search", "Finish"]) prompt += f"{action} '" - subject = " ".join(subject_model(prompt).split()[:2]) + + subject = complete(prompt, stop_at=["'"]) # Apple Computers headquartered + subject = " ".join(subject.split()[:2]) prompt += f"{subject}'" + if action == "Search": result = search_wikipedia(subject) prompt = add_mode(i, "Obs", result, prompt) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index e4a5cb63..2ab5e152 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -23,10 +23,8 @@ def OpenAITextCompletion( model_name: str, - stop_at: Optional[List[str]] = None, - is_in: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, + max_tokens: Optional[int] = 216, + temperature: Optional[float] = 1.0, ) -> Callable: """Create a function that will call the OpenAI conmpletion API. @@ -37,10 +35,6 @@ def OpenAITextCompletion( ---------- model_name: str The name of the model as listed in the OpenAI documentation. - stop_at - A list of tokens which, when found, stop the generation. - is_in - A list of strings among which the results will be chosen. max_tokens The maximum number of tokens to generate. temperature @@ -53,8 +47,6 @@ def OpenAITextCompletion( """ - parameters = validate_completion_parameters(stop_at, is_in, max_tokens, temperature) - @error_handler @memory.cache def call_completion_api( @@ -78,11 +70,24 @@ def call_completion_api( return response - def generate(prompt: str) -> str: - response = call_completion_api(model_name, prompt, **parameters) + def generate(prompt: str, *, stop_at=None, is_in=None): + if stop_at is not None: + stop_at = tuple(stop_at) + + if is_in is not None and stop_at is not None: + raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") + elif is_in is not None: + return generate_choice(prompt, is_in) + else: + return generate_base(prompt, stop_at) + + def generate_base(prompt: str, stop_at: Optional[Tuple[str]]) -> str: + response = call_completion_api( + model_name, prompt, stop_at, {}, max_tokens, temperature + ) return response["choices"][0]["text"] - def generate_choice(prompt: str) -> str: + def generate_choice(prompt: str, is_in: List[str]) -> str: """Generate a a sequence that must be one of many options. 
We tokenize every choice, iterate over the token lists, create a mask @@ -109,28 +114,21 @@ def generate_choice(prompt: str) -> str: if len(mask) == 0: break - parameters["logit_bias"] = mask - parameters["max_tokens"] = 1 - response = call_completion_api(model_name, prompt, **parameters) + response = call_completion_api( + model_name, prompt, None, mask, 1, temperature + ) decoded.append(response["choices"][0]["text"]) prompt = prompt + "".join(decoded) return "".join(decoded) - if is_in is not None: - return generate_choice - else: - return generate - return generate def OpenAIChatCompletion( model_name: str, - stop_at: Optional[List[str]] = None, - is_in: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, + max_tokens: Optional[int] = 128, + temperature: Optional[float] = 1.0, ) -> Callable: """Create a function that will call the chat completion OpenAI API. @@ -141,10 +139,6 @@ def OpenAIChatCompletion( ---------- model_name: str The name of the model as listed in the OpenAI documentation. - stop_at - A list of tokens which, when found, stop the generation. - is_in - A list of strings among which the results will be chosen. max_tokens The maximum number of tokens to generate. temperature @@ -156,7 +150,6 @@ def OpenAIChatCompletion( parameters when passed a prompt. """ - parameters = validate_completion_parameters(stop_at, is_in, max_tokens, temperature) @error_handler @memory.cache @@ -181,13 +174,26 @@ def call_chat_completion_api( return response - def generate(query: str) -> str: + def generate(prompt: str, *, stop_at=None, is_in=None): + if stop_at is not None: + stop_at = tuple(stop_at) + + if is_in is not None and stop_at is not None: + raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") + elif is_in is not None: + return generate_choice(prompt, is_in) + else: + return generate_base(prompt, stop_at) + + def generate_base(query: str, stop_at: Optional[Tuple[str]]) -> str: messages = [{"role": "user", "content": query}] - response = call_chat_completion_api(model_name, messages, *parameters) + response = call_chat_completion_api( + model_name, messages, stop_at, {}, max_tokens, temperature + ) answer = response["choices"][0]["message"]["content"] return answer - def generate_choice(prompt: str) -> str: + def generate_choice(prompt: str, is_in=List[str]) -> str: """Generate a a sequence that must be one of many options. 
We tokenize every choice, iterate over the token lists, create a mask @@ -214,19 +220,16 @@ def generate_choice(prompt: str) -> str: if len(mask) == 0: break - parameters["logit_bias"] = mask - parameters["max_tokens"] = 1 messages = [{"role": "user", "content": prompt}] - response = call_chat_completion_api(model_name, messages, **parameters) + response = call_chat_completion_api( + model_name, messages, None, mask, 1, temperature + ) decoded.append(response["choices"][0]["message"]["content"]) prompt = prompt + "".join(decoded) return "".join(decoded) - if is_in is not None: - return generate_choice - else: - return generate + return generate def validate_completion_parameters( diff --git a/outlines/text/completions.py b/outlines/text/completions.py index 0f659784..18bf520b 100644 --- a/outlines/text/completions.py +++ b/outlines/text/completions.py @@ -126,7 +126,7 @@ def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: """ prompt = prompt_fn(*args, **kwargs) - result = llm(prompt) + result = llm(prompt, stop_at=stop_at) return result, prompt + result return wrapper From 4221279ef07fe986fb85056850250cc56ade35e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 15:33:09 +0200 Subject: [PATCH 093/734] Add ReAct example to README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 88d2df1f..e523cafc 100644 --- a/README.md +++ b/README.md @@ -248,5 +248,6 @@ tell_a_joke(Joke) - [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) - [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) +- [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) - [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py) - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) From d13639fbabd43c2effea75a2e665a0368b977bcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 10:18:29 +0200 Subject: [PATCH 094/734] Remove the `completion` decorator --- examples/dust/math-generate-code.py | 19 ++-- examples/meta_prompting.py | 52 +++++++---- outlines/__init__.py | 2 +- outlines/text/__init__.py | 1 - outlines/text/completions.py | 134 ---------------------------- 5 files changed, 51 insertions(+), 157 deletions(-) delete mode 100644 outlines/text/completions.py diff --git a/examples/dust/math-generate-code.py b/examples/dust/math-generate-code.py index afe3d040..06921158 100644 --- a/examples/dust/math-generate-code.py +++ b/examples/dust/math-generate-code.py @@ -1,4 +1,5 @@ """Example from https://dust.tt/spolu/a/d12ac33169""" +import outlines.models as models import outlines.text as text examples = [ @@ -13,9 +14,11 @@ }, ] +question = "Carla is downloading a 200 GB file. She can download 2 GB/minute, but 40% of the way through the download, the download fails. Then Carla has to restart the download from the beginning. How load did it take her to download the file in minutes?" 
+ -@text.completion("openai/text-davinci-003", stop_at=["QUESTION"]) -def answer_with_code(question, examples): +@text.prompt +def answer_with_code_prompt(question, examples): """ {% for example in examples %} QUESTION: {{example.question}} @@ -31,7 +34,11 @@ def execute_code(code): return result -question = "Carla is downloading a 200 GB file. She can download 2 GB/minute, but 40% of the way through the download, the download fails. Then Carla has to restart the download from the beginning. How load did it take her to download the file in minutes?" -result_code, _ = answer_with_code(question, examples) -result = execute_code(result_code) -print(result) +answer_with_code = text.function( + models.text_completion.openai("text-davinci-003"), + answer_with_code_prompt, + execute_code, +) + +result = answer_with_code(question, examples) +print(f"It takes Carla {result:.0f} minutes to download the file.") diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 9e0fa0f0..0e06a966 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -11,41 +11,51 @@ """ import argparse +import outlines.models as models import outlines.text as text def split_into_steps(question, model_name: str): - @text.completion(model_name) + @text.prompt def solve(question): """{{question}} Let's solve this problem by splitting it into steps. """ - _, completed = solve(question) + complete = models.text_completion.openai(model_name) + + prompt = solve(question) + answer = complete(prompt) + completed = prompt + answer return completed def fill_in_the_blanks(question, model_name: str): - @text.completion(model_name, stop_at=["."]) + @text.prompt def determine_goal(question): """{{question}} In order to solve this problem, we will analyze each of the options and determine """ - @text.completion(model_name, stop_at=["."]) + @text.prompt def solve(memory): """{{memory}}. Let's begin.""" - _, completed = determine_goal(question) - _, completed = solve(completed) + complete = models.text_completion.openai(model_name, stop_at=["."]) + + prompt = determine_goal(question) + answer = complete(prompt) + prompt = solve(prompt + answer) + answer = complete(prompt) + completed = prompt + answer return completed def ask_an_expert(question, model_name: str): - @text.completion(model_name, stop_at=['"']) + @text.prompt def find_expert(question): """ {{question}} @@ -63,7 +73,7 @@ def find_expert(question): on the screen: " """ - @text.completion(model_name) + @text.prompt def get_answer(question, expert, memory): """ {{memory}} @@ -72,21 +82,27 @@ def get_answer(question, expert, memory): {{question}} """ - expert, completed = find_expert(question) - _, completed = get_answer(question, expert, completed) + complete_expert = models.text_completion.openai(model_name, stop_at=['"']) + complete_answer = models.text_completion.openai(model_name) + + prompt = find_expert(question) + expert = complete_expert(prompt) + prompt = get_answer(question, expert, prompt + expert) + answer = complete_answer(prompt) + completed = prompt + answer return completed def ask_an_expert_simple(question, model_name: str): - @text.completion(model_name, stop_at=["\n", "."]) + @text.prompt def find_expert(question): """ Q: {{question}} A: A good person to answer this question would be """ - @text.completion(model_name) + @text.prompt def get_answer(expert, memory): """ {{memory}}. 
@@ -94,8 +110,14 @@ def get_answer(expert, memory): For instance, {{expert}} would answer """ - expert, completed = find_expert(question) - answer, completed = get_answer(expert, completed) + model_expert = models.text_completion.openai(model_name, stop_at=["\n", "."]) + model_answer = models.text_completion.openai(model_name) + + prompt = find_expert(question) + expert = model_expert(prompt) + prompt = get_answer(expert, prompt + expert) + answer = model_answer(prompt) + completed = prompt + answer return completed @@ -111,7 +133,7 @@ def run_example(model_fn, question, model_name): parser.add_argument( "--model", type=str, - default="openai/gpt-3.5-turbo", + default="gpt-3.5-turbo", help="The Large Language Model to use to run the examples.", ) args = parser.parse_args() diff --git a/outlines/__init__.py b/outlines/__init__.py index e8375e89..ff7b0fa0 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,6 +1,6 @@ """Outlines is a Generative Model Programming Framework.""" from outlines.image import generation -from outlines.text import completion, prompt, render +from outlines.text import prompt, render __all__ = [ "completion", diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 96a81806..4b187905 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,3 +1,2 @@ -from .completions import completion from .functions import function from .prompts import prompt, render diff --git a/outlines/text/completions.py b/outlines/text/completions.py deleted file mode 100644 index 18bf520b..00000000 --- a/outlines/text/completions.py +++ /dev/null @@ -1,134 +0,0 @@ -from typing import Any, Callable, Dict, List, Optional, Tuple - -import outlines.models as models -import outlines.text as text - - -def get_model_from_path( - model_path: str, - *, - stop_at: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, -) -> Callable: - """Obtain a text completion provider model object from a model path. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - stop_at - A list of tokens which, when found, stop the generation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - """ - if "/" not in model_path: - raise ValueError("Model names must be in the form 'provider_name/model_name'") - - provider_name = model_path.split("/")[0] - model_name = model_path[len(provider_name) + 1 :] - - try: - model_cls = getattr(models.text_completion, provider_name) - except KeyError: - raise ValueError(f"The model provider {provider_name} is not available.") - - llm = model_cls( - model_name, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature - ) - - return llm - - -def completion( - model_path: str, - *, - stop_at: Optional[List[str]] = None, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, -) -> Callable: - """Decorator that simplifies calls to language models. - - Prompts that are passed to language models are often rendered templates, - and the workflow typically looks like: - - >>> import outlines - >>> from outlines.models import OpenAICompletion - >>> - >>> llm = OpenAICompletion("davinci") - >>> tpl = "I have a {{question}}" - >>> prompt = outlines.render(tpl, question="How are you?") - >>> answer = llm(prompt) - - While explicit, these 4 lines have the following defaults: - - 1. The prompt is hidden; - 2. 
The language model instantiation is far from the prompt; prompt templates - are however attached to a specific language model call. - 3. The intent behind the language model call is hidden. - - To encapsulate the logic behind language model calls, we thus define the - template prompt inside a function and decorate the function with a model - specification. When that function is called, the template is rendered using - the arguments passed to the function, and the rendered prompt is passed to - a language model instantiated with the arguments passed to the decorator. - - The previous example is equivalent to the following: - - >>> import outlines.text as text - >>> - >>> @text.completion("openai/davinci") - ... def answer(question): - ... "I have a {{question}}" - ... - >>> answer, _ = answer("How are you?") - - Decorated functions return two objects: the first represents the output of - the language model call, the second represents the concatenation of the - rendered prompt with the output of the language model call. The latter can - be used in context where one expands an initial prompt with recursive calls - to language models. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - stop_at - A list of tokens which, when found, stop the generation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - """ - llm = get_model_from_path( - model_path, stop_at=stop_at, max_tokens=max_tokens, temperature=temperature - ) - - def decorator(fn: Callable): - prompt_fn = text.prompt(fn) - - def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Tuple[str, str]: - """Call the generative model with the rendered template. - - Building prompts with recursive calls to language models is common - in prompt engineering, we thus return both the raw answer from the - language model as well as the rendered prompt including the answer. - - Returns - ------- - A tuple that contains the result of the language model call, and the - rendered prompt concatenated with the result of the language model - call. - - """ - prompt = prompt_fn(*args, **kwargs) - result = llm(prompt, stop_at=stop_at) - return result, prompt + result - - return wrapper - - return decorator From 22d76f0a82aeeb0ee503cfabff38bf26ed4d36c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 16:34:54 +0200 Subject: [PATCH 095/734] Remove code generation example --- examples/{dust/math-generate-code.py => math_generate_code.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{dust/math-generate-code.py => math_generate_code.py} (100%) diff --git a/examples/dust/math-generate-code.py b/examples/math_generate_code.py similarity index 100% rename from examples/dust/math-generate-code.py rename to examples/math_generate_code.py From 94d152f24be97bf0cd97c1911be4007b931298f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 2 May 2023 16:36:38 +0200 Subject: [PATCH 096/734] Add controlled generation to the README --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index e523cafc..e192ffa9 100644 --- a/README.md +++ b/README.md @@ -244,6 +244,10 @@ tell_a_joke(Joke) # [2, 3, 5, 7] ``` +# Controlled generation + +Outlines offers mechanisms to specify high-level constraints on the text generations. Passing `stop_at` to model call the user can stop the generation once a particular word, sequence of symbol is reached. 
Passing `is_in` to the model call the user can constraint the model to generate an answer chosen among a set of possible answers. + # Examples - [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) From e0b7ae9a01ae1413fe9a1c6a4d242b15d602689b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 3 May 2023 17:01:29 +0200 Subject: [PATCH 097/734] Update the README title --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e192ffa9..79bf85dc 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ # Outlines -Build _reliable_ workflows based on interactions with large language models. +Build _reliable_ workflows based on interactions with generative models. From 709c0e91813d953ad3e1de0c030607ac836e7255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 14:18:10 +0200 Subject: [PATCH 098/734] Add documentation --- .gitignore | 2 + docs/Makefile | 20 ++++ docs/make.bat | 35 ++++++ docs/source/conf.py | 42 +++++++ docs/source/index.rst | 108 ++++++++++++++++++ docs/source/installation.rst | 2 + docs/source/integrations/llamaindex.rst | 2 + docs/source/integrations/messaging.rst | 2 + docs/source/integrations/python.rst | 2 + docs/source/overview.rst | 2 + docs/source/reference/batching.rst | 2 + .../reference/controlled_generation.rst | 2 + docs/source/reference/multimodel.rst | 2 + docs/source/reference/prompting.rst | 2 + 14 files changed, 225 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/make.bat create mode 100644 docs/source/conf.py create mode 100644 docs/source/index.rst create mode 100644 docs/source/installation.rst create mode 100644 docs/source/integrations/llamaindex.rst create mode 100644 docs/source/integrations/messaging.rst create mode 100644 docs/source/integrations/python.rst create mode 100644 docs/source/overview.rst create mode 100644 docs/source/reference/batching.rst create mode 100644 docs/source/reference/controlled_generation.rst create mode 100644 docs/source/reference/multimodel.rst create mode 100644 docs/source/reference/prompting.rst diff --git a/.gitignore b/.gitignore index 14550d04..8e91982e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ *.egg-info __pycache__ *_version.py +docs/build +.coverage diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..747ffb7b --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. 
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..defd54ef --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,42 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Outlines" +copyright = "2023, Normal Computing" +author = "Remi Louf" +release = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = ["sphinx_design"] + +templates_path = ["_templates"] + +source_suffix = {".rst": "restructuredtext"} + +pygments_style = "nord-darker" + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_book_theme" +html_static_path = ["_static"] +html_title = "" +html_logo = "_static/logo.png" +html_options = { + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/normal-computing/outlines", # required + "icon": "fa-brands fa-square-github", + "type": "fontawesome", + }, + ] +} diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..a9ebe8d6 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,108 @@ +.. Outlines documentation master file, created by + sphinx-quickstart on Thu May 4 11:16:27 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + + +👋 Welcome to Outlines +====================== + +**Outlines** is a Python library to write reliable programs for interactions with generative models: language models, diffusers, multimodal models, classifiers, etc. It provides a Domain Specific Language (DSL) that makes prompting easier, supports constrained text generation, and is natively concurrent. It integrates well with the rest of the Python ecosystem: tools, vector stores, etc. + + +.. grid:: 2 + + .. grid-item-card:: 💻 Install Outlines + :link: https://pypi.org/project/outlines + :text-align: center + :width: 75% + :margin: 4 4 auto auto + + .. code:: + + pip install outlines + + .. grid-item-card:: 🚀 Normal Computing + :link: https://normalcomputing.ai + :text-align: center + :width: 75% + :margin: 4 4 auto auto + + The development of Outlines is entirely funded by `Normal Computing `_ + + +👀 Sneak Peek +------------- + +A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: + +.. 
code:: python + + import outlines + import outlines.text as text + import outlines.models as models + + from my_tools import google_search, execute_code + from my_response_models import command_response + + + @outlines.prompt + def agent_prompt(objective, goals, tools, response_model): + """You are an AI with the following objective: {{ objective }} + + Keep the following goals in mind: + {% for goal in goals %} + {{ loop.index }}. {{ goal }} + {% endfor %} + + COMMANDS + {% for tool in tools %} + - {{ tool | name }}, {{ tool | description }}, {{ tool | signature }} + {% endfor %} + + OUTPUT FORMAT + {{ response_model | schema }} + """ + + + @outlines.chain + async def agent(objective, goals, tools): + complete = models.text_completion.hf("sshleifer/tiny-gpt2") + prompt = agent_prompt(objective, goals, tools, command_response) + answer = await complete(prompt) + command = command_response(answer) + + return command + + + agent( + "Write a library called Outlines", + ["Easy prompting", "Multimodal, multimodel", "Constrained text generation"], + [google_search, execute_code], + ) + + +.. toctree:: + :maxdepth: 1 + :hidden: + + installation + overview + +.. toctree:: + :maxdepth: 1 + :caption: Outlines + :hidden: + + reference/prompting + reference/controlled_generation + reference/multimodel + reference/batching + +.. toctree:: + :maxdepth: 1 + :caption: Integrations + :hidden: + + integrations/python.rst + integrations/llamaindex.rst + integrations/messaging.rst diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 00000000..b89cd02a --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,2 @@ +✨ Installation +=============== diff --git a/docs/source/integrations/llamaindex.rst b/docs/source/integrations/llamaindex.rst new file mode 100644 index 00000000..910960d9 --- /dev/null +++ b/docs/source/integrations/llamaindex.rst @@ -0,0 +1,2 @@ +🦙 Llamaindex +============= diff --git a/docs/source/integrations/messaging.rst b/docs/source/integrations/messaging.rst new file mode 100644 index 00000000..144e9aa8 --- /dev/null +++ b/docs/source/integrations/messaging.rst @@ -0,0 +1,2 @@ +✉ Slack, Discord, Twitter +========================== diff --git a/docs/source/integrations/python.rst b/docs/source/integrations/python.rst new file mode 100644 index 00000000..fe36a28b --- /dev/null +++ b/docs/source/integrations/python.rst @@ -0,0 +1,2 @@ +🐍 Python code +============== diff --git a/docs/source/overview.rst b/docs/source/overview.rst new file mode 100644 index 00000000..9950e01f --- /dev/null +++ b/docs/source/overview.rst @@ -0,0 +1,2 @@ +🌎 Overview +=========== diff --git a/docs/source/reference/batching.rst b/docs/source/reference/batching.rst new file mode 100644 index 00000000..030c11e3 --- /dev/null +++ b/docs/source/reference/batching.rst @@ -0,0 +1,2 @@ +Batching +======== diff --git a/docs/source/reference/controlled_generation.rst b/docs/source/reference/controlled_generation.rst new file mode 100644 index 00000000..3413d2c8 --- /dev/null +++ b/docs/source/reference/controlled_generation.rst @@ -0,0 +1,2 @@ +Controlled Generation +===================== diff --git a/docs/source/reference/multimodel.rst b/docs/source/reference/multimodel.rst new file mode 100644 index 00000000..598e428f --- /dev/null +++ b/docs/source/reference/multimodel.rst @@ -0,0 +1,2 @@ +Multimodal, Multimodels +======================= diff --git a/docs/source/reference/prompting.rst b/docs/source/reference/prompting.rst new file mode 100644 index 00000000..830c57b8 --- /dev/null +++ 
b/docs/source/reference/prompting.rst @@ -0,0 +1,2 @@ +Prompting +========= From 773aaa6b3844917036ecac9532c748dae9a3e1a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 14:24:41 +0200 Subject: [PATCH 099/734] Add ReadTheDocs configuration --- .readthedocs.yaml | 15 +++++++++++++++ requirements-doc.txt | 3 +++ 2 files changed, 18 insertions(+) create mode 100644 .readthedocs.yaml create mode 100644 requirements-doc.txt diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..4512720a --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,15 @@ +version: 2 + +python: + version: "3.8" + install: + - method: pip + path: . + extra_requirements: + - rtd + - requirements: requirements-doc.txt + +sphinx: + builder: html + configuration: docs/source/conf.py + fail_on_warning: true diff --git a/requirements-doc.txt b/requirements-doc.txt new file mode 100644 index 00000000..ae0fede4 --- /dev/null +++ b/requirements-doc.txt @@ -0,0 +1,3 @@ +sphinx +sphinx-book-theme +sphinx-design From a2e0b686817b3746c66eae737bfa4f90cb35de3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 14:31:39 +0200 Subject: [PATCH 100/734] Add GitHub workflows to build and publish the documentation --- .github/workflows/build_documentation.yml | 25 +++++++++++++++ .github/workflows/publish_documentation.yml | 34 +++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 .github/workflows/build_documentation.yml create mode 100644 .github/workflows/publish_documentation.yml diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml new file mode 100644 index 00000000..1a70a4df --- /dev/null +++ b/.github/workflows/build_documentation.yml @@ -0,0 +1,25 @@ +name: Build the documentation + +on: + pull_request: + branches: [main] + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Checkout the branch + uses: actions/checkout@v2.3.1 + with: + persist-credentials: false + + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + python-version: 3.9 + + - name: Build the documentation with Sphinx + run: | + pip install -r requirements-doc.txt + sphinx-build -b html docs/source docs/build/html diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml new file mode 100644 index 00000000..57c93a5f --- /dev/null +++ b/.github/workflows/publish_documentation.yml @@ -0,0 +1,34 @@ +name: Publish the documentation + +on: + push: + branches: [main] + +permissions: + contents: write + +jobs: + publish: + name: Publish + runs-on: ubuntu-latest + steps: + - name: Checkout the branch + uses: actions/checkout@v2.3.1 + + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + python-version: 3.9 + + - name: Build the documentation with Sphinx + run: | + pip install -r requirements-doc.txt + sphinx-build -b html docs/source docs/build/html + + - name: Publish the documentation + uses: JamesIves/github-pages-deploy-action@3.6.2 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: gh-pages + FOLDER: docs/build/html + CLEAN: true From 23b5d492359bc5d2320f0b9146227357d6053da9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 17:03:27 +0200 Subject: [PATCH 101/734] Move the `tiktoken` imports to remove global dependency --- outlines/models/openai.py | 5 ++++- pyproject.toml | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/outlines/models/openai.py 
b/outlines/models/openai.py index 2ab5e152..01d661ad 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -5,7 +5,6 @@ from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np -import tiktoken from PIL import Image from PIL.Image import Image as PILImage @@ -96,6 +95,8 @@ def generate_choice(prompt: str, is_in: List[str]) -> str: sequence. """ + import tiktoken + assert is_in is not None tokenizer = tiktoken.get_encoding("p50k_base") encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] @@ -202,6 +203,8 @@ def generate_choice(prompt: str, is_in=List[str]) -> str: sequence. """ + import tiktoken + assert is_in is not None tokenizer = tiktoken.get_encoding("cl100k_base") encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] diff --git a/pyproject.toml b/pyproject.toml index 2d1bd9f5..b989843b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,6 @@ dependencies = [ "pillow", "pydantic", "scipy", - "tiktoken", ] dynamic = ["version"] From 96f03019c1b877dbbecb25ea7cd013695509a1c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 17:03:58 +0200 Subject: [PATCH 102/734] Add Sphinx external linkcheck --- docs/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index defd54ef..3587d226 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -14,7 +14,7 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -extensions = ["sphinx_design"] +extensions = ["sphinx.builders.linkcheck", "sphinx_design"] templates_path = ["_templates"] From ff3f59320f1499016be406301eb851580fe9e47f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 17:04:15 +0200 Subject: [PATCH 103/734] Add installation instructions to documentation --- docs/source/installation.rst | 51 ++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/docs/source/installation.rst b/docs/source/installation.rst index b89cd02a..e7d6d099 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -1,2 +1,53 @@ ✨ Installation =============== + +The latest version of outlines is available on PyPi: + +.. code:: bash + + pip install outlines + +Outlines comes with a minimal set of dependencies that are necessary to run the library's code. Integrations will require you to install dependencies manually. + + +OpenAI +------ + +To use OpenAI models you first have to run: + +.. code:: bash + + pip install openai tiktoken + +.. important:: + + You also need to set your API credentials by defining the ``OPENAI_API_KEY`` environment variable. + + +HuggingFace +----------- + +To use the integrations with HuggingFace's `transformers `_ and `diffusers `_ libraries you first need to run: + +.. code:: + + pip install torch transformers diffusers + + +.. attention:: + + HuggingFace models are run locally. Outlines uses the `PyTorch `_ versions of the models. Please refer to the `PyTorch documentation `_ for questions related to **GPU support**. + +The integration is fairly basic for now, and if you have specific performance needs please `open an issue `_ + +Other integrations +------------------ + +Outlines is designed to be fully compatible with other libraries, which you will need to install separately. 
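For instance, to try a couple of the integrations listed below (the package names shown are the usual PyPI distributions, given here as an assumption rather than pinned requirements):

.. code:: bash

   pip install llama-index slack-sdk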
You can use any library with Outlines but, whenever possible, we recommend using libraries with async support for better performance. Examples of possible integrations are: + +- `Llama index `_ for vector stores and document querying; +- `discord.py `_ for Discord integration; +- `Slack SDK `_ for Slack integration; +- `aiofiles `_ for asynchronous file operations; +- `httpx `_ or `aiohttp `_ for asynchronous HTTP requests; +- `asyncpg `_ and `aiosqlite `_ for async PostgreSQL and SQLite interfaces. From 56b7b5c337160dd3a39397d68b47b29da091656a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 4 May 2023 17:11:14 +0200 Subject: [PATCH 104/734] Add 'Hello world' example --- docs/source/overview.rst | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/docs/source/overview.rst b/docs/source/overview.rst index 9950e01f..224971fe 100644 --- a/docs/source/overview.rst +++ b/docs/source/overview.rst @@ -1,2 +1,28 @@ -🌎 Overview -=========== +🌎 Hello world +============== + +Here is a simple Outlines program that highlights some of its key features: + +.. code:: + + import outlines.text as text + import outlines.models as models + + + @text.prompt + def where_from(expression): + "What's the origin of '{{ expression }}'?" + + + complete = models.text_completion.openai("text-davinci-003") + + hello_world = where_from("Hello world") + foobar = where_from("Foo Bar") + answer = complete([hello_world, foobar], num_samples=3, stop_at=["."]) + + +- **Prompt management**. You can use functions with the ``@outlines.text.prompt`` decorator. "Prompt functions" use the `Jinja templating language `_ to render the prompt written in the docstring. We also added a few filters to help with common workflows, like building agents. Of course, for simple prompts, you can also use Python strings directly. +- **Generative model integration**. You can use text completion models from OpenAI and HuggingFace, but models are not limited to text. +- **Controlled generation**. The ``stop_at`` keyword argument lets you define when the generation should be stopped. Outlines includes more options to control the generation; these happen on a token basis, saving time and costs. +- **Sampling**. Outlines exclusively generates sequences using sampling. You can generate many samples with one call. +- **Batching**. Models can take a list of prompts as input and generate completions in parallel. From defcb721982df066d2ff582946a0bd06240450b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 5 May 2023 20:18:29 +0200 Subject: [PATCH 105/734] Add features to the documentation index --- docs/source/index.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/source/index.rst b/docs/source/index.rst index a9ebe8d6..d425c8ab 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -80,6 +80,14 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: [google_search, execute_code], ) +📜 Features +----------- + +- A powerful domain-specific language to write and render prompts; +- OpenAI integration: language models, embeddings and Dall-E; +- HuggingFace integration: ``transformers`` and ``diffusers``; +- Parallel model and tool calls with the ``outlines.elemwise`` decorator; +- Map your chains over different inputs in parallel to avoid overfitting; .. 
toctree:: :maxdepth: 1 From c7295435275874c189da86dd13be9549e7dabd07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 5 May 2023 20:18:45 +0200 Subject: [PATCH 106/734] Add prompting reference documentation --- docs/source/reference/prompting.rst | 70 +++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/docs/source/reference/prompting.rst b/docs/source/reference/prompting.rst index 830c57b8..5f0b925e 100644 --- a/docs/source/reference/prompting.rst +++ b/docs/source/reference/prompting.rst @@ -1,2 +1,72 @@ Prompting ========= + +Outlines provides a powerful domain-specific language to write and manage prompts, via what we call *prompt functions*. Prompt functions are Python functions that contain the prompt in their docstring; their arguments correspond to the variables used in the prompt. We use the Jinja library to render the prompts, with a few tweaks to make the prompt writing experience nicer. + + +One thus doesn't need extra abstraction to write a prompt with few-shot examples; Jinja can handle that: + +.. code:: + + import outlines.text as text + + @text.prompt + def few_shots(instructions, examples, question): + """{{ instructions }} + + {% for example in examples %} + Q: {{ example.question }} + A: {{ example.answer }} + {% endfor %} + Q: {{ question }} + """ + + prompt = few_shots(instructions, examples, question) + +The original template is still accessible as an attribute of the prompt function: + +.. code:: + + few_shots.template + + +Outlines also provides a few utilities to simplify workflows that connect tools to LLMs (Toolformer, ViperGPT, AutoGPT). We noticed that the same information was always repeated: once when implementing the function, a second time when writing the instructions in the prompt. No need to do this with Outlines: the information can be pulled directly from the function definition: + +.. code:: + + import outlines.text as text + + def my_tool(arg1: str, arg2: int): + """Tool description. + + The rest of the docstring + """ + pass + + @text.prompt + def tool_prompt(question, tool): + """{{ question }} + + COMMANDS + 1. {{ tool | name }}: {{ tool | description }}, args: {{ tool | args }} + + {{ tool | source }} + """ + +The same goes for output validation: the code is implemented once when defining the parser, a second time when passing the format to the prompt: + +.. code:: + + from pydantic import BaseModel + + import outlines.text as text + + class MyResponse(BaseModel): + field1: int + field2: str + + @text.prompt + def my_prompt(response_model): + """{{ response_model | schema }}""" + +Please refer to the `Jinja documentation `_ for more information about the syntax of the templating language. From e1bca0ab82403d621017350d735f1b511052d29d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 16:30:23 +0200 Subject: [PATCH 107/734] Replace `joblib` with `perscache` Joblib does not support async functions. 
--- outlines/cache.py | 10 ++++++---- outlines/models/hf_diffusers.py | 2 +- outlines/models/openai.py | 8 ++++---- pyproject.toml | 3 ++- tests/test_cache.py | 18 ++++++++++-------- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/outlines/cache.py b/outlines/cache.py index f90127fa..c3ed02d0 100644 --- a/outlines/cache.py +++ b/outlines/cache.py @@ -1,10 +1,12 @@ import os -import joblib +from perscache import Cache, NoCache +from perscache.serializers import JSONSerializer +from perscache.storage import LocalFileStorage home_dir = os.path.expanduser("~") cache_dir = os.environ.get("OUTLINES_CACHE_DIR", f"{home_dir}/.cache/outlines") -memory = joblib.Memory(cache_dir, verbose=0) # type: ignore[attr-defined] +memory = Cache(serializer=JSONSerializer(), storage=LocalFileStorage(cache_dir)) def get(): @@ -42,10 +44,10 @@ def disable(): """ global memory - memory = joblib.Memory(None) + memory = NoCache() def clear(): """Erase the cache completely.""" cache = get() - cache.clear() + cache.storage.clear() diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 050e559f..153e3d11 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -20,7 +20,7 @@ def call(prompt: str) -> str: return call_stable_diffusion_pipeline(model_name, prompt) -@memory.cache +@memory.cache() def call_stable_diffusion_pipeline(model_name: str, prompt: str) -> PILImage: """Build and call the Stable Diffusion pipeline.""" import torch diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 01d661ad..cf9d9183 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -47,7 +47,7 @@ def OpenAITextCompletion( """ @error_handler - @memory.cache + @memory.cache() def call_completion_api( model: str, prompt: str, @@ -153,7 +153,7 @@ def OpenAIChatCompletion( """ @error_handler - @memory.cache + @memory.cache() def call_chat_completion_api( model: str, messages: List[Dict[str, str]], @@ -279,7 +279,7 @@ def OpenAIEmbeddings(model_name: str): """ @error_handler - @memory.cache + @memory.cache() def call_embeddings_api( model: str, input: str, @@ -323,7 +323,7 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): """ @error_handler - @memory.cache + @memory.cache() def call_image_generation_api(prompt: str, size: str): import openai diff --git a/pyproject.toml b/pyproject.toml index b989843b..b6496eb0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,9 +25,9 @@ classifiers = [ ] dependencies = [ "jinja2", - "joblib", "numpy", "pillow", + "perscache", "pydantic", "scipy", ] @@ -59,6 +59,7 @@ module = [ "joblib", "numpy.*", "openai", + "perscache.*", "PIL", "PIL.Image", "pydantic", diff --git a/tests/test_cache.py b/tests/test_cache.py index 9fd21ac8..6d81ee85 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,7 +1,8 @@ import os import tempfile +from pathlib import Path -import joblib +import perscache import pytest @@ -34,25 +35,26 @@ def test_cache(refresh_environment): import outlines memory = outlines.cache.get() - assert memory.location == tempdir + assert memory.storage.location == Path(tempdir) yield memory - memory.clear() + memory.storage.clear() def test_get_cache(test_cache): import outlines memory = outlines.cache.get() - assert isinstance(memory, joblib.Memory) + assert isinstance(memory, perscache.Cache) + assert isinstance(memory.storage, perscache.storage.LocalFileStorage) - # If the cache is enable then the size + # If the cache is enabled then the size # of `store` 
should not increase the # second time `f` is called. store = list() - @memory.cache + @memory.cache() def f(x): store.append(1) return x @@ -79,7 +81,7 @@ def test_disable_cache(test_cache): # `f` is called. store = list() - @memory.cache + @memory.cache() def f(x): store.append(1) return x @@ -109,6 +111,6 @@ def f(x): # The size of `store` should increase if we call `f` # after clearing the cache. - test_cache.clear() + test_cache.storage.clear() f(1) assert len(store) == store_size + 1 From 71791458fb9e8d0030da200c704894677e7fb3c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 16:31:54 +0200 Subject: [PATCH 108/734] Add documentation to `outlines.text.function` --- outlines/text/functions.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/outlines/text/functions.py b/outlines/text/functions.py index dfbba774..ae1d2135 100644 --- a/outlines/text/functions.py +++ b/outlines/text/functions.py @@ -10,6 +10,25 @@ @dataclass class function: + """Represents a function that uses a language model to generate its output. + + When called, the `function` instance passes the arguments to the prompt + function, the rendered prompt is passed to the language model, and its + result to an (optional) validation function. + + Attributes + ---------- + model + A function that takes a string and returns a string that contains the + model's return value. + prompt + A prompt-generating function. + validator + A function that takes the output of the language model, parses it and + returns it in a normalized format. + + """ + model: Callable prompt: Callable validator: Optional[Union[Callable, BaseModel]] = None From 2e9a6d78382be565b755ee16eba43e42229187ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 16:53:34 +0200 Subject: [PATCH 109/734] Allow to ask several samples from the OpenAI completion API --- outlines/models/openai.py | 65 ++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index cf9d9183..8eb114a2 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -51,10 +51,11 @@ def OpenAITextCompletion( def call_completion_api( model: str, prompt: str, - stop_sequences: Tuple[str], - logit_bias: Dict[str, int], max_tokens: int, temperature: float, + stop_sequences: Tuple[str], + logit_bias: Dict[str, int], + num_samples: int, ): import openai @@ -65,28 +66,35 @@ def call_completion_api( max_tokens=max_tokens, stop=stop_sequences, logit_bias=logit_bias, + n=num_samples, ) return response - def generate(prompt: str, *, stop_at=None, is_in=None): + def generate(prompt: str, *, samples=1, stop_at=None, is_in=None): if stop_at is not None: stop_at = tuple(stop_at) if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return generate_choice(prompt, is_in) + return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at) + return generate_base(prompt, stop_at, samples) - def generate_base(prompt: str, stop_at: Optional[Tuple[str]]) -> str: - response = call_completion_api( - model_name, prompt, stop_at, {}, max_tokens, temperature + def generate_base(prompt: str, stop_at: Optional[Tuple[str]], samples) -> str: + responses = call_completion_api( + model_name, prompt, max_tokens, temperature, stop_at, {}, samples ) - return response["choices"][0]["text"] - def 
generate_choice(prompt: str, is_in: List[str]) -> str: + if samples == 1: + results = responses["choices"][0]["text"] + else: + results = [responses["choices"][i]["text"] for i in range(samples)] + + return results + + def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: """Generate a a sequence that must be one of many options. We tokenize every choice, iterate over the token lists, create a mask @@ -116,7 +124,7 @@ def generate_choice(prompt: str, is_in: List[str]) -> str: break response = call_completion_api( - model_name, prompt, None, mask, 1, temperature + model_name, prompt, 1.0, temperature, None, mask, samples ) decoded.append(response["choices"][0]["text"]) prompt = prompt + "".join(decoded) @@ -157,44 +165,53 @@ def OpenAIChatCompletion( def call_chat_completion_api( model: str, messages: List[Dict[str, str]], - stop_sequences: Tuple[str], - logit_bias: Dict[str, int], max_tokens: int, temperature: float, + stop_sequences: Tuple[str], + logit_bias: Dict[str, int], + num_samples: int, ): import openai response = openai.ChatCompletion.create( model=model, messages=messages, - temperature=temperature, max_tokens=max_tokens, + temperature=temperature, stop=stop_sequences, logit_bias=logit_bias, + n=num_samples, ) return response - def generate(prompt: str, *, stop_at=None, is_in=None): + def generate(prompt: str, *, samples: int = 1, stop_at=None, is_in=None): if stop_at is not None: stop_at = tuple(stop_at) if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return generate_choice(prompt, is_in) + return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at) + return generate_base(prompt, stop_at, samples) - def generate_base(query: str, stop_at: Optional[Tuple[str]]) -> str: + def generate_base(query: str, stop_at: Optional[Tuple[str]], samples: int) -> str: messages = [{"role": "user", "content": query}] - response = call_chat_completion_api( - model_name, messages, stop_at, {}, max_tokens, temperature + responses = call_chat_completion_api( + model_name, messages, max_tokens, temperature, stop_at, {}, samples ) - answer = response["choices"][0]["message"]["content"] - return answer - def generate_choice(prompt: str, is_in=List[str]) -> str: + if samples == 1: + results = responses["choices"][0]["message"]["content"] + else: + results = [ + responses["choices"][i]["message"]["content"] for i in range(samples) + ] + + return results + + def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: """Generate a a sequence that must be one of many options. 
We tokenize every choice, iterate over the token lists, create a mask @@ -225,7 +242,7 @@ def generate_choice(prompt: str, is_in=List[str]) -> str: messages = [{"role": "user", "content": prompt}] response = call_chat_completion_api( - model_name, messages, None, mask, 1, temperature + model_name, messages, max_tokens, temperature, None, mask, 1 ) decoded.append(response["choices"][0]["message"]["content"]) prompt = prompt + "".join(decoded) From 730e3221d3d13cf8d43440ac20c381030e33ea7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 17:37:32 +0200 Subject: [PATCH 110/734] Ask several samples from OpenAI image generation API --- outlines/models/openai.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 8eb114a2..a6cf72fb 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -341,21 +341,28 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): @error_handler @memory.cache() - def call_image_generation_api(prompt: str, size: str): + def call_image_generation_api(prompt: str, size: str, samples: int): import openai response = openai.Image.create( - prompt=prompt, size=size, response_format="b64_json" + prompt=prompt, size=size, n=samples, response_format="b64_json" ) return response - def generate(prompt: str) -> PILImage: - api_response = call_image_generation_api(prompt, size) - response = api_response["data"][0]["b64_json"] - img = Image.open(BytesIO(base64.b64decode(response))) + def generate(prompt: str, samples: int = 1) -> PILImage: + api_response = call_image_generation_api(prompt, size, samples) - return img + if samples == 1: + response = api_response["data"][0]["b64_json"] + return Image.open(BytesIO(base64.b64decode(response))) + + images = [] + for i in range(samples): + response = api_response["data"][i]["b64_json"] + images.append(Image.open(BytesIO(base64.b64decode(response)))) + + return images return generate From 7183d08e4d17f1f6d4e74be368faead6e87eb59b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 21:25:40 +0200 Subject: [PATCH 111/734] Take different samples from `transformers` library --- outlines/models/hf_transformers.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index bbcaa1aa..4c3cadd6 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -44,15 +44,17 @@ def HuggingFaceCompletion( if temperature is None: temperature = 1.0 - def call(prompt: str) -> str: - return call_model_generate_method(model_name, prompt, max_tokens, temperature) + def call(prompt: str, samples: int = 1) -> str: + return call_model_generate_method( + model_name, prompt, max_tokens, temperature, samples + ) return call @memory.cache def call_model_generate_method( - model_name: str, prompt: str, max_tokens: int, temperature: float + model_name: str, prompt: str, max_tokens: int, temperature: float, samples: int ) -> str: import torch from transformers import AutoModelForCausalLM, AutoTokenizer @@ -72,8 +74,14 @@ def call_model_generate_method( temperature=temperature, max_new_tokens=max_tokens, pad_token_id=tokenizer.eos_token_id, + num_return_sequences=samples, ) new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] new_tokens = new_tokens.squeeze() - return tokenizer.decode(new_tokens, skip_special_tokens=True) + if 
samples == 1: + results = tokenizer.decode(new_tokens, skip_special_tokens=True) + else: + results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) + + return results From 323d1c5773db600e07a81d54b9b8c2c65bc4fa31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 22:54:51 +0200 Subject: [PATCH 112/734] Take different samples from the `diffusers` library --- outlines/models/hf_diffusers.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 153e3d11..57375094 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -16,12 +16,16 @@ def HuggingFaceDiffuser(model_name: str) -> PILImage: """ - def call(prompt: str) -> str: - return call_stable_diffusion_pipeline(model_name, prompt) + def call(prompt: str, samples: int = 1) -> str: + return call_stable_diffusion_pipeline(model_name, prompt, samples) + + return call @memory.cache() -def call_stable_diffusion_pipeline(model_name: str, prompt: str) -> PILImage: +def call_stable_diffusion_pipeline( + model_name: str, prompt: str, samples: int +) -> PILImage: """Build and call the Stable Diffusion pipeline.""" import torch from diffusers import StableDiffusionPipeline @@ -29,6 +33,6 @@ def call_stable_diffusion_pipeline(model_name: str, prompt: str) -> PILImage: pipe = StableDiffusionPipeline.from_pretrained(model_name) if torch.cuda.is_available(): pipe = pipe.to("cuda") - image = pipe(prompt).images[0] + image = pipe(prompt, num_images_per_prompt=samples).images[0] return image From d28498a44659043259cce80d4195bea9e9278856 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 10 May 2023 22:56:33 +0200 Subject: [PATCH 113/734] Add self-consistency example --- examples/self-consistency.py | 74 ++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 examples/self-consistency.py diff --git a/examples/self-consistency.py b/examples/self-consistency.py new file mode 100644 index 00000000..1a3a3bb7 --- /dev/null +++ b/examples/self-consistency.py @@ -0,0 +1,74 @@ +import re + +import numpy as np + +import outlines.models as models +import outlines.text as text + +examples = [ + { + "question": "There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?", + "answer": "We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.", + }, + { + "question": "If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?", + "answer": "There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.", + }, + { + "question": "Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?", + "answer": "Leah had 32 chocolates and Leah’s sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.", + }, + { + "question": "Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?", + "answer": "Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. 
The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.", + }, + { + "question": "Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?", + "answer": "He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.", + }, + { + "question": "There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?", + "answer": "There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.", + }, + { + "question": "Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?", + "answer": "Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.", + }, + { + "question": "Olivia has $23. She bought five bagels for $3 each. How much money does she have left?", + "answer": "She bought 5 bagels for $3 each. This means she spent 5 * 3 = 15 dollars. She had 23 dollars, so now she has 23 - 15 = 8 dollars. The answer is 8.", + }, +] + +question = "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?" + + +@text.prompt +def few_shots(question, examples): + """ + {% for example in examples %} + Q: {{ example.question }} + A: {{ example.answer }} + {% endfor %} + Q: {{ question }} + A: + """ + + +model = models.text_completion.openai("text-davinci-003", max_tokens=128) +prompt = few_shots(question, examples) +answers = model(prompt, samples=100) + +digits = [] +for answer in answers: + try: + match = re.findall(r"\d+", answer)[-1] + if match is not None: + digit = int(match) + digits.append(digit) + except IndexError: + print(f"Could not parse the completion: '{answer}'") + +unique_digits, counts = np.unique(digits, return_counts=True) +results = {d: c for d, c in zip(unique_digits, counts)} +print(results) From 50b7e01d5a8573fd4b563fd112db3d7df27ea39f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 11 May 2023 09:07:12 +0200 Subject: [PATCH 114/734] Rename self-consistency example --- examples/{self-consistency.py => self_consistency.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{self-consistency.py => self_consistency.py} (100%) diff --git a/examples/self-consistency.py b/examples/self_consistency.py similarity index 100% rename from examples/self-consistency.py rename to examples/self_consistency.py From 36ce95ac8f73151e0be019870dd27e40e1a65795 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 3 May 2023 10:19:46 +0200 Subject: [PATCH 115/734] Allow user to limit completion to `int` type with OpenAI --- outlines/models/openai.py | 64 ++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index a6cf72fb..ead55053 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -71,20 +71,31 @@ def call_completion_api( return response - def generate(prompt: str, *, samples=1, stop_at=None, is_in=None): + def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): + import tiktoken + if stop_at is not None: stop_at = tuple(stop_at) 
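+ # When a `type` constraint is requested, build a logit-bias mask so that only matching tokens can be sampled.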
mask = {} + if type == "int": + encoder = tiktoken.encoding_for_model(model_name) + mask = create_int_mask(encoder) + if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") + elif is_in is not None and mask != {}: + raise TypeError("You cannot set `is_in` and `mask` at the same time.") elif is_in is not None: return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at, samples) + return generate_base(prompt, stop_at, samples, mask) - def generate_base(prompt: str, stop_at: Optional[Tuple[str]], samples) -> str: + def generate_base( + prompt: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] + ) -> str: responses = call_completion_api( - model_name, prompt, max_tokens, temperature, stop_at, {}, samples + model_name, prompt, max_tokens, temperature, stop_at, mask, samples ) if samples == 1: @@ -106,7 +117,7 @@ def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: import tiktoken assert is_in is not None - tokenizer = tiktoken.get_encoding("p50k_base") + tokenizer = tiktoken.encoding_for_model(model_name) encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] decoded: List[str] = [] @@ -124,7 +135,7 @@ def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: break response = call_completion_api( - model_name, prompt, 1.0, temperature, None, mask, samples + model_name, prompt, 1, temperature, None, mask, samples ) decoded.append(response["choices"][0]["text"]) prompt = prompt + "".join(decoded) @@ -185,21 +196,32 @@ def call_chat_completion_api( return response - def generate(prompt: str, *, samples: int = 1, stop_at=None, is_in=None): + def generate(prompt: str, *, samples: int = 1, stop_at=None, is_in=None, type=None): + import tiktoken + if stop_at is not None: stop_at = tuple(stop_at) + mask = {} + if type == "int": + encoder = tiktoken.encoding_for_model(model_name) + mask = create_int_mask(encoder) + if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") + elif is_in is not None and mask is not None: + raise TypeError("You cannot set `is_in` and `mask` at the same time.") elif is_in is not None: return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at, samples) + return generate_base(prompt, stop_at, mask) - def generate_base(query: str, stop_at: Optional[Tuple[str]], samples: int) -> str: + def generate_base( + query: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] + ) -> str: messages = [{"role": "user", "content": query}] responses = call_chat_completion_api( - model_name, messages, max_tokens, temperature, stop_at, {}, samples + model_name, messages, max_tokens, temperature, stop_at, mask, samples ) if samples == 1: @@ -223,7 +245,7 @@ def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: import tiktoken assert is_in is not None - tokenizer = tiktoken.get_encoding("cl100k_base") + tokenizer = tiktoken.encoding_for_model(model_name) encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] decoded: List[str] = [] @@ -399,3 +421,23 @@ def call(*args, **kwargs): raise e return call + + +def create_int_mask(encoder): + """Create an exclusive mask for digit tokens. + + #TODO: I am not a token expert, and I may be missing + # something by only looking for strictly digit keys. 
+ """ + int_token_ids = [] + + tokens = encoder._mergeable_ranks + for token, token_id in tokens.items(): + if all([c.isdigit() for c in encoder.decode([token_id])]): + int_token_ids.append(token_id) + + # TODO: This is a hack because OpenAI's API does not + # allow more than 300 entries for `logit_bias` + mask = {int_token_ids[i]: 100 for i in range(300)} + + return mask From 0323c0b6e05a25451ee94ad783e87ca11d66ee65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 3 May 2023 10:24:27 +0200 Subject: [PATCH 116/734] Allow user to limit generation to `float `type` with OpenAI --- outlines/models/openai.py | 174 ++++++++++++++++++++++++++------------ 1 file changed, 118 insertions(+), 56 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index ead55053..67417b2b 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,6 +1,7 @@ """Integration with OpenAI's API.""" import base64 import os +import warnings from io import BytesIO from typing import Callable, Dict, List, Optional, Tuple, Union @@ -78,13 +79,13 @@ def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): stop_at = tuple(stop_at) mask = {} - if type == "int": + if type is not None: encoder = tiktoken.encoding_for_model(model_name) - mask = create_int_mask(encoder) + mask = create_type_mask(type, encoder) if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") - elif is_in is not None and mask != {}: + elif is_in is not None and len(mask) > 0: raise TypeError("You cannot set `is_in` and `mask` at the same time.") elif is_in is not None: return generate_choice(prompt, is_in, samples) @@ -105,7 +106,9 @@ def generate_base( return results - def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: + def generate_choice( + prompt: str, is_in: List[str], samples: int + ) -> Union[List[str], str]: """Generate a a sequence that must be one of many options. 
We tokenize every choice, iterate over the token lists, create a mask @@ -120,27 +123,34 @@ def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: tokenizer = tiktoken.encoding_for_model(model_name) encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] - decoded: List[str] = [] - for i in range(max([len(word) for word in encoded])): - mask = {} - for word, tokenized_word in zip(is_in, encoded): - if not word.startswith("".join(decoded)): - continue - try: - mask[tokenized_word[i]] = 100 - except IndexError: - pass + decoded_samples = [] + for _ in range(samples): + decoded: List[str] = [] + for i in range(max([len(word) for word in encoded])): + mask = {} + for word, tokenized_word in zip(is_in, encoded): + if not word.startswith("".join(decoded)): + continue + try: + mask[tokenized_word[i]] = 100 + except IndexError: + pass + + if len(mask) == 0: + break + + response = call_completion_api( + model_name, prompt, 1, temperature, None, mask, samples + ) + decoded.append(response["choices"][0]["text"]) + prompt = prompt + "".join(decoded) + + decoded_samples.append("".join(decoded)) - if len(mask) == 0: - break - - response = call_completion_api( - model_name, prompt, 1, temperature, None, mask, samples - ) - decoded.append(response["choices"][0]["text"]) - prompt = prompt + "".join(decoded) + if samples == 1: + return decoded_samples[0] - return "".join(decoded) + return decoded_samples return generate @@ -196,25 +206,32 @@ def call_chat_completion_api( return response - def generate(prompt: str, *, samples: int = 1, stop_at=None, is_in=None, type=None): + def generate( + prompt: str, + *, + samples: int = 1, + stop_at=None, + is_in=None, + type: Optional[str] = None, + ): import tiktoken if stop_at is not None: stop_at = tuple(stop_at) mask = {} - if type == "int": + if type is not None: encoder = tiktoken.encoding_for_model(model_name) - mask = create_int_mask(encoder) + mask = create_type_mask(type, encoder) if is_in is not None and stop_at is not None: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") - elif is_in is not None and mask is not None: + elif is_in is not None and len(mask) > 0: raise TypeError("You cannot set `is_in` and `mask` at the same time.") elif is_in is not None: return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at, mask) + return generate_base(prompt, stop_at, samples, mask) def generate_base( query: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] @@ -233,7 +250,9 @@ def generate_base( return results - def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: + def generate_choice( + prompt: str, is_in: List[str], samples: int + ) -> Union[List[str], str]: """Generate a a sequence that must be one of many options. 
We tokenize every choice, iterate over the token lists, create a mask @@ -248,28 +267,35 @@ def generate_choice(prompt: str, is_in: List[str], samples: int) -> str: tokenizer = tiktoken.encoding_for_model(model_name) encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] - decoded: List[str] = [] - for i in range(max([len(word) for word in encoded])): - mask = {} - for word, tokenized_word in zip(is_in, encoded): - if not word.startswith("".join(decoded)): - continue - try: - mask[tokenized_word[i]] = 100 - except IndexError: - pass - - if len(mask) == 0: - break - - messages = [{"role": "user", "content": prompt}] - response = call_chat_completion_api( - model_name, messages, max_tokens, temperature, None, mask, 1 - ) - decoded.append(response["choices"][0]["message"]["content"]) - prompt = prompt + "".join(decoded) - - return "".join(decoded) + decoded_samples = [] + for _ in range(samples): + decoded: List[str] = [] + for i in range(max([len(word) for word in encoded])): + mask = {} + for word, tokenized_word in zip(is_in, encoded): + if not word.startswith("".join(decoded)): + continue + try: + mask[tokenized_word[i]] = 100 + except IndexError: + pass + + if len(mask) == 0: + break + + messages = [{"role": "user", "content": prompt}] + response = call_chat_completion_api( + model_name, messages, 1, temperature, None, mask, 1 + ) + decoded.append(response["choices"][0]["message"]["content"]) + prompt = prompt + "".join(decoded) + + decoded_samples.append("".join(decoded)) + + if samples == 1: + return decoded_samples[0] + + return decoded_samples return generate @@ -424,11 +450,12 @@ def call(*args, **kwargs): def create_int_mask(encoder): - """Create an exclusive mask for digit tokens. + """Create an exclusive mask for digit tokens.""" + warnings.warn( + "The OpenAI API only allows for limited type control; results may not be accurate", + UserWarning, + ) - #TODO: I am not a token expert, and I may be missing - # something by only looking for strictly digit keys. - """ int_token_ids = [] tokens = encoder._mergeable_ranks @@ -438,6 +465,41 @@ def create_int_mask(encoder): # TODO: This is a hack because OpenAI's API does not # allow more than 300 entries for `logit_bias` - mask = {int_token_ids[i]: 100 for i in range(300)} + special_tokens = encoder._special_tokens + mask = {special_tokens["<|endoftext|>"]: 100} + mask.update({int_token_ids[i]: 100 for i in range(300 - len(special_tokens))}) + + return mask + + +def create_float_mask(encoder): + """Create an exclusive mask for digit tokens.""" + warnings.warn( + "The OpenAI API only allows for limited type control; results may not be accurate", + UserWarning, + ) + + int_token_ids = [] + + tokens = encoder._mergeable_ranks + for token, token_id in tokens.items(): + if all([c.isdigit() or c == "." 
for c in encoder.decode([token_id])]): + int_token_ids.append(token_id) + + # TODO: This is a hack because OpenAI's API does not + # allow more than 300 entries for `logit_bias` + special_tokens = encoder._special_tokens + mask = {special_tokens["<|endoftext|>"]: 100} + mask.update({int_token_ids[i]: 100 for i in range(300 - len(special_tokens))}) return mask + + +type_to_mask = { + "float": create_float_mask, + "int": create_int_mask, +} + + +def create_type_mask(type: str, encoder): + return type_to_mask[type](encoder) From d21c6c71dad48f82da3e76af98b79696c7cafd17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3%A9mi=20Louf?= Date: Wed, 3 May 2023 11:54:27 +0200 Subject: [PATCH 117/734] Allow user to limit generation to "int" and "float" with HF --- outlines/models/hf_transformers.py | 126 ++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 4 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 4c3cadd6..b225a285 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -44,17 +44,22 @@ def HuggingFaceCompletion( if temperature is None: temperature = 1.0 - def call(prompt: str, samples: int = 1) -> str: + def call(prompt: str, *, samples: int = 1, type: Optional[str] = None) -> str: return call_model_generate_method( - model_name, prompt, max_tokens, temperature, samples + model_name, prompt, max_tokens, temperature, samples, type ) return call -@memory.cache +@memory.cache() def call_model_generate_method( - model_name: str, prompt: str, max_tokens: int, temperature: float, samples: int + model_name: str, + prompt: str, + max_tokens: int, + temperature: float, + samples: int, + type: str, ) -> str: import torch from transformers import AutoModelForCausalLM, AutoTokenizer @@ -64,6 +69,16 @@ def call_model_generate_method( prompt_tokens = tokenizer(prompt, return_tensors="pt") + logit_processor, stopping_criterion = None, None + if type is not None: + if samples > 1: + raise NotImplementedError( + "It is currently not possible to control the generation of several samples with the `transformers` integration" + ) + logit_processor, stopping_criterion = create_type_mask( + type, tokenizer, prompt_tokens["input_ids"] + ) + if torch.cuda.is_available(): model = model.to("cuda") prompt_tokens = prompt_tokens.to("cuda") @@ -75,6 +90,8 @@ def call_model_generate_method( temperature=temperature, max_new_tokens=max_tokens, pad_token_id=tokenizer.eos_token_id, num_return_sequences=samples, + logits_processor=[logit_processor], + stopping_criteria=[stopping_criterion], ) new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] new_tokens = new_tokens.squeeze() @@ -85,3 +102,104 @@ def call_model_generate_method( results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) return results + + +def create_int_mask(tokenizer, prompt_tokens): + """TODO: Make sure that we catch all cases.""" + import torch + + num_prompt_tokens = prompt_tokens.shape[-1] + + mask = torch.zeros(len(tokenizer), dtype=torch.bool) + + for token, token_id in tokenizer.get_vocab().items(): + are_all_digits = all([c.isdigit() for c in token]) + if are_all_digits: + mask[token_id] = True + + mask[tokenizer.eos_token_id] = True + + def processor(input_ids, scores): + expanded_mask = mask.expand_as(scores) + scores[~expanded_mask] = -float("inf") + return scores + + def stopping_criterion(input_ids, _): + decoded_input = tokenizer.decode( + input_ids[0, num_prompt_tokens:], skip_special_tokens=True + ) + is_starting_new_sequence 
= all([c.isdigit() for c in decoded_input[:-1]]) and ( + decoded_input[-1] == " " + or decoded_input[-1] == "\n" + or decoded_input[-1] == "\r" + ) + if len(decoded_input) > 1 and is_starting_new_sequence: + return True + + return False + + return processor, stopping_criterion + + +def create_float_mask(tokenizer, prompt_tokens, decimals=3): + """TODO: Make sure that we catch all cases.""" + import torch + + num_prompt_tokens = prompt_tokens.shape[-1] + + mask = torch.zeros(len(tokenizer), dtype=torch.bool) + + for token, token_id in tokenizer.get_vocab().items(): + is_valid_float_or_int = ( + all([c.isdigit() or c == "." for c in token]) and token.count(".") <= 1 + ) + if is_valid_float_or_int: + mask[token_id] = True + + mask[tokenizer.eos_token_id] = True + + def processor(input_ids, scores): + expanded_mask = mask.expand_as(scores) + scores[~expanded_mask] = -float("inf") + return scores + + def stopping_criterion(input_ids, _): + decoded_input = tokenizer.decode( + input_ids[0, num_prompt_tokens:], skip_special_tokens=True + ) + if decoded_input.count(".") > 1: + return True + + if ( + decoded_input.count(".") == 1 + and len(decoded_input.strip().split(".")[1]) > decimals + ): + return True + + if len(decoded_input) > 1: + is_starting_new_sequence = all( + [c.isdigit() for c in decoded_input[:-1]] + ) and ( + decoded_input[-1] == " " + or decoded_input[-1] == "\n" + or decoded_input[-1] == "\r" + ) + if is_starting_new_sequence: + return True + + return False + + return processor, stopping_criterion + + +type_to_mask = { + "float": create_float_mask, + "int": create_int_mask, +} + + +def create_type_mask(type, tokenizer, prompt_tokens): + if type not in ["int", "float"]: + raise NotImplementedError(f"Cannot restrict the generation to type {type}") + + return type_to_mask[type](tokenizer, prompt_tokens) From 9d166d5c1315d76c70b7e2ee60d2be4e8b416569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3%A9mi=20Louf?= Date: Wed, 3 May 2023 10:53:01 +0200 Subject: [PATCH 118/734] Add controlled generation to the README --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 79bf85dc..c4255d95 100644 --- a/README.md +++ b/README.md @@ -246,7 +246,17 @@ tell_a_joke(Joke) # Controlled generation -Outlines offers mechanisms to specify high-level constraints on text generation. By passing `stop_at` to the model call, the user can stop the generation once a particular word or sequence of symbols is reached. By passing `is_in` to the model call, the user can constrain the model to generate an answer chosen from a set of possible answers. 
+Outlines offers mechanisms to specify high-level constraints on text generation: + +- `stop_at` stops the generation once a particular word or sequence of symbols has been generated; +- `is_in` constrains the model to generate an answer chosen from a set of possible answers; +- `type` constrains the model's output to either an `"int"` or a `"float"`; + +Coming: + +- Ability to constrain the output to JSON with a given structure; +- Ability to constrain the output to a list; +- Ability to constrain the output to valid Python code; # Examples From 13324e5abe119b5b280635f9e13ee8f60e9f9ae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 12 May 2023 13:44:54 +0200 Subject: [PATCH 119/734] Remove image generation decorator --- README.md | 39 --------------------------------------- outlines/__init__.py | 7 ++----- outlines/image.py | 42 ------------------------------------------ 3 files changed, 2 insertions(+), 86 deletions(-) delete mode 100644 outlines/image.py diff --git a/README.md b/README.md index c4255d95..0337b0e6 100644 --- a/README.md +++ b/README.md @@ -141,45 +141,6 @@ joke_ppt(Joke) # } ``` -## Text completion - -Prompts are often attached to a given model and specific settings, but this can -be hard to find in codebases. Following this logic, we abstract calls to any -model that takes prompts as an input by decorating template functions: - -``` python -import outlines.text as text - - -@text.completion("openai/text-davinci-003", stop_at=["\n"]) -def few_shot_examples(question, examples): - """You are a question answering AI. - - {% for example in examples %} - QUESTION: {{ example.question }} - ANSWER: {{ example.answer }} - {% endfor %} - - QUESTION: {{ question }} - Let's think step by step. - - """ - -result, completed = few_shot_examples(question, examples) -``` - -## Image generation - -A similar syntax can be used with image generation models: - -``` python -import outlines.image as image - - -@image.generation("hf/stabilityai/stable-diffusion-2.1") -def generate(subject, location): - "A photo of a {{ subject }} riding a horse in {{ location }}."
-``` ## Natural language functions diff --git a/outlines/__init__.py b/outlines/__init__.py index ff7b0fa0..92838576 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,10 +1,7 @@ """Outlines is a Generative Model Programming Framework.""" -from outlines.image import generation -from outlines.text import prompt, render +import outlines.cache as cache +from outlines.text import prompt __all__ = [ - "completion", - "generation", "prompt", - "render", ] diff --git a/outlines/image.py b/outlines/image.py deleted file mode 100644 index 07b256bc..00000000 --- a/outlines/image.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Any, Callable, Dict, List - -from PIL.Image import Image as PILImage - -import outlines.models as models -import outlines.text as text - - -def generation(model_path: str) -> Callable: - """Decorator that allows to simplify calls to image generation models.""" - - if "/" not in model_path: - raise ValueError("Model names must be in the form 'provider_name/model_name'") - - provider_name = model_path.split("/")[0] - model_name = model_path[len(provider_name) + 1 :] - - try: - model_cls = getattr(models.image_generation, provider_name) - except KeyError: - raise ValueError(f"The model provider {provider_name} is not available.") - - generative_model = model_cls(model_name) - - def decorator(fn: Callable): - prompt_fn = text.prompt(fn) - - def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> PILImage: - """Call the Diffuser with the rendered template. - - Returns - ------- - A `PIL.Image` instance that represents the generated image. - - """ - prompt = prompt_fn(*args, **kwargs) - result = generative_model(prompt) - return result - - return wrapper - - return decorator From 78e9e78309aabeb7543fd96f8160d26f98391b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 12 May 2023 14:17:00 +0200 Subject: [PATCH 120/734] Allow to disable caching The current use of caching does not allow the user to disable it should they want to. This PR changes this; to disable the cache one needs to call ``` import outlines outlines.disable_cache() ``` at the top of their script. 
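For reference, a short sketch of the full set of cache controls this patch exposes, using only the names exported below:

```
import outlines

cache = outlines.get_cache()   # the underlying perscache.Cache object
outlines.clear_cache()         # erase previously stored return values
outlines.disable_cache()       # recompute everything for this session
```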
--- examples/react.py | 3 --- outlines/__init__.py | 5 ++++- outlines/{cache.py => caching.py} | 15 ++++++++++----- outlines/models/hf_diffusers.py | 6 ++---- outlines/models/hf_transformers.py | 6 ++---- outlines/models/openai.py | 12 +++++------- tests/test_cache.py | 18 +++++++++--------- 7 files changed, 32 insertions(+), 33 deletions(-) rename outlines/{cache.py => caching.py} (88%) diff --git a/examples/react.py b/examples/react.py index ca18fdd3..76c17042 100644 --- a/examples/react.py +++ b/examples/react.py @@ -12,12 +12,9 @@ """ import requests # type: ignore -import outlines import outlines.models as models import outlines.text as text -outlines.cache.disable() - @text.prompt def build_reAct_prompt(question): diff --git a/outlines/__init__.py b/outlines/__init__.py index 92838576..a99de432 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,7 +1,10 @@ """Outlines is a Generative Model Programming Framework.""" -import outlines.cache as cache +from outlines.caching import clear_cache, disable_cache, get_cache from outlines.text import prompt __all__ = [ + "clear_cache", + "disable_cache", + "get_cache", "prompt", ] diff --git a/outlines/cache.py b/outlines/caching.py similarity index 88% rename from outlines/cache.py rename to outlines/caching.py index c3ed02d0..ecaa950c 100644 --- a/outlines/cache.py +++ b/outlines/caching.py @@ -1,4 +1,5 @@ import os +from typing import Callable from perscache import Cache, NoCache from perscache.serializers import JSONSerializer @@ -9,7 +10,11 @@ memory = Cache(serializer=JSONSerializer(), storage=LocalFileStorage(cache_dir)) -def get(): +def cache(fn: Callable): + return memory.cache()(fn) + + +def get_cache(): """Get the context object that contains previously-computed return values. The cache is used to avoid unnecessary computations and API calls, which can @@ -23,7 +28,7 @@ def get(): return memory -def disable(): +def disable_cache(): """Disable the cache for this session. 
Generative models output different results each time they are called when @@ -47,7 +52,7 @@ def disable(): memory = NoCache() -def clear(): +def clear_cache(): """Erase the cache completely.""" - cache = get() - cache.storage.clear() + global memory + memory.storage.clear() diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 57375094..b733557c 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -1,9 +1,7 @@ """Integration with HuggingFace's `diffusers` library.""" from PIL.Image import Image as PILImage -import outlines.cache as cache - -memory = cache.get() +from outlines.caching import cache def HuggingFaceDiffuser(model_name: str) -> PILImage: @@ -22,7 +20,7 @@ def call(prompt: str, samples: int = 1) -> str: return call -@memory.cache() +@cache def call_stable_diffusion_pipeline( model_name: str, prompt: str, samples: int ) -> PILImage: diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index b225a285..e83c191d 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,9 +1,7 @@ """Integration with HuggingFace's `transformers` library.""" from typing import Callable, Optional -import outlines.cache as cache - -memory = cache.get() +from outlines.caching import cache def HuggingFaceCompletion( @@ -52,7 +50,7 @@ def call(prompt: str, *, samples: int = 1, type: Optional[str] = None) -> str: return call -@memory.cache() +@cache def call_model_generate_method( model_name: str, prompt: str, diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 67417b2b..4c0cad26 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -9,7 +9,7 @@ from PIL import Image from PIL.Image import Image as PILImage -import outlines.cache as cache +from outlines.caching import cache __all__ = [ "OpenAITextCompletion", @@ -18,8 +18,6 @@ "OpenAIImageGeneration", ] -memory = cache.get() - def OpenAITextCompletion( model_name: str, @@ -48,7 +46,7 @@ def OpenAITextCompletion( """ @error_handler - @memory.cache() + @cache def call_completion_api( model: str, prompt: str, @@ -182,7 +180,7 @@ def OpenAIChatCompletion( """ @error_handler - @memory.cache() + @cache def call_chat_completion_api( model: str, messages: List[Dict[str, str]], @@ -344,7 +342,7 @@ def OpenAIEmbeddings(model_name: str): """ @error_handler - @memory.cache() + @cache def call_embeddings_api( model: str, input: str, @@ -388,7 +386,7 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): """ @error_handler - @memory.cache() + @cache def call_image_generation_api(prompt: str, size: str, samples: int): import openai diff --git a/tests/test_cache.py b/tests/test_cache.py index 6d81ee85..cc91eb60 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -34,10 +34,10 @@ def test_cache(refresh_environment): os.environ["OUTLINES_CACHE_DIR"] = tempdir import outlines - memory = outlines.cache.get() + memory = outlines.get_cache() assert memory.storage.location == Path(tempdir) - yield memory + yield outlines.caching.cache memory.storage.clear() @@ -45,7 +45,7 @@ def test_cache(refresh_environment): def test_get_cache(test_cache): import outlines - memory = outlines.cache.get() + memory = outlines.get_cache() assert isinstance(memory, perscache.Cache) assert isinstance(memory.storage, perscache.storage.LocalFileStorage) @@ -54,7 +54,7 @@ def test_get_cache(test_cache): # second time `f` is called. 
store = list() - @memory.cache() + @test_cache def f(x): store.append(1) return x @@ -73,15 +73,14 @@ def test_disable_cache(test_cache): """Make sure that we can disable the cache.""" import outlines - outlines.cache.disable() - memory = outlines.cache.get() + outlines.disable_cache() # If the cache is disabled then the size # of `store` should increase every time # `f` is called. store = list() - @memory.cache() + @test_cache def f(x): store.append(1) return x @@ -94,10 +93,11 @@ def f(x): def test_clear_cache(test_cache): """Make sure that we can clear the cache.""" + import outlines store = list() - @test_cache.cache + @test_cache def f(x): store.append(1) return x @@ -111,6 +111,6 @@ def f(x): # The size of `store` should increase if we call `f` # after clearing the cache. - test_cache.storage.clear() + outlines.clear_cache() f(1) assert len(store) == store_size + 1 From 4ecee0e16c978a984355b58734b26b4610eebe6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 12 May 2023 16:56:38 +0200 Subject: [PATCH 121/734] Pretty-print dict when passed to prompt --- outlines/text/prompts.py | 19 +++++++++++++++++-- tests/text/test_prompt.py | 13 ++++++++++++- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index 14b1beb4..635a5683 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -1,3 +1,4 @@ +import functools import inspect import json import re @@ -205,7 +206,7 @@ def render(template: str, **values: Optional[Dict[str, Any]]) -> str: env.filters["description"] = get_fn_description env.filters["source"] = get_fn_source env.filters["signature"] = get_fn_signature - env.filters["schema"] = get_pydantic_schema + env.filters["schema"] = get_schema jinja_template = env.from_string(cleaned_template) @@ -269,7 +270,21 @@ def get_fn_signature(fn: Callable): return signature -def get_pydantic_schema(model: type[BaseModel]): +@functools.singledispatch +def get_schema(model: Any): + raise NotImplementedError( + f"No schema rendering function defined for type {type(model)}." 
+ ) + + +@get_schema.register(dict) +def get_schema_dict(model: Dict): + """Return a pretty-printed dictionary""" + return json.dumps(model, indent=2) + + +@get_schema.register(type(BaseModel)) +def get_schema_pydantic(model: type[BaseModel]): """Return the schema of a Pydantic model.""" if not type(model) == type(BaseModel): raise TypeError("The `schema` filter only applies to Pydantic models.") diff --git a/tests/text/test_prompt.py b/tests/text/test_prompt.py index 07a551b0..d3f3698e 100644 --- a/tests/text/test_prompt.py +++ b/tests/text/test_prompt.py @@ -209,7 +209,7 @@ def source_ppt(fn): assert rendered == "def test_function_call(one, two=2):\n return one + two\n" -def test_prompt_response_model(): +def test_prompt_pydantic_response(): class SimpleResponse(BaseModel): one: str = Field(description="a description") two: str @@ -240,3 +240,14 @@ class ConvolutedResponse(BaseModel): prompt == '{\n "part_one": {\n "answer": "",\n "thought": {\n "one": "a description",\n "two": ""\n }\n },\n "part_two": {\n "one": "a description",\n "two": ""\n }\n}' ) + + +def test_prompt_dict_response(): + response = {"one": "a description", "two": ""} + + @text.prompt + def source_ppt(model): + "{{model | schema }}" + + prompt = source_ppt(response) + assert prompt == '{\n "one": "a description",\n "two": ""\n}' From 835e17096cc4832b933867f7b999c5eaf9454498 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 12 May 2023 17:26:21 +0200 Subject: [PATCH 122/734] Update the prompting documentation --- docs/source/reference/prompting.rst | 69 ++++++++++++++++++++++++----- 1 file changed, 59 insertions(+), 10 deletions(-) diff --git a/docs/source/reference/prompting.rst b/docs/source/reference/prompting.rst index 5f0b925e..73fbca88 100644 --- a/docs/source/reference/prompting.rst +++ b/docs/source/reference/prompting.rst @@ -1,10 +1,24 @@ Prompting ========= -Outlines provides a powerful domain-specific language to write and manage prompts, via what we call *prompt functions*. Prompt functions are Python functions that contain the prompt in their docstring; their arguments correspond to the variables used in the prompt. We use the Jinja library to render the prompts, with a few tweaks to make the prompt writing experience nicer. +Outlines provides a powerful domain-specific language to write and manage prompts, via what we call *prompt functions*. Prompt functions are Python functions that contain a template for the prompt in their docstring, and their arguments correspond to the variables used in the prompt. When called, a prompt function returns the template rendered with the values of the arguments: +.. code:: + + import outlines.text as text -One thus doesn't need extra abstraction to write a prompt with few-shot examples, Jinja can handle that: + @text.prompt + def greetings(name, question): + """Hello, {{ name }}! + {{ question }} + """ + + prompt = greetings("user", "How are you?") + # Hello, user! + # How are you? + + +Outlines uses the `Jinja templating engine `_ to render prompts, which allows to easily compose complex prompts. No need for extra abstractions to write a prompt with few-shot examples, Jinja can handle that: .. code:: @@ -23,14 +37,14 @@ One thus doesn't need extra abstraction to write a prompt with few-shot examples prompt = few_shots(question, examples, question) -The original template is still accessible by calling: -.. code:: +Please refer to the `Jinja documentation `_ for more information about the syntax of the templating language. 
The Jinja syntax is powerful, and we recommend you take some time to read its documentation if building your prompts requires complex logic involving, for instance, loops and conditionals. - prompt.template +Calling tools +~~~~~~~~~~~~~ -Outlines also provides a few utilities to simplify workflows that connect tools to LLMs (Toolformer, ViperGPT, AutoGPT). We noticed that the same information was always repeated: once when implementing the function, the second time when writing the instructions in the prompt. No need to do this with Outlines, information can be directly pulled from the function definition: +Several projects (e.g. `Toolformer `_, `ViperGPT `_, `AutoGPT `_, etc.) have shown that we can "teach" language models to use external functions by describing what these functions do in the prompt. In these projects the same information is often repeated twice: the function implementation, name, docstring, or arguments are copy-pasted in the prompt. This is cumbersome and error-prone; you can directly pull this information from within an Outlines prompt function: .. code:: @@ -53,20 +67,55 @@ Outlines also provides a few utilities to simplify workflows that connect tools {{ tool | source }} """ -The same goes for output validation: the code is implemented once when defining the parser, a second time when passing the format to the prompt: + tool_prompt("Can you do something?", my_tool) + # Can you do something? + # + # COMMANDS + # 1. my_tool: Tool description, args: arg1:str, arg2:int + # + # def my_tool(arg1: str, arg2: int): + # """Tool description. + # + # The rest of the docstring + # """ + # pass + + +Specify a response format +~~~~~~~~~~~~~~~~~~~~~~~~~ + +To build reliable chains with language models we often need to specify the format in which we would like them to return their response. Again, the information is often repeated twice: once when creating the parsing function, and once when writing the desired schema in the prompt. You can directly pull the JSON schema of a pydantic model, or pretty-print a dictionary, from within an Outlines prompt function: .. code:: from pydantic import BaseModel + from pydantic import BaseModel, Field import outlines.text as text class MyResponse(BaseModel): - field1: int + field1: int = Field(description="an int") field2: str @text.prompt def my_prompt(response_model): """{{ response_model | schema }}""" - -Please refer to the `Jinja documentation `_ for more information about the syntax of the templating language. + my_prompt(MyResponse) + # { + # "field1": "an int", + # "field2": "" + # } + + +.. code:: + + response = { + "field1": "", + "field2": "a string" + } + + my_prompt(response) + # { + # "field1": "", + # "field2": "a string" + # } From 76f616ec86f9097b10f4c577fc6773081ba24767 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 13:18:52 +0200 Subject: [PATCH 123/734] Use `tokenizer.decode` to retrieve token string We currently have non-digit tokens in the sequences output by the models when `type` is "int" or "float". We do not always get the same string when iterating over the vocabulary as when using `tokenizer.decode`; since the latter is used to decode the final sequence, we use it when creating the mask as well.
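To see why this matters, a minimal self-contained illustration; the token strings below are hypothetical, with the kind of marker characters stored by SentencePiece or byte-level BPE vocabularies:

```
# Raw vocabulary entries often carry surface markers (e.g. "▁" in
# SentencePiece, "Ġ" in byte-level BPE vocabularies) that disappear when the
# token id is decoded. The values below are hypothetical stand-ins for
# tokenizer.get_vocab() and tokenizer.decode(token_id).
vocab_entry = "▁123"  # hypothetical raw vocabulary string
decoded = "123"       # hypothetical result of tokenizer.decode(token_id)

assert not all(c.isdigit() for c in vocab_entry)  # the marker fails the digit test
assert all(c.isdigit() for c in decoded)          # the decoded form passes
```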
--- outlines/models/hf_transformers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index e83c191d..914ef172 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -111,6 +111,7 @@ def create_int_mask(tokenizer, prompt_tokens): mask = torch.zeros(len(tokenizer), dtype=torch.bool) for token, token_id in tokenizer.get_vocab().items(): + token = tokenizer.decode(token_id) are_all_digits = all([c.isdigit() for c in token]) if are_all_digits: mask[token_id] = True @@ -148,6 +149,7 @@ def create_float_mask(tokenizer, prompt_tokens, decimals=3): mask = torch.zeros(len(tokenizer), dtype=torch.bool) for token, token_id in tokenizer.get_vocab().items(): + token = tokenizer.decode(token_id) is_valid_float_or_int = ( all([c.isdigit() or c == "." for c in token]) and token.count(".") <= 1 ) From 61ed86029846790dbefd05c87f63636d86e4d543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 13:21:02 +0200 Subject: [PATCH 124/734] Disallow returning EOS token as first token with `type` --- outlines/models/hf_transformers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 914ef172..4dda0f13 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -116,9 +116,11 @@ def create_int_mask(tokenizer, prompt_tokens): if are_all_digits: mask[token_id] = True - mask[tokenizer.eos_token_id] = True + mask[tokenizer.eos_token_id] = False def processor(input_ids, scores): + if input_ids.shape[1] > num_prompt_tokens + 1: + mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) scores[~expanded_mask] = -float("inf") return scores @@ -156,9 +158,11 @@ def create_float_mask(tokenizer, prompt_tokens, decimals=3): if is_valid_float_or_int: mask[token_id] = True - mask[tokenizer.eos_token_id] = True + mask[tokenizer.eos_token_id] = False def processor(input_ids, scores): + if input_ids.shape[1] > num_prompt_tokens + 1: + mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) scores[~expanded_mask] = -float("inf") return scores From 95633a5e661391732cddb429cc61114e141c712e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 13:24:33 +0200 Subject: [PATCH 125/734] Add tests for transformers integration --- outlines/models/hf_transformers.py | 61 +++++++++++++++++++--------- pyproject.toml | 7 +++- tests/models/test_hf_transformers.py | 42 +++++++++++++++++++ 3 files changed, 88 insertions(+), 22 deletions(-) create mode 100644 tests/models/test_hf_transformers.py diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 4dda0f13..5a3c3f08 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,8 +1,12 @@ """Integration with HuggingFace's `transformers` library.""" -from typing import Callable, Optional +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple from outlines.caching import cache +if TYPE_CHECKING: + import torch + from transformers import PreTrainedTokenizerBase + def HuggingFaceCompletion( model_name: str, @@ -67,15 +71,19 @@ def call_model_generate_method( prompt_tokens = tokenizer(prompt, return_tensors="pt") - logit_processor, stopping_criterion = None, None + logit_processors: Optional[List[Callable]] = None + stopping_criteria: Optional[List[Callable]] = None + 
postprocessing: Callable = lambda x: x if type is not None: if samples > 1: raise NotImplementedError( "It is currently not possible to control the generation of several samples with the `transformers` integration" ) - logit_processor, stopping_criterion = create_type_mask( + logit_processor, stopping_criterion, postprocessing = create_type_mask( type, tokenizer, prompt_tokens["input_ids"] ) + logit_processors = [logit_processor] + stopping_criteria = [stopping_criterion] if torch.cuda.is_available(): model = model.to("cuda") @@ -88,29 +96,31 @@ def call_model_generate_method( max_new_tokens=max_tokens, pad_token_id=tokenizer.eos_token_id, num_return_sequences=samples, - logits_processor=[logit_processor], - stopping_criteria=[stopping_criterion], + logits_processor=logit_processors, + stopping_criteria=stopping_criteria, ) - new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] + 1 :] + new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] :] new_tokens = new_tokens.squeeze() if samples == 1: results = tokenizer.decode(new_tokens, skip_special_tokens=True) + results = postprocessing(results) else: results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) return results -def create_int_mask(tokenizer, prompt_tokens): - """TODO: Make sure that we catch all cases.""" +def create_int_mask( + tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" +) -> Tuple[Callable, Callable, Callable]: import torch num_prompt_tokens = prompt_tokens.shape[-1] mask = torch.zeros(len(tokenizer), dtype=torch.bool) - for token, token_id in tokenizer.get_vocab().items(): + for _, token_id in tokenizer.get_vocab().items(): token = tokenizer.decode(token_id) are_all_digits = all([c.isdigit() for c in token]) if are_all_digits: @@ -118,14 +128,14 @@ def create_int_mask(tokenizer, prompt_tokens): mask[tokenizer.eos_token_id] = False - def processor(input_ids, scores): + def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: if input_ids.shape[1] > num_prompt_tokens + 1: mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) scores[~expanded_mask] = -float("inf") return scores - def stopping_criterion(input_ids, _): + def stopping_criterion(input_ids: torch.Tensor, _) -> bool: decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True ) @@ -139,18 +149,24 @@ def stopping_criterion(input_ids, _): return False - return processor, stopping_criterion + def postprocessing(output: str) -> str: + return output + return processor, stopping_criterion, postprocessing -def create_float_mask(tokenizer, prompt_tokens, decimals=3): - """TODO: Make sure that we catch all cases.""" + +def create_float_mask( + tokenizer: "PreTrainedTokenizerBase", + prompt_tokens: "torch.Tensor", + decimals: int = 3, +) -> Tuple[Callable, Callable, Callable]: import torch num_prompt_tokens = prompt_tokens.shape[-1] mask = torch.zeros(len(tokenizer), dtype=torch.bool) - for token, token_id in tokenizer.get_vocab().items(): + for _, token_id in tokenizer.get_vocab().items(): token = tokenizer.decode(token_id) is_valid_float_or_int = ( all([c.isdigit() or c == "." 
for c in token]) and token.count(".") <= 1 @@ -160,14 +176,14 @@ def create_float_mask(tokenizer, prompt_tokens, decimals=3): mask[tokenizer.eos_token_id] = False - def processor(input_ids, scores): + def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: if input_ids.shape[1] > num_prompt_tokens + 1: mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) scores[~expanded_mask] = -float("inf") return scores - def stopping_criterion(input_ids, _): + def stopping_criterion(input_ids: torch.Tensor, _) -> bool: decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True ) @@ -193,16 +209,21 @@ def stopping_criterion(input_ids, _): return False - return processor, stopping_criterion + def postprocessing(output: str) -> str: + return output.rstrip(".") + + return processor, stopping_criterion, postprocessing -type_to_mask = { +type_to_mask: Dict[str, Callable] = { "float": create_float_mask, "int": create_int_mask, } -def create_type_mask(type, tokenizer, prompt_tokens): +def create_type_mask( + type: str, tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" +) -> Tuple[Callable, Callable, Callable]: if type not in ["int", "float"]: raise NotImplementedError(f"Cannot restrict the generation to type {type}") diff --git a/pyproject.toml b/pyproject.toml index b6496eb0..34924bce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,9 @@ dynamic = ["version"] [project.optional-dependencies] test = [ "pre-commit", - "pytest" + "pytest", + "torch", + "transformers" ] [tool.setuptools_scm] @@ -46,7 +48,8 @@ write_to = "outlines/_version.py" testpaths = ["tests"] filterwarnings = [ "error", - "ignore::FutureWarning:transformers.*" + "ignore::FutureWarning:transformers.*", + "ignore::UserWarning:torch.cuda.*" ] [tool.mypy] diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py new file mode 100644 index 00000000..3734072f --- /dev/null +++ b/tests/models/test_hf_transformers.py @@ -0,0 +1,42 @@ +import outlines + +outlines.disable_cache() + +import pytest # noqa + +from outlines.models.hf_transformers import HuggingFaceCompletion # noqa + +MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" + + +def test_samples(): + model = HuggingFaceCompletion(MODEL, max_tokens=10) + + answer = model("test", samples=1) + assert isinstance(answer, str) + + answer = model("test") + assert isinstance(answer, str) + + answers = model("test", samples=3) + assert isinstance(answers, list) + assert len(answers) == 3 + + +def test_type_int(): + model = HuggingFaceCompletion(MODEL, max_tokens=10) + answer = model("test", type="int") + int(answer) + + +def test_type_float(): + model = HuggingFaceCompletion(MODEL, max_tokens=10) + answer = model("test", type="float") + float(answer) + + +@pytest.mark.xfail +def test_type_multiple_samples(): + model = HuggingFaceCompletion(MODEL, max_tokens=10) + answer = model("test", type="int", samples=2) + int(answer) From aec7639f9d0cf3e8d28eda67c1309dc355f22435 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 14:10:00 +0200 Subject: [PATCH 126/734] Add tests for diffusers integration --- outlines/models/hf_diffusers.py | 18 +++++++++++------- pyproject.toml | 1 + tests/models/test_hf_diffusers.py | 18 ++++++++++++++++++ 3 files changed, 30 insertions(+), 7 deletions(-) create mode 100644 tests/models/test_hf_diffusers.py diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 
b733557c..83f80fd2 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -1,8 +1,6 @@ """Integration with HuggingFace's `diffusers` library.""" from PIL.Image import Image as PILImage -from outlines.caching import cache - def HuggingFaceDiffuser(model_name: str) -> PILImage: """Create a function that will call a stable diffusion pipeline. @@ -15,22 +13,28 @@ def HuggingFaceDiffuser(model_name: str) -> PILImage: """ def call(prompt: str, samples: int = 1) -> str: - return call_stable_diffusion_pipeline(model_name, prompt, samples) + results = call_stable_diffusion_pipeline(model_name, prompt, samples) + if samples == 1: + return results[0] + return results return call -@cache def call_stable_diffusion_pipeline( model_name: str, prompt: str, samples: int ) -> PILImage: - """Build and call the Stable Diffusion pipeline.""" + """Build and call the Stable Diffusion pipeline. + + We convert the returned image + """ import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained(model_name) if torch.cuda.is_available(): pipe = pipe.to("cuda") - image = pipe(prompt, num_images_per_prompt=samples).images[0] - return image + images = pipe(prompt, num_images_per_prompt=samples).images + + return images diff --git a/pyproject.toml b/pyproject.toml index 34924bce..f23cf653 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ testpaths = ["tests"] filterwarnings = [ "error", "ignore::FutureWarning:transformers.*", + "ignore::FutureWarning:diffusers.*", "ignore::UserWarning:torch.cuda.*" ] diff --git a/tests/models/test_hf_diffusers.py b/tests/models/test_hf_diffusers.py new file mode 100644 index 00000000..4068c4b6 --- /dev/null +++ b/tests/models/test_hf_diffusers.py @@ -0,0 +1,18 @@ +from PIL.Image import Image as PILImage + +from outlines.models.hf_diffusers import HuggingFaceDiffuser + +MODEL = "hf-internal-testing/tiny-stable-diffusion-torch" + + +def test_stable_diffusion(): + model = HuggingFaceDiffuser(MODEL) + + image = model("test") + assert isinstance(image, PILImage) + + images = model("test", samples=3) + assert isinstance(images, list) + assert len(images) == 3 + for img in images: + assert isinstance(image, PILImage) From 2f979a67d0809c6f6f782daf05fc938b7b769734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 18:40:20 +0200 Subject: [PATCH 127/734] Add `diffusers` test dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index f23cf653..f6e3f31c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dynamic = ["version"] [project.optional-dependencies] test = [ + "diffusers", "pre-commit", "pytest", "torch", From 364298e07c81fdb847e551e3fcd0c6acf3896893 Mon Sep 17 00:00:00 2001 From: brosand Date: Tue, 16 May 2023 10:30:11 -0400 Subject: [PATCH 128/734] Move `stop_at` parameter to completion call --- examples/meta_prompting.py | 14 +++++++------- examples/pick_odd_one_out.py | 6 ++---- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 0e06a966..dcfdad42 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -43,12 +43,12 @@ def determine_goal(question): def solve(memory): """{{memory}}. 
Let's begin.""" - complete = models.text_completion.openai(model_name, stop_at=["."]) + complete = models.text_completion.openai(model_name) prompt = determine_goal(question) - answer = complete(prompt) + answer = complete(prompt, stop_at=["."]) prompt = solve(prompt + answer) - answer = complete(prompt) + answer = complete(prompt, stop_at=["."]) completed = prompt + answer return completed @@ -82,11 +82,11 @@ def get_answer(question, expert, memory): {{question}} """ - complete_expert = models.text_completion.openai(model_name, stop_at=['"']) + complete_expert = models.text_completion.openai(model_name) complete_answer = models.text_completion.openai(model_name) prompt = find_expert(question) - expert = complete_expert(prompt) + expert = complete_expert(prompt, stop_at=['"']) prompt = get_answer(question, expert, prompt + expert) answer = complete_answer(prompt) completed = prompt + answer @@ -110,11 +110,11 @@ def get_answer(expert, memory): For instance, {{expert}} would answer """ - model_expert = models.text_completion.openai(model_name, stop_at=["\n", "."]) + model_expert = models.text_completion.openai(model_name) model_answer = models.text_completion.openai(model_name) prompt = find_expert(question) - expert = model_expert(prompt) + expert = model_expert(prompt, stop_at=["\n", "."]) prompt = get_answer(expert, prompt + expert) answer = model_answer(prompt) completed = prompt + answer diff --git a/examples/pick_odd_one_out.py b/examples/pick_odd_one_out.py index 5cd4b57c..d973c11c 100644 --- a/examples/pick_odd_one_out.py +++ b/examples/pick_odd_one_out.py @@ -29,14 +29,12 @@ def build_ooo_prompt(options): """ -reasoning_model = models.text_completion.openai( - "text-davinci-003", stop_at=["Pick the odd word", "So the odd one"] -) +reasoning_model = models.text_completion.openai("text-davinci-003") result_model = models.text_completion.openai("text-davinci-003") options = ["sea", "mountains", "plains", "sock"] prompt = build_ooo_prompt(options) -reasoning = reasoning_model(prompt) +reasoning = reasoning_model(prompt, stop_at=["Pick the odd word", "So the odd one"]) prompt += reasoning result = result_model(prompt) prompt += result From bbb8b0c2d1969831f1cff3f0f72023dd40489079 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 15:44:09 +0200 Subject: [PATCH 129/734] Generate within set of choices with `transformers` --- outlines/models/hf_transformers.py | 72 +++++++++++++++++++++++++++- tests/models/test_hf_transformers.py | 15 ++++++ 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 5a3c3f08..4b6886df 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -46,9 +46,16 @@ def HuggingFaceCompletion( if temperature is None: temperature = 1.0 - def call(prompt: str, *, samples: int = 1, type: Optional[str] = None) -> str: + def call( + prompt: str, + *, + samples: int = 1, + stop_at: Optional[List[str]] = None, + is_in: Optional[List[str]] = None, + type: Optional[str] = None, + ) -> str: return call_model_generate_method( - model_name, prompt, max_tokens, temperature, samples, type + model_name, prompt, max_tokens, temperature, samples, stop_at, is_in, type ) return call @@ -61,6 +68,8 @@ def call_model_generate_method( max_tokens: int, temperature: float, samples: int, + stop_at: List[str], + is_in: List[str], type: str, ) -> str: import torch @@ -79,11 +88,25 @@ def call_model_generate_method( raise NotImplementedError( "It 
is currently not possible to control the generation of several samples with the `transformers` integration" ) + if is_in is not None: + raise ValueError( + "You cannot both restrict to a set of choices with `is_in` and to a type with `type`" + ) logit_processor, stopping_criterion, postprocessing = create_type_mask( type, tokenizer, prompt_tokens["input_ids"] ) logit_processors = [logit_processor] stopping_criteria = [stopping_criterion] + elif is_in is not None: + if stop_at is not None: + raise ValueError( + "You cannot both restrict to a set of choices with `is_in` and set a stopping criterion" + ) + logit_processor, stopping_criterion, postprocessing = create_choice_mask( + is_in, tokenizer, prompt_tokens["input_ids"] + ) + logit_processors = [logit_processor] + stopping_criteria = [stopping_criterion] if torch.cuda.is_available(): model = model.to("cuda") @@ -111,6 +134,51 @@ def call_model_generate_method( return results +def create_choice_mask( + choices: List[str], + tokenizer: "PreTrainedTokenizerBase", + prompt_tokens: "torch.Tensor", +) -> Tuple[Callable, Callable, Callable]: + import torch + + num_prompt_tokens = prompt_tokens.shape[-1] + tokenized_choices = [tokenizer.encode(word) for word in choices] + + def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + output = input_ids[0, num_prompt_tokens:] + decoded_output = tokenizer.decode(output, skip_special_tokens=True) + + mask = torch.zeros(len(tokenizer), dtype=torch.bool) + for choice, tokens in zip(choices, tokenized_choices): + if not choice.startswith(decoded_output): + continue + else: + mask[tokens[len(output)]] = True + + expanded_mask = mask.expand_as(scores) + scores[~expanded_mask] = -float("inf") + + return scores + + def stopping_criterion(input_ids: torch.Tensor, _) -> bool: + """ + TODO: We can stop the generation once we have excluded all possibilities but one, and the + full sequence can be recovered during post-processing. 
+ """ + decoded_input = tokenizer.decode( + input_ids[0, num_prompt_tokens:], skip_special_tokens=True + ) + if decoded_input in choices: + return True + + return False + + def postprocessing(output: str) -> str: + return output + + return processor, stopping_criterion, postprocessing + + def create_int_mask( tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" ) -> Tuple[Callable, Callable, Callable]: diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py index 3734072f..82086f5b 100644 --- a/tests/models/test_hf_transformers.py +++ b/tests/models/test_hf_transformers.py @@ -35,6 +35,21 @@ def test_type_float(): float(answer) +def test_incompatible_constraints(): + model = HuggingFaceCompletion(MODEL, max_tokens=10) + + with pytest.raises(ValueError): + model("test", type="float", is_in=["test"]) + + +def test_choices(): + model = HuggingFaceCompletion(MODEL, max_tokens=50) + + choices = ["a", "and a long sequence", "with\n line break"] + answer = model("test", is_in=choices) + assert answer in choices + + @pytest.mark.xfail def test_type_multiple_samples(): model = HuggingFaceCompletion(MODEL, max_tokens=10) From b7c0931aca743623ee9f1412a76eecd6ecfd6ddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 15:59:13 +0200 Subject: [PATCH 130/734] Generate until sequences is found with `transformers` --- outlines/models/hf_transformers.py | 44 ++++++++++++++++++++++++++++ tests/models/test_hf_transformers.py | 35 +++++++++++++++++----- 2 files changed, 72 insertions(+), 7 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 4b6886df..868272fb 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -98,6 +98,10 @@ def call_model_generate_method( logit_processors = [logit_processor] stopping_criteria = [stopping_criterion] elif is_in is not None: + if samples > 1: + raise NotImplementedError( + "It is currently not possible to control the generation of several samples with the `transformers` integration" + ) if stop_at is not None: raise ValueError( "You cannot both restrict to a set of choices with `is_in` and set a stopping criterion" @@ -107,6 +111,16 @@ def call_model_generate_method( ) logit_processors = [logit_processor] stopping_criteria = [stopping_criterion] + elif stop_at is not None: + if samples > 1: + raise NotImplementedError( + "It is currently not possible to control the generation of several samples with the `transformers` integration" + ) + logit_processor, stopping_criterion, postprocessing = create_stop_mask( + stop_at, tokenizer, prompt_tokens["input_ids"] + ) + logit_processors = [logit_processor] + stopping_criteria = [stopping_criterion] if torch.cuda.is_available(): model = model.to("cuda") @@ -134,6 +148,36 @@ def call_model_generate_method( return results +def create_stop_mask( + stop_at: List[str], + tokenizer: "PreTrainedTokenizerBase", + prompt_tokens: "torch.Tensor", +) -> Tuple[Callable, Callable, Callable]: + import torch + + num_prompt_tokens = prompt_tokens.shape[-1] + + def stopping_criterion(input_ids: torch.Tensor, _) -> bool: + decoded_input = tokenizer.decode( + input_ids[0, num_prompt_tokens:], skip_special_tokens=True + ) + for stopping_sequence in stop_at: + if stopping_sequence in decoded_input: + return True + + return False + + def postprocess(output: str) -> str: + for stopping_sequence in stop_at: + idx = output.find(stopping_sequence) + if idx != -1: + return output[:idx] + + 
return output + + return lambda _, x: x, stopping_criterion, postprocess + + def create_choice_mask( choices: List[str], tokenizer: "PreTrainedTokenizerBase", diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py index 82086f5b..53fcef60 100644 --- a/tests/models/test_hf_transformers.py +++ b/tests/models/test_hf_transformers.py @@ -6,11 +6,11 @@ from outlines.models.hf_transformers import HuggingFaceCompletion # noqa -MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" +TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" def test_samples(): - model = HuggingFaceCompletion(MODEL, max_tokens=10) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", samples=1) assert isinstance(answer, str) @@ -24,34 +24,55 @@ def test_samples(): def test_type_int(): - model = HuggingFaceCompletion(MODEL, max_tokens=10) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", type="int") int(answer) def test_type_float(): - model = HuggingFaceCompletion(MODEL, max_tokens=10) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", type="float") float(answer) def test_incompatible_constraints(): - model = HuggingFaceCompletion(MODEL, max_tokens=10) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) with pytest.raises(ValueError): model("test", type="float", is_in=["test"]) def test_choices(): - model = HuggingFaceCompletion(MODEL, max_tokens=50) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=50) choices = ["a", "and a long sequence", "with\n line break"] answer = model("test", is_in=choices) assert answer in choices +def test_stop(): + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=1000) + + stop = [" ", "\n"] + answer = model("test", stop_at=stop) + for seq in stop: + assert seq not in answer + + @pytest.mark.xfail def test_type_multiple_samples(): - model = HuggingFaceCompletion(MODEL, max_tokens=10) + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", type="int", samples=2) int(answer) + + +@pytest.mark.xfail +def test_is_in_multiple_samples(): + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) + model("test", is_in=["a", "b"], samples=2) + + +@pytest.mark.xfail +def test_stop_at_multiple_samples(): + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) + model("test", stop_at=[" "], samples=2) From c4a4e125507148ba9cbc43ea018d1b75b14dedc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 18:04:58 +0200 Subject: [PATCH 131/734] Document and simplify the constraints --- outlines/models/hf_transformers.py | 171 ++++++++++++++++++++--------- 1 file changed, 117 insertions(+), 54 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 868272fb..05cd199a 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -92,7 +92,7 @@ def call_model_generate_method( raise ValueError( "You cannot both restrict to a set of choices with `is_in` and to a type with `type`" ) - logit_processor, stopping_criterion, postprocessing = create_type_mask( + logit_processor, stopping_criterion, postprocessing = create_type_constraint( type, tokenizer, prompt_tokens["input_ids"] ) logit_processors = [logit_processor] @@ -106,7 +106,7 @@ def call_model_generate_method( raise ValueError( "You cannot both restrict to a set of choices with `is_in` and set a stopping criterion" ) - logit_processor, stopping_criterion, postprocessing = 
create_choice_mask( + logit_processor, stopping_criterion, postprocessing = create_choice_constraint( is_in, tokenizer, prompt_tokens["input_ids"] ) logit_processors = [logit_processor] @@ -116,7 +116,7 @@ def call_model_generate_method( raise NotImplementedError( "It is currently not possible to control the generation of several samples with the `transformers` integration" ) - logit_processor, stopping_criterion, postprocessing = create_stop_mask( + logit_processor, stopping_criterion, postprocessing = create_stop_constraint( stop_at, tokenizer, prompt_tokens["input_ids"] ) logit_processors = [logit_processor] @@ -148,16 +148,34 @@ def call_model_generate_method( return results -def create_stop_mask( +def create_stop_constraint( stop_at: List[str], tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor", ) -> Tuple[Callable, Callable, Callable]: + """Create a constraint that stops generation after a sequence has been found. + + Parameters + ---------- + stop_at + The list of sequences which, once generated, the generation is stopped. + tokenizer + The tokenizer that corresponds to the model used for generation. + prompt_tokens + An array that contains the tokenized prompt. + + """ import torch num_prompt_tokens = prompt_tokens.shape[-1] def stopping_criterion(input_ids: torch.Tensor, _) -> bool: + """Choose whether to stop the generation after this step. + + We check whether either of the stopping sequences is present in the + current generation. If either one is found we stop the generation. + + """ decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True ) @@ -167,28 +185,52 @@ def stopping_criterion(input_ids: torch.Tensor, _) -> bool: return False - def postprocess(output: str) -> str: + def postprocess(generated_sequence: str) -> str: + """Postprocess the generated text. + + We need to remove the stopping sequence that triggered the end of + the generation at the end. + + """ for stopping_sequence in stop_at: - idx = output.find(stopping_sequence) + idx = generated_sequence.find(stopping_sequence) if idx != -1: - return output[:idx] + return generated_sequence[:idx] - return output + return generated_sequence return lambda _, x: x, stopping_criterion, postprocess -def create_choice_mask( +def create_choice_constraint( choices: List[str], tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor", ) -> Tuple[Callable, Callable, Callable]: + """Create a constraint that forces the generation to be among a list of choices. + + Parameters + ---------- + choices + The list of sequences to which the generated sequences must belong. + tokenizer + The tokenizer that corresponds to the model used for generation. + prompt_tokens + An array that contains the tokenized prompt. + + """ import torch num_prompt_tokens = prompt_tokens.shape[-1] tokenized_choices = [tokenizer.encode(word) for word in choices] - def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + """Pre-process the model's output logits before generating the next token. + + At each step we forbid the tokens that do not steer the generation in the + direction of being either of the choices. 
+ + """ output = input_ids[0, num_prompt_tokens:] decoded_output = tokenizer.decode(output, skip_special_tokens=True) @@ -205,9 +247,13 @@ def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: return scores def stopping_criterion(input_ids: torch.Tensor, _) -> bool: - """ - TODO: We can stop the generation once we have excluded all possibilities but one, and the - full sequence can be recovered during post-processing. + """Choose whether to stop the generation after this step. + + We stop generation when either of the choices has been found. + + TODO: We can stop the generation once we have excluded all possibilities + but one, and the full sequence can be recovered during post-processing. + """ decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True @@ -217,15 +263,22 @@ def stopping_criterion(input_ids: torch.Tensor, _) -> bool: return False - def postprocessing(output: str) -> str: - return output - - return processor, stopping_criterion, postprocessing + return logit_processor, stopping_criterion, lambda x: x -def create_int_mask( +def create_int_constraint( tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" ) -> Tuple[Callable, Callable, Callable]: + """Create a constraints that forces the generated sequence to be an integer. + + Parameters + ---------- + tokenizer + The tokenizer that corresponds to the model used for generation. + prompt_tokens + An array that contains the tokenized prompt. + + """ import torch num_prompt_tokens = prompt_tokens.shape[-1] @@ -240,38 +293,39 @@ def create_int_mask( mask[tokenizer.eos_token_id] = False - def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + """Pre-process the model's output logits before generating the next token. + + At each step we forbid the tokens that do not correspond to a digit. We forbid + EOS tokens until at least one digit has been generated. + + # TODO: Do we need to allow " ", "\n", "\r" and other delimiters? + + """ if input_ids.shape[1] > num_prompt_tokens + 1: mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) scores[~expanded_mask] = -float("inf") return scores - def stopping_criterion(input_ids: torch.Tensor, _) -> bool: - decoded_input = tokenizer.decode( - input_ids[0, num_prompt_tokens:], skip_special_tokens=True - ) - is_starting_new_sequence = all([c.isdigit() for c in decoded_input]) and ( - decoded_input[-1] == " " - or decoded_input[-1] == "\n" - or decoded_input[-1] == "\r" - ) - if len(decoded_input) > 1 and is_starting_new_sequence: - return True - - return False - - def postprocessing(output: str) -> str: - return output - - return processor, stopping_criterion, postprocessing + return logit_processor, lambda *_: False, lambda x: x -def create_float_mask( +def create_float_constraint( tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor", decimals: int = 3, ) -> Tuple[Callable, Callable, Callable]: + """Create a constraints that forces the generated sequence to be an floating point number. + + Parameters + ---------- + tokenizer + The tokenizer that corresponds to the model used for generation. + prompt_tokens + An array that contains the tokenized prompt. 
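+    decimals
+        The maximum number of digits allowed after the decimal point; generation stops once this number is exceeded (3 by default).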
+ + """ import torch num_prompt_tokens = prompt_tokens.shape[-1] @@ -288,7 +342,15 @@ def create_float_mask( mask[tokenizer.eos_token_id] = False - def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + """Pre-process the model's output logits before generating the next token. + + At each step we forbid the tokens that do not correspond to a digit. We forbid + EOS tokens until at least one digit has been generated. + + # TODO: Do we need to allow " ", "\n", "\r" and other delimiters? + + """ if input_ids.shape[1] > num_prompt_tokens + 1: mask[tokenizer.eos_token_id] = True expanded_mask = mask.expand_as(scores) @@ -296,6 +358,12 @@ def processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: return scores def stopping_criterion(input_ids: torch.Tensor, _) -> bool: + """Choose whether to stop the generation after this step. + + We stop generation if the sequence contains more than one period, or + if the desired number of decimals has been generated. + + """ decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True ) @@ -308,32 +376,27 @@ def stopping_criterion(input_ids: torch.Tensor, _) -> bool: ): return True - if len(decoded_input) > 1: - is_starting_new_sequence = all( - [c.isdigit() for c in decoded_input[:-1]] - ) and ( - decoded_input[-1] == " " - or decoded_input[-1] == "\n" - or decoded_input[-1] == "\r" - ) - if is_starting_new_sequence: - return True - return False def postprocessing(output: str) -> str: + """Postprocess the generated text. + + We need to remove the trailing period, present if the generation + was stopped because a second period was found. + + """ return output.rstrip(".") - return processor, stopping_criterion, postprocessing + return logit_processor, stopping_criterion, postprocessing type_to_mask: Dict[str, Callable] = { - "float": create_float_mask, - "int": create_int_mask, + "float": create_float_constraint, + "int": create_int_constraint, } -def create_type_mask( +def create_type_constraint( type: str, tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" ) -> Tuple[Callable, Callable, Callable]: if type not in ["int", "float"]: From 9cc575f570c1fe75e31eab742c4427ed4007bed1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 16 May 2023 17:44:56 +0200 Subject: [PATCH 132/734] Return when all but one possibilities eliminated --- outlines/models/hf_transformers.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 05cd199a..86142b84 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -258,12 +258,30 @@ def stopping_criterion(input_ids: torch.Tensor, _) -> bool: decoded_input = tokenizer.decode( input_ids[0, num_prompt_tokens:], skip_special_tokens=True ) - if decoded_input in choices: + + is_present_in_output = [] + for choice in choices: + if choice == decoded_input: + return True + elif choice.startswith(decoded_input): + is_present_in_output.append(1) + else: + is_present_in_output.append(0) + + # If we have eliminated all possibilities but one, return + if sum(is_present_in_output) == 1: return True return False - return logit_processor, stopping_criterion, lambda x: x + def postprocess(output_sequence: str) -> str: + for choice in choices: + if choice.startswith(output_sequence): + return choice + + return 
output_sequence + + return logit_processor, stopping_criterion, postprocess def create_int_constraint( From 10182475b9d11356f6525c67c61e08468d61a1bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 17 May 2023 15:14:12 +0200 Subject: [PATCH 133/734] Simplify the OpenAI text completion interface We currently have two different functions for the text and the chat completion APIs. However we only use the chat completion API as a text completion one, so we can consolidate both in a single function. --- outlines/models/__init__.py | 7 +- outlines/models/openai.py | 356 ++++++++++------------------- outlines/models/text_completion.py | 25 +- 3 files changed, 122 insertions(+), 266 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 15a11324..2e2150d8 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -8,9 +8,4 @@ from . import image_generation, text_completion from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion -from .openai import ( - OpenAIChatCompletion, - OpenAIEmbeddings, - OpenAIImageGeneration, - OpenAITextCompletion, -) +from .openai import OpenAICompletion, OpenAIEmbeddings, OpenAIImageGeneration diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 4c0cad26..c08e3f7f 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -12,14 +12,13 @@ from outlines.caching import cache __all__ = [ - "OpenAITextCompletion", - "OpenAIChatCompletion", + "OpenAICompletion", "OpenAIEmbeddings", "OpenAIImageGeneration", ] -def OpenAITextCompletion( +def OpenAICompletion( model_name: str, max_tokens: Optional[int] = 216, temperature: Optional[float] = 1.0, @@ -45,31 +44,19 @@ def OpenAITextCompletion( """ - @error_handler - @cache - def call_completion_api( - model: str, - prompt: str, - max_tokens: int, - temperature: float, - stop_sequences: Tuple[str], - logit_bias: Dict[str, int], - num_samples: int, - ): - import openai - - response = openai.Completion.create( - engine=model, - prompt=prompt, - temperature=temperature, - max_tokens=max_tokens, - stop=stop_sequences, - logit_bias=logit_bias, - n=num_samples, + if "text-" in model_name: + call_api = call_completion_api + format_prompt = lambda x: x + extract_choice = lambda x: x["text"] + elif "gpt-" in model_name: + call_api = call_chat_completion_api + format_prompt = lambda x: [{"role": "user", "content": x}] + extract_choice = lambda x: x["message"]["content"] + else: + raise NameError( + f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." 
) - return response - def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): import tiktoken @@ -93,14 +80,20 @@ def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): def generate_base( prompt: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] ) -> str: - responses = call_completion_api( - model_name, prompt, max_tokens, temperature, stop_at, mask, samples + responses = call_api( + model_name, + format_prompt(prompt), + max_tokens, + temperature, + stop_at, + mask, + samples, ) if samples == 1: - results = responses["choices"][0]["text"] + results = extract_choice(responses["choices"][0]) else: - results = [responses["choices"][i]["text"] for i in range(samples)] + results = [extract_choice(responses["choices"][i]) for i in range(samples)] return results @@ -137,10 +130,16 @@ def generate_choice( if len(mask) == 0: break - response = call_completion_api( - model_name, prompt, 1, temperature, None, mask, samples + response = call_api( + model_name, + format_prompt(prompt), + 1, + temperature, + None, + mask, + samples, ) - decoded.append(response["choices"][0]["text"]) + decoded.append(extract_choice(response["choices"][0])) prompt = prompt + "".join(decoded) decoded_samples.append("".join(decoded)) @@ -153,176 +152,6 @@ def generate_choice( return generate -def OpenAIChatCompletion( - model_name: str, - max_tokens: Optional[int] = 128, - temperature: Optional[float] = 1.0, -) -> Callable: - """Create a function that will call the chat completion OpenAI API. - - You should have the `openai` package installed. Available models are listed - in the `OpenAI documentation `_. - - Parameters - ---------- - model_name: str - The name of the model as listed in the OpenAI documentation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - Returns - ------- - A function that will call OpenAI's chat completion API with the given - parameters when passed a prompt. 
- - """ - - @error_handler - @cache - def call_chat_completion_api( - model: str, - messages: List[Dict[str, str]], - max_tokens: int, - temperature: float, - stop_sequences: Tuple[str], - logit_bias: Dict[str, int], - num_samples: int, - ): - import openai - - response = openai.ChatCompletion.create( - model=model, - messages=messages, - max_tokens=max_tokens, - temperature=temperature, - stop=stop_sequences, - logit_bias=logit_bias, - n=num_samples, - ) - - return response - - def generate( - prompt: str, - *, - samples: int = 1, - stop_at=None, - is_in=None, - type: Optional[str] = None, - ): - import tiktoken - - if stop_at is not None: - stop_at = tuple(stop_at) - - mask = {} - if type is not None: - encoder = tiktoken.encoding_for_model(model_name) - mask = create_type_mask(type, encoder) - - if is_in is not None and stop_at is not None: - raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") - elif is_in is not None and len(mask) > 0: - raise TypeError("You cannot set `is_in` and `mask` at the same time.") - elif is_in is not None: - return generate_choice(prompt, is_in, samples) - else: - return generate_base(prompt, stop_at, samples, mask) - - def generate_base( - query: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] - ) -> str: - messages = [{"role": "user", "content": query}] - responses = call_chat_completion_api( - model_name, messages, max_tokens, temperature, stop_at, mask, samples - ) - - if samples == 1: - results = responses["choices"][0]["message"]["content"] - else: - results = [ - responses["choices"][i]["message"]["content"] for i in range(samples) - ] - - return results - - def generate_choice( - prompt: str, is_in: List[str], samples: int - ) -> Union[List[str], str]: - """Generate a a sequence that must be one of many options. - - We tokenize every choice, iterate over the token lists, create a mask - with the current tokens and generate one token. We progressively - eliminate the choices that don't start with the currently decoded - sequence. 
- - """ - import tiktoken - - assert is_in is not None - tokenizer = tiktoken.encoding_for_model(model_name) - encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] - - decoded_samples = [] - for _ in range(samples): - decoded: List[str] = [] - for i in range(max([len(word) for word in encoded])): - mask = {} - for word, tokenized_word in zip(is_in, encoded): - if not word.startswith("".join(decoded)): - continue - try: - mask[tokenized_word[i]] = 100 - except IndexError: - pass - - if len(mask) == 0: - break - - messages = [{"role": "user", "content": prompt}] - response = call_chat_completion_api( - model_name, messages, 1, temperature, None, mask, 1 - ) - decoded.append(response["choices"][0]["message"]["content"]) - prompt = prompt + "".join(decoded) - - decoded_samples.append("".join(decoded)) - - if samples == 1: - return decoded_samples[0] - - return decoded_samples - - return generate - - -def validate_completion_parameters( - stop_at, is_in, max_tokens, temperature -) -> Dict[str, Union[Tuple[str], Dict[int, int], int, float]]: - """Validate the parameters passed to the completion APIs and set default values.""" - if is_in is not None: - mask: Dict[int, int] = {} - else: - mask = {} - if stop_at is not None and len(stop_at) > 4: - raise TypeError("OpenAI's API does not accept more than 4 stop sequences.") - elif stop_at is not None: - stop_at = tuple(stop_at) - if max_tokens is None: - max_tokens = 216 - if temperature is None: - temperature = 1.0 - - return { - "stop_sequences": stop_at, - "logit_bias": mask, - "max_tokens": max_tokens, - "temperature": temperature, - } - - def OpenAIEmbeddings(model_name: str): """Create a function that will call OpenAI's embeddings endpoint. @@ -413,40 +242,6 @@ def generate(prompt: str, samples: int = 1) -> PILImage: return generate -def error_handler(api_call_fn: Callable) -> Callable: - """Handle OpenAI API errors and missing API key.""" - import openai - - try: - os.environ["OPENAI_API_KEY"] - except KeyError: - raise OSError( - "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " - "OpenAI's APIs. Please make sure it is set before re-running your model." - ) - - def call(*args, **kwargs): - try: - return api_call_fn(*args, **kwargs) - except ( - openai.error.RateLimitError, - openai.error.Timeout, - openai.error.TryAgain, - openai.error.APIConnectionError, - openai.error.ServiceUnavailableError, - ) as e: - raise OSError(f"Could not connect to the OpenAI API: {e}") - except ( - openai.error.AuthenticationError, - openai.error.PermissionError, - openai.error.InvalidRequestError, - openai.error.InvalidAPIType, - ) as e: - raise e - - return call - - def create_int_mask(encoder): """Create an exclusive mask for digit tokens.""" warnings.warn( @@ -501,3 +296,90 @@ def create_float_mask(encoder): def create_type_mask(type: str, encoder): return type_to_mask[type](encoder) + + +def error_handler(api_call_fn: Callable) -> Callable: + """Handle OpenAI API errors and missing API key.""" + + def call(*args, **kwargs): + import openai + + try: + os.environ["OPENAI_API_KEY"] + except KeyError: + raise OSError( + "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " + "OpenAI's APIs. Please make sure it is set before re-running your model." 
+ ) + + try: + return api_call_fn(*args, **kwargs) + except ( + openai.error.RateLimitError, + openai.error.Timeout, + openai.error.TryAgain, + openai.error.APIConnectionError, + openai.error.ServiceUnavailableError, + ) as e: + raise OSError(f"Could not connect to the OpenAI API: {e}") + except ( + openai.error.AuthenticationError, + openai.error.PermissionError, + openai.error.InvalidRequestError, + openai.error.InvalidAPIType, + ) as e: + raise e + + return call + + +@error_handler +@cache +def call_completion_api( + model: str, + prompt: str, + max_tokens: int, + temperature: float, + stop_sequences: Tuple[str], + logit_bias: Dict[str, int], + num_samples: int, +): + import openai + + response = openai.Completion.create( + engine=model, + prompt=prompt, + temperature=temperature, + max_tokens=max_tokens, + stop=stop_sequences, + logit_bias=logit_bias, + n=num_samples, + ) + + return response + + +@error_handler +@cache +def call_chat_completion_api( + model: str, + messages: List[Dict[str, str]], + max_tokens: int, + temperature: float, + stop_sequences: Tuple[str], + logit_bias: Dict[str, int], + num_samples: int, +): + import openai + + response = openai.ChatCompletion.create( + model=model, + messages=messages, + max_tokens=max_tokens, + temperature=temperature, + stop=stop_sequences, + logit_bias=logit_bias, + n=num_samples, + ) + + return response diff --git a/outlines/models/text_completion.py b/outlines/models/text_completion.py index 1406c651..a2d6baad 100644 --- a/outlines/models/text_completion.py +++ b/outlines/models/text_completion.py @@ -1,27 +1,6 @@ """Router for text completion models.""" from .hf_transformers import HuggingFaceCompletion -from .openai import OpenAIChatCompletion, OpenAITextCompletion +from .openai import OpenAICompletion hf = HuggingFaceCompletion - - -def openai(model_name: str, *args, **kwargs): - """Dispatch the OpenAI model names to their respective completion API. - - This ensures that chat completion models can also be called as text - completion models (with no instruction and no history). - - Parameters - ---------- - model_name - The name of the model in OpenAI's API. - - """ - if "text-" in model_name: - return OpenAITextCompletion(model_name, *args, **kwargs) - elif "gpt-" in model_name: - return OpenAIChatCompletion(model_name, *args, **kwargs) - else: - raise NameError( - f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." - ) +openai = OpenAICompletion From b7ca38b1591572c4bf21d97f397763c87536d9bb Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 17 May 2023 17:05:48 -0500 Subject: [PATCH 134/734] Add a Conda dev environment file --- environment.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 environment.yml diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..d9c3f34e --- /dev/null +++ b/environment.yml @@ -0,0 +1,23 @@ +# To use: +# +# $ conda env create -f environment.yml # `mamba` works too for this command +# $ conda activate outlines-dev +# +name: outlines-dev +channels: + - conda-forge + - huggingface +dependencies: + - python<3.11.0 + - jinja2 + - numpy + - pillow + - pydantic + - scipy + - diffusers + - pytest + - pre-commit + - transformers + - pip + - pip: + - -e ".[test]" From efefd72cc430701ffc758377c0a4a396d8f22043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 17 May 2023 09:42:14 +0200 Subject: [PATCH 135/734] Add LICENSE --- LICENSE | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 38baf1e15600c3df39df8f749fdf797f0a605bd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 17 May 2023 16:47:57 +0200 Subject: [PATCH 136/734] Update the README --- README.md | 195 +++++++++++++++++++++++++++++------------------------- 1 file changed, 106 insertions(+), 89 deletions(-) diff --git a/README.md b/README.md index 0337b0e6..a3027397 100644 --- a/README.md +++ b/README.md @@ -6,76 +6,103 @@ Build _reliable_ workflows based on interactions with generative models. - +[Prompting](#prompting) • +[Controlled generation](#controlled-generation) • +[Agents](#agents-example) • +[Sampling](#sampling-uncertainty-simulation-based-inference) • +[Examples](#examples) + -## Prompt management +**Outlines** allows you to control and diagnose interactions with LLMs more effectively. 
Modern language models are powerful and versatile, but the way they interface with existing systems [can be very brittle](https://github.com/Significant-Gravitas/Auto-GPT/labels/invalid_json), their outputs [can be unreliable](https://arxiv.org/abs/2302.04023), and complex workflows (agents) can introduce a lot of error-prone code duplication. Outlines provides robust prompting primitives that separate the prompting from the execution logic and lead to simple implementations of few-shot generations, ReAct, meta-prompting, agents, etc. Outlines helps developers control text generation and produce predictable outputs that make the interaction with user code more robust. Its sampling-first approach allows one to diagnose issues with model-generated output more easily, and implement more robust generation methods such as [self-consistency](https://arxiv.org/abs/2203.11171) or [DiVeRSe](https://arxiv.org/abs/2206.02336). -Outlines makes it easier to write and manage prompts by encapsulating templates -inside "template functions". These functions make it possible to neatly separate -the prompt logic from the general program logic; they can be imported from other -modules and libraries. +**Outlines** is designed as a library that integrates well with the broader Python environment. Generation can be interleaved with control flow or custom function calls, and prompts can be imported from other modules or libraries. -Template functions use the Jinja2 templating engine to help build complex -prompts (like few-shot examples) in a concise manner: -``` python -import outlines.text as text +## Features +- [x] Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/). +- [x] Interleave completions with loops, conditionals, and custom Python functions +- [x] Caching of generations +- [x] Integration with OpenAI and HuggingFace models +- [x] Controlled generation, including multiple choice, type constraints and dynamic stopping +- [x] Sampling of multiple sequences -@text.prompt -def few_shot_examples(question, examples): - """Something something ## Installation - {% for example in examples %} - EXAMPLE: {{ example }} - {% endfor %} +**Outlines** is available on PyPI: - QUESTION: {{ question }} - Let's think step by step. +``` bash +pip install outlines +``` - """ ``` ## Prompting + +Writing prompts by concatenating strings in pure Python quickly becomes +cumbersome: the prompt building logic gets entangled with the rest of the +program, and the structure of the rendered prompt is obfuscated. **Outlines** +makes it easier to write and manage prompts by encapsulating templates inside +"template functions". + +These functions make it possible to neatly separate the prompt logic from the +general program logic; they can be imported from other modules and libraries. 
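A minimal sketch of that last point (the `prompts.py` module name and the `summarize` function below are illustrative, not part of the library): a template function defined in one module can be imported and called like any other Python function, and the importing module stays free of templating logic.

``` python
# prompts.py - a module that only holds prompt templates (hypothetical)
import outlines.text as text


@text.prompt
def summarize(document):
    """Summarize the following document in one sentence.

    {{ document }}
    """
```

``` python
# elsewhere in the program: import the template like any other callable
from prompts import summarize

prompt = summarize("Outlines separates prompting from execution logic.")
print(prompt)
```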
+ + +Template functions require no superfluous abstraction; they use the Jinja2 +templating engine to help build complex prompts in a concise manner: ``` python import outlines.text as text +import outlines.models as models +examples = [ + ("The food was disgusting", "Negative"), + ("We had a fantastic night", "Positive"), + ("Recommended", "Positive"), + ("The waiter was rude", "Negative") +] @text.prompt -def my_agent(name, goals): - """Your name is {{ name }}. +def labelling(to_label, examples): + """You are a sentiment-labelling assistant. - GOALS: - {% for goal in goals %} - {{ loop.counter }}. {{ goal }} + {% for example in examples %} + {{ example[0] }} // {{ example[1] }} {% endfor %} + {{ to_label }} // """ - -jarvis = ft.partial(my_agent, "JARVIS") +model = models.text_completion.openai("text-davinci-003") +prompt = labelling("Just awesome", examples) +answer = model(prompt) ``` -The template contained in template functions remains accessible: +## Chaining with loops and conditionals ([example](https://github.com/normal-computing/outlines/blob/readme/examples/react.py)) + +**Outlines** comes with very few abstractions, and is designed to blend into existing code and integrate with the rest of the ecosystem. ``` python -import outlines.text as text +reviews = ["Just awesome", "Avoid", "Will come back"] +def send_notification(review): + """This function sends a notification with the review's content.""" + ... -@text.prompt -def prompt(): - "I am accessible" +for review in reviews: + prompt = labelling(review, examples) + answer = model(prompt) + if answer == "Positive": + send_notification(review) +``` +## Agents ([example](https://github.com/normal-computing/outlines/blob/readme/examples/babyagi.py)) -prompt.template -# I am accessible -``` +**Outlines** makes building agents like [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT), [BabyAGI](https://github.com/yoheinakajima/babyagi), [ViperGPT](https://viper.cs.columbia.edu/) or [Transformers Agent](https://huggingface.co/docs/transformers/transformers_agents) easier by removing boilerplate prompting code. ### Tools -Prior work has shown that we can teach language models to call external functions to get additional informations or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt we define custom Jinja filters that can extract the function's name, description, signature and source: +We can teach language models to call external functions to get additional information or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt, we define custom Jinja filters that can extract the function's name, description, signature and source: ``` python @@ -94,7 +121,7 @@ def wikipedia_search(query: str): @text.prompt -def my_commands(tools: List[Callable]): +def agent(tools: List[Callable]): """AVAILABLE COMMANDS: {% for tool in tools %} @@ -141,88 +168,78 @@ joke_ppt(Joke) # } +## Controlled generation -## Natural language functions +The first step towards reliability of systems that include large language models is to ensure that there is a well-defined interface between their output and user-defined code. 
**Outlines** provides ways to control the generation of language models to make their output more predictable. -Large language models can be prompted so their output can be parsed into a data structure that can be manipulated by programming languages. The combination prompt + model call + output parser can thus be thought as a "natural language" function. +You can stop the generation after a given sequence has been found: ``` python -import json -import outlines.text as text -import outlines.models as models - +answer = model("Tell me a one-sentence joke.", stop_at=["."]) +``` -@text.prompt -def prime_numbers(n: int): - """Return a list that contains all prime numbers between 1 and {{ n }}. +You can reduce the completion to a choice between multiple possibilities: - The output must be parsable as a Python list. - """ +``` python +prompt = labelling("Just awesome", examples) +answer = model(prompt, is_in=["Positive", "Negative"]) +``` -def parse(result): - return json.loads(result) +You can require the generated sequence to be an int or a float: +``` python +import outlines.models as models -get_prime_numbers = text.function( - models.text_completion.openai("gpt-3.5-turbo"), - prime_numbers, - parse -) +model = models.text_completion.hf("sshleifer/tiny-gpt2") +answer = model("2 + 2 = ", type="int") +print(answer) +# 4 -get_prime_numbers(10) -# [2, 3, 5, 7] +model = models.text_completion.hf("sshleifer/tiny-gpt2") +answer = model("1.7 + 3.2 = ", type="float") +print(answer) +# 4.9 ``` -For more complex outputs one can pass a Pydantic model to `text.function`, which will be used to parse the output: -``` python -from pydantic import BaseModel -import outlines.text as text +## Sampling ([uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb), [simulation-based inference](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb)) +Outlines is strictly sampling based, and focused on using methods such as [self-consistency](https://arxiv.org/abs/2203.11171), [adaptive consistency](https://arxiv.org/abs/2305.11860), [DiVeRSe](https://arxiv.org/abs/2206.02336), [Tree of thoughts](https://arxiv.org/abs/2305.10601), [lattice sampling](https://arxiv.org/abs/2112.07660), etc. Several samples can be obtained using the `num_samples` keyword argument: -class Joke(BaseModel): - joke: str - explanation: str +``` python +import outlines.models as models -@text.prompt -def joke_ppt(response_model): - """Tell a joke and explain why the joke is funny. +model = models.text_completion.hf("sshleifer/tiny-gpt2") +answer = model("2 + 2 = ", num_samples=5) +print(answer) +# [4, 5, 4, 4, 4] +``` - RESPONSE FORMAT: - {{ response_model | schema }} - """ +The focus on sampling allows us to explore different ideas, such as [using the diversity of answers to evaluate the model's uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb), or [simulation-based inference to optimize the prompt](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb). -tell_a_joke = text.function( - models.text_completion.openai("gpt-3.5-turbo"), - joke_ppt, - Joke -) +## Contributing -tell_a_joke(Joke) -# [2, 3, 5, 7] -``` +### What contributions? 
- +We currently only accept bug fixes and documentation contributions. If you have a feature request, please start a new [discussion](https://github.com/normal-computing/outlines/discussions). The issue tracker is only intended for actionable items. +### How to contribute? -Outlines offers mechanisms to specify high-level constraints on the text generations: +Run `pip install -e .[test]` or `conda env create -f environment.yml`. To build the documentation you will also need to run `pip install -r requirements-doc.txt`. -- `stop_at` allows to stop the generation once a particular word, sequence of symbol had been generated; -- `is_in` allows to constrain the model to generate an answer chosen among a set of possible answers; -- `type` allows to constrain the model's output to either `"int"`s or `"float"`s; +Before pushing your code to the repository, please run `pre-commit run --all-files` and `pytest` to make sure that the code is formatted correctly and that the tests pass. -Coming: +Do not hesitate to open a draft PR before your contribution is ready, especially if you have questions and/or need feedback. -- Ability to constrain the output to a JSON with a given structure; -- Ability to constrain the output to a List; -- Ability to constrain the output to be Python code; -# Examples +## Examples - [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) - [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) - [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) - [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py) - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) +- [Uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb) +- [Simulation-based inference](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb) From 785613f14457fa4c820cefc9381b2bae4cd81973 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 22 May 2023 14:44:40 +0200 Subject: [PATCH 137/734] Add the uncertainty example notebook --- examples/sampling.ipynb | 400 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 400 insertions(+) create mode 100644 examples/sampling.ipynb diff --git a/examples/sampling.ipynb b/examples/sampling.ipynb new file mode 100644 index 00000000..dd7321e7 --- /dev/null +++ b/examples/sampling.ipynb @@ -0,0 +1,400 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "62129e1a-e9de-454e-a714-35ccbcf0b518", + "metadata": {}, + "outputs": [], + "source": [ + "import functools as ft\n", + "import re\n", + "\n", + "import numpy as np\n", + "import matplotlib.pylab as plt\n", + "\n", + "import outlines.models as models\n", + "import outlines.text as text" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b20aafe8-b7a3-4df4-878f-b48b74e131df", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "env: OPENAI_API_KEY=# your key here\n" + ] + } + ], + "source": [ + "%env OPENAI_API_KEY= # your key here" + ] + }, + { + "cell_type": "markdown", + "id": 
"2a3514d6-d5d7-46e9-9b69-1251d337e094", + "metadata": {}, + "source": [ + "In this example we will look at completion results for questions from the GSM8K dataset, using few-shots prompts with 5 examples. We first use `outlines.text.prompt` to build the few-shot prompt; we partially evaluate the prompt function with the few-shot examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ffe8bb11-6b51-4fe7-bfb3-c62556a60db8", + "metadata": {}, + "outputs": [], + "source": [ + "examples = [\n", + " {\n", + " \"question\": \"There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\",\n", + " \"answer\": \"We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.\",\n", + " },\n", + " {\n", + " \"question\": \"If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\",\n", + " \"answer\": \"There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\",\n", + " },\n", + " {\n", + " \"question\": \"Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\",\n", + " \"answer\": \"Leah had 32 chocolates and Leah’s sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.\",\n", + " },\n", + " {\n", + " \"question\": \"Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\",\n", + " \"answer\": \"Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.\",\n", + " },\n", + " {\n", + " \"question\": \"Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\",\n", + " \"answer\": \"He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.\",\n", + " },\n", + " {\n", + " \"question\": \"There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\",\n", + " \"answer\": \"There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.\",\n", + " },\n", + " {\n", + " \"question\": \"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\",\n", + " \"answer\": \"Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.\",\n", + " },\n", + " {\n", + " \"question\": \"Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\",\n", + " \"answer\": \"She bought 5 bagels for $3 each. 
This means she spent 5\",\n", + " },\n", + "]\n", + "\n", + "@text.prompt\n", + "def few_shot_prompt(question, examples):\n", + " \"\"\"\n", + " {% for example in examples %}\n", + " Q: {{ example.question }}\n", + " A: {{ example.answer }}\n", + " {% endfor %}\n", + " Q: {{ question }}\n", + " A:\n", + " \"\"\"\n", + "\n", + "gsm8k_prompt = ft.partial(few_shot_prompt, examples=examples)" + ] + }, + { + "cell_type": "markdown", + "id": "1eae0ec8-89f0-43fc-b055-6fcd64cbc03b", + "metadata": {}, + "source": [ + "## When `text-davinci-003` is uncertain" + ] + }, + { + "cell_type": "markdown", + "id": "a273ed78-e813-467e-85f3-16d7f283ba87", + "metadata": {}, + "source": [ + "Let us now sample 20 completions with the `text-davinci-003` model:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "beff960d-6833-4f24-af09-5b65886a9549", + "metadata": {}, + "outputs": [], + "source": [ + "model = models.text_completion.openai(\"text-davinci-003\", max_tokens=128)\n", + "\n", + "question = \"When I was 6 my sister was half my age. Now I’m 70 how old is my sister?\"\n", + "prompt = gsm8k_prompt(question)\n", + "answers = model(prompt, samples=20)" + ] + }, + { + "cell_type": "markdown", + "id": "1a895b6d-d4d4-40f9-9156-24ba7e21cc08", + "metadata": {}, + "source": [ + "The correct answer to this question is 35. Let us now count the different answers, and take a look at their distribution. Let us first define a few utility functions:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f1c83d1f-a478-4509-890e-b84a2e0d8846", + "metadata": {}, + "outputs": [], + "source": [ + "def count_digits(answers):\n", + " digits = []\n", + " for answer in answers:\n", + " try:\n", + " match = re.findall(r\"\\d+\", answer)[-1]\n", + " if match is not None:\n", + " digit = int(match)\n", + " digits.append(digit)\n", + " except (AttributeError, IndexError):\n", + " print(f\"Could not parse the completion: '{answer}'\")\n", + " \n", + " unique_digits, counts = np.unique(digits, return_counts=True)\n", + " return {d: c for d, c in zip(unique_digits, counts)}\n", + "\n", + "def plot_counts(counts):\n", + " fig = plt.figure(figsize=(12,8))\n", + " ax = fig.add_subplot(111)\n", + " \n", + " bar = ax.bar(counts.keys(), counts.values())\n", + " ax.spines[[\"right\", \"top\", \"left\"]].set_visible(False)\n", + " ax.get_yaxis().set_visible(False)\n", + " \n", + " for rect in bar:\n", + " height = rect.get_height()\n", + " plt.text(rect.get_x() + rect.get_width() / 2.0, height, f'{height:.0f}', ha='center', va='bottom', fontsize=20)\n", + " \n", + " ax.set_xticks(list(counts.keys()))\n", + " ax.set_xlabel(\"Answer\")\n", + "\n", + "def entropy(counts):\n", + " counts = np.array(list(counts.values()))\n", + " probs = counts / np.sum(counts)\n", + " log_probs = np.log(probs)\n", + " return - np.sum(probs * log_probs)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "88668e09-bcd6-4a6a-83a5-838189b910eb", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqsAAAHgCAYAAACCbCTDAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAVwklEQVR4nO3df7DldX3f8dcbFtmVLb9cDDgLrB0DjNWUlWQzWBaDtEJg0BBiJk5VsKLFKRSsU7u2M8wKZouDjsg4Y0chBn9MTfgh3QkapQiCHSJWFigBAlMgggUkJlVJDXXh0z/2ILuwF9LZe+95372Px8yZvff7PeznvcB893m/53vOt8YYAQCAjnaZ9gAAADATsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtLXkRfb7XCsAAOZDbW+jM6sAALQlVgEAaEusAgvGddddl5NPPjn7779/dt9997ziFa/Icccdl69+9avTHg3YiTn2TNeLXbMK0MIHP/jBXHjhhVm5cmXe/OY3Z8WKFXn88cfzve99LzfccENOOOGEaY8I7IQce6avxnjB91B5gxUwdZ/97Gfz3ve+N6eeemo+85nP5CUveck2+3/+859nt912m9J0wM7KsWfebfcNVmIVaO3JJ5/MgQcemGXLluW+++573l8WAHPBsWcqthurLgMAWrv22mvz+OOP55xzzskuu+ySa665JnfeeWeWLl2aNWvW5Mgjj5z2iMBOyLGnD7EKtPbd7343SbJ06dKsXr06d9555zb7jz766FxxxRXZb7/9pjEesJNy7OnDpwEArf3whz9Mklx44YWpqtx000356U9/mjvuuCNvetObcuONN+atb33rlKcEdjaOPX2IVaC1p59+OkmyZMmSbNy4MUcddVSWL1+e1772tfnKV76SlStX5lvf+lZuvvnmKU8K7Ewce/oQq0Bre++9d5Jk9erVWbVq1Tb7XvrSl+a4445Lktxyyy3zPBmwM3Ps6UOsAq0deuihSZ79i+O59tlnnyTJz372s/kaCVgEHHv6EKtAa8cee2yqKnfdddcvXpbb2jNvenjlK18536MBOzHHnj7EKtDawQcfnJNOOinf//7388lPfnKbfd/4xjfy9a9/PXvvvXeOP/74KU0I7Iwce/pwUwCgvYcffjivf/3r89BDD+XYY4/N6tWr88ADD+Tqq69OVeXLX/5yTjnllGmPCexkHHvmnTtYAQvX448/nvPOOy8bN27MI488kj333DNr167Nhz70oaxZs2ba4wE7KceeeSVWAQBoa7ux6ppVAADaEqsAALQlVgEAaGvJtAcAeDGr1l0z474HLzhxHicBFhPHnh6cWQUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCAAvaF7/4xVRVqiqXXHLJtMdhlolVAGDBeuihh3LmmWdm+fLl0x6FOSJWAYAFaYyRd73rXXnZy16WM844Y9rjMEfEKgCwIF188cX55je/mc997nPZY489pj0Oc0SsAgALzt13351169bl7LPPztFHHz3tcZhDYhUAWFA2b96cd7zjHTnooIOyYcOGaY/DHFsy7QEAAP5/nHfeedm0aVO+/e1vZ9myZdMehznmzCoAsGB85zvfyYYNG/KBD3wgRx555LTHYR6IVQBgQdi8eXPe+c535pBDDsn5558/7XGYJ2IVAFgQnnjiidx77725++67s3Tp0l/cCKCq8uEPfzhJ8p73vCdVlXPOOWe6wzJrXLMKACwIu+++e9797ndvd9+tt96aTZs25aijjsqhhx7qEoGdiFgFABaEZcuWzXg71fXr12fTpk059dRTc/rpp8/zZMwllwEAANCWWAUAoC2xCgAseOvXr88YwyUAOyGxCgBAW2IVAIC2xCoAAG356CoAYMFYte6aF9z/4AUnztMkzBdnVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAAIvEj370o1xyySU5+eST86pXvSrLli3LXnvtlaOOOiqXXn
ppnn766WmP+DxLpj0AAADz4/LLL8/73ve+HHDAATnmmGNy0EEH5bHHHstVV12V008/PV/72tdy+eWXp6qmPeoviFUAgEXikEMOycaNG3PiiSdml12efYF9w4YNWbNmTa688spcddVVOeWUU6Y45bZcBgAAsEi88Y1vzEknnbRNqCbJ/vvvnzPOOCNJcsMNN0xhspmJVQAAsttuuyVJlizp9cK7WAUAWOQ2b96cz3/+80mS448/fsrTbEusAgAscuvWrcudd96ZE044Iccdd9y0x9mGWAUAWMQuvvjifPzjH89hhx2WL3zhC9Me53nEKgDAIvWpT30qZ599dl796lfn+uuvz7777jvtkZ5HrAIALEIXXXRRzjrrrLzmNa/J9ddfn/3333/aI22XWAUAWGQ++tGP5v3vf38OP/zwXH/99Xn5y18+7ZFmJFYBABaR888/P+vWrcsRRxyR6667LitWrJj2SC+o1wdpAQAwZy677LKce+652XXXXbN27dpcfPHFz3vOqlWrctppp83/cDMQqwAAi8QDDzyQJHnqqady0UUXbfc5b3jDG1rFqssAAAAWifXr12eM8YIPt1sFAIC/J7EKAEBbYhUAgLa8wQoAYBFZte6aGfc9eMGJ8zjJ348zqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoq2WsXnHFFTnrrLOydu3a7LnnnqmqvP3tb5/2WAAAc0L7zGzJtAfYno985CO5/fbbs3z58qxcuTL33HPPtEcCAJgz2mdmLc+sfuITn8i9996bn/zkJ/n0pz897XEAAOaU9plZyzOrxxxzzLRHAACYN9pnZi3PrAIAQCJWAQBoTKwCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2mp5U4Crr746V199dZLk0UcfTZLcfPPNOe2005IkK1asyMc+9rEpTQcAMLu0z8xaxuptt92Wyy67bJtt999/f+6///4kycEHH7xo/4MBADsf7TOzlpcBrF+/PmOMGR8PPvjgtEcEAJg12mdmLWMVAAASsQoAQGNiFQCAtlq+wSpJVq27ZsZ9D15w4jxOAgAwt3TPzJxZBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2hKrAAC0JVYBAGhLrAIA0JZYBQCgLbEKAEBbYhUAgLbEKgAAbYlVAADaEqsAALQlVgEAaEusAgDQllgFAKAtsQoAQFtiFQCAtsQqAABtiVUAANoSqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG3VGGPmnVV/mmTF/I0zoxVJ/mraQwBtOCYAz5jP44G15tZfjTGOf+7GF4zVLqrqv48xfnXacwA9OCYAz5jP44G1psNlAAAAtCVWAQBoa6HE6memPQDQimMC8Iz5PB5YawoWxDWrAAAsTgvlzCoAAItQ+1itqr2r6oqquqeq7q6qI6c9EzA/qmppVd1SVbdX1Z9X1Ycn2/+wqh6oqtsmj8OnPCowD16oCarqA1U1qmpWPnJze2tV1R9tddx5sKpum4V1Dt3q97ytqn5SVedU1b5VdW1V3Tf5dZ85XOv8qrpjsu0bVfWKHV1rNrW/DKCqLkty0xjjkqp6SZKXjjH+95THAuZBVVWSPcYYT1TVbkm+neTsJGck+ZMxxhVTHRCYVzM1QVUdmOSSJIclOWKMscOfGfpi/VFVH0/y4zHGeTu61la/565JfpDk15P8qyR/Pca4oKrWJdlnjPHv5mitvxlj/GSy/V8nefUY44zZWmtHtT6zWlV7JTk6yaVJMsb4v0IVFo+xxROTb3ebPHr/hA3MiRdpgk8k+WBm6fjwYv0x+UH6d5P859lYbyvHJvmfY4y/TPKWJJdNtl+W5Lfmaq1nQnVijzQ7zraO1SSvTPJ4ks9V1aaquqSq9pj2UMD8qapdJy+1/TDJtWOM70x2/f7kZatPVNXu05sQmCfbbYKqekuSH4wxbp/rtbbavzbJY2OM+2ZxzST5vTwbwL80xnhk8vWjSX5pDtdKVf1+VT2U5J8nOXeW19oh3WN1SZLXJfn0GGN1kr9Nsm66IwHzaYzx1Bjj8CQrk6ypqtck+VC2vNz3a0n2TTJrL40BbW2vCdYn+feZ/bh6sf54W2b5rOrkUoM3J7n8ufvGlms2Z+1s5/bWGmP8hzHGgUm+lOTM2VprNnSP1YeTPLzVmZQrsuV/HmCRmbwEd32S48cYj0wuEXgyyeeSrJnqcMB8mKkJXpnk9qp6MFt+qL21qvafo7VSVUuS/HaSP9rBNZ7rN5PcOsZ4bPL9Y1V1wGTNA7Ll1aW5WmtrX0pyyiyutcNax+oY49EkD1XVoZNNxya5a4ojAfOoqvarqr0nXy9L8s+S3LPVAbyy5TquO6c1IzA/ZmiCW8cYLx9jrBpjrMqWyHzd5LmzvdYz/fFPk9wzxnh4R9bYjueerd2Y5NTJ16cm+S9ztVZV/fJW+96S5J5ZXGuHLYRPAzg8W97h95Ik9yd51xjjb6Y6FDAvqupXsuWNBbtmyw/XfzzGOK+qvplkvySV5LYkZ2z1RixgJ/ViTTA5u/qrs/RpANtdq6r+MMmfjTH+046usdVaeyT5fpJ/OMb48WTby5L8cZKDkvxlkt8dY/z1HK11ZZJDkzw9WeuMMcYPdnSt2dI+VgEAWLxaXwYAAMDiJlYBAGhLrAIA0JZYBQCgLbEKAEBbYhXgOarqt6pqVNVh054FYLETqwDP97Yk3578OhWTu+QALHpiFWArVbU8yVFJ3p3k9ybbfqOqbqiqK6rqnqr60uTuWamqC6rqrqq6o6o+VlW7VtUDtcXeVfVUVR09ee6NVfXLVbVHVf1BVd1SVZuq6i2T/adV1cbJTQ+um86/AYBe/OQOsK23JPnTMca9VfWjqjpisn11kn+U5H8l+W9J/klV3Z3k5CSHjTFGVe09xniqqv4iyauz5Z7ltyZZW1XfSXLgGOO+qtqQ5JtjjH8xuZ3sLVX1X
yfrvC7Jr8zGnWoAdgbOrAJs621Jvjz5+st59lKAW8YYD48xns6WW7yuSvLjJH+X5NKq+u0k/2fy3JuSHD15/MdsOVP7a0m+O9n/piTrquq2JDckWZott1RMkmuFKsCznFkFmKiqfZO8Mclrq2ok2TXJSHJNkie3eupTSZaMMTZX1Zokxyb5nSRnTv75G5O8L8krkpyb5N8m+Y1sidgkqSSnjDH+4jnr/3qSv52TPxzAAuXMKsCzfifJF8YYB48xVo0xDkzyQJK123vy5PrWvcYYX03y/iT/eLLrliSvT/L0GOPvsuVM7L/MlohNkq8nOWur615Xz9GfB2DBE6sAz3pbkq88Z9uVmflTAf5Bkj+pqjuy5dMD/k2SjDGeTPJQkj+bPO+myXP/x+T785PsluSOqvrzyfcAbEeNMaY9AwAAbJczqwAAtCVWAQBoS6wCANCWWAUAoC2xCgBAW2IVAIC2xCoAAG2JVQAA2vp/jdj4sUZoV2sAAAAASUVORK5CYII=\n", + "text/plain": [ + "

" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "counts = count_digits(answers)\n", + "plot_counts(counts)" + ] + }, + { + "cell_type": "markdown", + "id": "661a1135-ac2d-4a49-a786-d04a7ba68b48", + "metadata": {}, + "source": [ + "We see that there is an important variabilty in the answers given by `text-davinci-003`. Depending on the number of samples taken, even self-consistency sampling may lead to the wrong result here." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "30ea0dfe-6c15-44f0-881c-88b325542b44", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Entropy: 1.5741030017371853\n" + ] + } + ], + "source": [ + "print(f\"Entropy: {entropy(counts)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "0b15b230-b667-4c9c-8a5d-366dd61de9b7", + "metadata": {}, + "source": [ + "## `text-davinci-003` on an easier question" + ] + }, + { + "cell_type": "markdown", + "id": "beae30f0-4168-4a80-90d4-d26a4f476469", + "metadata": {}, + "source": [ + "Let us now look at the results for an arguably easier question:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7e106b94-2dfd-4a75-b4d9-b1ad693418a7", + "metadata": {}, + "outputs": [], + "source": [ + "question = \"John buys 2 pairs of shoes for each of his 3 children. They cost $60 each. How much did he pay?\"\n", + "prompt = gsm8k_prompt(question)\n", + "answers = model(question, samples=20)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "dd46fb2b-08ef-4003-8d03-ea0f39c865c4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Entropy: 0.1985152433458726\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqsAAAHgCAYAAACCbCTDAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAQuklEQVR4nO3db6ye9V3H8c+PPxGxIjMVghSLkCYgOtmUzSCd6xii8mCIyxBCTBUTWbBmIzEhWeIAUfegSYkha6JIQsmSRaoclnUbmeCyQbrBoi2GrtSlEIZKxnQOCmPB9vLBuaH8aUui4dyfwuuVnPQ+13Wd3N/TR+/++ruva0zTFAAAaHTEvAcAAICDEasAANQSqwAA1BKrAADUEqsAANQSqwAA1Drqdc67rxUAAEthHOiglVUAAGqJVQAAaolVAIA3sc2bN2fdunVZvXp1jjvuuIwxcsUVVxz0+meeeSYf+9jHcsYZZ+SYY47J2972tlx44YW55557lnDq/V5vzyoAAIexG2+8Mdu3b8+yZcuyYsWK7Ny586DXfve73815552XHTt25KyzzspVV12VPXv25K677sr73//+3HLLLbnyyiuXcHorqwAAb2obNmzIrl278vTTT2fjxo2HvPa6667Ljh07cskll2Tbtm256aabcsstt+Thhx/OKaecknXr1uWJJ55YoskXiVUAgDexNWvWZNWqVRnjgB+2f4U777wzSXLDDTfkqKP2/wf8CSeckGuuuSbf//73c+utt75hsx6IWAUAIEny5JNPJklOO+2015x78dhS710VqwAAJEmWL1+eJHn00Udfc2737t1JkkceeWRJZxKrAAAkSS666KIkycc//vHs3bv3peNPPfVUNmzYkGTxQ1hLyd0AAABIsrhX9e67787mzZtz9tln5/zzz8+zzz6bu+66KyeffHIef/zxHHHE0q51WlkFACBJctJJJ+XBBx/M1VdfnWeeeSaf/OQns2XLllx66aW54447kix+2GopWVkFAOAlJ554Ym6++ebcfPPNrzh+7733JknOOeecJZ3HyioAAK9r06ZNSZLLL798Sd9XrAIAkCTZt29f9uzZ85rjt99+ezZt2pRzzz03F1988ZLOZBsAAMCb2MLCQhYWFpLsv4/q1q1bs3bt2iSLt6tav359kuS5557LiSeemAsuuCCnn356jjjiiNx///3ZunVrzjzzzNxxxx1L/gGrMU3Toc4f8iQAAN2uu+66XH/99Qc9v3Llyjz22GNJkhdeeCFXXXVV7rvvvpceq7pq1ap86EMfykc+8pEce+yxb+SoB3zEllgFAKDBAWPVnlUAAGqJVQAAaolVAABqiVUAAHLqtVvmPcIBiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAao
lVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABqiVUAAGrVxOrmzZuzbt26rF69Oscdd1zGGLniiivmPRYAAHN01LwHeNGNN96Y7du3Z9myZVmxYkV27tw575EAAJizmpXVDRs2ZNeuXXn66aezcePGeY8DAECBmpXVNWvWzHsEAADK1KysAgDAq4lVAABqiVUAAGqJVQAAaolVAABqiVUAAGqJVQAAaolVAABq1TwUYGFhIQsLC0mSJ598MkmydevWrF27NkmyfPnyrF+/fk7TAQAwDzWxum3bttx2222vOLZ79+7s3r07SbJy5UqxCgDwFjOmaTrU+UOeBADgzeHUa7fksU9cNM8RxoEO2rMKAEAtsQoAQC2xCgBArdpYPfXaLfMeAQCAOauNVQAAEKsAANQSqwAA1BKrAADUEqsAANQSqwAA1BKrAADUEqsAANQSqwAA1BKrAADUEqsAANQSqwAA1BKrAADUEqsAANQSqwAA1BKrAADUEqsAANQSqwAA1BKrAADUEqsAANQSqwAA1BrTNB385BhfSLJ86cZ5heVJvjOn9wYAeCuaZ399Z5qmX3v1wUPG6jyNMb4+TdMvznsOAIC3isb+sg0AAIBaYhUAgFrNsfpX8x4AAOAtpq6/avesAgBA88oqAABvcXWxOsY4ZozxwBhj+xjj4THG9fOeCQDgcHewxhqL/myMsWuM8Y0xxh+97PhfjjG+OcZ4aIzxznnMfdQ83vR1/CDJ+6Zp2jPGODrJfWOMz0/T9NV5DwYAcBg7YGMlOTPJKUnOmKZp3xjjhNn1v55k1ezr3Uk2zv5cUnWxOi1uot0z+/bo2ZeNtQAA/w+HaKwPJ7l8mqZ9s+u+PbvmA0k2zX7uq2OM48cYJ03T9B9LOXfdNoAkGWMcOcbYluTbSb44TdPX5jwSAMBh7yCNdXqSS8cYXx9jfH6MsWp2+clJvvWyH39idmxJVcbqNE17p2k6O8mKJO8aY/zsnEcCADjsHaSxfijJ87MnV/11klvnOOJrVMbqi6Zp+u8k/5jkNc+JBQDg/+ZVjfVEkr+fnbozydtnr/8ti3tZX7RidmxJ1cXqGOMnxhjHz17/cJILkuyc61AAAIe5QzTWQpI1s8t+Jcmu2evPJPmd2V0BfinJ95Z6v2pS+AGrJCcluW2McWQWY/pvp2n67JxnAgA43B2wscYY9yX51Bjjo1n8ANbvz67/XJLfSPLNJM8l+d05zOwJVgAA9KrbBgAAAC8SqwAA1BKrAADUEqsAANQSqwAA1BKrAK8yxrh4jDGNMc6Y9ywAb3ViFeC1Lkty3+zPuRhjNN4HG2DJiVWAlxljLEtyXpIrk/z27Nh7xxhfGmNsHmPsHGN8aowxZuc+McbYMcZ4aIyxfoxx5Bjj0dkTX44fY+wdY7xndu2Xxxirxhg/Msa4dYzxwBjjn8cYH5idXzvG+MwY494k98znbwCgi3+5A7zSB5J8YZqmXWOM/xxj/MLs+DuSnJXk35Pcn+SXxxjfSPKbSc6YpmkaYxw/TdPeMcYjSX4myU8n+ackq8cYX0tyyjRN/zrG+PMk907T9HuzRx8+MMb4h9n7vDPJ26dp+q+l+oUBmllZBXily5J8evb609m/FeCBaZqemKZpX5JtSU5N8r0kzyf5mzHGJVl8HGGSfCXJe2Zff5HFldpzkjw4O/+rSa4dY2xL8qUkxyT5qdm5LwpVgP2srALMjDF+PMn7kvzcGGNKcmSSKcmWJD942aV7kxw1TdP/jDHeleT8JB9M8oezn/9ykg8n+ckkf5Lkj5O8N4sRmyQjyW9N0/TIq97/3UmefUN+OYDDlJVVgP0+mOT2aZpWTtN06jRNpyR5NMnqA10829/6Y9M0fS7JR5P8/OzUA0nOTbJvmqbns7gS+wdZjNgkuTvJupfte33HG/T7ABz2xCrAfpclufNVx/4uB78rwI8m+ewY46Es3j3gmiSZpukHSb6V5Kuz674yu/ZfZt//aZKjkzw0xnh49j0ABzCmaZr3DAAAcEBWVgEAqCVWAQCoJVYBAKglVgEAqCVWAQCoJVYBAKglVgEAqCVWAQCo9b+lzUDoz9UHogAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "counts = count_digits(answers)\n", + "plot_counts(counts)\n", + "print(f\"Entropy: {entropy(counts)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "cf4cacdf-a31d-43bd-8517-eec9f656eee4", + "metadata": {}, + "source": [ + "The entropy of the results is much lower, we say that the model is more \"certain\" of its answers. " + ] + }, + { + "cell_type": "markdown", + "id": "22f31872-aab7-4a68-b9f2-d335a4f1a875", + "metadata": {}, + "source": [ + "## How `gpt-4` compares to `text-davinci-003`\n", + "\n", + "Let us now look at how GPT4 fares on the original question:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2d5ab5b8-eca5-47f5-a35c-5f3865e35755", + "metadata": {}, + "outputs": [], + "source": [ + "model = models.text_completion.openai(\"gpt-4\", max_tokens=128)\n", + "\n", + "question = \"When I was 6 my sister was half my age. Now I’m 70 how old is my sister?\"\n", + "prompt = gsm8k_prompt(question)\n", + "answers = model(prompt, samples=20)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d316a5f7-cebc-4b09-9b1b-aee219b2f088", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Entropy: -0.0\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqwAAAHgCAYAAABgsD+6AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAQI0lEQVR4nO3dYahf9X3H8c+vua6J3Ui4hKKDOpVMxTmn1Tlwatt0rKKUziZgfJa5B2tg0TkY5EEZLepWYYITwTJ0o5Wi08ypXYdjMxG76OpkLc51jQUdU4ZiHaagREg8e5C/0WhiNWlyP7l9vSDk/s/v/Lnffx6Ed05+95wxTVMAAKDVhxZ6AAAAeC+CFQCAaoIVAIBqghUAgGqCFQCAaoIVAIBqcz9h3T2vAAA4EsaBFlxhBQCgmmAFAKCaYAV4H15++eXcdtttueyyy7Jq1aosW7Ysy5cvzwUXXJDbb789b7zxxn7f9+ijj+aSSy7J/Px8li1bljPPPDM33XRTdu/efYQ/AcDRa/yER7PawwqQ5Ktf/Wo2bNiQ448/Pp/61Kdywgkn5MUXX8y9996bHTt2ZM2aNbnnnnsyxltbsO6///6sWbMmS5cuzeWXX575+fl885vfzPbt27N27drcc889C/iJAOoccA+rYAV4H7Zs2ZJXX301l156aT70obf+c+qFF17Ieeedl+eeey6bN2/OmjVrkiQ//vGPs2rVquzYsSPbtm3LueeemyTZuXNnVq9encceeyx33nln1q1btyCfB6CQH7oCOBSrV6/OZz/72X1iNUmOO+64fOELX0iSPPzww3uPb968OS+99FLWrVu3N1aTZOnSpbnuuuuSJLfeeuvhHxxgERCsAIfomGOOSZLMzb11p8AtW7YkSS6++OJ3nX/RRRfl2GOPzaOPPprXX3/9yAwJcBQTrACHYNeuXfn617+eZN843b59e5LklFNOedd75ubmctJJJ2XXrl155plnjsygAEcxwQpwCDZt2pSnnnoql1xyST7zmc/sPb5jx44kyfLly/f7vjePv/LKK4d9RoCjnWAFOEg333xzbrzxxpx22mm54447FnocgEVLsAIchFtuuSVXX311Tj/99GzdujXz8/P7rL95BfXNK63v9ObxFStWHNY5ARYDwQrwAd10003ZuHFjzjjjjGzdujXHHXfcu8459dRTkyRPP/30u9Z27dqVZ599NnNzczn55JMP+7wARzvBCvAB3HDDDbnmmmty1llnZevWrfnoRz+63/NWr16dJHnwwQfftfbII4/ktddey/nnn58Pf/jDh3VegMVAsAK8T9dee202bdqUc845Jw899FBWrlx5wHPXrl2blStX5q677soTTzyx9/jOnTvzxS9+MUmyYcOGwz4zwGLgSVcA78PXvva1rF+/PkuWLMnGjRv3+9P/J554YtavX7/39X333Ze1a9dm6dKlWbduXebn5/PAAw/sfTTr3Xffvc+jXAF+xnk0K8Ch+NKXvpQvf/nL73nOJz7xiX2edpUk27Zty/XXX5/HHnssO3fuzKpVq3LllVfmqquuypIlSw7jxABHHcEKAEC1AwarPawAAFQTrAAAVBOsAABUm1voAQ7kxE3fWugRAAB+pvz3Vy5d6BH2yxVWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCV
YAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKqNaZoOvDjGg0lWHrlxABaFlUl+tNBDABxlfjRN08X7W3jPYAXggxtjPDFN07kLPQfAYmFLAAAA1QQrAADVBCvAT99fLvQAAIuJPawAAFRzhRUAgGpzCz0AwNFsjLEiyW1JzkgyJbkyyR8mOXV2yookr0zTdNaRnw5gcRCsAIfmL5I8OE3T2jHGzyU5dpqmy99cHGPcmGTHgk0HsAjYwwpwkMYYy5N8L8nJ037+Mh1jjCT/k2T1NE0/PMLjASwa9rACHLyTkryU5K/HGN8dY9w2xvjI29YvTPKiWAU4NIIV4ODNJfl4klunaTo7yatJNr1t/Yokdy7EYACLiWAFOHjPJ3l+mqbvzF5vzp6AzRhjLsnnk/zNAs0GsGgIVoCDNE3TC0meG2O8eUeATyf5/uzr30ryg2manl+Q4QAWEXcJADg0G5N8Y3aHgGeS/O7s+LrYDgDwU+EuAQAAVLMlAACAaoIVAIBqghUAgGqCFQCAaoIVAIBqghXgHcYYvzPGmMYYpy30LAAIVoD9uSLJv8x+XxCzJ2UBEMEKsI8xxs8nuSDJ72XPzf8zxvjkGOPhMcbmMcYPxhjfGGOM2dpXxhjfH2M8Ocb48zHGkjHGs2OPFWOM3WOMi2bnPjLG+OUxxkfGGH81xnh8jPHdMcbnZuvrxxgPjDG2JHloYf4EAPr4FzzAvj6X5MFpmp4eY7w8xjhndvzsJL+S5H+TbEvym2OM/0pyWZLTpmmaxhgrpmnaPcbYnuT0JCcl+fckF44xvpPkY9M0/XCM8adJtkzTdOUYY0WSx8cY/zz7Ph9PcuY0Tf93pD4wQDtXWAH2dUWSu2Zf35W3tgU8Pk3T89M0vZHke0lOTLIjyc4kt48xPp/ktdm5305y0ezXn2XPFdtfT/Jvs/XfTrJpjPG9JA8nWZrkhNnaP4lVgH25wgowM8aYT7I6ya+OMaYkS5JMSb6V5PW3nbo7ydw0TbvGGOcl+XSStUn+YPb+R5JsSPKLSf4kyR8n+WT2hGySjCRrpmna/o7v/xtJXj0sHw7gKOYKK8Bb1ia5Y5qmX5qm6cRpmj6W5NkkF+7v5Nl+1+XTNP1DkmuS/Nps6fEk5yd5Y5qmndlzRfb3sydkk+Qfk2x82z7Ysw/T5wFYFAQrwFuuSPJ37zj2tznw3QJ+IcnfjzGezJ67CvxRkkzT9HqS55L86+y8b8/O/Y/Z62uTHJPkyTHGf85eA3AAY5qmhZ4BAAAOyBVWAACqCVYAAKoJVgAAqglWAACqCVYAAKoJVgAAqglWAACqCVYAAKr9P8bb7HZA9fu3AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "counts = count_digits(answers)\n", + "plot_counts(counts)\n", + "print(f\"Entropy: {entropy(counts)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "2f6c8a22-fdf5-4f30-865c-8e11927b1b7c", + "metadata": {}, + "source": [ + "GPT4 returns the correct answer with certainty." + ] + }, + { + "cell_type": "markdown", + "id": "50d4a55e-86df-46ab-8b38-302c79bc8add", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n", + "When generating text completions with a language model we typically look at one output sample, trying to find the \"right\" answer. However, doing so we obscure the diversity of answers that these language models can produce. Assuming the diversity of answers reflects these models' \"uncertainty\", we can use measures such as the entropy of the answers' distribution to evaluate the quality of the answer.\n", + "\n", + "Which result should we be choosing once we have different samples? There is no definite answer to this question. The [self-consistency method](https://arxiv.org/abs/2203.11171) consists in choosing the result based on a majority vote. We think this choice is arbitrary and that choosing the correct answer is a [decision theory](https://en.wikipedia.org/wiki/Decision_theory) problem, which can only be solved by specifying a loss function that is adapted to the experiment's context; the majority vote being a particular case with a 0-1 loss." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d6fff5ab4c847acf383cc8dcdf7283a28ab48225 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 24 May 2023 14:22:15 +0200 Subject: [PATCH 138/734] Add simulation-based inference example notebook --- examples/sampling.ipynb | 5 +- examples/simulation_based_inference.ipynb | 370 ++++++++++++++++++++++ 2 files changed, 373 insertions(+), 2 deletions(-) create mode 100644 examples/simulation_based_inference.ipynb diff --git a/examples/sampling.ipynb b/examples/sampling.ipynb index dd7321e7..0743bb9d 100644 --- a/examples/sampling.ipynb +++ b/examples/sampling.ipynb @@ -40,7 +40,7 @@ "id": "2a3514d6-d5d7-46e9-9b69-1251d337e094", "metadata": {}, "source": [ - "In this example we will look at completion results for questions from the GSM8K dataset, using few-shots prompts with 5 examples. We first use `outlines.text.prompt` to build the few-shot prompt; we partially evaluate the prompt function with the few-shot examples:" + "In this example we will look at completion results for questions from the GSM8K dataset, using few-shots prompts with 5 examples. We first use `outlines.text.prompt` to build the few-shot prompt. `outlines.text.prompt` is a decorator around \"prompt functions\" which contain the prompt template in its docstring. Outlines uses the Jinja2 templating engine to render the prompt when the function is called with the variables' values; it thus allows you to build complex prompts very easily." 
    ]
   },
   {
@@ -96,6 +96,7 @@
     "    A:\n",
     "    \"\"\"\n",
     "\n",
+    "# Prompt functions can be partially evaluated like any other function\n",
     "gsm8k_prompt = ft.partial(few_shot_prompt, examples=examples)"
    ]
   },
@@ -112,7 +113,7 @@
    "id": "a273ed78-e813-467e-85f3-16d7f283ba87",
    "metadata": {},
    "source": [
-    "Let us now sample 20 completions with the `text-davinci-003` model:"
+    "Let us now sample 20 completions with the `text-davinci-003` model. Outlines is sampling-first, and allows you to draw several samples with both OpenAI and `transformers` models easily:"
    ]
   },
   {
diff --git a/examples/simulation_based_inference.ipynb b/examples/simulation_based_inference.ipynb
new file mode 100644
index 00000000..e6b99958
--- /dev/null
+++ b/examples/simulation_based_inference.ipynb
@@ -0,0 +1,370 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "e7c7d0bb-8d45-4139-a584-02c7196db92b",
+   "metadata": {},
+   "source": [
+    "# Find the best few-shot examples using simulation-based inference"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "831a76f5-c569-4174-adab-fb0245877367",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "import random\n",
+    "import requests\n",
+    "import re\n",
+    "\n",
+    "import outlines.models as models\n",
+    "import outlines.text as text\n",
+    "\n",
+    "random.seed(0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "id": "ec604edc-c8b6-4088-bf17-b77ae57d05a1",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "env: OPENAI_API_KEY=# your key here\n"
+     ]
+    }
+   ],
+   "source": [
+    "%env OPENAI_API_KEY = # your key here"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "aabb4db6-fd94-4c42-ab7f-97c3de45b2cc",
+   "metadata": {},
+   "source": [
+    "In this example we will use GPT-3.5 to solve problems from the GSM-8K dataset. The state-of-the-art performance on this dataset is obtained using few-shot prompting with 5 examples. However, it is not clear how one should select these examples. Here, we will use **simulation-based inference** to try to infer which examples we should be using to get the best out of the model's ability to solve these problems.\n",
+    "\n",
+    "Let's start by downloading the dataset:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "367f5f89-8e5d-4381-b9eb-78c60bc50f86",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result = requests.get(\"https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/train.jsonl\")\n",
+    "lines = result.iter_lines()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ef0f7aa9-d528-41e9-8a9d-4497f01f0692",
+   "metadata": {},
+   "source": [
+    "We now divide the train set into two sets:\n",
+    "- 10 problems from which we are going to sample 5 examples at random for every inference;\n",
+    "- 500 problems which we are going to use to perform inference."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "0667c4a8-cebe-4796-bbc9-575ee9498717",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "example_set = []\n",
+    "for _ in range(10):\n",
+    "    line = json.loads(next(lines))\n",
+    "    answer = re.findall(r\"\\d+\", line[\"answer\"])[-1]\n",
+    "    example_set.append({\"question\": line[\"question\"], \"answer\": answer})\n",
+    "\n",
+    "train_set = []\n",
+    "for _ in range(500):\n",
+    "    line = json.loads(next(lines))\n",
+    "    answer = re.findall(r\"\\d+\", line[\"answer\"])[-1]\n",
+    "    train_set.append({\"question\": line[\"question\"], \"answer\": answer})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4b52b470-d818-495a-a6e3-e50a1deff13c",
+   "metadata": {},
+   "source": [
+    "Now let's define the prompt, the model, and the sampling loop. The sampling loop consists of choosing 5 examples at random and sampling 20 model answers; if an answer is correct we keep the example ids as samples, otherwise we discard them:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "9fbebaa9-f05e-4c6b-8875-73a08273bbb5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "@text.prompt\n",
+    "def few_shots(question, examples):\n",
+    "    \"\"\"\n",
+    "    {% for example in examples %}\n",
+    "    Q: {{ example.question }}\n",
+    "    A: {{ example.answer }}\n",
+    "    {% endfor %}\n",
+    "    Q: {{ question }}\n",
+    "    A:\n",
+    "    \"\"\"\n",
+    "\n",
+    "model = models.text_completion.openai(\"text-davinci-003\", max_tokens=128)\n",
+    "\n",
+    "# TODO: This could largely benefit from vectorization in #52\n",
+    "def one_train_example(problem, example_set):\n",
+    "    example_ids = random.choices(range(0, len(example_set)), k=5)\n",
+    "    examples = [example_set[i] for i in example_ids]\n",
+    "    prompt = few_shots(problem[\"question\"], examples)\n",
+    "    answers_raw = model(prompt, samples=20)\n",
+    "\n",
+    "    samples = []\n",
+    "    for answer_raw in answers_raw:\n",
+    "        try:\n",
+    "            answer = re.findall(r\"\\d+\", answer_raw)[-1]\n",
+    "            if answer == problem[\"answer\"]:\n",
+    "                samples += example_ids\n",
+    "            else:\n",
+    "                continue\n",
+    "        except IndexError:\n",
+    "            pass\n",
+    "\n",
+    "    return samples"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "1dae1ef2-c9e0-4c98-8686-7fbc2ff55e56",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "9efc9d077af24a2eb5ea3c05fe63f298",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/500 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib.pylab as plt\n",
+    "\n",
+    "example_ids, counts = np.unique(samples, return_counts=True)\n",
+    "\n",
+    "fig = plt.figure(figsize=(12,8))\n",
+    "ax = fig.add_subplot(111)\n",
+    "ax.bar(example_ids, counts)\n",
+    "\n",
+    "ax.spines[[\"top\", \"right\"]].set_visible(False)\n",
+    "\n",
+    "ax.set_xticks(range(10))\n",
+    "ax.set_xlabel(\"Example #\")\n",
+    "ax.set_ylabel(\"Counts\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cde37e5b-377e-4872-af40-674d680bd2da",
+   "metadata": {},
+   "source": [
+    "Looking at the distribution, our best guess for which examples we should use for benchmarking on the test set would be 0, 1, 2, 6 and 9. This method can be trivially extended to other workflows that use few-shot examples to query LLMs. Of course, simulation-based inference extends beyond choosing the \"best\" prompt, and could for instance be useful to select the structure of chains of LLMs and tools as well."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "bddda20b-234a-4d30-b40a-90708fbaba23",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'question': 'Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?',\n",
+       " 'answer': '72'}"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "example_set[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "fb186bf9-62b7-485f-a8ce-401f551a9e57",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'question': 'Weng earns $12 an hour for babysitting. Yesterday, she just did 50 minutes of babysitting. How much did she earn?',\n",
+       " 'answer': '10'}"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "example_set[1]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "id": "ae427bb2-e3f4-4a96-a508-e8011a0fc553",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'question': 'Betty is saving money for a new wallet which costs $100. Betty has only half of the money she needs. Her parents decided to give her $15 for that purpose, and her grandparents twice as much as her parents. How much more money does Betty need to buy the wallet?',\n",
+       " 'answer': '5'}"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "example_set[2]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "id": "fe43ae0f-c18f-4b74-b639-8481472edf4d",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'question': 'Albert is wondering how much pizza he can eat in one day. He buys 2 large pizzas and 2 small pizzas. A large pizza has 16 slices and a small pizza has 8 slices. If he eats it all, how many pieces does he eat that day?',\n",
+       " 'answer': '48'}"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "example_set[6]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "id": "19d9d936-d0f0-4927-990c-76dbbfa95b47",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'question': 'Tina makes $18.00 an hour. If she works more than 8 hours per shift, she is eligible for overtime, which is paid by your hourly wage + 1/2 your hourly wage. 
If she works 10 hours every day for 5 days, how much money does she make?',\n", + " 'answer': '990'}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "example_set[9]" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From f048fa21d196f35e225bbe3054d1814656af87e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 15 May 2023 10:43:49 +0200 Subject: [PATCH 139/734] Update reference documentation --- docs/source/index.rst | 25 +++---- docs/source/integrations/llamaindex.rst | 2 - docs/source/integrations/messaging.rst | 2 - docs/source/integrations/python.rst | 2 - docs/source/reference/batching.rst | 22 +++++- .../reference/controlled_generation.rst | 74 +++++++++++++++++++ docs/source/reference/multimodel.rst | 67 +++++++++++++++++ 7 files changed, 172 insertions(+), 22 deletions(-) delete mode 100644 docs/source/integrations/llamaindex.rst delete mode 100644 docs/source/integrations/messaging.rst delete mode 100644 docs/source/integrations/python.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index d425c8ab..0b5c685b 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -9,6 +9,7 @@ **Outlines** is a Python library to write reliable programs for interactions with generative models: language models, diffusers, multimodal models, classifiers, etc. It provides a Domain Specific Language (DSL) to make prompting easier, constrained text generation and is natively concurrent. It integrates well with the rest of the Python ecosystem: tools, vector stores, etc. +*Outlines aims to be the library frameworks are made with. It is more like NumPy than LangChain.* .. grid:: 2 @@ -45,7 +46,7 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: from my_response_models import command_response - @outlines.prompt + @text.prompt def agent_prompt(objective, goals, tools, response_model): """You are an AI with the following objective: {{ objective }} @@ -64,11 +65,10 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: """ - @outlines.chain - async def agent(objective, goals, tools) + def agent(objective, goals, tools) complete = models.text_completion.hf("sshleifer/tiny-gpt2") prompt = agent_prompt(objective, goals, tools , command_response) - answer = await complete(prompt) + answer = complete(prompt) command = command_response(answer) return command @@ -82,12 +82,16 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: 📜 Features ----------- + Simple and powerful prompting primitives based on the Jinja templating engine. 
+ Integration with OpenAI and HuggingFace models
 - A powerful domain-specific language to write and render prompts;
+- Interleave completions with loops, conditionals, and custom Python functions;
 - OpenAI integration: language models, embeddings and Dall-E;
 - HuggingFace integration: ``transformers`` and ``diffusers``;
-- Parallel model and tool calls with the ``outlines.elemwise`` decorator;
-- Map your chains over different inputs in parallel to avoid overfitting;
+- Caching;
+- Sampling multiple sequences;
+- Controlled generation, including multiple choice, type constraints and dynamic stopping.

 .. toctree::
    :maxdepth: 1
@@ -105,12 +109,3 @@
    reference/controlled_generation
    reference/multimodel
    reference/batching
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Integrations
-   :hidden:
-
-   integrations/python.rst
-   integrations/llamaindex.rst
-   integrations/messaging.rst
diff --git a/docs/source/integrations/llamaindex.rst b/docs/source/integrations/llamaindex.rst
deleted file mode 100644
index 910960d9..00000000
--- a/docs/source/integrations/llamaindex.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-🦙 Llamaindex
-=============
diff --git a/docs/source/integrations/messaging.rst b/docs/source/integrations/messaging.rst
deleted file mode 100644
index 144e9aa8..00000000
--- a/docs/source/integrations/messaging.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-✉ Slack, Discord, Twitter
-==========================
diff --git a/docs/source/integrations/python.rst b/docs/source/integrations/python.rst
deleted file mode 100644
index fe36a28b..00000000
--- a/docs/source/integrations/python.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-🐍 Python code
-==============
diff --git a/docs/source/reference/batching.rst b/docs/source/reference/batching.rst
index 030c11e3..6182539b 100644
--- a/docs/source/reference/batching.rst
+++ b/docs/source/reference/batching.rst
@@ -1,2 +1,22 @@
-Batching
+Sampling
 ========
+
+Outlines is sampling-first, and is built to generate several samples from the same prompt:
+
+.. code::
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("text-davinci-003")
+    answers = complete(
+        "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?",
+        samples=10
+    )
+
+This will enable probabilistic applications down the line; stay tuned for more updates. In the meantime you can take a look at the `self-consistency example `_.
+
+
+Batching
+--------
+
+Outlines will soon allow you to vectorize model calls.
diff --git a/docs/source/reference/controlled_generation.rst b/docs/source/reference/controlled_generation.rst
index 3413d2c8..acaec852 100644
--- a/docs/source/reference/controlled_generation.rst
+++ b/docs/source/reference/controlled_generation.rst
@@ -1,2 +1,76 @@
 Controlled Generation
 =====================
+
+While LLM capabilities are increasingly impressive, we can make their output more reliable by *steering* the generation. Outlines thus offers mechanisms to specify high-level constraints on text completions by generative language models.
+
+
+Stopping sequence
+-----------------
+
+By default, language models stop generating tokens after an ``<EOS>`` token has been generated, or after a set maximum number of tokens. Their output can be verbose, and for practical purposes it is often necessary to stop the generation after a given sequence has been found instead. You can use the `stop_at` keyword argument when calling the model with a prompt:
+
+.. code::
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("text-davinci-002")
+    expert = complete("Name an expert in quantum gravity.", stop_at=["\n", "."])
+
+
+.. warning::
+
+    The OpenAI API does not allow more than 4 stopping sequences.
+
+
+Choice between different options
+--------------------------------
+
+In some cases we know the output is to be chosen among several options. We can restrict the completion's output to these choices using the `is_in` keyword argument:
+
+
+.. code::
+
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("text-davinci-002")
+    answer = complete(
+        "Pick the odd word out: skirt, dress, pen, jacket",
+        is_in=["skirt", "dress", "pen", "jacket"]
+    )
+
+
+Type constraints
+----------------
+
+We can ask completions to be restricted to `int`s or `float`s by passing the value "int" or "float" to the `type` keyword argument:
+
+
+.. code::
+
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("text-davinci-002")
+    answer = complete(
+        "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?",
+        type="int"
+    )
+
+
+.. warning::
+
+    This feature is very limited for OpenAI models, due to restrictions on OpenAI's API.
+
+
+The future of constrained generation
+------------------------------------
+
+We believe constrained generation holds a lot of promise when it comes to building reliable systems that use language models. In future releases of Outlines, you will be able to:
+
+- Exclude sequences with a `not_in` keyword argument;
+- Constrain the output to be valid JSON;
+- Constrain the output to be a valid array;
+- Constrain the output to be valid Python code.
+
+We also believe that `alternative steering methods `_ can be useful and plan on expanding Outlines' prompt DSL and generation methods in this direction.
diff --git a/docs/source/reference/multimodel.rst b/docs/source/reference/multimodel.rst
index 598e428f..ab3453a9 100644
--- a/docs/source/reference/multimodel.rst
+++ b/docs/source/reference/multimodel.rst
@@ -1,2 +1,69 @@
 Multimodal, Multimodels
 =======================
+
+Outlines interfaces with multiple model providers, so models can be easily swapped. It is built so that different models can be chained together, with different modalities.
+
+OpenAI
+------
+
+Outlines connects to OpenAI's text completion and chat completion APIs. Note however that Outlines does not provide a chat interface, and uses the chat completion API for text completion. Both are accessible via `models.text_completion.openai`, by passing the name of the model. You can currently specify `max_tokens` and `temperature` when initializing the model:
+
+.. code::
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("gpt-4", max_tokens=128, temperature=0.7)
+
+
+It is also possible to use DALL-E to generate images:
+
+.. code::
+
+    import outlines.models as models
+
+    generate = models.image_generation.openai("dall-e")
+
+
+HuggingFace
+-----------
+
+Outlines can call models from HuggingFace's `transformers` and `diffusers` libraries. The models are then run locally.
+
+.. code::
+
+    import outlines.models as models
+
+    complete = models.text_completion.hf("sshleifer/tiny-gpt2")
+    generate = models.image_generation.hf("runwayml/stable-diffusion-v1-5")
+
+
+.. note::
+
+    Outlines calls the PyTorch version of models by default. The generation process also runs with defaults; please `open an issue <https://github.com/normal-computing/outlines/issues>`_ if you have more specific needs.
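+
+Since both providers expose the same calling convention, models can be combined in a single program. The following is only a sketch of what such a chain could look like; the prompt and the model choices are purely illustrative:
+
+.. code::
+
+    import outlines.models as models
+
+    complete = models.text_completion.openai("text-davinci-003")
+    generate = models.image_generation.hf("runwayml/stable-diffusion-v1-5")
+
+    # Chain a text-completion model into an image-generation model
+    description = complete("Describe a serene mountain lake in one sentence.")
+    image = generate(description)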
+
+
+Bring Your Own Model
+--------------------
+
+Outlines models are currently simple functions that return a text or an image given a prompt; you can thus easily use any model. We will soon provide a more comprehensive integration that handles controlled generation for any model.
+
+If you think the model you are using could be useful to others, `open an issue <https://github.com/normal-computing/outlines/issues>`_ 😊
+
+
+Coming soon
+-----------
+
+We plan on integrating more model providers, for instance:
+
+- Anthropic
+- Llamacpp
+- GPT4All
+
+We currently favor the integration of *Open Source models* since they give more freedom for guided generation. We will also extend the range of models to allow building more complex chains, including:
+
+- Image captioning;
+- Classification;
+- Image segmentation;
+- Speech-to-text;
+- Image question answering;
+- etc.

From c2d60d0eee6a76a09ed180e4e0e303c7827e48d2 Mon Sep 17 00:00:00 2001
From: brosand
Date: Thu, 25 May 2023 10:32:30 -0400
Subject: [PATCH 140/734] Fix links in readme

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index a3027397..5f56302d 100644
--- a/README.md
+++ b/README.md
@@ -241,5 +241,5 @@ Do not hesitate to open a draft PR before your contribution is ready, especially
 - [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py)
 - [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py)
 - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py)
-- [Uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb)
-- [Simulation-based inference](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb)
+- [Uncertainty](https://github.com/normal-computing/outlines/blob/main/examples/sampling.ipynb)
+- [Simulation-based inference](https://github.com/normal-computing/outlines/blob/main/examples/simulation_based_inference.ipynb)

From 1d76fb9bffb0641ddb78232a9df120041bdfe742 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 25 May 2023 16:42:38 +0200
Subject: [PATCH 141/734] Add README to pyproject

---
 pyproject.toml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index f6e3f31c..745c3e49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,6 +42,15 @@ test = [
     "transformers"
 ]
 
+[project.urls]
+homepage = "https://github.com/normal-computing/outlines"
+documentation = "https://normal-computing.github.io/outlines/"
+repository = "https://github.com/normal-computing/outlines"
+
+[project.readme]
+file="README.md"
+content-type = "text/markdown"
+
 [tool.setuptools_scm]
 write_to = "outlines/_version.py"

From 4438f78d1f350406e595ec55b58738929a6a9e0a Mon Sep 17 00:00:00 2001
From: Alex Nguyen
Date: Mon, 29 May 2023 17:35:11 +0700
Subject: [PATCH 142/734] Fix bug in
 https://github.com/normal-computing/outlines#prompting example

`model = models.text_completion.openai("text-davinci-003")` should become
`complete = models.text_completion.openai("text-davinci-003")`
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5f56302d..f71597c1 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@ def labelling(to_label, examples):
     {{ to_label }} //
     """
 
-model = models.text_completion.openai("text-davinci-003")
+complete = models.text_completion.openai("text-davinci-003")
 prompt = labelling("Just awesome", examples)
 answer = complete(prompt)
 ```

From 070553a2d0587f370a03b304fafcceeeec32b690 Mon Sep 17 00:00:00 2001
From: harshkumarchourasia
Date: Sun, 4 Jun 2023 14:27:24 +0530
Subject: [PATCH 143/734] Fix docstring typo

---
 outlines/models/openai.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index c08e3f7f..0d5a881e 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -23,7 +23,7 @@ def OpenAICompletion(
     max_tokens: Optional[int] = 216,
     temperature: Optional[float] = 1.0,
 ) -> Callable:
-    """Create a function that will call the OpenAI conmpletion API.
+    """Create a function that will call the OpenAI completion API.
 
     You should have the `openai` package installed. Available models are listed
     in the `OpenAI documentation `_.
@@ -100,7 +100,7 @@ def generate_base(
 def generate_choice(
     prompt: str, is_in: List[str], samples: int
 ) -> Union[List[str], str]:
-    """Generate a a sequence that must be one of many options.
+    """Generate a sequence that must be one of many options.
 
     We tokenize every choice, iterate over the token lists, create a mask
     with the current tokens and generate one token. We progressively

From fd12cab42034319abdc4b67367c80015b2b62701 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Fri, 26 May 2023 13:00:36 +0200
Subject: [PATCH 144/734] Vectorize scalar function

---
 outlines/__init__.py |   2 +
 outlines/base.py     | 162 +++++++++++++++++++++++++++++++++++++++++++
 tests/test_base.py   | 152 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 316 insertions(+)
 create mode 100644 outlines/base.py
 create mode 100644 tests/test_base.py

diff --git a/outlines/__init__.py b/outlines/__init__.py
index a99de432..a5082287 100644
--- a/outlines/__init__.py
+++ b/outlines/__init__.py
@@ -1,4 +1,5 @@
 """Outlines is a Generative Model Programming Framework."""
+from outlines.base import vectorize
 from outlines.caching import clear_cache, disable_cache, get_cache
 from outlines.text import prompt
 
@@ -7,4 +8,5 @@
     "disable_cache",
     "get_cache",
     "prompt",
+    "vectorize",
 ]

diff --git a/outlines/base.py b/outlines/base.py
new file mode 100644
index 00000000..065a567a
--- /dev/null
+++ b/outlines/base.py
@@ -0,0 +1,162 @@
+import asyncio
+import functools
+import inspect
+
+import numpy as np
+
+
+class vectorize:
+    """Returns an object that acts like a function but takes arrays as an input.
+
+    The vectorized function evaluates `func` over successive tuples of the input
+    chararrays and returns a single NumPy chararray or a tuple of NumPy chararrays.
+
+    Its behavior is similar to NumPy's `vectorize` for Python functions: the function
+    being vectorized is executed in a `for` loop. Coroutines, however, are executed
+    concurrently.
+
+    Part of the code was adapted from `numpy.lib.function_base`.
+
+    """
+
+    def __init__(self, func, signature=None):
+        self.func = func
+        self.signature = signature
+        self.is_coroutine_fn = inspect.iscoroutinefunction(func)
+
+        functools.update_wrapper(self, func)
+
+        if self.signature is not None:
+            raise NotImplementedError(
+                "Vectorization of non-scalar functions is not implemented yet."
+            )
+
+    def __call__(self, *args, **kwargs):
+        """Call the vectorized function."""
+        if not args and not kwargs:
+            return self.call_thunk()
+        elif self.signature is not None:
+            return self.call_with_signature(*args, **kwargs)
+        else:
+            return self.call_no_signature(*args, **kwargs)
+
+    def call_thunk(self):
+        """Call a vectorized thunk.
+
+        Thunks have no arguments and can thus be called directly.
+
+        """
+        if self.is_coroutine_fn:
+            loop = asyncio.new_event_loop()
+            try:
+                outputs = loop.run_until_complete(self.func())
+            finally:
+                loop.close()
+        else:
+            outputs = self.func()
+
+        return outputs
+
+    def call_no_signature(self, *args, **kwargs):
+        """Call functions and coroutines when no signature is specified.
+
+        When no signature is specified we assume that all of the function's
+        inputs and outputs are scalars (core dimension of zero). We first
+        broadcast the input arrays, then iteratively apply the function over the
+        elements of the broadcasted arrays and finally reshape the results to
+        match the input shape.
+
+        Functions are executed in a for loop, coroutines are executed
+        concurrently.
+
+        """
+        # Convert args and kwargs to arrays
+        args = [np.array(arg) for arg in args]
+        kwargs = {key: np.array(value) for key, value in kwargs.items()}
+
+        # Broadcast args and kwargs
+        broadcast_shape = np.broadcast(*args, *list(kwargs.values())).shape
+        args = [np.broadcast_to(arg, broadcast_shape) for arg in args]
+        kwargs = {
+            key: np.broadcast_to(value, broadcast_shape)
+            for key, value in kwargs.items()
+        }
+
+        # Execute functions in a loop, and coroutines concurrently
+        if self.is_coroutine_fn:
+            outputs = self.vectorize_call_coroutine(broadcast_shape, args, kwargs)
+        else:
+            outputs = self.vectorize_call(broadcast_shape, args, kwargs)
+
+        # `outputs` is a flat array or a tuple of flat arrays. We reshape the arrays
+        # to match the input shape.
+        outputs = [
+            results if isinstance(results, tuple) else (results,) for results in outputs
+        ]
+        outputs = tuple(
+            [np.asarray(x).reshape(broadcast_shape).squeeze() for x in zip(*outputs)]
+        )
+        outputs = tuple([x.item() if np.ndim(x) == 0 else x for x in outputs])
+
+        n_results = len(list(outputs))
+
+        return outputs[0] if n_results == 1 else outputs
+
+    def vectorize_call(self, broadcast_shape, flat_args, flat_kwargs):
+        """Run the function in a for loop.
+
+        A possible extension would be to parallelize the calls.
+
+        Parameters
+        ----------
+        broadcast_shape
+            The broadcast shape of the input arrays.
+        flat_args
+            A flat array that contains the function's arguments.
+        flat_kwargs
+            A flat array that contains the function's keyword arguments.
+
+        """
+        outputs = []
+        for index in np.ndindex(*broadcast_shape):
+            args = tuple(arg[index] for arg in flat_args)
+            kwargs = {key: value[index] for key, value in flat_kwargs.items()}
+            outputs.append(self.func(*args, **kwargs))
+
+        return outputs
+
+    def vectorize_call_coroutine(self, broadcast_shape, args, kwargs):
+        """Run coroutines concurrently.
+
+        Creates as many tasks as needed and executes them in a new event
+        loop.
+
+        Parameters
+        ----------
+        broadcast_shape
+            The broadcast shape of the input arrays.
+        args
+            The function's broadcasted arguments.
+        kwargs
+            The function's broadcasted keyword arguments.
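+
+        For one-dimensional inputs this is roughly equivalent to the
+        following sketch, where ``func`` and ``broadcast_args`` are
+        illustrative names rather than actual attributes::
+
+            # build one coroutine per element, then await them all at once
+            tasks = [func(*xs) for xs in zip(*broadcast_args)]
+            outputs = await asyncio.gather(*tasks)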
+ + """ + + async def create_and_gather_tasks(): + tasks = [] + for index in np.ndindex(*broadcast_shape): + current_args = tuple(arg[index] for arg in args) + current_kwargs = {key: value[index] for key, value in kwargs.items()} + tasks.append(self.func(*current_args, **current_kwargs)) + + outputs = await asyncio.gather(*tasks) + + return outputs + + loop = asyncio.new_event_loop() + try: + outputs = loop.run_until_complete(create_and_gather_tasks()) + finally: + loop.close() + + return outputs diff --git a/tests/test_base.py b/tests/test_base.py new file mode 100644 index 00000000..0405859e --- /dev/null +++ b/tests/test_base.py @@ -0,0 +1,152 @@ +from numpy.testing import assert_array_equal + +from outlines.base import vectorize + + +def test_vectorize_docstring(): + def test(x): + """This is a test docstring""" + return x + + fn = vectorize(test) + assert fn.__doc__ == "This is a test docstring" + assert fn.__name__ == "test" + + async def test(x): + """This is a test docstring""" + return x + + fn = vectorize(test) + assert fn.__doc__ == "This is a test docstring" + assert fn.__name__ == "test" + + +def test_vectorize_thunk(): + def thunk(): + """A thunk""" + return 1 + + fn = vectorize(thunk) + assert fn() == 1 + + async def thunk(): + """A thunk""" + return 1 + + fn = vectorize(thunk) + assert fn() == 1 + + +def test_vectorize_simple(): + def passthrough(x): + """A passthrough function.""" + return x + + fn = vectorize(passthrough) + + out_vector = fn(["one", "two", "three"]) + assert_array_equal(out_vector, ["one", "two", "three"]) + + out_array = fn([["one", "two"], ["three", "four"]]) + assert_array_equal(out_array, [["one", "two"], ["three", "four"]]) + + async def passthrough(x): + """A passthrough function.""" + return x + + fn = vectorize(passthrough) + assert fn.__doc__ == "A passthrough function." 
+ + out_vector = fn(["one", "two", "three"]) + assert_array_equal(out_vector, ["one", "two", "three"]) + + out_array = fn([["one", "two"], ["three", "four"]]) + assert_array_equal(out_array, [["one", "two"], ["three", "four"]]) + + +def test_vectorize_multiple_outputs(): + def passthrough_multiple_outputs(x): + return x, x + + fn = vectorize(passthrough_multiple_outputs) + out = fn(["one", "two", "three"]) + assert len(out) == 2 + assert_array_equal(out[0], ["one", "two", "three"]) + assert_array_equal(out[1], ["one", "two", "three"]) + + async def passthrough_multiple_outputs(x): + return x, x + + fn = vectorize(passthrough_multiple_outputs) + out = fn(["one", "two", "three"]) + assert len(out) == 2 + assert_array_equal(out[0], ["one", "two", "three"]) + assert_array_equal(out[1], ["one", "two", "three"]) + + +def test_vectorize_args(): + def passthrough_args(*args): + """A passthrough function.""" + result = "" + for arg in args: + result += arg + return result + + fn = vectorize(passthrough_args) + + out_array = fn(["one", "two"], ["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn([["one", "two"], ["three", "four"]], ["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + async def passthrough_args(*args): + """A passthrough function.""" + result = "" + for arg in args: + result += arg + return result + + fn = vectorize(passthrough_args) + + out_array = fn(["one", "two"], ["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn([["one", "two"], ["three", "four"]], ["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + +def test_vectorize_kwargs(): + def passthrough_kwargs(**kwargs): + """A passthrough function.""" + result = "" + for _, value in kwargs.items(): + result += value + return result + + fn = vectorize(passthrough_kwargs) + + out_array = fn(first=["one", "two"], second=["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn(first=[["one", "two"], ["three", "four"]], second=["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + async def passthrough_kwargs(**kwargs): + """A passthrough function.""" + result = "" + for _, value in kwargs.items(): + result += value + return result + + fn = vectorize(passthrough_kwargs) + + out_array = fn(first=["one", "two"], second=["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn(first=[["one", "two"], ["three", "four"]], second=["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) From 47d14590e6c7edaaa7fd22a5955ea08cea611882 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 2 Jun 2023 12:32:26 +0200 Subject: [PATCH 145/734] Vectorize function with arbitrary signature --- outlines/base.py | 139 ++++++++++++++++++++++++++++++++++++++++----- tests/test_base.py | 131 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 253 insertions(+), 17 deletions(-) diff --git a/outlines/base.py b/outlines/base.py index 065a567a..6287a460 100644 --- a/outlines/base.py +++ b/outlines/base.py @@ -1,8 +1,15 @@ import asyncio import functools import inspect +from typing import Callable, Optional import numpy as np +from numpy.lib.function_base import ( + _calculate_shapes, + _parse_gufunc_signature, + _parse_input_dimensions, + _update_dim_sizes, +) class vectorize: @@ -19,17 +26,19 @@ class vectorize: """ - def __init__(self, 
func, signature=None): + def __init__(self, func: Callable, signature: Optional[str] = None): self.func = func self.signature = signature self.is_coroutine_fn = inspect.iscoroutinefunction(func) functools.update_wrapper(self, func) - if self.signature is not None: - raise NotImplementedError( - "Vectorization of non-scalar functions is not implemented yet." - ) + if signature is not None: + # Parse the signature string into a Python data structure. + # For instance "(m),(s)->(s,m)" becomes `([(m,),(s,)],[(s,m)])`. + self._in_and_out_core_dimensions = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dimensions = None def __call__(self, *args, **kwargs): """Call the vectorized function.""" @@ -102,7 +111,82 @@ def call_no_signature(self, *args, **kwargs): return outputs[0] if n_results == 1 else outputs - def vectorize_call(self, broadcast_shape, flat_args, flat_kwargs): + def call_with_signature(self, *args, **kwargs): + """Call functions and coroutines when a signature is specified.""" + input_core_dims, output_core_dims = self._in_and_out_core_dimensions + + # Make sure that the numbers of arguments passed is compatible with + # the signature. + num_args = len(args) + len(kwargs) + if num_args != len(input_core_dims): + raise TypeError( + "wrong number of positional arguments: " + "expected %r, got %r" % (len(input_core_dims), len(args)) + ) + + # Convert args and kwargs to arrays + args = [np.asarray(arg) for arg in args] + kwargs = {key: np.array(value) for key, value in kwargs.items()} + + # Find the arguments' broadcast shape, and map placeholder + # variables in the signature to the number of dimensions + # they correspond to given the arguments. + broadcast_shape, dim_sizes = _parse_input_dimensions( + args + list(kwargs.values()), input_core_dims + ) + + # Calculate the shape to which each of the arguments should be broadcasted + # and reshape them accordingly. + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, input_core_dims) + args = [ + np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes) + ] + kwargs = { + key: np.broadcast_to(value, broadcast_shape) + for key, value in kwargs.items() + } + + n_out = len(output_core_dims) + + if self.is_coroutine_fn: + outputs = self.vectorize_call_coroutine(broadcast_shape, args, kwargs) + else: + outputs = self.vectorize_call(broadcast_shape, args, kwargs) + + outputs = [ + results if isinstance(results, tuple) else (results,) for results in outputs + ] + + flat_outputs = list(zip(*outputs)) + n_results = len(flat_outputs) + + if n_out != n_results: + raise ValueError( + f"wrong number of outputs from the function, expected {n_out}, got {n_results}" + ) + + # The number of dimensions of the outputs are not necessarily known in + # advance. The following iterates over the results and updates the + # number of dimensions of the outputs accordingly. + for results, core_dims in zip(flat_outputs, output_core_dims): + for result in results: + _update_dim_sizes(dim_sizes, result, core_dims) + + # Calculate the shape to which each of the outputs should be broadcasted + # and reshape them. 
+        shapes = _calculate_shapes(broadcast_shape, dim_sizes, output_core_dims)
+        outputs = tuple(
+            [
+                np.hstack(results).reshape(shape).squeeze()
+                for shape, results in zip(shapes, zip(*outputs))
+            ]
+        )
+        outputs = tuple([x.item() if np.ndim(x) == 0 else x for x in outputs])
+
+        return outputs[0] if n_results == 1 else outputs
+
-    def vectorize_call(self, broadcast_shape, flat_args, flat_kwargs):
+    def vectorize_call(self, broadcast_shape, args, kwargs):
         """Run the function in a for loop.
 
         A possible extension would be to parallelize the calls.
 
         Parameters
         ----------
         broadcast_shape
             The broadcast shape of the input arrays.
-        flat_args
-            A flat array that contains the function's arguments.
-        flat_kwargs
-            A flat array that contains the function's keyword arguments.
+        args
+            The function's broadcasted arguments.
+        kwargs
+            The function's broadcasted keyword arguments.
 
         """
         outputs = []
         for index in np.ndindex(*broadcast_shape):
-            args = tuple(arg[index] for arg in flat_args)
-            kwargs = {key: value[index] for key, value in flat_kwargs.items()}
-            outputs.append(self.func(*args, **kwargs))
+            current_args = tuple(arg[index] for arg in args)
+            current_kwargs = {key: value[index] for key, value in kwargs.items()}
+            outputs.append(self.func(*current_args, **current_kwargs))
 
         return outputs
 
@@ -160,3 +244,32 @@
+
+def _update_arrays_type(arrays, results):
+    """Update the dtype of arrays.
+
+    String arrays contain strings of fixed length. Here they are initialized with
+    the type of the first results, so that if the next results contain longer
+    strings they will be truncated when added to the output arrays. Here we
+    update the type if the current results contain longer strings than in the
+    current output array.
+
+    Parameters
+    ----------
+    arrays
+        Arrays that contain the vectorized function's results.
+    results
+        The current output of the function being vectorized.
+ + """ + + updated_arrays = [] + for array, result in zip(arrays, results): + if array.dtype.type == np.str_: + if array.dtype < np.array(result).dtype: + array = array.astype(np.array(result).dtype) + + updated_arrays.append(array) + + return tuple(updated_arrays) diff --git a/tests/test_base.py b/tests/test_base.py index 0405859e..24c84f7d 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -1,3 +1,5 @@ +import numpy as np +import pytest from numpy.testing import assert_array_equal from outlines.base import vectorize @@ -37,7 +39,7 @@ async def thunk(): assert fn() == 1 -def test_vectorize_simple(): +def test_vectorize_scalar_simple(): def passthrough(x): """A passthrough function.""" return x @@ -64,7 +66,7 @@ async def passthrough(x): assert_array_equal(out_array, [["one", "two"], ["three", "four"]]) -def test_vectorize_multiple_outputs(): +def test_vectorize_scalar_multiple_outputs(): def passthrough_multiple_outputs(x): return x, x @@ -84,7 +86,7 @@ async def passthrough_multiple_outputs(x): assert_array_equal(out[1], ["one", "two", "three"]) -def test_vectorize_args(): +def test_vectorize_scalar_args(): def passthrough_args(*args): """A passthrough function.""" result = "" @@ -118,7 +120,7 @@ async def passthrough_args(*args): assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) -def test_vectorize_kwargs(): +def test_vectorize_scalar_kwargs(): def passthrough_kwargs(**kwargs): """A passthrough function.""" result = "" @@ -150,3 +152,124 @@ async def passthrough_kwargs(**kwargs): # Broadcasting out_array = fn(first=[["one", "two"], ["three", "four"]], second=["1", "2"]) assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + +def test_signature_invalid(): + with pytest.raises(ValueError): + vectorize(lambda x: x, "(m,)->()") + + with pytest.raises(TypeError, match="wrong number of positional"): + fn = vectorize(lambda x, y: x, "()->()") + fn(1, 2) + + with pytest.raises(ValueError, match="wrong number of outputs"): + + def test_multioutput(x, y): + return x, y + + fn = vectorize(test_multioutput, "(),()->()") + fn(1, 2) + + +def test_vectorize_simple(): + def test(x): + return x + + fn = vectorize(test, "(m)->(m)") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["one", "two", "three"], ["four", "five", "six"]]) + + fn = vectorize(test, "()->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["one", "two", "three"], ["four", "five", "six"]]) + + +def test_vectorize_kwargs(): + def passthrough_kwargs(**kwargs): + """A passthrough function.""" + result = "" + for _, value in kwargs.items(): + result += value + return result + + fn = vectorize(passthrough_kwargs, "(),()->()") + + out_array = fn(first=["one", "two"], second=["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn(first=[["one", "two"], ["three", "four"]], second=["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + async def passthrough_kwargs(**kwargs): + """A passthrough function.""" + result = "" + for _, value in kwargs.items(): + result += value + return result + + fn = vectorize(passthrough_kwargs, "(),()->()") + + out_array = fn(first=["one", "two"], second=["1", "2"]) + assert_array_equal(out_array, ["one1", "two2"]) + + # Broadcasting + out_array = fn(first=[["one", "two"], ["three", "four"]], second=["1", "2"]) + assert_array_equal(out_array, [["one1", "two2"], ["three1", "four2"]]) + + +def 
test_vectorize_reduce(): + def test(x): + return x[0] + + fn = vectorize(test, "(m)->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, ["one", "four"]) + + fn = vectorize(test, "()->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["o", "t", "t"], ["f", "f", "s"]]) + + +def test_vectorize_expand(): + def test(x): + return np.array([x for i in range(3)]) + + fn = vectorize(test, "()->(s)") + out = fn(["one", "two"]) + assert_array_equal(out, [["one", "one", "one"], ["two", "two", "two"]]) + + +def test_vectorize_coroutine_simple(): + async def test(x): + return x + + fn = vectorize(test, "(m)->(m)") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["one", "two", "three"], ["four", "five", "six"]]) + + fn = vectorize(test, "()->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["one", "two", "three"], ["four", "five", "six"]]) + + +def test_vectorize_coroutine_reduce(): + async def test(x): + return x[0] + + fn = vectorize(test, "(m)->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, ["one", "four"]) + + fn = vectorize(test, "()->()") + out = fn([["one", "two", "three"], ["four", "five", "six"]]) + assert_array_equal(out, [["o", "t", "t"], ["f", "f", "s"]]) + + +def test_vectorize_coroutine_expand(): + async def test(x): + return np.array([x for i in range(3)]) + + fn = vectorize(test, "()->(s)") + out = fn(["one", "two"]) + assert_array_equal(out, [["one", "one", "one"], ["two", "two", "two"]]) From b858751afdf1410d44602d98e8dfe64a30eb4340 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 5 Jun 2023 13:44:55 +0200 Subject: [PATCH 146/734] Vectorize OpenAI model calls --- outlines/models/__init__.py | 2 +- outlines/models/openai.py | 83 ++++++++++++++++++++----------------- 2 files changed, 45 insertions(+), 40 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 2e2150d8..7e902958 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,7 +5,7 @@ codebase. """ -from . import image_generation, text_completion +from . import embeddings, image_generation, text_completion from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion, OpenAIEmbeddings, OpenAIImageGeneration diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 0d5a881e..44c37b9c 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,14 +1,16 @@ """Integration with OpenAI's API.""" import base64 +import functools import os import warnings from io import BytesIO -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Dict, List, Optional, Union import numpy as np from PIL import Image from PIL.Image import Image as PILImage +import outlines from outlines.caching import cache __all__ = [ @@ -57,18 +59,22 @@ def OpenAICompletion( f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." 
) - def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): + def generate( + prompt: str, + *, + samples=1, + stop_at: List[Optional[str]] = [], + is_in=None, + type=None, + ): import tiktoken - if stop_at is not None: - stop_at = tuple(stop_at) - mask = {} if type is not None: encoder = tiktoken.encoding_for_model(model_name) mask = create_type_mask(type, encoder) - if is_in is not None and stop_at is not None: + if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None and len(mask) > 0: raise TypeError("You cannot set `is_in` and `mask` at the same time.") @@ -77,10 +83,11 @@ def generate(prompt: str, *, samples=1, stop_at=None, is_in=None, type=None): else: return generate_base(prompt, stop_at, samples, mask) - def generate_base( - prompt: str, stop_at: Optional[Tuple[str]], samples: int, mask: Dict[int, int] + @functools.partial(outlines.vectorize, signature="(),(m),(),()->(s)") + async def generate_base( + prompt: str, stop_at: List[Optional[str]], samples: int, mask: Dict[int, int] ) -> str: - responses = call_api( + responses = await call_api( model_name, format_prompt(prompt), max_tokens, @@ -91,13 +98,16 @@ def generate_base( ) if samples == 1: - results = extract_choice(responses["choices"][0]) + results = np.array([extract_choice(responses["choices"][0])]) else: - results = [extract_choice(responses["choices"][i]) for i in range(samples)] + results = np.array( + [extract_choice(responses["choices"][i]) for i in range(samples)] + ) return results - def generate_choice( + @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") + async def generate_choice( prompt: str, is_in: List[str], samples: int ) -> Union[List[str], str]: """Generate a sequence that must be one of many options. 
@@ -130,12 +140,12 @@ def generate_choice( if len(mask) == 0: break - response = call_api( + response = await call_api( model_name, format_prompt(prompt), 1, temperature, - None, + [], mask, samples, ) @@ -144,10 +154,7 @@ def generate_choice( decoded_samples.append("".join(decoded)) - if samples == 1: - return decoded_samples[0] - - return decoded_samples + return np.array(decoded_samples) return generate @@ -180,11 +187,12 @@ def call_embeddings_api( response = openai.Embedding.create( model=model, - input=input, + input=list(input), ) return response + @functools.partial(outlines.vectorize, signature="()->(s)") def generate(query: str) -> np.ndarray: api_response = call_embeddings_api(model_name, query) response = api_response["data"][0]["embedding"] @@ -216,28 +224,25 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): @error_handler @cache - def call_image_generation_api(prompt: str, size: str, samples: int): + async def call_image_generation_api(prompt: str, size: str, samples: int): import openai - response = openai.Image.create( - prompt=prompt, size=size, n=samples, response_format="b64_json" + response = await openai.Image.acreate( + prompt=prompt, size=size, n=int(samples), response_format="b64_json" ) return response - def generate(prompt: str, samples: int = 1) -> PILImage: - api_response = call_image_generation_api(prompt, size, samples) - - if samples == 1: - response = api_response["data"][0]["b64_json"] - return Image.open(BytesIO(base64.b64decode(response))) + @functools.partial(outlines.vectorize, signature="(),()->(s)") + async def generate(prompt: str, samples: int = 1) -> PILImage: + api_response = await call_image_generation_api(prompt, size, samples) images = [] for i in range(samples): response = api_response["data"][i]["b64_json"] images.append(Image.open(BytesIO(base64.b64decode(response)))) - return images + return np.array(images, dtype="object") return generate @@ -335,25 +340,25 @@ def call(*args, **kwargs): @error_handler @cache -def call_completion_api( +async def call_completion_api( model: str, prompt: str, max_tokens: int, temperature: float, - stop_sequences: Tuple[str], + stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, ): import openai - response = openai.Completion.create( + response = await openai.Completion.acreate( engine=model, prompt=prompt, temperature=temperature, max_tokens=max_tokens, - stop=stop_sequences, + stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, - n=num_samples, + n=int(num_samples), ) return response @@ -361,25 +366,25 @@ def call_completion_api( @error_handler @cache -def call_chat_completion_api( +async def call_chat_completion_api( model: str, messages: List[Dict[str, str]], max_tokens: int, temperature: float, - stop_sequences: Tuple[str], + stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, ): import openai - response = openai.ChatCompletion.create( + response = await openai.ChatCompletion.acreate( model=model, messages=messages, max_tokens=max_tokens, temperature=temperature, - stop=stop_sequences, + stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, - n=num_samples, + n=int(num_samples), ) return response From e3cdf0e6343b46f5f8f9ec6a18ae39ef7e6a157c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 5 Jun 2023 14:58:05 +0200 Subject: [PATCH 147/734] Vectorize HF model calls --- outlines/models/hf_diffusers.py | 28 +++++++++++++--- 
outlines/models/hf_transformers.py | 48 ++++++++++++++++++---------- tests/models/test_hf_diffusers.py | 3 +- tests/models/test_hf_transformers.py | 11 +++---- 4 files changed, 61 insertions(+), 29 deletions(-) diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 83f80fd2..d88e94c8 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -1,6 +1,12 @@ """Integration with HuggingFace's `diffusers` library.""" +import functools +from typing import List, Union + +import numpy as np from PIL.Image import Image as PILImage +import outlines + def HuggingFaceDiffuser(model_name: str) -> PILImage: """Create a function that will call a stable diffusion pipeline. @@ -12,17 +18,20 @@ def HuggingFaceDiffuser(model_name: str) -> PILImage: """ - def call(prompt: str, samples: int = 1) -> str: + def call(prompt: Union[str, List[str]], samples: int = 1) -> str: + if isinstance(prompt, str): + prompt = [prompt] + results = call_stable_diffusion_pipeline(model_name, prompt, samples) - if samples == 1: - return results[0] + return results return call +@functools.partial(outlines.vectorize, signature="(),(m),()->(m,s)") def call_stable_diffusion_pipeline( - model_name: str, prompt: str, samples: int + model_name: str, prompt: List[str], samples: int ) -> PILImage: """Build and call the Stable Diffusion pipeline. @@ -31,10 +40,19 @@ def call_stable_diffusion_pipeline( import torch from diffusers import StableDiffusionPipeline + # Pipelines don't accept NumPy arrays + prompt = list(prompt) + pipe = StableDiffusionPipeline.from_pretrained(model_name) if torch.cuda.is_available(): pipe = pipe.to("cuda") images = pipe(prompt, num_images_per_prompt=samples).images + if not isinstance(images, list): + images = [images] + + array = np.empty((samples,), dtype="object") + for idx, image in enumerate(images): + array[idx] = image - return images + return np.atleast_2d(array) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 86142b84..11c90327 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,7 +1,10 @@ """Integration with HuggingFace's `transformers` library.""" -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple +import functools +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union -from outlines.caching import cache +import numpy as np + +import outlines if TYPE_CHECKING: import torch @@ -47,34 +50,47 @@ def HuggingFaceCompletion( temperature = 1.0 def call( - prompt: str, + prompt: Union[str, List[str]], *, samples: int = 1, - stop_at: Optional[List[str]] = None, - is_in: Optional[List[str]] = None, + stop_at: List[Optional[str]] = [], + is_in: List[Optional[str]] = [], type: Optional[str] = None, ) -> str: + if isinstance(prompt, str): + prompt = [prompt] + return call_model_generate_method( - model_name, prompt, max_tokens, temperature, samples, stop_at, is_in, type + model_name, + prompt, + max_tokens, + temperature, + samples, + stop_at, + is_in, + type, ) return call -@cache +@functools.partial(outlines.vectorize, signature="(),(m),(),(),(),(i),(j),()->(m,s)") def call_model_generate_method( model_name: str, prompt: str, max_tokens: int, temperature: float, samples: int, - stop_at: List[str], - is_in: List[str], + stop_at: List[Optional[str]], + is_in: np.ndarray, type: str, ) -> str: import torch from transformers import AutoModelForCausalLM, AutoTokenizer + # `generate` does not accept NumPy arrays + prompt = 
list(prompt) + tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained(model_name) @@ -88,7 +104,7 @@ def call_model_generate_method( raise NotImplementedError( "It is currently not possible to control the generation of several samples with the `transformers` integration" ) - if is_in is not None: + if is_in.size > 0: raise ValueError( "You cannot both restrict to a set of choices with `is_in` and to a type with `type`" ) @@ -97,12 +113,12 @@ def call_model_generate_method( ) logit_processors = [logit_processor] stopping_criteria = [stopping_criterion] - elif is_in is not None: + elif is_in.size > 0: if samples > 1: raise NotImplementedError( "It is currently not possible to control the generation of several samples with the `transformers` integration" ) - if stop_at is not None: + if stop_at.size > 0: raise ValueError( "You cannot both restrict to a set of choices with `is_in` and set a stopping criterion" ) @@ -111,7 +127,7 @@ def call_model_generate_method( ) logit_processors = [logit_processor] stopping_criteria = [stopping_criterion] - elif stop_at is not None: + elif stop_at.size > 0: if samples > 1: raise NotImplementedError( "It is currently not possible to control the generation of several samples with the `transformers` integration" @@ -132,7 +148,7 @@ def call_model_generate_method( temperature=temperature, max_new_tokens=max_tokens, pad_token_id=tokenizer.eos_token_id, - num_return_sequences=samples, + num_return_sequences=int(samples), logits_processor=logit_processors, stopping_criteria=stopping_criteria, ) @@ -141,11 +157,11 @@ def call_model_generate_method( if samples == 1: results = tokenizer.decode(new_tokens, skip_special_tokens=True) - results = postprocessing(results) + results = [postprocessing(results)] else: results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) - return results + return np.atleast_2d(results) def create_stop_constraint( diff --git a/tests/models/test_hf_diffusers.py b/tests/models/test_hf_diffusers.py index 4068c4b6..450d3c12 100644 --- a/tests/models/test_hf_diffusers.py +++ b/tests/models/test_hf_diffusers.py @@ -1,3 +1,4 @@ +import numpy as np from PIL.Image import Image as PILImage from outlines.models.hf_diffusers import HuggingFaceDiffuser @@ -12,7 +13,7 @@ def test_stable_diffusion(): assert isinstance(image, PILImage) images = model("test", samples=3) - assert isinstance(images, list) + assert isinstance(images, np.ndarray) assert len(images) == 3 for img in images: assert isinstance(image, PILImage) diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py index 53fcef60..1777a5f3 100644 --- a/tests/models/test_hf_transformers.py +++ b/tests/models/test_hf_transformers.py @@ -1,10 +1,7 @@ -import outlines +import numpy as np +import pytest -outlines.disable_cache() - -import pytest # noqa - -from outlines.models.hf_transformers import HuggingFaceCompletion # noqa +from outlines.models.hf_transformers import HuggingFaceCompletion TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" @@ -19,7 +16,7 @@ def test_samples(): assert isinstance(answer, str) answers = model("test", samples=3) - assert isinstance(answers, list) + assert isinstance(answers, np.ndarray) assert len(answers) == 3 From cc914d6ef7ec7274c17bdbd25abb357e1b4e8f92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 6 Jun 2023 11:18:37 +0200 Subject: [PATCH 148/734] Update README with `outlines.vectorize` --- README.md | 43 
+++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/README.md b/README.md index f71597c1..e8dc351f 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ Build _reliable_ workflows based on interactions with generative models. [Controlled generation](#controlled-generation) • [Agents](#agents-example) • [Sampling](#sampling-uncertainty-simulation-based-inference) • +[Parallel execution](#vectorization-and-parallel-execution) • [Examples](#examples) @@ -27,6 +28,7 @@ Build _reliable_ workflows based on interactions with generative models. - [x] Integration with OpenAI and HuggingFace models - [x] Controlled generation, including multiple choice, type constraints and dynamic stopping - [x] Sampling of multiple sequences +- [x] Vectorized execution ## Installation @@ -220,6 +222,47 @@ print(answer) The focus on sampling allows us to explore different ideas, such as [using the diversity of answers to evaluate the model's uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb), or [simulation-based inference to optimize the prompt](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb). +## Vectorization and parallel execution + +You can pass prompts in a NumPy array to Outlines models: + +``` python +import numpy as np +import outlines.models as models + +model = models.text_completion.openai("text-davinci-003") + +prompts = [ + ["Translate 'Hello' in Italian", "Translate 'Hello' in French"], + ["Translate 'Hello' in Spanish", "Translate 'Hello' in German"], +] +answers = model(prompts) + +print(answers.shape) +# (2, 2) +``` + +Outlines also provides an `outlines.vectorize` decorator that will vectorize any function. If the function is async, the requests will be run concurrently: + +``` python +import aiohttp +import numpy as np +import outlines + +@outlines.vectorize +async def wikipedia_search(query): + url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={query}&origin=*" + async with aiohttp.ClientSession() as session: + async with session.get(url) as response: + return await response.text() + +results = wikipedia_search([["Cat", "Dog"],["Bird", "Horse"]]) +print(results.shape) +# (2, 2) +``` + +This feature allows you to run multiple workflows in parallel, for instance to avoid overfitting a workflow to a single example while you iterate on it, or to run a workflow over many different inputs in production. + ## Contributing ### What contributions? 
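The README example above uses `outlines.vectorize` as a bare decorator. The decorator also accepts NumPy-style generalized-ufunc signatures, as exercised by the tests added earlier in this series; a minimal sketch mirroring `test_vectorize_reduce` from `tests/test_base.py`:

``` python
import outlines

def first(x):
    # With the "(m)->()" signature, each inner vector of length m is
    # mapped to a single scalar, here its first element.
    return x[0]

fn = outlines.vectorize(first, signature="(m)->()")
out = fn([["one", "two", "three"], ["four", "five", "six"]])
# out == ["one", "four"], one scalar per row of the input
```

Without an explicit signature, as in the README snippet, the function is simply broadcast elementwise over its inputs.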
From 8c4fb4b53eda6f132c168a5ebcf48b5c988aa574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 6 Jun 2023 17:00:33 +0200 Subject: [PATCH 149/734] Fix vectorization of `transformers` models --- outlines/models/hf_transformers.py | 27 ++++++++++++++++++++------ tests/models/test_hf_transformers.py | 29 ++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 11c90327..34cdda24 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -91,10 +91,11 @@ def call_model_generate_method( # `generate` does not accept NumPy arrays prompt = list(prompt) - tokenizer = AutoTokenizer.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left") model = AutoModelForCausalLM.from_pretrained(model_name) - prompt_tokens = tokenizer(prompt, return_tensors="pt") + tokenizer.pad_token = tokenizer.eos_token + prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True) logit_processors: Optional[List[Callable]] = None stopping_criteria: Optional[List[Callable]] = None @@ -153,15 +154,29 @@ def call_model_generate_method( stopping_criteria=stopping_criteria, ) new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] :] - new_tokens = new_tokens.squeeze() + if len(prompt) == 1: + new_tokens = new_tokens.squeeze() - if samples == 1: + if new_tokens.ndim < 2: results = tokenizer.decode(new_tokens, skip_special_tokens=True) - results = [postprocessing(results)] + results = np.array([postprocessing(results)]) else: results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) + results = [postprocessing(result) for result in results] + results = np.array(results) - return np.atleast_2d(results) + if len(prompt) == 1: + results = np.expand_dims(results, 0) + else: + results = np.expand_dims(results, 1) + + # If we pass a batch of prompts to the model and ask for + # several samples, we get a list of results that we need + # to reshape to the right dimensions. 
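+        # For example, with 2 prompts and samples=3, `batch_decode` returns
+        # 6 strings in prompt-major order; the reshape below groups them
+        # into an array of shape (2, 3), one row of samples per prompt.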
+ if len(prompt) > 1 and samples > 1: + results = np.reshape(results, (-1, samples)) + + return results def create_stop_constraint( diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py index 1777a5f3..f7323c1f 100644 --- a/tests/models/test_hf_transformers.py +++ b/tests/models/test_hf_transformers.py @@ -20,17 +20,37 @@ def test_samples(): assert len(answers) == 3 +def test_prompt_array(): + model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) + prompts = [["Hello", "Bonjour"], ["Ciao", "Hallo"]] + answers = model(prompts) + assert isinstance(answers, np.ndarray) + assert answers.shape == (2, 2) + + answers = model(prompts, samples=5) + assert isinstance(answers, np.ndarray) + assert answers.shape == (2, 2, 5) + + def test_type_int(): model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", type="int") int(answer) + answers = model(["test", "other_test"], type="int") + for answer in answers: + int(answer) + def test_type_float(): model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) answer = model("test", type="float") float(answer) + answers = model(["test", "other_test"], type="float") + for answer in answers: + float(answer) + def test_incompatible_constraints(): model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) @@ -46,6 +66,10 @@ def test_choices(): answer = model("test", is_in=choices) assert answer in choices + answers = model(["test", "other_test"], is_in=choices) + for answer in answers: + assert answer in choices + def test_stop(): model = HuggingFaceCompletion(TEST_MODEL, max_tokens=1000) @@ -55,6 +79,11 @@ def test_stop(): for seq in stop: assert seq not in answer + answers = model(["test", "other_test"], stop_at=stop) + for seq in stop: + for answer in answers: + assert seq not in answer + @pytest.mark.xfail def test_type_multiple_samples(): From ee60ce3958ef18a089c7393e60c3ba2206a160a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 1 Jun 2023 10:29:19 +0200 Subject: [PATCH 150/734] Create masks from regex --- outlines/text/masks.py | 73 ++++++++++++++++++++++++++++++++++++++++ tests/text/test_masks.py | 69 +++++++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 outlines/text/masks.py create mode 100644 tests/text/test_masks.py diff --git a/outlines/text/masks.py b/outlines/text/masks.py new file mode 100644 index 00000000..a7e7b4f5 --- /dev/null +++ b/outlines/text/masks.py @@ -0,0 +1,73 @@ +import re +from typing import Dict, Iterable + +import numpy as np + +__all__ = [ + "create_char_set_mask", + "create_float_mask", + "create_int_mask", + "create_mask_from_regex", +] + + +def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> np.ndarray: + """Create a token mask from a regex. + + Parameters + ---------- + vocabulary + A dictionary that contains a tokenizer's vocabulary as a map + between tokens and their ids. + regex + The regex that tokens need to respect. 
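+
+    Returns
+    -------
+    A boolean NumPy array of shape `(len(vocabulary),)` that is `True` at
+    the ids of the tokens that match the regex.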
+ + """ + program = re.compile(regex) + + mask = np.zeros(len(vocabulary), dtype=np.bool_) + for token, token_id in vocabulary.items(): + if program.match(token) is not None: + mask[token_id] = True + + return mask + + +def create_int_mask(vocabulary: Dict[str, int]) -> np.ndarray: + """Create a mask to generate integers.""" + mask = create_mask_from_regex(vocabulary, "^[0-9]+$") + + return mask + + +def create_float_mask(vocabulary: Dict[str, int]) -> np.ndarray: + """Create a mask to generate floating point numbers.""" + mask = create_mask_from_regex(vocabulary, r"^([0-9]+([.][0-9]*)?|[.][0-9]+)$") + + return mask + + +def create_char_set_mask( + vocabulary: Dict[str, int], char_set: Iterable[str] +) -> np.ndarray: + """Create a mask to only generate characters in a given set. + + Parameters + ---------- + vocabulary + A dictionary that contains a tokenizer's vocabulary as a map + between tokens and their ids. + char_set + An iterable that contains the valid single characters. + + """ + for char in char_set: + if len(char) != 1: + raise ValueError( + "The `char_set` argument of `char_set_mask` can only contain single characters." + ) + + char_set = re.escape("".join(char_set)) + regex = "^[" + char_set + "]+$" + mask = create_mask_from_regex(vocabulary, regex) + return mask diff --git a/tests/text/test_masks.py b/tests/text/test_masks.py new file mode 100644 index 00000000..c9d37353 --- /dev/null +++ b/tests/text/test_masks.py @@ -0,0 +1,69 @@ +import random + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from outlines.text.masks import create_char_set_mask, create_float_mask, create_int_mask + + +def test_int_mask(): + vocabulary = {"1": 0, "12": 1, "12a": 2, "a1": 3, "1.3": 4} + + mask = create_int_mask(vocabulary) + assert_array_equal(mask, np.array([True, True, False, False, False])) + + +def test_float_mask(): + vocabulary = { + "1": 0, + "12": 1, + "12a": 2, + "a1": 3, + "1.3": 4, + "1.": 5, + "0.": 6, + "1.2.3": 7, + } + + mask = create_float_mask(vocabulary) + assert_array_equal( + mask, np.array([True, True, False, False, True, True, True, False]) + ) + + +def test_char_set_mask(): + vocabulary = {} + with pytest.raises(ValueError, match="single characters"): + create_char_set_mask(vocabulary, ["ab"]) + + vocabulary = {"a": 0, "ab": 1, "abc": 2, "1": 3, "1_a": 4} + mask = create_char_set_mask(vocabulary, ["a", "b", "1", "_"]) + assert_array_equal(mask, np.array([True, True, False, True, True])) + + vocabulary = { + "\\": 0, + "$": 1, + ".": 2, + "|": 3, + "?": 4, + "*": 5, + "(": 6, + ")": 7, + "[": 8, + "]": 9, + "{": 10, + "}": 11, + } + + char_set = ["\\", "$", ".", "|", "?", "*", "(", ")", "[", "]", "{", "}"] + random.shuffle(char_set) + + mask = create_char_set_mask(vocabulary, char_set) + assert_array_equal(mask, np.ones(12, dtype=np.bool_)) + + mask = create_char_set_mask(vocabulary, ["a"]) + assert_array_equal(mask, np.zeros(12, dtype=np.bool_)) + + mask = create_char_set_mask(vocabulary, ["\n", "\r", "\t"]) + assert_array_equal(mask, np.zeros(12, dtype=np.bool_)) From cea7ac3006f2bc0c21accfdb9a454ad88c5f215e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 1 Jun 2023 10:35:24 +0200 Subject: [PATCH 151/734] Use `outlines.text.mask` in generation procedures --- outlines/models/hf_transformers.py | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 34cdda24..bd886b42 100644 --- 
a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -5,6 +5,7 @@ import numpy as np import outlines +from outlines.text.masks import create_float_mask, create_int_mask if TYPE_CHECKING: import torch @@ -331,16 +332,7 @@ def create_int_constraint( import torch num_prompt_tokens = prompt_tokens.shape[-1] - - mask = torch.zeros(len(tokenizer), dtype=torch.bool) - - for _, token_id in tokenizer.get_vocab().items(): - token = tokenizer.decode(token_id) - are_all_digits = all([c.isdigit() for c in token]) - if are_all_digits: - mask[token_id] = True - - mask[tokenizer.eos_token_id] = False + mask = torch.from_numpy(create_int_mask(tokenizer.get_vocab())) def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: """Pre-process the model's output logits before generating the next token. @@ -378,18 +370,7 @@ def create_float_constraint( import torch num_prompt_tokens = prompt_tokens.shape[-1] - - mask = torch.zeros(len(tokenizer), dtype=torch.bool) - - for _, token_id in tokenizer.get_vocab().items(): - token = tokenizer.decode(token_id) - is_valid_float_or_int = ( - all([c.isdigit() or c == "." for c in token]) and token.count(".") <= 1 - ) - if is_valid_float_or_int: - mask[token_id] = True - - mask[tokenizer.eos_token_id] = False + mask = torch.from_numpy(create_float_mask(tokenizer.get_vocab())) def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: """Pre-process the model's output logits before generating the next token. From b4e71133bffd2ce26f90e6832e83c9c8783d15ce Mon Sep 17 00:00:00 2001 From: Arun Patro Date: Wed, 7 Jun 2023 03:01:29 -0400 Subject: [PATCH 152/734] Add mps backend for HuggingFace models --- outlines/models/hf_diffusers.py | 2 ++ outlines/models/hf_transformers.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index d88e94c8..33f1c90f 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -46,6 +46,8 @@ def call_stable_diffusion_pipeline( pipe = StableDiffusionPipeline.from_pretrained(model_name) if torch.cuda.is_available(): pipe = pipe.to("cuda") + elif torch.backends.mps.is_available(): + pipe = pipe.to("mps") images = pipe(prompt, num_images_per_prompt=samples).images if not isinstance(images, list): diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index bd886b42..7b875155 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -143,6 +143,9 @@ def call_model_generate_method( if torch.cuda.is_available(): model = model.to("cuda") prompt_tokens = prompt_tokens.to("cuda") + elif torch.backends.mps.is_available(): + model = model.to("mps") + prompt_tokens = prompt_tokens.to("mps") returned_tokens = model.generate( **prompt_tokens, From a4662218232df8264d1b9eb93867362ed8da2e54 Mon Sep 17 00:00:00 2001 From: Daniel Gerlanc Date: Wed, 14 Jun 2023 14:41:12 -0400 Subject: [PATCH 153/734] Update development statement --- docs/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index 0b5c685b..b5e62dc7 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -29,7 +29,7 @@ :width: 75% :margin: 4 4 auto auto - The development of Outlines is entirely funded by `Normal Computing `_ + Outlines is built with ❤️ by `Normal Computing `_ 👀 Sneak Peek From 4e1016db969c28886d8c936c5fea47e5c1871815 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Mon, 19 Jun 2023 12:42:09 -0500 Subject: [PATCH 154/734] Add coverage checks to CI --- .github/workflows/tests.yml | 54 ++++++++++++++++++++++++++++++++++++- pyproject.toml | 26 +++++++++++++++++- 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index aa9ca806..1554bef3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -31,4 +31,56 @@ jobs: pip install .[test] - name: Run tests run: | - pytest + pytest --cov=outlines + - name: Upload coverage data + uses: actions/upload-artifact@v3 + with: + name: coverage-data + path: .coverage* + if-no-files-found: ignore + + coverage: + name: Combine & check coverage. + needs: tests + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v4 + with: + cache: pip + python-version: "3.11" + + - name: Set up environment + run: | + pip install --upgrade "coverage[toml]>=5.1" diff-cover + + - uses: actions/download-artifact@v3 + with: + name: coverage-data + + - name: Fetch master for coverage diff + run: | + git fetch --no-tags --prune origin main + + - name: Combine coverage & fail if it's <100%. + run: | + # Combine coverage files (not needed now, but maybe later) + # python -m coverage combine + + # Produce an html report with absolute coverage information + python -m coverage html --skip-covered --skip-empty + + # Report relative coverage and write to the workflow's summary + python -m coverage xml + diff-cover coverage.xml --markdown-report=coverage.md --fail-under=100 || (cat coverage.md >> $GITHUB_STEP_SUMMARY && exit 1) + + - name: Upload HTML report if check failed. + uses: actions/upload-artifact@v3 + with: + name: html-report + path: htmlcov + if: ${{ failure() }} diff --git a/pyproject.toml b/pyproject.toml index 745c3e49..7ece26ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,8 +38,11 @@ test = [ "diffusers", "pre-commit", "pytest", + "pytest-cov", "torch", - "transformers" + "transformers", + "coverage[toml]>=5.1", + "diff-cover", ] [project.urls] @@ -84,3 +87,24 @@ module = [ "transformers", ] ignore_missing_imports = true + +[tool.coverage.run] +omit = [ + "outlines/_version.py", + "tests/*", +] +branch = true + +[tool.coverage.report] +omit = [ + "tests/*", +] +exclude_lines = [ + "pragma: no cover", + "if TYPE_CHECKING:", +] +show_missing = true + +[tool.diff_cover] +compare_branch = "origin/main" +diff_range_notation = ".." 
From 8601eb213761be3432db25841c699dd634b10305 Mon Sep 17 00:00:00 2001 From: harshkumarchourasia Date: Sun, 4 Jun 2023 23:02:48 +0530 Subject: [PATCH 155/734] Add retry mechanism for OpenAI API calls --- outlines/models/openai.py | 74 ++++++++++++++++++++++++--------------- pyproject.toml | 2 ++ 2 files changed, 48 insertions(+), 28 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 44c37b9c..4b3cde3b 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -9,6 +9,12 @@ import numpy as np from PIL import Image from PIL.Image import Image as PILImage +from tenacity import ( + retry, + retry_if_exception_type, + stop_after_attempt, + wait_random_exponential, +) import outlines from outlines.caching import cache @@ -177,21 +183,6 @@ def OpenAIEmbeddings(model_name: str): """ - @error_handler - @cache - def call_embeddings_api( - model: str, - input: str, - ): - import openai - - response = openai.Embedding.create( - model=model, - input=list(input), - ) - - return response - @functools.partial(outlines.vectorize, signature="()->(s)") def generate(query: str) -> np.ndarray: api_response = call_embeddings_api(model_name, query) @@ -222,17 +213,6 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): """ - @error_handler - @cache - async def call_image_generation_api(prompt: str, size: str, samples: int): - import openai - - response = await openai.Image.acreate( - prompt=prompt, size=size, n=int(samples), response_format="b64_json" - ) - - return response - @functools.partial(outlines.vectorize, signature="(),()->(s)") async def generate(prompt: str, samples: int = 1) -> PILImage: api_response = await call_image_generation_api(prompt, size, samples) @@ -312,7 +292,7 @@ def call(*args, **kwargs): try: os.environ["OPENAI_API_KEY"] except KeyError: - raise OSError( + raise KeyError( "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " "OpenAI's APIs. Please make sure it is set before re-running your model." 
) @@ -338,6 +318,14 @@ def call(*args, **kwargs): return call +retry_config = { + "wait": wait_random_exponential(min=1, max=30), + "stop": stop_after_attempt(6), + "retry": retry_if_exception_type(OSError), +} + + +@retry(**retry_config) @error_handler @cache async def call_completion_api( @@ -360,10 +348,10 @@ async def call_completion_api( logit_bias=logit_bias, n=int(num_samples), ) - return response +@retry(**retry_config) @error_handler @cache async def call_chat_completion_api( @@ -388,3 +376,33 @@ async def call_chat_completion_api( ) return response + + +@retry(**retry_config) +@error_handler +@cache +def call_embeddings_api( + model: str, + input: str, +): + import openai + + response = openai.Embedding.create( + model=model, + input=input, + ) + + return response + + +@retry(**retry_config) +@error_handler +@cache +def call_image_generation_api(prompt: str, size: str, samples: int): + import openai + + response = openai.Image.create( + prompt=prompt, size=size, n=samples, response_format="b64_json" + ) + + return response diff --git a/pyproject.toml b/pyproject.toml index 7ece26ff..8e3e8cd6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ dependencies = [ "perscache", "pydantic", "scipy", + "tenacity", ] dynamic = ["version"] @@ -82,6 +83,7 @@ module = [ "pydantic", "pytest", "scipy.*", + "tenacity.*", "tiktoken.*", "torch", "transformers", From a62f477852cbc903664bf128c69db4cdaceb5276 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 7 Jun 2023 10:59:25 +0200 Subject: [PATCH 156/734] Make OpenAI embedding and image calls async --- outlines/models/openai.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 4b3cde3b..2fa74a75 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -184,8 +184,8 @@ def OpenAIEmbeddings(model_name: str): """ @functools.partial(outlines.vectorize, signature="()->(s)") - def generate(query: str) -> np.ndarray: - api_response = call_embeddings_api(model_name, query) + async def generate(query: str) -> np.ndarray: + api_response = await call_embeddings_api(model_name, query) response = api_response["data"][0]["embedding"] return np.array(response) @@ -213,8 +213,11 @@ def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): """ + def generate(prompt: str, samples: int = 1): + return generate_base(prompt, samples) + @functools.partial(outlines.vectorize, signature="(),()->(s)") - async def generate(prompt: str, samples: int = 1) -> PILImage: + async def generate_base(prompt: str, samples: int) -> PILImage: api_response = await call_image_generation_api(prompt, size, samples) images = [] @@ -381,13 +384,13 @@ async def call_chat_completion_api( @retry(**retry_config) @error_handler @cache -def call_embeddings_api( +async def call_embeddings_api( model: str, input: str, ): import openai - response = openai.Embedding.create( + response = await openai.Embedding.acreate( model=model, input=input, ) @@ -398,11 +401,11 @@ def call_embeddings_api( @retry(**retry_config) @error_handler @cache -def call_image_generation_api(prompt: str, size: str, samples: int): +async def call_image_generation_api(prompt: str, size: str, samples: int): import openai - response = openai.Image.create( - prompt=prompt, size=size, n=samples, response_format="b64_json" + response = await openai.Image.acreate( + prompt=prompt, size=size, n=int(samples), response_format="b64_json" ) return response From 
e590a2341e2c69bedfcfd67e27349b9793827400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 7 Jun 2023 11:00:07 +0200 Subject: [PATCH 157/734] Allow passing a simple string for `stop_at` --- outlines/models/openai.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 2fa74a75..b154a75f 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -69,12 +69,15 @@ def generate( prompt: str, *, samples=1, - stop_at: List[Optional[str]] = [], + stop_at: Union[List[Optional[str]], str] = [], is_in=None, type=None, ): import tiktoken + if isinstance(stop_at, str): + stop_at = [stop_at] + mask = {} if type is not None: encoder = tiktoken.encoding_for_model(model_name) From 7901224b3a663ed9863a89a2d08f0744c6184781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 7 Jun 2023 11:00:26 +0200 Subject: [PATCH 158/734] Initialize `object` array correctly --- outlines/models/openai.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index b154a75f..0a5541f4 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -228,7 +228,11 @@ async def generate_base(prompt: str, samples: int) -> PILImage: response = api_response["data"][i]["b64_json"] images.append(Image.open(BytesIO(base64.b64decode(response)))) - return np.array(images, dtype="object") + array = np.empty((samples,), dtype="object") + for idx, image in enumerate(images): + array[idx] = image + + return np.atleast_2d(array) return generate From f587c7ebeb899baa71e91ffa2a396f9ab6a2abca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 13 Jun 2023 13:54:50 +0200 Subject: [PATCH 159/734] Fix float mask In some vocabularies, the only token that contains a period is the "." token itself. However, the regex currently used to create masks for float generation excludes this token. This commit edits the regex to allow the "." token. 
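For example, with the new regex:

``` python
import re

float_regex = r"^(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)$"
assert re.match(float_regex, ".") is not None    # now allowed
assert re.match(float_regex, "1.2") is not None  # still allowed
assert re.match(float_regex, "1.2.3") is None    # still rejected
```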
--- outlines/text/masks.py | 2 +- tests/text/test_masks.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/outlines/text/masks.py b/outlines/text/masks.py index a7e7b4f5..c5762573 100644 --- a/outlines/text/masks.py +++ b/outlines/text/masks.py @@ -42,7 +42,7 @@ def create_int_mask(vocabulary: Dict[str, int]) -> np.ndarray: def create_float_mask(vocabulary: Dict[str, int]) -> np.ndarray: """Create a mask to generate floating point numbers.""" - mask = create_mask_from_regex(vocabulary, r"^([0-9]+([.][0-9]*)?|[.][0-9]+)$") + mask = create_mask_from_regex(vocabulary, r"^(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)$") return mask diff --git a/tests/text/test_masks.py b/tests/text/test_masks.py index c9d37353..3c0dc782 100644 --- a/tests/text/test_masks.py +++ b/tests/text/test_masks.py @@ -24,11 +24,12 @@ def test_float_mask(): "1.": 5, "0.": 6, "1.2.3": 7, + ".": 8, } mask = create_float_mask(vocabulary) assert_array_equal( - mask, np.array([True, True, False, False, True, True, True, False]) + mask, np.array([True, True, False, False, True, True, True, False, True]) ) From 50cbfcd6de0ffcb27494068177db088c16115f2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 20 Jun 2023 14:12:09 +0200 Subject: [PATCH 160/734] Add `Tokenizer` base class --- outlines/models/tokenizer.py | 23 +++++++++++++++++++++++ pyproject.toml | 1 + tests/models/test_tokenizer.py | 8 ++++++++ 3 files changed, 32 insertions(+) create mode 100644 outlines/models/tokenizer.py create mode 100644 tests/models/test_tokenizer.py diff --git a/outlines/models/tokenizer.py b/outlines/models/tokenizer.py new file mode 100644 index 00000000..84c317dd --- /dev/null +++ b/outlines/models/tokenizer.py @@ -0,0 +1,23 @@ +from abc import abstractmethod +from typing import List, Protocol, Tuple, Union + +import numpy as np +from numpy.typing import NDArray + + +class Tokenizer(Protocol): + eos_token: str + eos_token_id: int + pad_token_id: int + + @abstractmethod + def encode( + self, prompt: Union[str, List[str]] + ) -> Tuple[NDArray[np.int64], NDArray[np.int64]]: + """Translate the input prompts into NumPy arrays of token ids and attention mask.""" + ... + + @abstractmethod + def decode(self, token_ids: NDArray[np.int64]) -> List[str]: + """Translate an array of token ids to a string or list of strings.""" + ... 
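The `Tokenizer` protocol above only pins down the encode/decode interface; any concrete implementation just has to return the `(token_ids, attention_mask)` pair of NumPy arrays. A toy, whitespace-based sketch (hypothetical, for illustration only; the real implementation arrives with the `transformers` integration in the next patch):

``` python
import numpy as np

class WhitespaceTokenizer:
    """Toy `Tokenizer`: one id per whitespace-separated word, left-padded."""

    eos_token = "<EOS>"
    eos_token_id = 0
    pad_token_id = 0

    def __init__(self, vocabulary):
        self.vocabulary = vocabulary  # Dict[str, int]
        self.inverse = {i: token for token, i in vocabulary.items()}

    def encode(self, prompt):
        prompts = [prompt] if isinstance(prompt, str) else prompt
        ids = [[self.vocabulary[w] for w in p.split()] for p in prompts]
        width = max(len(i) for i in ids)
        # Left-pad to a rectangular array, mirroring `padding_side="left"`.
        token_ids = np.array(
            [[self.pad_token_id] * (width - len(i)) + i for i in ids]
        )
        attention_mask = np.array(
            [[0] * (width - len(i)) + [1] * len(i) for i in ids]
        )
        return token_ids, attention_mask

    def decode(self, token_ids):
        return [
            " ".join(self.inverse[int(i)] for i in row)
            for row in np.atleast_2d(token_ids)
        ]
```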
diff --git a/pyproject.toml b/pyproject.toml index 8e3e8cd6..92d7a8d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,7 @@ omit = [ exclude_lines = [ "pragma: no cover", "if TYPE_CHECKING:", + "...", ] show_missing = true diff --git a/tests/models/test_tokenizer.py b/tests/models/test_tokenizer.py new file mode 100644 index 00000000..831f7fe3 --- /dev/null +++ b/tests/models/test_tokenizer.py @@ -0,0 +1,8 @@ +import pytest + +from outlines.models.tokenizer import Tokenizer + + +def test_tokenizer(): + with pytest.raises(TypeError, match="instantiate abstract"): + Tokenizer() From 3165fae6eb17bafd880c83669d7756604fb76739 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 15 Jun 2023 11:07:30 +0200 Subject: [PATCH 161/734] Add the `Transformers` model --- outlines/models/__init__.py | 1 + outlines/models/transformers.py | 92 +++++++++++++++++++++++++++++++ pyproject.toml | 2 +- tests/models/test_transformers.py | 67 ++++++++++++++++++++++ 4 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 outlines/models/transformers.py create mode 100644 tests/models/test_transformers.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 7e902958..53653f0e 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -9,3 +9,4 @@ from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion, OpenAIEmbeddings, OpenAIImageGeneration +from .transformers import transformers diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py new file mode 100644 index 00000000..d71272f7 --- /dev/null +++ b/outlines/models/transformers.py @@ -0,0 +1,92 @@ +import math +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import numpy as np +from numpy.typing import NDArray + +from outlines.models.tokenizer import Tokenizer + +if TYPE_CHECKING: + from transformers import PreTrainedModel, PreTrainedTokenizer + + +__all__ = ["transformers"] + + +class Transformers: + """Represents a `transformers` model.""" + + def __init__( + self, + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + device: Optional[str] = None, + ): + self.device = device if device is not None else "cpu" + self.model = model.to(self.device) + self.tokenizer = tokenizer + + def __call__( + self, input_ids: NDArray[np.int64], attention_mask: NDArray[np.int64] + ) -> NDArray[np.float64]: + import torch + + # `transformers` model accept `input_ids` of size at most equal to 2. We + # thus reshape the input array, call the model and reshape the output + # logits. 
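+        # For example, an `input_ids` array of shape (samples, batch,
+        # num_tokens) is flattened to (samples * batch, num_tokens) for the
+        # forward pass; the probabilities computed below are reshaped back
+        # to (samples, batch, vocab_size) before being returned.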
+ batch_shape = input_ids.shape[:-1] + num_tokens = input_ids.shape[-1] + input_ids = input_ids.reshape(math.prod(batch_shape), num_tokens) + + with torch.no_grad(): + input_ids = torch.from_numpy(input_ids).to(self.device) + attention_mask = torch.from_numpy(attention_mask).to(self.device) + + output = self.model(input_ids, attention_mask=attention_mask) + + next_token_logits = output.logits[:, -1, :] + probs = torch.nn.functional.softmax(next_token_logits, dim=-1).squeeze() + probs = torch.atleast_2d(probs) + numpy_probs = probs.cpu().detach().numpy() + + return numpy_probs.reshape(batch_shape + (-1,)) + + +class TransformersTokenizer(Tokenizer): + """Represents a tokenizer for models in the `transformers` library.""" + + def __init__(self, model_name: str, **kwargs): + from transformers import AutoTokenizer + + kwargs.setdefault("padding_side", "left") + self.tokenizer = AutoTokenizer.from_pretrained(model_name, **kwargs) + self.eos_token_id = self.tokenizer.eos_token_id + self.eos_token = self.tokenizer.eos_token + + if not self.tokenizer.pad_token_id: + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.pad_token_id = self.eos_token_id + else: + self.pad_token_id = self.tokenizer.pad_token_id + self.pad_token = self.tokenizer.pad_token + + def encode( + self, prompt: Union[str, List[str]], **kwargs + ) -> Tuple[NDArray[np.int64], NDArray[np.int64]]: + kwargs["padding"] = True + kwargs["return_tensors"] = "np" + output = self.tokenizer(prompt, **kwargs) + return output["input_ids"], output["attention_mask"] + + def decode(self, token_ids: NDArray[np.int64]) -> List[str]: + text = self.tokenizer.batch_decode(token_ids) + return text + + +def transformers(model_name: str, device: Optional[str] = None, **model_kwargs): + from transformers import AutoModelForCausalLM + + model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs) + tokenizer = TransformersTokenizer(model_name) + + return Transformers(model, tokenizer, device) diff --git a/pyproject.toml b/pyproject.toml index 92d7a8d1..01222eeb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,7 @@ module = [ "tenacity.*", "tiktoken.*", "torch", - "transformers", + "transformers.*", ] ignore_missing_imports = true diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py new file mode 100644 index 00000000..1d7bcb40 --- /dev/null +++ b/tests/models/test_transformers.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from transformers.models.gpt2 import GPT2TokenizerFast + +from outlines.models.transformers import TransformersTokenizer, transformers + +TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" + + +def test_tokenizer(): + tokenizer = TransformersTokenizer(TEST_MODEL) + assert tokenizer.eos_token_id == 0 + assert tokenizer.pad_token_id == 0 + assert isinstance(tokenizer.tokenizer, GPT2TokenizerFast) + + token_ids, attention_mask = tokenizer.encode("Test") + assert token_ids.ndim == 2 + assert token_ids.shape[0] == 1 + assert isinstance(token_ids, np.ndarray) + assert token_ids.shape == attention_mask.shape + + token_ids, attention_mask = tokenizer.encode(["Test", "Test"]) + assert token_ids.ndim == 2 + assert token_ids.shape[0] == 2 + assert isinstance(token_ids, np.ndarray) + assert token_ids.shape == attention_mask.shape + + token_ids, attention_mask = tokenizer.encode(["Test", "A long sentence"]) + assert token_ids.shape == attention_mask.shape + assert attention_mask[0][0] == tokenizer.pad_token_id + + 
text = tokenizer.decode(np.array([[0, 1, 2]])) + isinstance(text, str) + + text = tokenizer.decode(np.array([[0, 1, 2], [3, 4, 5]])) + isinstance(text, list) + isinstance(text[0], str) + isinstance(text[1], str) + + +def test_model(): + with pytest.raises(RuntimeError, match="Expected one of cpu, cuda"): + transformers(TEST_MODEL, device="non_existent") + + model = transformers(TEST_MODEL, device="cpu") + assert isinstance(model.tokenizer, TransformersTokenizer) + assert model.device == "cpu" + + input_ids = np.array([[0, 1, 2]]) + logits = model(input_ids, np.ones_like(input_ids)) + assert isinstance(logits, np.ndarray) + assert logits.ndim == 2 + assert logits.shape[0] == 1 + + input_ids = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) + logits = model(input_ids, np.ones_like(input_ids)) + assert isinstance(logits, np.ndarray) + assert logits.ndim == 2 + assert logits.shape[0] == 3 + + input_ids = np.array([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) + logits = model(input_ids, np.ones_like(input_ids)) + assert logits.ndim == 3 + assert logits.shape[0] == 2 + assert logits.shape[1] == 2 + assert_array_equal(logits[0][0], logits[1][1]) From 13ce1fd194bd71a3c85d3544c3620fd0e921b94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 15 Jun 2023 14:44:55 +0200 Subject: [PATCH 162/734] Add the `Sequence` base class --- outlines/text/sequences/sequence.py | 250 ++++++++++++++++ pyproject.toml | 2 +- tests/text/sequences/test_sequence.py | 393 ++++++++++++++++++++++++++ 3 files changed, 644 insertions(+), 1 deletion(-) create mode 100644 outlines/text/sequences/sequence.py create mode 100644 tests/text/sequences/test_sequence.py diff --git a/outlines/text/sequences/sequence.py b/outlines/text/sequences/sequence.py new file mode 100644 index 00000000..bea23de4 --- /dev/null +++ b/outlines/text/sequences/sequence.py @@ -0,0 +1,250 @@ +from typing import List, Optional, Tuple, Union + +import numpy as np +from numpy.random import Generator +from numpy.typing import NDArray + + +class Sequence: + """Represents a sequence generation method.""" + + def __init__(self, model, max_tokens: Optional[int] = None): + """Create a `Sequence` instance. + + Parameters + ---------- + model + The instance of the model used to generate next-token probabilities. + max_tokens + The maximum number of tokens that will be generated if no termination + condition is met. + + """ + self.model = model + self.max_tokens = max_tokens + + def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]: + """Determine whether we should stop the generation.""" + raise NotImplementedError( + "`Sequence.is_finished` must be implemented by subclasses." + ) + + def step( + self, + rng: Generator, + token_ids: NDArray[np.int64], + attention_mask: NDArray[np.int64], + samples: int = 1, + ) -> Tuple[NDArray[np.int64], NDArray[float]]: + """Generate one or several tokens that complete the input sequence. + + The sampling step consists in using a model to generate next-token + logits and then sample `samples`-many new tokens from a categorical + distribution parametrized by these logits. + + Parameters + ---------- + rng + NumPy random number Generator instance + token_ids + The token ids passed as an input to the model, of shape `batch_shape + + (num_tokens,)`, where `num_tokens` is the sequences' length. + samples + The number of continuations to sample from the next-token probability + distribution. 
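+        attention_mask
+            The attention mask that corresponds to `token_ids`, of the same
+            shape, with 1 for tokens and 0 for padding.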
+ + Returns + ------- + A tuple with an array of shape `new_batch_shape + (num_tokens+1,)`that + contains the completed sequences (input token ids and generated token + ids) and an array of shape `new_batch_shape + (vocab_size,)` that + contains the next token probabilities. + `new_batch_shape` is computed by removing dimensions of size one in + `(samples,) + batch_shape`. + + """ + num_input_dims = token_ids.ndim + probs = self.model(token_ids, attention_mask) + + # Sample `samples`-many new tokens + next_token_ids = vectorized_random_choice(rng, probs, samples) + + # Add the missing `num_tokens` and `num_sample` dimensions + next_token_ids = np.expand_dims(next_token_ids, -1) + token_ids = np.expand_dims(token_ids, 0) + + # Expand the input `token_ids` array to be able to concatenate several + # samples. + if samples > 1: + repetitions = (samples,) + (1,) * num_input_dims + token_ids = np.tile(token_ids, repetitions) + probs = np.tile(probs, repetitions) + + token_ids = np.concatenate([token_ids, next_token_ids], axis=-1) + + # Merge sample and batch dimensions by removing dimensions of length + # 1. The shape of the resulting arrays is `new_batch_shape + (num_tokens,)` + # and `new_batch_shape + (vocab_size,)` respectively. + token_ids = np.atleast_2d(token_ids.squeeze()) + probs = np.atleast_2d(probs.squeeze()) + + return token_ids, probs + + def expand_attention_mask( + self, attention_mask: NDArray[np.int64] + ) -> NDArray[np.int64]: + """Expand the attention mask after the last completion.""" + batch_shape = attention_mask.shape[:-1] + attention_mask = np.concatenate( + [attention_mask, np.broadcast_to([1], batch_shape + (1,))], axis=-1 + ) + return attention_mask + + def update_token_ids( + self, + is_finished: NDArray[np.bool_], + token_ids: NDArray[np.int64], + token_ids_unfinished: NDArray[np.int64], + ) -> NDArray[np.int64]: + """Update the array of token ids after the last completion. + + We only generate new tokens for the sequences that are not finished. We thus + update the array with the new tokens, and append pad tokens to the finished + sequences. + + Parameters + ---------- + is_finished + Boolean array that indicates which sequences are finished. + token_ids + Array that contains the sequences before the generation's last step. + token_ids_unfinished + Array that contains the sequences of the unfinished sequences + after the generation's last step. + + Returns + ------- + An array that contains the updated array that contains the sequences. We append + pad tokens to the finished sequences. + + """ + batch_shape = token_ids.shape[:-1] + num_tokens = token_ids.shape[-1] + new_token_ids = np.empty(batch_shape + (num_tokens + 1,), dtype=np.int64) + + token_ids_finished = token_ids[is_finished] + batch_shape_finished = token_ids_finished.shape[:-1] + token_ids_finished = np.concatenate( + [ + token_ids_finished, + np.broadcast_to( + [self.model.tokenizer.pad_token_id], batch_shape_finished + (1,) + ), + ], + axis=-1, + ) + + new_token_ids[~is_finished] = token_ids_unfinished + new_token_ids[is_finished] = token_ids_finished + + return new_token_ids + + def __call__( + self, + prompt: Union[str, List[str]], + samples: int = 1, + rng: Generator = np.random.default_rng(), + ) -> Union[str, List[str]]: + """Generate a new sequence given a prompt. + + Parameters + ---------- + prompt + The input prompt. + samples + The number of samples to generate for each prompt. + + Returns + ------- + The full sequence that contains the prompts and the generated string. 
+ + """ + token_ids, attention_mask = self.model.tokenizer.encode(prompt) + num_prompt_tokens = token_ids.shape[-1] + + if samples > 1: + token_ids, _ = self.step(rng, token_ids, attention_mask, samples) + is_finished = self.is_finished(token_ids) + + num_batch_dims = token_ids.ndim - 1 + repetitions = (samples,) + (1,) * num_batch_dims + attention_mask = np.tile(attention_mask, repetitions) + attention_mask = self.expand_attention_mask(attention_mask) + else: + batch_shape = token_ids.shape[:-1] + is_finished = np.zeros(batch_shape, dtype=np.bool_) + + while True: + num_generated_tokens = token_ids.shape[-1] - num_prompt_tokens + if np.all(is_finished) or num_generated_tokens == self.max_tokens: + break + + token_ids_unfinished = token_ids[~is_finished] + attention_mask_unfinished = attention_mask[~is_finished] + token_ids_unfinished, _ = self.step( + rng, token_ids_unfinished, attention_mask_unfinished + ) + + token_ids = self.update_token_ids( + is_finished, token_ids, token_ids_unfinished + ) + attention_mask = self.expand_attention_mask(attention_mask) + is_finished[~is_finished] = self.is_finished(token_ids_unfinished).flatten() + + result = self.model.tokenizer.decode(token_ids) + + if len(result) == 1: + return result[0] + + return result + + +vsearchsorted = np.vectorize(np.searchsorted, otypes=[int], signature="(n),()->()") + + +def vectorized_random_choice( + rng: Generator, + p: NDArray[np.float64], + samples: int = 1, +): + """Vectorized implementation of `np.random.choice`. + + `np.random.choice` does not support arrays of probability. This implements + the equivalent of this function where the `p` argument can be a matrix. + + Note + ---- + `searchsorted` might be more efficient here since the number of elements + can be quite large. + + Parameters + ---------- + rng + NumPy random number Generator instance + p + An array of probability of shape `(num_probability_vectors, num_items)` + that must sum to 1. + samples + The number of samples to take for each probability vector. 
+ + Returns + ------- + An array of shape `(num_samples, batch_size)` + + """ + + cumsum = np.expand_dims(p.cumsum(axis=-1), 0) + rand = rng.random((samples,) + p.shape[:-1]) + idx = vsearchsorted(cumsum, rand) + + return idx diff --git a/pyproject.toml b/pyproject.toml index 01222eeb..62c7ae99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,8 +75,8 @@ module = [ "diffusers", "jinja2", "joblib", - "numpy.*", "openai", + "numpy.*", "perscache.*", "PIL", "PIL.Image", diff --git a/tests/text/sequences/test_sequence.py b/tests/text/sequences/test_sequence.py new file mode 100644 index 00000000..94699010 --- /dev/null +++ b/tests/text/sequences/test_sequence.py @@ -0,0 +1,393 @@ +from typing import Dict, List, Union + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from outlines.text.sequences.sequence import Sequence, vectorized_random_choice + + +def test_vectorized_random_choice(): + rng = np.random.default_rng(0) + + probs = np.array([[1, 0, 0, 0]]) + sample = vectorized_random_choice(rng, probs) + assert sample.shape == (1, 1) + assert_array_equal(sample, np.zeros((1, 1))) + + probs = np.array([[1, 0, 0, 0]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 1) + assert_array_equal(sample, np.zeros((3, 1))) + + probs = np.tile(np.array([[1, 0, 0, 0]]), (2, 1)) + sample = vectorized_random_choice(rng, probs) + assert sample.shape == (1, 2) + assert_array_equal(sample, np.zeros((1, 2))) + + probs = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 2) + assert_array_equal(sample, [[0, 1], [0, 1], [0, 1]]) + + probs = np.array([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 2, 2) + assert_array_equal(sample, [[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 3]]]) + + +def test_sequence_error(): + with pytest.raises(NotImplementedError, match="must be implemented"): + sequence = Sequence(None) + sequence.is_finished(np.array([1])) + + +def ModelStep(logits): + """Mock model to test `Sequence.step`""" + + logits = np.array([logits]) + + def call(input_ids, *_): + """Call the model. + + We first repeat the logits `num_sequences` times, and then + reshape the resulting array to match the batch size. 
+ + """ + import math + + batch_shape = input_ids.shape[:-1] + vocab_shape = (logits.shape[-1],) + shaped_logits = np.tile(logits, (math.prod(batch_shape), 1)) + return shaped_logits.reshape(batch_shape + vocab_shape) + + return call + + +def test_sequence_step(): + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + + input_ids = np.array([[1, 2]]) + token_ids, probs = sequence.step(rng, input_ids, np.ones((1, 2))) + assert_array_equal(token_ids, [[1, 2, 1]]) + assert probs.shape == (1, 4) + + +def test_sequence_step_batch(): + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + + input_ids = np.array([[1, 2], [3, 4]]) + token_ids, probs = sequence.step(rng, input_ids, np.ones((2, 2))) + assert_array_equal(token_ids, [[1, 2, 1], [3, 4, 1]]) + assert probs.shape == (2, 4) + + +def test_sequence_step_sample(): + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + input_ids = np.array([[1, 2]]) + token_ids, probs = sequence.step(rng, input_ids, np.ones((1, 2)), samples=3) + assert_array_equal(token_ids, [[1, 2, 1], [1, 2, 1], [1, 2, 1]]) + assert probs.shape == (3, 4) + + +def test_sequence_sample_batch(): + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + input_ids = np.array([[1, 2, 1], [3, 4, 1]]) + token_ids, probs = sequence.step(rng, input_ids, np.ones((2, 3)), samples=3) + assert_array_equal( + token_ids, + [ + [[1, 2, 1, 1], [3, 4, 1, 1]], + [[1, 2, 1, 1], [3, 4, 1, 1]], + [[1, 2, 1, 1], [3, 4, 1, 1]], + ], + ) + assert probs.shape == (3, 2, 4) + + +def test_sequence_step_loop(): + """Make sure that we can feed `step`'s output back as an input.""" + + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + input_ids = np.array([[1, 2]]) + token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 2))) + token_ids, probs = sequence.step(rng, token_ids, np.ones((1, 3))) + assert_array_equal(token_ids, [[1, 2, 1, 1]]) + assert probs.shape == (1, 4) + + input_ids = np.array([[1, 2], [3, 4]]) + token_ids, _ = sequence.step(rng, input_ids, np.ones((2, 2))) + token_ids, probs = sequence.step(rng, token_ids, np.ones((2, 3))) + assert_array_equal(token_ids, [[1, 2, 1, 1], [3, 4, 1, 1]]) + assert probs.shape == (2, 4) + + # The number of samples becomes the batch size at the next iteration. 
+ input_ids = np.array([[1, 2]]) + token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 2)), samples=3) + token_ids, probs = sequence.step(rng, token_ids, np.ones((3, 3))) + assert_array_equal(token_ids, [[1, 2, 1, 1], [1, 2, 1, 1], [1, 2, 1, 1]]) + assert probs.shape == (3, 4) + + +def test_sequence_step_loop_general(): + rng = np.random.default_rng(0) + + logits = np.array([0, 1, 0, 0]) + model = ModelStep(logits) + + sequence = Sequence(model) + input_ids = np.array([[1, 2, 1], [3, 4, 1]]) + token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 3)), samples=3) + result, _ = sequence.step(rng, token_ids, np.ones((3, 4))) + assert result.shape == (3, 2, 5) + assert_array_equal( + result, + [ + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + ], + ) + + +class TokenizerUpdateTokens: + pad_token_id = -1 + + +class ModelUpdateTokens: + tokenizer = TokenizerUpdateTokens() + + +def test_update_token_ids_all_unfinished(): + sequence = Sequence(ModelUpdateTokens()) + + previous_token_ids = np.array([[1, 1], [1, 1]]) + is_finished = np.array([False, False]) + token_ids_unfinished = np.array([[1, 1, 1], [1, 1, 1]]) + + result = sequence.update_token_ids( + is_finished, previous_token_ids, token_ids_unfinished + ) + assert_array_equal(result, [[1, 1, 1], [1, 1, 1]]) + + +def test_update_token_ids_some_unfinished(): + "Makes sure that the pad token is appended to finished sequences." + sequence = Sequence(ModelUpdateTokens()) + + previous_token_ids = np.array([[1, 1], [1, 1]]) + token_ids_unfinished = np.array([[1, 1, 1]]) + is_finished = np.array([True, False]) + result = sequence.update_token_ids( + is_finished, previous_token_ids, token_ids_unfinished + ) + assert_array_equal(result, [[1, 1, -1], [1, 1, 1]]) + + +@pytest.mark.xfail +def test_update_token_ids_larger_dimensions(): + sequence = Sequence(ModelUpdateTokens()) + + previous_token_ids = np.array([[1, 1], [1, 1]]) + is_finished = np.array([False, False]) + token_ids_unfinished = np.array([[1, 1, 1], [1, 1, 1]]) + result = sequence.update_token_ids( + is_finished, previous_token_ids, token_ids_unfinished + ) + assert_array_equal(result, [[1, 1, -1], [1, 1, 1]]) + + +class MockModel: + def __init__(self, tokenizer, logits): + self.tokenizer = tokenizer + self.logits = np.array(logits) + self.iteration_idx = 0 + + def __call__(self, input_ids, *_): + import math + + batch_shape = input_ids.shape[:-1] + vocab_shape = (self.logits.shape[-1],) + shaped_logits = np.tile( + self.logits[self.iteration_idx], (math.prod(batch_shape), 1) + ) + self.iteration_idx += 1 + + return shaped_logits.reshape(batch_shape + vocab_shape) + + +class MockTokenizer: + def __init__(self, vocabulary: Dict[str, int]): + self.vocabulary = vocabulary + self.pad_token_id = -1 + + def encode(self, prompts: Union[str, List[str]]): + if isinstance(prompts, str): + prompts = [prompts] + + token_ids = np.array([[self.vocabulary[prompt]] for prompt in prompts]) + attention_mask = np.ones_like(token_ids) + + return token_ids, attention_mask + + def decode(self, token_ids): + return token_ids + + +def test_call_single_prompt(): + class FinishAfterTwo(Sequence): + def __init__(self, model): + super().__init__(model) + self.iteration_idx = 0 + + def is_finished(self, token_ids): + """Finish generating the sequence after two iterations""" + if self.iteration_idx == 0: + self.iteration_idx += 1 + return np.array([False]) + else: + return np.array([True]) + + tokenizer = MockTokenizer({"Test": 0, "a": 1, "b": 2}) 
+    model = MockModel(tokenizer, [[1, 0, 0], [0, 1, 0]])
+    sequence = FinishAfterTwo(model)
+
+    result = sequence("Test")
+    assert_array_equal(result, [0, 0, 1])
+
+
+def test_call_prompt_list():
+    class Tokenizer:
+        def __init__(self, vocabulary: Dict[str, int]):
+            self.vocabulary = vocabulary
+            self.pad_token_id = -1
+
+        def __call__(self, prompts: List[str], **_):
+            return {
+                "input_ids": np.array([[self.vocabulary[prompt]] for prompt in prompts])
+            }
+
+        def batch_decode(self, token_ids):
+            return token_ids
+
+    class FinishAfterThree(Sequence):
+        def __init__(self, model):
+            super().__init__(model)
+            self.iteration_idx = 0
+
+        def is_finished(self, token_ids):
+            """Finish generating the first and third sequences after two
+            iterations and the second one after three iterations.
+
+            """
+            if self.iteration_idx == 0:
+                self.iteration_idx += 1
+                return np.array([False, False, False])
+            elif self.iteration_idx == 1:
+                self.iteration_idx += 1
+                return np.array([True, False, True])
+            else:
+                return np.array([True])  # We only consider the unfinished sequences
+
+    tokenizer = MockTokenizer(
+        {"Test1": 0, "Test2": 1, "a": 2, "b": 3, "c": 4, "Test3": 5}
+    )
+    model = MockModel(
+        tokenizer,
+        [[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0]],
+    )
+    sequence = FinishAfterThree(model)
+
+    result = sequence(["Test1", "Test2", "Test3"])
+    assert_array_equal(result, [[0, 2, 3, -1], [1, 2, 3, 4], [5, 2, 3, -1]])
+
+
+def test_call_single_prompt_samples():
+    class FinishAfterTwo(Sequence):
+        def __init__(self, model):
+            super().__init__(model)
+            self.iteration_idx = 0
+
+        def is_finished(self, token_ids):
+            if self.iteration_idx == 0:
+                self.iteration_idx += 1
+                return np.array([False, False, False])
+            else:
+                return np.array([True, True, True])
+
+    tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 2, "Test": 4})
+    model = MockModel(tokenizer, [[1, 0, 0, 0], [0, 1, 0, 0]])
+    sequence = FinishAfterTwo(model)
+    result = sequence("Test", samples=3)
+    assert_array_equal(result, [[4, 0, 1], [4, 0, 1], [4, 0, 1]])
+
+    class FinishAfterOne(Sequence):
+        def __init__(self, model):
+            super().__init__(model)
+
+        def is_finished(self, token_ids):
+            return np.array([True, True, True])
+
+    tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 3, "Test": 4})
+    model = MockModel(tokenizer, [[1, 0, 0, 0], [0, 1, 0, 0]])
+    sequence = FinishAfterOne(model)
+    result = sequence("Test", samples=3)
+    assert_array_equal(result, [[4, 0], [4, 0], [4, 0]])
+
+
+def test_call_prompt_list_samples():
+    class FinishAfterThree(Sequence):
+        def __init__(self, model):
+            super().__init__(model)
+            self.iteration_idx = 0
+
+        def is_finished(self, token_ids):
+            if self.iteration_idx == 0:
+                self.iteration_idx += 1
+                batch_shape = token_ids.shape[:-1]
+                return np.zeros(batch_shape, dtype=np.bool_)
+            elif self.iteration_idx == 1:
+                self.iteration_idx += 1
+                return np.array(
+                    [[True, False, True], [True, False, True], [True, False, True]]
+                )
+            else:
+                return np.array([True, True, True])
+
+    tokenizer = MockTokenizer(
+        {"a": 0, "b": 1, "c": 2, "Test1": 3, "Test2": 4, "Test3": 5}
+    )
+    model = MockModel(
+        tokenizer, [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]]
+    )
+    sequence = FinishAfterThree(model)
+
+    result = sequence(["Test1", "Test2", "Test3"], samples=3)
+    assert_array_equal(
+        result, np.tile([[3, 0, 1, -1], [4, 0, 1, 2], [5, 0, 1, -1]], (3, 1, 1))
+    )
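These `__call__` tests pin down the finished-sequence bookkeeping: a sequence that is marked finished is dropped from subsequent model calls, and `update_token_ids` right-pads it with `pad_token_id` so the batch stays rectangular. A minimal NumPy sketch of one such update, assuming a two-sequence batch and `pad_token_id = -1`; the values are illustrative, not the patch's exact implementation:

import numpy as np

pad_token_id = -1
token_ids = np.array([[3, 2], [3, 2]])
is_finished = np.array([True, False])

# Only the unfinished sequence gets a real next token (7 here)...
token_ids_unfinished = np.concatenate([token_ids[~is_finished], [[7]]], axis=-1)

# ...while the finished one is right-padded so the batch stays rectangular.
updated = np.full((token_ids.shape[0], token_ids.shape[-1] + 1), pad_token_id)
updated[~is_finished] = token_ids_unfinished
updated[is_finished, :-1] = token_ids[is_finished]

assert (updated == np.array([[3, 2, -1], [3, 2, 7]])).all()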
From 759ce89b2588d45c8ce68ff1e2ceb8f787f9e323 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 12 Jun 2023 15:50:28 +0200
Subject: [PATCH 163/734] Add `Continuation` generation model

---
 outlines/text/__init__.py                    |  1 +
 outlines/text/generate/__init__.py           |  1 +
 outlines/text/generate/continuation.py       | 52 +++++++++++++++++++
 .../text/{sequences => generate}/sequence.py |  4 ++
 tests/text/generate/test_continuation.py     | 42 +++++++++++++++
 .../generate/test_integration_transfomers.py | 24 +++++++++
 .../{sequences => generate}/test_sequence.py |  2 +-
 7 files changed, 125 insertions(+), 1 deletion(-)
 create mode 100644 outlines/text/generate/__init__.py
 create mode 100644 outlines/text/generate/continuation.py
 rename outlines/text/{sequences => generate}/sequence.py (98%)
 create mode 100644 tests/text/generate/test_continuation.py
 create mode 100644 tests/text/generate/test_integration_transfomers.py
 rename tests/text/{sequences => generate}/test_sequence.py (99%)

diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py
index 4b187905..8870c7a1 100644
--- a/outlines/text/__init__.py
+++ b/outlines/text/__init__.py
@@ -1,2 +1,3 @@
 from .functions import function
+from .generate import continuation
 from .prompts import prompt, render
diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py
new file mode 100644
index 00000000..3176b9b4
--- /dev/null
+++ b/outlines/text/generate/__init__.py
@@ -0,0 +1 @@
+from .continuation import continuation
diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py
new file mode 100644
index 00000000..e616d3f3
--- /dev/null
+++ b/outlines/text/generate/continuation.py
@@ -0,0 +1,52 @@
+from typing import List, Optional
+
+import numpy as np
+from numpy.typing import NDArray
+
+from outlines.text.generate.sequence import Sequence
+
+
+class Continuation(Sequence):
+    """Represents a completion generation model.
+
+    `Continuation` instances are unconstrained generation models that stop when
+    an EOS token has been found or when the maximum number of tokens has been
+    reached.
+
+    >> import outlines.text as text
+    >> sequence = text.continuation(model)("Say something")
+
+    """
+
+    def __init__(self, model, max_tokens: Optional[int]):
+        super().__init__(model, max_tokens)
+
+    def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]:
+        """Determine whether the sequences reached maximum length or end with
+        an EOS token.
+
+        In practice, `Sequence`'s `__call__` method only passes the `token_ids`
+        of the sequences that haven't been marked as finished already, which is
+        why we only need to look for the EOS token in the last element rather
+        than in the whole sequence.
+
+        Parameters
+        ----------
+        token_ids
+            The input sequences.
+ + """ + is_finished = np.zeros((token_ids.shape[0],), dtype=np.bool_) + is_finished[token_ids[:, -1] == self.model.tokenizer.eos_token_id] = True + + return is_finished + + def postprocess_completions(self, completions: List[str]) -> List[str]: + """Remove the EOS token from the completion.""" + return [ + completion.replace(self.model.tokenizer.eos_token, "") + for completion in completions + ] + + +def continuation(model, max_tokens: Optional[int] = None): + return Continuation(model, max_tokens) diff --git a/outlines/text/sequences/sequence.py b/outlines/text/generate/sequence.py similarity index 98% rename from outlines/text/sequences/sequence.py rename to outlines/text/generate/sequence.py index bea23de4..614297ed 100644 --- a/outlines/text/sequences/sequence.py +++ b/outlines/text/generate/sequence.py @@ -29,6 +29,9 @@ def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]: "`Sequence.is_finished` must be implemented by subclasses." ) + def postprocess_completions(self, completions: List[str]) -> List[str]: + return completions + def step( self, rng: Generator, @@ -202,6 +205,7 @@ def __call__( is_finished[~is_finished] = self.is_finished(token_ids_unfinished).flatten() result = self.model.tokenizer.decode(token_ids) + result = self.postprocess_completions(result) if len(result) == 1: return result[0] diff --git a/tests/text/generate/test_continuation.py b/tests/text/generate/test_continuation.py new file mode 100644 index 00000000..aaf01749 --- /dev/null +++ b/tests/text/generate/test_continuation.py @@ -0,0 +1,42 @@ +import numpy as np +from numpy.testing import assert_array_equal + +from outlines.text.generate.continuation import Continuation, continuation + + +class Tokenizer: + eos_token = "" + eos_token_id = 0 + pad_token_ids = -1 + + +class Model: + tokenizer = Tokenizer() + + +def test_continuation_is_finished(): + model = continuation(Model(), 10) + assert isinstance(model, Continuation) + + token_ids = np.array([[3, 2]]) + result = model.is_finished(token_ids) + assert_array_equal(result, [False]) + + token_ids = np.array([[3, 2, 0]]) + result = model.is_finished(token_ids) + assert_array_equal(result, [True]) + + token_ids = np.array([[3, 2, 1], [3, 2, 0]]) + result = model.is_finished(token_ids) + assert_array_equal(result, [False, True]) + + token_ids = np.array([[3, 2, 1, 0], [3, 2, 0, -1]]) + result = model.is_finished(token_ids) + assert_array_equal(result, [True, False]) + + +def test_continuation_postprocess(): + model = continuation(Model()) + result = model.postprocess_completions(["Here"]) + assert len(result) == 1 + assert result[0] == "Here" diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py new file mode 100644 index 00000000..55bbde96 --- /dev/null +++ b/tests/text/generate/test_integration_transfomers.py @@ -0,0 +1,24 @@ +import numpy as np + +import outlines.models as models +from outlines.text.generate.continuation import continuation + + +def test_transformers_integration_completion(): + rng = np.random.default_rng(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + sequence = continuation(model)("prompt", rng=rng) + assert isinstance(sequence, str) + assert model.tokenizer.eos_token not in sequence + + sequence = continuation(model, max_tokens=10)("prompt", rng=rng) + assert isinstance(sequence, str) + + +def test_transformers_integration_with_pad_token(): + model_name = 
"hf-internal-testing/tiny-random-XLMRobertaXLForCausalLM" + model = models.transformers(model_name, device="cpu") + assert model.tokenizer.pad_token_id == 1 + assert model.tokenizer.pad_token == "" diff --git a/tests/text/sequences/test_sequence.py b/tests/text/generate/test_sequence.py similarity index 99% rename from tests/text/sequences/test_sequence.py rename to tests/text/generate/test_sequence.py index 94699010..9659e8d6 100644 --- a/tests/text/sequences/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -4,7 +4,7 @@ import pytest from numpy.testing import assert_array_equal -from outlines.text.sequences.sequence import Sequence, vectorized_random_choice +from outlines.text.generate.sequence import Sequence, vectorized_random_choice def test_vectorized_random_choice(): From e88f13b22308f3808f51648be2ee70ff7e3ecc9e Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Mon, 3 Jul 2023 13:28:31 -0500 Subject: [PATCH 164/734] Updates for Pydantic 2.0 --- outlines/text/functions.py | 5 ++++- outlines/text/prompts.py | 12 +++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/outlines/text/functions.py b/outlines/text/functions.py index ae1d2135..c3c6bfa5 100644 --- a/outlines/text/functions.py +++ b/outlines/text/functions.py @@ -52,7 +52,10 @@ def validate(validator, result): @validate.register(BaseModelType) def validate_pydantic(validator, result): - return validator.parse_raw(result) + if hasattr(validator, "model_validate_json"): + return validator.model_validate_json(result) + else: # pragma: no cover + return validator.parse_raw(result) @validate.register(FunctionType) diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index 635a5683..8b3d5eba 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -289,15 +289,21 @@ def get_schema_pydantic(model: type[BaseModel]): if not type(model) == type(BaseModel): raise TypeError("The `schema` filter only applies to Pydantic models.") - raw_schema = model.schema() - definitions = raw_schema.get("definitions", None) + if hasattr(model, "model_json_schema"): + def_key = "$defs" + raw_schema = model.model_json_schema() + else: # pragma: no cover + def_key = "definitions" + raw_schema = model.schema() + + definitions = raw_schema.get(def_key, None) schema = parse_pydantic_schema(raw_schema, definitions) return json.dumps(schema, indent=2) def parse_pydantic_schema(raw_schema, definitions): - """Parse the output of `Basemodel.schema()`. + """Parse the output of `Basemodel.[schema|model_json_schema]()`. This recursively follows the references to other schemas in case of nested models. Other schemas are stored under the "definitions" From 2e0797394824e35b4007b2be3f5e2a777d208517 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Mon, 5 Jun 2023 23:18:49 -0500 Subject: [PATCH 165/734] Add basic parser-driven masking utilities --- examples/parsing.py | 108 +++++++++++++++++ outlines/text/parsing.py | 240 +++++++++++++++++++++++++++++++++++++ pyproject.toml | 4 + tests/text/test_parsing.py | 113 +++++++++++++++++ 4 files changed, 465 insertions(+) create mode 100644 examples/parsing.py create mode 100644 outlines/text/parsing.py create mode 100644 tests/text/test_parsing.py diff --git a/examples/parsing.py b/examples/parsing.py new file mode 100644 index 00000000..3f070c47 --- /dev/null +++ b/examples/parsing.py @@ -0,0 +1,108 @@ +"""An example illustrating parser-based masking.""" +import math +import time + +import torch +from lark import Lark +from lark.indenter import DedentError +from lark.lexer import UnexpectedCharacters, UnexpectedToken +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + LogitsProcessor, + LogitsProcessorList, + set_seed, +) + +from outlines.text.parsing import PartialPythonIndenter, copy_parser_state, parse_to_end + +revision = None +checkpoint = "Salesforce/codegen-350M-mono" +device = "cuda" + +tokenizer = AutoTokenizer.from_pretrained(checkpoint) + +model = AutoModelForCausalLM.from_pretrained( + checkpoint, trust_remote_code=True, revision=revision +).to(device) + +input_text = "def " +inputs = tokenizer.encode(input_text, return_tensors="pt").to(device) + + +class ParserLogitsProcessor(LogitsProcessor): + """Bias invalid token scores according to a running parse state.""" + + def __init__(self): + pyparser = Lark.open_from_package( + "lark", + "python.lark", + ["grammars"], + parser="lalr", + postlex=PartialPythonIndenter(), + start="file_input", + ) + ip = pyparser.parse_interactive("") + self.parser_state = ip.parser_state + self.states_stack = [self.parser_state] + self.token_seq = None + self.token_idx = 0 + + def __call__( + self, input_ids: torch.LongTensor, scores: torch.FloatTensor + ) -> torch.FloatTensor: + if self.token_seq is None: + self.token_seq = tokenizer.decode(input_ids[0]) + self.token_idx = len(input_ids[0]) - 1 + else: + self.token_idx += 1 + self.token_seq += tokenizer.decode(input_ids[0][self.token_idx]) + + # Process the last sampled token + lex_state = self.parser_state.lexer.state + lex_state.text = self.token_seq + + self.parser_state, partial_tokens = parse_to_end(self.parser_state) + + print("Parsed:\n") + print(self.token_seq) + + print(partial_tokens) + + mask = torch.full_like(scores, -math.inf) + + # Determine which tokens in the vocabulary are valid next tokens + # given the parser state. + # + # TODO: This is a very naive and slow approach. It could be done in + # parallel, but there are a few other approaches to try first, and + # those should dramatically reduce the amount of work done here. + t0 = time.perf_counter() + for test_token, token_id in tokenizer.vocab.items(): + ps = copy_parser_state(self.parser_state) + ls = ps.lexer.state + ls.text = self.token_seq + test_token + + try: + # TODO: The resulting states could possibly be reused? 
+ parse_to_end(ps) + mask[0][token_id] = 0 + except (UnexpectedToken, UnexpectedCharacters, DedentError): + pass + + print(f"Next token masking duration: {time.perf_counter() - t0}") + + return scores + mask + + +set_seed(20399) + +outputs = model.generate( + inputs, + max_length=100, + temperature=0.1, + logits_processor=LogitsProcessorList([ParserLogitsProcessor()]), + renormalize_logits=True, +) + +print(tokenizer.decode(outputs[0])) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py new file mode 100644 index 00000000..7b1d9cba --- /dev/null +++ b/outlines/text/parsing.py @@ -0,0 +1,240 @@ +from copy import copy +from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Tuple + +import regex +from lark.exceptions import ( + LexError, + UnexpectedCharacters, + UnexpectedEOF, + UnexpectedToken, +) +from lark.indenter import PythonIndenter +from lark.lexer import BasicLexer, LexerState, Scanner, Token +from lark.parsers.lalr_interactive_parser import InteractiveParser +from lark.utils import get_regexp_width + +if TYPE_CHECKING: + from lark.lexer import LexerThread + from lark.parsers.lalr_parser import ParserState + + +class PartialTokenEOF(UnexpectedEOF): + pass + + +class PartialScanner(Scanner): + def __init__(self, scanner: Scanner): + self.terminals = scanner.terminals + self.g_regex_flags = scanner.g_regex_flags + self.re_ = regex + self.use_bytes = scanner.use_bytes + self.match_whole = scanner.match_whole + self.allowed_types = scanner.allowed_types + self._mres = scanner._mres + + def match(self, text, pos) -> Optional[Tuple[str, Optional[str], bool]]: + for mre in self._mres: + m = mre.match(text, pos=pos, partial=True) + if m: # and ((not m.partial) or m.endpos == len(text)): + return m.group(0), m.lastgroup, m.partial + return None + + +class PartialBasicLexer(BasicLexer): + def __init__(self, basic_lexer: BasicLexer): + self.re = regex + self.newline_types = basic_lexer.newline_types + self.ignore_types = basic_lexer.ignore_types + self.terminals = basic_lexer.terminals + self.user_callbacks = basic_lexer.user_callbacks + self.g_regex_flags = basic_lexer.g_regex_flags + self.use_bytes = basic_lexer.use_bytes + self.terminals_by_name = basic_lexer.terminals_by_name + self.callback = getattr(basic_lexer, "callback", None) + + if basic_lexer._scanner is not None: + self._scanner: Optional[PartialScanner] = PartialScanner( + basic_lexer._scanner + ) + else: + self._scanner = None + + # This is used to determine the token type for partial matches + self.terminal_to_regex = {} + for name, terminal in self.terminals_by_name.items(): + self.terminal_to_regex[name] = self.re.compile( + terminal.pattern.to_regexp(), self.g_regex_flags + ) + + def _build_scanner(self): + super()._build_scanner() + self._scanner = PartialScanner(self._scanner) + + def partial_matches(self, value, type_): + partial_matches = set() + + # TODO: It's unfortunate that we have to do this costly search (again). + # It would be better if we could *not* short-circuit the first time we + # scan in the call to `self.match`. 
+ for term_name, term_regex in self.terminal_to_regex.items(): + if term_name == type_: + # A standard lexed token result could actual indicate a partial + # match + regex_min, regex_max = get_regexp_width(term_regex.pattern) + if regex_min <= len(value) < regex_max: + partial_matches.add(term_name) + else: + m = term_regex.match(value, partial=True) + if m: + partial_matches.add(term_name) + + return partial_matches + + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters( + lex_state.text, + line_ctr.char_pos, + line_ctr.line, + line_ctr.column, + allowed=allowed, + token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, + terminals_by_name=self.terminals_by_name, + ) + + value, type_, partial = res + + # Don't advance the lexing state if we're at the end; there could + # be ambiguous token types that aren't finished. + if line_ctr.char_pos + len(value) >= len(lex_state.text): + partial_matches = self.partial_matches(value, type_) + if partial_matches or partial: + raise PartialTokenEOF(partial_matches) + + assert isinstance(self.callback, Dict) + + if type_ not in self.ignore_types: + t = Token( + type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column + ) + line_ctr.feed(value, type_ in self.newline_types) + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError( + "Callbacks must return a token (returned %r)" % t + ) + lex_state.last_token = t + return t + + if type_ in self.callback: + t2 = Token( + type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column + ) + self.callback[type_](t2) + + line_ctr.feed(value, type_ in self.newline_types) + + raise EOFError(self) + + +class PartialPythonIndenter(PythonIndenter): + """An `Indenter` that doesn't reset its state every time `process` is called.""" + + def process(self, stream): + return self._process(stream) + + def _process(self, stream): + for token in stream: + # These were previously *after* the `yield`, but that makes the + # state tracking unnecessarily convoluted. + if token.type in self.OPEN_PAREN_types: + self.paren_level += 1 + elif token.type in self.CLOSE_PAREN_types: + self.paren_level -= 1 + if self.paren_level < 0: + raise UnexpectedToken(token, []) + + if token.type == self.NL_type: + yield from self.handle_NL(token) + else: + yield token + + # while len(self.indent_level) > 1: + # self.indent_level.pop() + # yield Token(self.DEDENT_type, "") + + def __copy__(self): + res = type(self)() + res.paren_level = self.paren_level + res.indent_level = copy(self.indent_level) + return res + + +def copy_lexer_thread(lexer_thread: "LexerThread") -> "LexerThread": + res = copy(lexer_thread) + res.lexer = copy(res.lexer) + + if ( + res.lexer.postlexer + and isinstance(res.lexer.postlexer, PythonIndenter) + and not isinstance(res.lexer.postlexer, PartialPythonIndenter) + ): + # Patch these methods so that the post lexer keeps its state + # XXX: This won't really work in generality. 
+ postlexer = PartialPythonIndenter() + postlexer.paren_level = res.lexer.postlexer.paren_level + postlexer.indent_level = res.lexer.postlexer.indent_level + res.lexer.postlexer = postlexer + + # Patch/replace the lexer objects so that they support partial matches + lexer = res.lexer.lexer + if not isinstance(lexer.root_lexer, PartialBasicLexer): + lexer.root_lexer = PartialBasicLexer(lexer.root_lexer) + + basic_lexers = res.lexer.lexer.lexers + for idx, lexer in basic_lexers.items(): + basic_lexers[idx] = PartialBasicLexer(lexer) + + res.lexer.postlexer = copy(res.lexer.postlexer) + + return res + + +def copy_parser_state(parser_state: "ParserState") -> "ParserState": + res = copy(parser_state) + res.lexer = copy_lexer_thread(res.lexer) + + return res + + +def copy_ip(ip: "InteractiveParser") -> "InteractiveParser": + res = copy(ip) + res.lexer_thread = copy_lexer_thread(res.lexer_thread) + return res + + +def parse_to_end(parser_state: "ParserState") -> Tuple["ParserState", Set[str]]: + """Continue parsing from the current parse state and return partial next tokens.""" + + parser_state = copy_parser_state(parser_state) + + expected_next_tokens: Set[str] = set() + try: + for token in parser_state.lexer.lex(parser_state): + parser_state.feed_token(token) + except PartialTokenEOF as e: + expected_next_tokens = e.expected + + return parser_state, expected_next_tokens diff --git a/pyproject.toml b/pyproject.toml index 62c7ae99..686de1a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,8 @@ test = [ "transformers", "coverage[toml]>=5.1", "diff-cover", + "lark", + "regex", ] [project.urls] @@ -87,6 +89,8 @@ module = [ "tiktoken.*", "torch", "transformers.*", + "lark.*", + "regex.*", ] ignore_missing_imports = true diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py new file mode 100644 index 00000000..2e69b3cb --- /dev/null +++ b/tests/text/test_parsing.py @@ -0,0 +1,113 @@ +from lark import Lark +from lark.indenter import DedentError +from lark.lexer import UnexpectedCharacters, UnexpectedToken + +from outlines.text.parsing import PartialPythonIndenter, copy_parser_state, parse_to_end + + +def test_parse_to_end(): + pyparser = Lark.open_from_package( + "lark", + "python.lark", + ["grammars"], + parser="lalr", + postlex=PartialPythonIndenter(), + start="file_input", + ) + + ip = pyparser.parse_interactive("x") + parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + assert not parser_state.value_stack + assert expected_next_tokens == {"NAME"} + + ip = pyparser.parse_interactive("x = '") + parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + assert parser_state.value_stack[-1].type == "EQUAL" + assert expected_next_tokens == {"LONG_STRING", "STRING"} + + ip = pyparser.parse_interactive("x = 'hi") + parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + assert parser_state.value_stack[-1].type == "EQUAL" + assert expected_next_tokens == {"STRING"} + + ip = pyparser.parse_interactive("x = ('hi") + parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + assert parser_state.value_stack[-1].type == "LPAR" + assert expected_next_tokens == {"STRING"} + + ip = pyparser.parse_interactive("def") + parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + assert not parser_state.value_stack + assert expected_next_tokens == {"NAME", "DEF"} + + # Now, try something incremental + parser_state = copy_parser_state(parser_state) + last_lexer_state = parser_state.lexer.state + last_lexer_state.text = "def blah()" + + 
(parser_state, expected_next_tokens) = parse_to_end(parser_state) + + last_lexer_state = parser_state.lexer.state + last_valid_token = last_lexer_state.last_token + assert last_valid_token.type == "RPAR" + assert not expected_next_tokens + + +def test_sequential_parse_example(): + input_tokens = [ + "x ", + "= ", + "1", + "\nde", + "f ", + "foo(", + "x)", + ":\n", + " ", + " return x", + " + 1", + "\n", + "z ", + "= ", + "foo(", + '"hi' '")', + ] + vocab = set(input_tokens) + + pyparser = Lark.open_from_package( + "lark", + "python.lark", + ["grammars"], + parser="lalr", + postlex=PartialPythonIndenter(), + start="file_input", + ) + ip = pyparser.parse_interactive("") + parser_state = ip.parser_state + + token_seq = "" + for i, token in enumerate(input_tokens): + token_seq += token + + lex_state = parser_state.lexer.state + lex_state.text = token_seq + + parser_state, partial_tokens = parse_to_end(parser_state) + + next_vocab = set() + for test_token in vocab: + ps = copy_parser_state(parser_state) + ls = ps.lexer.state + ls.text = token_seq + test_token + + try: + # TODO: The resulting states could possibly be reused? + parse_to_end(ps) + next_vocab.add(test_token) + except (UnexpectedToken, UnexpectedCharacters, DedentError): + pass + + if i + 1 < len(input_tokens): + assert input_tokens[i + 1] in next_vocab + else: + assert all(tk in next_vocab for tk in ["\n", "\nde", " ", " + 1"]) From a034c78c4779740c7f3597dff5a0ea8f95f82976 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 29 Jun 2023 19:58:46 -0500 Subject: [PATCH 166/734] Add vocabulary pre-parsing tools --- outlines/text/parsing.py | 181 ++++++++++++++++++++++++++++++++++++- pyproject.toml | 2 + tests/text/test_parsing.py | 166 +++++++++++++++++++++++++++++++++- 3 files changed, 346 insertions(+), 3 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 7b1d9cba..8126a218 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,7 +1,13 @@ +from collections import ChainMap, defaultdict from copy import copy -from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Tuple +from itertools import chain +from typing import TYPE_CHECKING, Any, DefaultDict, Dict, Iterable, Optional, Set, Tuple +import interegular import regex +from interegular.fsm import FSM, anything_else +from interegular.patterns import Unsupported +from lark import Lark from lark.exceptions import ( LexError, UnexpectedCharacters, @@ -11,11 +17,14 @@ from lark.indenter import PythonIndenter from lark.lexer import BasicLexer, LexerState, Scanner, Token from lark.parsers.lalr_interactive_parser import InteractiveParser +from lark.parsers.lalr_parser import ParserState from lark.utils import get_regexp_width if TYPE_CHECKING: from lark.lexer import LexerThread - from lark.parsers.lalr_parser import ParserState + + +PartialParseState = Tuple[str, int] class PartialTokenEOF(UnexpectedEOF): @@ -238,3 +247,171 @@ def parse_to_end(parser_state: "ParserState") -> Tuple["ParserState", Set[str]]: expected_next_tokens = e.expected return parser_state, expected_next_tokens + + +def find_partial_matches( + fsm: FSM, input_string: str +) -> Set[Tuple[Optional[int], Tuple[int, ...]]]: + """Find the states in the finite state machine `fsm` that accept `input_string`. + + Returns + ------- + A set of tuples corresponding to each valid starting state in the FSM. + The first element of each tuple contains either ``None`` or an integer + indicating the position in `input_string` at which the FSM terminated. 
The + second element is a tuple of the states visited during execution of the + FSM. + + """ + if len(input_string) == 0 or input_string[0] not in fsm.alphabet: + return set() + + trans_key = fsm.alphabet[input_string[0]] + + # TODO: We could probably memoize this easily (i.e. no need to recompute + # paths shared by different starting states) + def _partial_match(trans: int) -> Optional[Tuple[Optional[int], Tuple[int, ...]]]: + fsm_map = ChainMap({fsm.initial: trans}, fsm.map) + state = fsm.initial + accepted_states: Tuple[int, ...] = () + + for i, symbol in enumerate(input_string): + if anything_else in fsm.alphabet and symbol not in fsm.alphabet: + symbol = anything_else + + trans_key = fsm.alphabet[symbol] + + if not (state in fsm_map and trans_key in fsm_map[state]): + if state in fsm.finals: + i -= 1 + break + return None + + state = fsm_map[state][trans_key] + + accepted_states += (state,) + + terminated = state in fsm.finals + if not terminated and state == fsm.initial: + return None + + return None if not terminated else i, accepted_states + + res = set() + for s_now, trans in fsm.map.items(): + if trans_key in trans: + path = _partial_match(trans) + if path is not None: + res.add(path) + + return res + + +def terminals_to_fsms(lp: Lark) -> Dict[str, FSM]: + """Construct a ``dict`` mapping terminal symbol names to their finite state machines.""" + + symbol_names_and_fsms = {} + for terminal in lp.terminals: + pattern = interegular.parse_pattern(terminal.pattern.to_regexp()) + # TODO: Use `pyparser.terminals[0].pattern.flags`? + try: + fsm = pattern.to_fsm() + except Unsupported: + fsm = None + + symbol_names_and_fsms[terminal.name] = fsm + + return symbol_names_and_fsms + + +def map_partial_states_to_vocab( + vocabulary: Iterable[str], + terminals_to_fsms_map: Dict[str, FSM], + map_to_antecedents: bool = False, +) -> DefaultDict[PartialParseState, Set[str]]: + """Construct a map from partial parse states to the vocabulary elements that start in those states. + + Parameters + ---------- + vocabulary + The vocabulary composed of strings. + terminals_to_fsms_map + Terminal symbol names mapped to FSMs, as provided by `terminals_to_fsms`. + map_to_antecedents + When ``True``, return a map with keys that are the antecedent partial + parse states. In other words, this is a map that can be used to + determine valid next tokens given a parse state. + """ + + pstate_to_vocab = defaultdict(set) + for symbol_name, fsm in terminals_to_fsms_map.items(): + for tk in vocabulary: + for _, states in find_partial_matches(fsm, tk): + pstate_to_vocab[(symbol_name, states[0])].add(tk) + + if not map_to_antecedents: + return pstate_to_vocab + + # Partially parsed states to next/transition states (for the same terminal symbol) + ts_pstate_to_substates = dict( + chain.from_iterable( + [ + ((symbol_name, s), {(symbol_name, v) for v in ts.values()}) + for s, ts in fsm.map.items() + ] + for symbol_name, fsm in terminals_to_fsms_map.items() + ) + ) + + # Reverse the map + # TODO: We could construct this more directly. + rev_ts_pstate_to_substates = defaultdict(set) + for pstate, to_pstates in ts_pstate_to_substates.items(): + for to_pstate in to_pstates: + rev_ts_pstate_to_substates[to_pstate].add(pstate) + + # A version of `pstate_to_vocab` that is keyed on states that *transition to* + # the original keys of `pstate_to_vocab`. 
+ _pstate_to_vocab: DefaultDict[PartialParseState, Set[str]] = defaultdict(set) + for pstate, vocab in pstate_to_vocab.items(): + for next_pstate in rev_ts_pstate_to_substates[pstate]: + _pstate_to_vocab[next_pstate] |= vocab + + return _pstate_to_vocab + + +def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: + from lark.parsers.lalr_analysis import Shift + + terminals_to_states = defaultdict(set) + parse_table = lp.parser.parser.parser.parse_table + for state, tokens_to_ops in parse_table.states.items(): + for token, op in tokens_to_ops.items(): + if op[0] == Shift: + # `op[1]` is the state we shift to when `token` is observed + terminals_to_states[token].add(op[1]) + + return terminals_to_states + + +def create_pmatch_parser_states( + lp: Lark, + terminals_to_states: Dict[str, Set[int]], + term_type: str, + ptoken: str, + pmatch: Tuple[int, Tuple[int, ...]], +) -> Tuple[ParserState, ...]: + from lark import Token + from lark.parsers.lalr_parser import ParseConf, ParserState + + parse_table = lp.parser.parser.parser.parse_table + parse_conf = ParseConf(parse_table, lp._callbacks, lp.options.start[0]) + lexer_thread = lp.parser._make_lexer_thread(ptoken) + lexer_state = lexer_thread.state + lexer_state.line_ctr.char_pos = pmatch[0] + 1 + lexer_state.last_token = Token(term_type, "") + res = tuple( + ParserState(parse_conf, lexer_thread, [state], None) + for state in terminals_to_states[term_type] + ) + return res diff --git a/pyproject.toml b/pyproject.toml index 686de1a3..1eb52a9c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ test = [ "diff-cover", "lark", "regex", + "interegular", ] [project.urls] @@ -91,6 +92,7 @@ module = [ "transformers.*", "lark.*", "regex.*", + "interegular.*", ] ignore_missing_imports = true diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 2e69b3cb..6101daec 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -1,8 +1,19 @@ +import interegular +import pytest from lark import Lark from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken -from outlines.text.parsing import PartialPythonIndenter, copy_parser_state, parse_to_end +from outlines.text.parsing import ( + PartialPythonIndenter, + copy_parser_state, + create_pmatch_parser_states, + find_partial_matches, + map_partial_states_to_vocab, + parse_to_end, + terminals_to_fsms, + terminals_to_lalr_states, +) def test_parse_to_end(): @@ -111,3 +122,156 @@ def test_sequential_parse_example(): assert input_tokens[i + 1] in next_vocab else: assert all(tk in next_vocab for tk in ["\n", "\nde", " ", " + 1"]) + + +def test_partial_match(): + name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") + name_fsm = name_pattern.to_fsm() + + def_pattern = interegular.parse_pattern("def") + def_fsm = def_pattern.to_fsm() + + assert find_partial_matches(def_fsm, "def") == {(2, (1, 2, 3))} + assert find_partial_matches(def_fsm, "de") == {(None, (1, 2))} + assert find_partial_matches(def_fsm, "d") == {(None, (1,))} + assert find_partial_matches(def_fsm, "") == set() + assert find_partial_matches(def_fsm, "df") == set() + assert find_partial_matches(def_fsm, "ef") == {(1, (2, 3))} + assert find_partial_matches(def_fsm, "e") == {(None, (2,))} + assert find_partial_matches(def_fsm, "f") == {(0, (3,))} + assert find_partial_matches(def_fsm, "ef foo") == {(1, (2, 3))} + + # This string has a `DEF` token in it, but should ultimately not lex one + assert find_partial_matches(def_fsm, "defb") == {(2, (1, 2, 3))} + 
+    # `NAME` can have multiple start states for this input
+    assert find_partial_matches(name_fsm, "d") == {(0, (1,)), (0, (2,))}
+    # Not this case
+    assert find_partial_matches(name_fsm, "1d") == {(1, (2, 2))}
+
+    assert find_partial_matches(name_fsm, "blah") == {
+        (3, (1, 2, 2, 2)),
+        (3, (2, 2, 2, 2)),
+    }
+
+
+def test_partial_match_preprocessing():
+    pyparser = Lark.open_from_package(
+        "lark",
+        "python.lark",
+        ["grammars"],
+        parser="lalr",
+        postlex=PartialPythonIndenter(),
+        start="file_input",
+    )
+
+    symbol_names_and_fsms = terminals_to_fsms(pyparser)
+    test_symbols = {"DEF", "NAME", "__IGNORE_0"}
+    symbol_names_and_fsms = {
+        k: v for k, v in symbol_names_and_fsms.items() if k in test_symbols
+    }
+
+    vocabulary = {"d", "e", "ef foo", "f ", " "}
+
+    pstate_to_vocab = map_partial_states_to_vocab(
+        vocabulary, symbol_names_and_fsms, False
+    )
+
+    assert dict(pstate_to_vocab) == {
+        ("NAME", 1): {"d", "e", "ef foo", "f "},
+        ("NAME", 2): {"d", "e", "ef foo", "f "},
+        ("DEF", 1): {
+            "d",
+        },
+        ("DEF", 2): {"e", "ef foo"},
+        ("DEF", 3): {
+            "f ",
+        },
+        ("__IGNORE_0", 1): {
+            " ",
+        },
+        ("__IGNORE_0", 2): {
+            " ",
+        },
+    }
+
+    pstate_to_vocab = map_partial_states_to_vocab(
+        vocabulary, symbol_names_and_fsms, True
+    )
+
+    assert dict(pstate_to_vocab) == {
+        ("DEF", 1): {"e", "ef foo"},
+        ("DEF", 2): {
+            "f ",
+        },
+        ("DEF", 0): {
+            "d",
+        },
+        ("NAME", 1): {"d", "e", "ef foo", "f "},
+        ("NAME", 2): {"d", "e", "ef foo", "f "},
+        ("NAME", 0): {"d", "e", "ef foo", "f "},
+        ("__IGNORE_0", 1): {
+            " ",
+        },
+        ("__IGNORE_0", 2): {
+            " ",
+        },
+        ("__IGNORE_0", 0): {
+            " ",
+        },
+    }
+
+
+def test_parse_from_partial_match():
+    """Make sure we can continue parsing from an FSM-based partial match."""
+    pyparser = Lark(
+        r"""
+start: funcdef
+
+funcdef: "def" name "(" ")" ":"
+
+%ignore /[\t \f]+/  // WS
+
+!name: NAME | "match" | "case"
+NAME: /[^\W\d]\w*/
+
+    """,
+        parser="lalr",
+        postlex=PartialPythonIndenter(),
+    )
+
+    terminals_to_states = terminals_to_lalr_states(pyparser)
+    symbol_names_and_fsms = terminals_to_fsms(pyparser)
+
+    term_type = "DEF"
+    def_fsm = symbol_names_and_fsms[term_type]
+
+    # TODO FIXME: This is broken, and it's a bug in `lark`'s Python grammar!
+    # ptoken = "defx"
+
+    ptoken = "ef foo"
+    pmatch = find_partial_matches(def_fsm, ptoken)
+    first_pmatch = next(pm for pm in pmatch if pm[0] is not None)
+    (parser_state,) = create_pmatch_parser_states(
+        pyparser, terminals_to_states, term_type, ptoken, first_pmatch
+    )
+    new_parser_state, expected_next_tokens = parse_to_end(parser_state)
+    assert expected_next_tokens == {"NAME"}
+
+    ptoken = "ef foo():"
+    pmatch = find_partial_matches(def_fsm, ptoken)
+    first_pmatch = next(pm for pm in pmatch if pm[0] is not None)
+    (parser_state,) = create_pmatch_parser_states(
+        pyparser, terminals_to_states, term_type, ptoken, first_pmatch
+    )
+    new_parser_state, expected_next_tokens = parse_to_end(parser_state)
+    assert not expected_next_tokens
+
+    ptoken = "ef ("
+    pmatch = find_partial_matches(def_fsm, ptoken)
+    first_pmatch = next(pm for pm in pmatch if pm[0] is not None)
+    (parser_state,) = create_pmatch_parser_states(
+        pyparser, terminals_to_states, term_type, ptoken, first_pmatch
+    )
+    with pytest.raises(UnexpectedToken):
+        parse_to_end(parser_state)
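The partial matches asserted in these tests can be reproduced by walking an `interegular` FSM by hand. A small sketch using only the FSM attributes the patch itself relies on (`initial`, `finals`, `alphabet`, `map`):

import interegular

# Compile the regular expression for the `def` keyword into an FSM.
def_fsm = interegular.parse_pattern("def").to_fsm()

# Walk the machine character by character, as `find_partial_matches` does.
state = def_fsm.initial
for char in "de":
    state = def_fsm.map[state][def_fsm.alphabet[char]]
assert state not in def_fsm.finals  # "de" is only a partial match

state = def_fsm.initial
for char in "def":
    state = def_fsm.map[state][def_fsm.alphabet[char]]
assert state in def_fsm.finals  # "def" reaches an accepting state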
Willard" Date: Wed, 5 Jul 2023 16:15:40 -0500 Subject: [PATCH 167/734] Minor typing, imports, and name refactoring --- outlines/text/parsing.py | 26 +++++++++++++++----------- tests/text/test_parsing.py | 33 ++++++++++++++++++--------------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 8126a218..63a7a491 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -7,7 +7,7 @@ import regex from interegular.fsm import FSM, anything_else from interegular.patterns import Unsupported -from lark import Lark +from lark import Lark, Token from lark.exceptions import ( LexError, UnexpectedCharacters, @@ -15,9 +15,10 @@ UnexpectedToken, ) from lark.indenter import PythonIndenter -from lark.lexer import BasicLexer, LexerState, Scanner, Token +from lark.lexer import BasicLexer, LexerState, Scanner +from lark.parsers.lalr_analysis import Shift from lark.parsers.lalr_interactive_parser import InteractiveParser -from lark.parsers.lalr_parser import ParserState +from lark.parsers.lalr_parser import ParseConf, ParserState from lark.utils import get_regexp_width if TYPE_CHECKING: @@ -221,7 +222,7 @@ def copy_lexer_thread(lexer_thread: "LexerThread") -> "LexerThread": return res -def copy_parser_state(parser_state: "ParserState") -> "ParserState": +def copy_parser_state(parser_state: ParserState) -> ParserState: res = copy(parser_state) res.lexer = copy_lexer_thread(res.lexer) @@ -234,7 +235,7 @@ def copy_ip(ip: "InteractiveParser") -> "InteractiveParser": return res -def parse_to_end(parser_state: "ParserState") -> Tuple["ParserState", Set[str]]: +def parse_to_end(parser_state: ParserState) -> Tuple[ParserState, Set[str]]: """Continue parsing from the current parse state and return partial next tokens.""" parser_state = copy_parser_state(parser_state) @@ -381,8 +382,6 @@ def map_partial_states_to_vocab( def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: - from lark.parsers.lalr_analysis import Shift - terminals_to_states = defaultdict(set) parse_table = lp.parser.parser.parser.parse_table for state, tokens_to_ops in parse_table.states.items(): @@ -401,11 +400,16 @@ def create_pmatch_parser_states( ptoken: str, pmatch: Tuple[int, Tuple[int, ...]], ) -> Tuple[ParserState, ...]: - from lark import Token - from lark.parsers.lalr_parser import ParseConf, ParserState - parse_table = lp.parser.parser.parser.parse_table - parse_conf = ParseConf(parse_table, lp._callbacks, lp.options.start[0]) + + # TODO: We need to effectively disable the callbacks that build the + # trees, because we aren't actually parsing a valid state that can, say, + # be reduced + def noop(*args, **kwargs): + pass + + callbacks = {rule: noop for rule, cb in lp._callbacks.items()} + parse_conf = ParseConf(parse_table, callbacks, lp.options.start[0]) lexer_thread = lp.parser._make_lexer_thread(ptoken) lexer_state = lexer_thread.state lexer_state.line_ctr.char_pos = pmatch[0] + 1 diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 6101daec..17acd4b8 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -224,54 +224,57 @@ def test_partial_match_preprocessing(): def test_parse_from_partial_match(): """Make sure we can continue parsing from an FSM-based partial match.""" - pyparser = Lark( + lp = Lark( r""" start: funcdef -funcdef: "def" name "(" ")" ":" +funcdef: "def" name "(" ")" ":" attr_pattern + +attr_pattern: NAME ("." 
NAME)+ -> value %ignore /[\t \f]+/ // WS !name: NAME | "match" | "case" NAME: /[^\W\d]\w*/ + """, parser="lalr", postlex=PartialPythonIndenter(), ) - terminals_to_states = terminals_to_lalr_states(pyparser) - symbol_names_and_fsms = terminals_to_fsms(pyparser) + terminals_to_states = terminals_to_lalr_states(lp) + symbol_names_and_fsms = terminals_to_fsms(lp) term_type = "DEF" - def_fsm = symbol_names_and_fsms[term_type] + term_fsm = symbol_names_and_fsms[term_type] - # TODO FIXME: This is broken, and it's a bug in `lark`'s Python grammar! + # TODO FIXME: This is broken, and it's a bug in `lark`'s Python grammar? # ptoken = "defx" ptoken = "ef foo" - pmatch = find_partial_matches(def_fsm, ptoken) - first_pmatch = next(pm for pm in pmatch if pm[0] is not None) + pmatches = find_partial_matches(term_fsm, ptoken) + first_pmatch = next(pm for pm in pmatches if pm[0] is not None) (parser_state,) = create_pmatch_parser_states( - pyparser, terminals_to_states, term_type, ptoken, first_pmatch + lp, terminals_to_states, term_type, ptoken, first_pmatch ) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert expected_next_tokens == {"NAME"} ptoken = "ef foo():" - pmatch = find_partial_matches(def_fsm, ptoken) - first_pmatch = next(pm for pm in pmatch if pm[0] is not None) + pmatches = find_partial_matches(term_fsm, ptoken) + first_pmatch = next(pm for pm in pmatches if pm[0] is not None) (parser_state,) = create_pmatch_parser_states( - pyparser, terminals_to_states, term_type, ptoken, first_pmatch + lp, terminals_to_states, term_type, ptoken, first_pmatch ) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert not expected_next_tokens ptoken = "ef (" - pmatch = find_partial_matches(def_fsm, ptoken) - first_pmatch = next(pm for pm in pmatch if pm[0] is not None) + pmatches = find_partial_matches(term_fsm, ptoken) + first_pmatch = next(pm for pm in pmatches if pm[0] is not None) (parser_state,) = create_pmatch_parser_states( - pyparser, terminals_to_states, term_type, ptoken, first_pmatch + lp, terminals_to_states, term_type, ptoken, first_pmatch ) with pytest.raises(UnexpectedToken): parse_to_end(parser_state) From c85586069a63d66fa78a98f64ddc798926f7ea59 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Wed, 5 Jul 2023 17:08:05 -0500 Subject: [PATCH 168/734] Make map_partial_states_to_vocab return vocab indices and filter matches --- outlines/text/parsing.py | 30 +++++++-- tests/text/test_parsing.py | 121 ++++++++++++++++++++++++++----------- 2 files changed, 109 insertions(+), 42 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 63a7a491..385bc63d 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,7 +1,17 @@ from collections import ChainMap, defaultdict from copy import copy from itertools import chain -from typing import TYPE_CHECKING, Any, DefaultDict, Dict, Iterable, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + DefaultDict, + Dict, + Iterable, + Optional, + Set, + Tuple, +) import interegular import regex @@ -329,7 +339,10 @@ def map_partial_states_to_vocab( vocabulary: Iterable[str], terminals_to_fsms_map: Dict[str, FSM], map_to_antecedents: bool = False, -) -> DefaultDict[PartialParseState, Set[str]]: + partial_match_filter: Callable[ + [str, Optional[int], Tuple[int, ...]], bool + ] = lambda *args: True, +) -> DefaultDict[PartialParseState, Set[int]]: """Construct a map from partial parse states to the vocabulary elements that start in those states. 
Parameters @@ -342,13 +355,18 @@ def map_partial_states_to_vocab( When ``True``, return a map with keys that are the antecedent partial parse states. In other words, this is a map that can be used to determine valid next tokens given a parse state. + partial_match_filter + A callable that determines which partial matches to keep. The first + argument is the string being match, the rest are the unpacked partial + match return values of `find_partial_matches`. """ pstate_to_vocab = defaultdict(set) for symbol_name, fsm in terminals_to_fsms_map.items(): - for tk in vocabulary: - for _, states in find_partial_matches(fsm, tk): - pstate_to_vocab[(symbol_name, states[0])].add(tk) + for i, vocab_string in enumerate(vocabulary): + for end_idx, state_seq in find_partial_matches(fsm, vocab_string): + if partial_match_filter(vocab_string, end_idx, state_seq): + pstate_to_vocab[(symbol_name, state_seq[0])].add(i) if not map_to_antecedents: return pstate_to_vocab @@ -373,7 +391,7 @@ def map_partial_states_to_vocab( # A version of `pstate_to_vocab` that is keyed on states that *transition to* # the original keys of `pstate_to_vocab`. - _pstate_to_vocab: DefaultDict[PartialParseState, Set[str]] = defaultdict(set) + _pstate_to_vocab: DefaultDict[PartialParseState, Set[int]] = defaultdict(set) for pstate, vocab in pstate_to_vocab.items(): for next_pstate in rev_ts_pstate_to_substates[pstate]: _pstate_to_vocab[next_pstate] |= vocab diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 17acd4b8..097d8c17 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -1,3 +1,6 @@ +import random +import re + import interegular import pytest from lark import Lark @@ -155,7 +158,7 @@ def test_partial_match(): } -def test_partial_match_preprocessing(): +def test_map_partial_states_to_vocab_python(): pyparser = Lark.open_from_package( "lark", "python.lark", @@ -171,28 +174,20 @@ def test_partial_match_preprocessing(): k: v for k, v in symbol_names_and_fsms.items() if k in test_symbols } - vocabulary = {"d", "e", "ef foo", "f ", " "} + vocabulary = ["d", "e", "ef foo", "f ", " "] pstate_to_vocab = map_partial_states_to_vocab( vocabulary, symbol_names_and_fsms, False ) assert dict(pstate_to_vocab) == { - ("NAME", 1): {"d", "e", "ef foo", "f "}, - ("NAME", 2): {"d", "e", "ef foo", "f "}, - ("DEF", 1): { - "d", - }, - ("DEF", 2): {"e", "ef foo"}, - ("DEF", 3): { - "f ", - }, - ("__IGNORE_0", 1): { - " ", - }, - ("__IGNORE_0", 2): { - " ", - }, + ("__IGNORE_0", 2): {4}, + ("__IGNORE_0", 1): {4}, + ("NAME", 2): {0, 1, 2, 3}, + ("NAME", 1): {0, 1, 2, 3}, + ("DEF", 1): {0}, + ("DEF", 2): {1, 2}, + ("DEF", 3): {3}, } pstate_to_vocab = map_partial_states_to_vocab( @@ -200,25 +195,15 @@ def test_partial_match_preprocessing(): ) assert dict(pstate_to_vocab) == { - ("DEF", 1): {"e", "ef foo"}, - ("DEF", 2): { - "f ", - }, - ("DEF", 0): { - "d", - }, - ("NAME", 1): {"d", "e", "ef foo", "f "}, - ("NAME", 2): {"d", "e", "ef foo", "f "}, - ("NAME", 0): {"d", "e", "ef foo", "f "}, - ("__IGNORE_0", 1): { - " ", - }, - ("__IGNORE_0", 2): { - " ", - }, - ("__IGNORE_0", 0): { - " ", - }, + ("__IGNORE_0", 1): {4}, + ("__IGNORE_0", 2): {4}, + ("__IGNORE_0", 0): {4}, + ("NAME", 1): {0, 1, 2, 3}, + ("NAME", 2): {0, 1, 2, 3}, + ("NAME", 0): {0, 1, 2, 3}, + ("DEF", 0): {0}, + ("DEF", 1): {1, 2}, + ("DEF", 2): {3}, } @@ -278,3 +263,67 @@ def test_parse_from_partial_match(): ) with pytest.raises(UnexpectedToken): parse_to_end(parser_state) + + +def test_map_partial_states_to_vocab_regex(): + regex_string = 
r"(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)" + regex_pattern = interegular.parse_pattern(regex_string) + regex_fsm = regex_pattern.simplify().to_fsm() + + vocabulary = ["1.", "2", "3.", ".", ".80", "42", "1a", " ", "0", "a", "b", "$"] + + # We want the vocabulary strings to entirely match the regex--not just the + # prefixes of the vocabulary strings + def partial_match_filter(string, end_idx, state_seq): + if end_idx is not None and end_idx < len(string) - 1: + return False + return True + + pstate_to_vocab = map_partial_states_to_vocab( + vocabulary, {"FLOAT": regex_fsm}, True, partial_match_filter + ) + + assert dict(pstate_to_vocab) == { + ("FLOAT", 0): {0, 1, 2, 3, 4, 5, 8}, + ("FLOAT", 3): {0, 1, 2, 3, 4, 5, 8}, + ("FLOAT", 1): {0, 1, 2, 3, 4, 5, 8}, + ("FLOAT", 5): {1, 5, 8}, + ("FLOAT", 7): {1, 5, 8}, + ("FLOAT", 4): {1, 5, 8}, + ("FLOAT", 6): {1, 5, 8}, + ("FLOAT", 2): {1, 5, 8}, + } + + pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} + + random.seed(24080) + + # Start at the initial state + pstate = ("FLOAT", regex_fsm.initial) + + sample_seq = "" + + for i in range(10): + next_support = pstate_to_vocab[pstate] + + (next_sample_idx,) = random.sample(next_support, 1) + + next_sample = vocabulary[next_sample_idx] + sample_seq += next_sample + + # Parse the entire sampled sequence/string + # TODO: We could continue from the previous parse state, but this is + # easier for now and only for demonstration purposes. + partial_matches = find_partial_matches(regex_fsm, sample_seq) + + # Use the/a longest match + pmatch = max(partial_matches, key=lambda x: x[0] if x[0] is not None else -1) + + # Create the next state + pstate = (pstate[0], pmatch[1][-1]) + + # TODO: We could check if the FSM is done (i.e. in an final/accept + # state) and end the sampling loop + + # Make sure the whole thing matches the regex + assert re.fullmatch(regex_string, sample_seq) is not None From 293826cdec1aacd794ab9480ed4ac92f10956240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 29 Jun 2023 17:24:11 +0200 Subject: [PATCH 169/734] Use PyTorch instead of NumPy --- outlines/models/hf_transformers.py | 4 +- outlines/models/transformers.py | 38 +- outlines/text/generate/continuation.py | 13 +- outlines/text/generate/sequence.py | 120 ++++--- outlines/text/masks.py | 12 +- pyproject.toml | 2 +- tests/models/test_transformers.py | 29 +- tests/text/generate/test_continuation.py | 3 +- .../generate/test_integration_transfomers.py | 11 +- tests/text/generate/test_sequence.py | 335 +++++++++--------- tests/text/test_masks.py | 17 +- 11 files changed, 302 insertions(+), 282 deletions(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 7b875155..8df842a6 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -335,7 +335,7 @@ def create_int_constraint( import torch num_prompt_tokens = prompt_tokens.shape[-1] - mask = torch.from_numpy(create_int_mask(tokenizer.get_vocab())) + mask = create_int_mask(tokenizer.get_vocab()) def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: """Pre-process the model's output logits before generating the next token. 
@@ -373,7 +373,7 @@ def create_float_constraint( import torch num_prompt_tokens = prompt_tokens.shape[-1] - mask = torch.from_numpy(create_float_mask(tokenizer.get_vocab())) + mask = create_float_mask(tokenizer.get_vocab()) def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: """Pre-process the model's output logits before generating the next token. diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index d71272f7..d965816d 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -1,8 +1,7 @@ import math from typing import TYPE_CHECKING, List, Optional, Tuple, Union -import numpy as np -from numpy.typing import NDArray +import torch from outlines.models.tokenizer import Tokenizer @@ -27,29 +26,28 @@ def __init__( self.tokenizer = tokenizer def __call__( - self, input_ids: NDArray[np.int64], attention_mask: NDArray[np.int64] - ) -> NDArray[np.float64]: - import torch - + self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor + ) -> torch.FloatTensor: # `transformers` models only accept `input_ids` with at most two dimensions. We # thus reshape the input array, call the model and reshape the output # logits. batch_shape = input_ids.shape[:-1] num_tokens = input_ids.shape[-1] input_ids = input_ids.reshape(math.prod(batch_shape), num_tokens) + output = self.model( + input_ids, + attention_mask=attention_mask, + return_dict=True, + output_attentions=False, + output_hidden_states=False, + ) + next_token_logits = output.logits[:, -1, :] + probs = torch.nn.functional.softmax(next_token_logits, dim=-1).squeeze() - with torch.no_grad(): - input_ids = torch.from_numpy(input_ids).to(self.device) - attention_mask = torch.from_numpy(attention_mask).to(self.device) - - output = self.model(input_ids, attention_mask=attention_mask) - - next_token_logits = output.logits[:, -1, :] - probs = torch.nn.functional.softmax(next_token_logits, dim=-1).squeeze() - probs = torch.atleast_2d(probs) - numpy_probs = probs.cpu().detach().numpy() + probs = torch.atleast_2d(probs) + probs = probs.reshape(batch_shape + (-1,)) - return numpy_probs.reshape(batch_shape + (-1,)) + return probs class TransformersTokenizer(Tokenizer): @@ -72,13 +70,13 @@ def __init__(self, model_name: str, **kwargs): def encode( self, prompt: Union[str, List[str]], **kwargs - ) -> Tuple[NDArray[np.int64], NDArray[np.int64]]: + ) -> Tuple[torch.LongTensor, torch.LongTensor]: kwargs["padding"] = True - kwargs["return_tensors"] = "np" + kwargs["return_tensors"] = "pt" output = self.tokenizer(prompt, **kwargs) return output["input_ids"], output["attention_mask"] - def decode(self, token_ids: NDArray[np.int64]) -> List[str]: + def decode(self, token_ids: torch.LongTensor) -> List[str]: text = self.tokenizer.batch_decode(token_ids) return text diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index e616d3f3..1ebca7f0 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -1,7 +1,6 @@ from typing import List, Optional -import numpy as np -from numpy.typing import NDArray +import torch from outlines.text.generate.sequence import Sequence @@ -19,8 +18,11 @@ class Continuation(Sequence): def __init__(self, model, max_tokens: Optional[int]): super().__init__(model, max_tokens) + self.eos_token_id = torch.tensor( + [self.model.tokenizer.eos_token_id], device=self.device + ) - def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]: + def is_finished(self, 
token_ids: torch.LongTensor) -> torch.BoolTensor: """Determine whether the sequences have reached the maximum length or end with an EOS token. @@ -35,10 +37,7 @@ def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]: The input sequences. """ - is_finished = np.zeros((token_ids.shape[0],), dtype=np.bool_) - is_finished[token_ids[:, -1] == self.model.tokenizer.eos_token_id] = True - - return is_finished + return token_ids[:, -1] == self.model.tokenizer.eos_token_id def postprocess_completions(self, completions: List[str]) -> List[str]: """Remove the EOS token from the completion.""" diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 614297ed..5e63881a 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -1,8 +1,6 @@ from typing import List, Optional, Tuple, Union -import numpy as np -from numpy.random import Generator -from numpy.typing import NDArray +import torch class Sequence: @@ -21,9 +19,13 @@ def __init__(self, model, max_tokens: Optional[int] = None): """ self.model = model + self.device = model.device self.max_tokens = max_tokens + self.pad_token_id = torch.tensor( + model.tokenizer.pad_token_id, device=model.device + ) - def is_finished(self, token_ids: NDArray[np.int64]) -> NDArray[np.bool_]: + def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor: """Determine whether we should stop the generation.""" raise NotImplementedError( "`Sequence.is_finished` must be implemented by subclasses." @@ -34,11 +36,11 @@ def postprocess_completions(self, completions: List[str]) -> List[str]: def step( self, - rng: Generator, - token_ids: NDArray[np.int64], - attention_mask: NDArray[np.int64], + rng: torch.Generator, + token_ids: torch.LongTensor, + attention_mask: torch.LongTensor, samples: int = 1, - ) -> Tuple[NDArray[np.int64], NDArray[float]]: + ) -> Tuple[torch.LongTensor, torch.FloatTensor]: """Generate one or several tokens that complete the input sequence. The sampling step consists in using a model to generate next-token @@ -73,42 +75,48 @@ def step( next_token_ids = vectorized_random_choice(rng, probs, samples) # Add the missing `num_tokens` and `num_sample` dimensions - next_token_ids = np.expand_dims(next_token_ids, -1) - token_ids = np.expand_dims(token_ids, 0) + next_token_ids = torch.unsqueeze(next_token_ids, -1) + token_ids = torch.unsqueeze(token_ids, 0) # Expand the input `token_ids` array to be able to concatenate several # samples. if samples > 1: repetitions = (samples,) + (1,) * num_input_dims - token_ids = np.tile(token_ids, repetitions) - probs = np.tile(probs, repetitions) + token_ids = torch.tile(token_ids, repetitions) + probs = torch.tile(probs, repetitions) - token_ids = np.concatenate([token_ids, next_token_ids], axis=-1) + token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1) # Merge sample and batch dimensions by removing dimensions of length # 1. The shape of the resulting arrays is `new_batch_shape + (num_tokens,)` # and `new_batch_shape + (vocab_size,)` respectively. 
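# A quick sanity check of the squeeze/atleast_2d merge described above, with
# hypothetical shapes (samples=3, a batch of one sequence of length 5):
#   torch.ones((3, 1, 5)).squeeze().shape                     -> torch.Size([3, 5])
#   torch.atleast_2d(torch.ones((1, 1, 5)).squeeze()).shape   -> torch.Size([1, 5])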
- token_ids = np.atleast_2d(token_ids.squeeze()) - probs = np.atleast_2d(probs.squeeze()) + token_ids = torch.atleast_2d(token_ids.squeeze()) + probs = torch.atleast_2d(probs.squeeze()) return token_ids, probs def expand_attention_mask( - self, attention_mask: NDArray[np.int64] - ) -> NDArray[np.int64]: + self, attention_mask: torch.LongTensor + ) -> torch.LongTensor: """Expand the attention mask after the last completion.""" batch_shape = attention_mask.shape[:-1] - attention_mask = np.concatenate( - [attention_mask, np.broadcast_to([1], batch_shape + (1,))], axis=-1 + attention_mask = torch.concatenate( + [ + attention_mask, + torch.broadcast_to( + torch.tensor([1], device=self.device), batch_shape + (1,) + ), + ], + axis=-1, ) return attention_mask def update_token_ids( self, - is_finished: NDArray[np.bool_], - token_ids: NDArray[np.int64], - token_ids_unfinished: NDArray[np.int64], - ) -> NDArray[np.int64]: + is_finished: torch.BoolTensor, + token_ids: torch.LongTensor, + token_ids_unfinished: torch.LongTensor, + ) -> torch.LongTensor: """Update the array of token ids after the last completion. We only generate new tokens for the sequences that are not finished. We thus @@ -133,15 +141,15 @@ def update_token_ids( """ batch_shape = token_ids.shape[:-1] num_tokens = token_ids.shape[-1] - new_token_ids = np.empty(batch_shape + (num_tokens + 1,), dtype=np.int64) - - token_ids_finished = token_ids[is_finished] - batch_shape_finished = token_ids_finished.shape[:-1] - token_ids_finished = np.concatenate( + new_token_ids = torch.empty( + batch_shape + (num_tokens + 1,), dtype=torch.int64, device=self.device + ) + token_ids_finished = torch.concatenate( [ - token_ids_finished, - np.broadcast_to( - [self.model.tokenizer.pad_token_id], batch_shape_finished + (1,) + token_ids[is_finished], + torch.broadcast_to( + self.pad_token_id, + token_ids[is_finished].shape[:-1] + (1,), ), ], axis=-1, @@ -152,11 +160,12 @@ def update_token_ids( return new_token_ids + @torch.inference_mode() def __call__( self, prompt: Union[str, List[str]], samples: int = 1, - rng: Generator = np.random.default_rng(), + rng: Optional[torch.Generator] = None, ) -> Union[str, List[str]]: """Generate a new sequence given a prompt. 
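The `@torch.inference_mode()` decorator added above plays the role of the `with torch.no_grad():` block removed from the model earlier in this patch: everything executed inside `__call__` now skips autograd tracking. A minimal illustration of the decorator's effect (the function and tensors are hypothetical, not code from this patch):

import torch

@torch.inference_mode()
def generate_step(x: torch.Tensor) -> torch.Tensor:
    # No autograd graph is recorded inside an inference_mode region
    return x * 2

y = generate_step(torch.ones(3, requires_grad=True))
assert not y.requires_grad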
@@ -173,6 +182,13 @@ def __call__( """ token_ids, attention_mask = self.model.tokenizer.encode(prompt) + + token_ids = token_ids.to(self.device) + attention_mask = attention_mask.to(self.device) + + if rng is None: + rng = torch.Generator(device=self.device) + num_prompt_tokens = token_ids.shape[-1] if samples > 1: @@ -181,28 +197,23 @@ def __call__( num_batch_dims = token_ids.ndim - 1 repetitions = (samples,) + (1,) * num_batch_dims - attention_mask = np.tile(attention_mask, repetitions) + attention_mask = torch.tile(attention_mask, repetitions) attention_mask = self.expand_attention_mask(attention_mask) else: batch_shape = token_ids.shape[:-1] - is_finished = np.zeros(batch_shape, dtype=np.bool_) + is_finished = torch.zeros(batch_shape, dtype=torch.bool, device=self.device) while True: num_generated_tokens = token_ids.shape[-1] - num_prompt_tokens - if np.all(is_finished) or num_generated_tokens == self.max_tokens: + if torch.all(is_finished) or num_generated_tokens == self.max_tokens: break - token_ids_unfinished = token_ids[~is_finished] - attention_mask_unfinished = attention_mask[~is_finished] - token_ids_unfinished, _ = self.step( - rng, token_ids_unfinished, attention_mask_unfinished - ) - - token_ids = self.update_token_ids( - is_finished, token_ids, token_ids_unfinished + updated_token_ids, _ = self.step( + rng, token_ids[~is_finished], attention_mask[~is_finished] ) + token_ids = self.update_token_ids(is_finished, token_ids, updated_token_ids) attention_mask = self.expand_attention_mask(attention_mask) - is_finished[~is_finished] = self.is_finished(token_ids_unfinished).flatten() + is_finished[~is_finished] = self.is_finished(updated_token_ids).flatten() result = self.model.tokenizer.decode(token_ids) result = self.postprocess_completions(result) @@ -213,12 +224,9 @@ def __call__( return result -vsearchsorted = np.vectorize(np.searchsorted, otypes=[int], signature="(n),()->()") - - def vectorized_random_choice( - rng: Generator, - p: NDArray[np.float64], + rng: torch.Generator, + p: torch.FloatTensor, samples: int = 1, ): """Vectorized implementation of `np.random.choice`. @@ -228,13 +236,13 @@ def vectorized_random_choice( Note ---- - `searchsorted` might be more efficient here since the number of elements - can be quite large. + `torch.searchsorted` may be more efficient, but it is not implemented for + every backend, for instance MPS. Parameters ---------- rng - NumPy random number Generator instance + Torch random number Generator instance p An array of probability of shape `(num_probability_vectors, num_items)` that must sum to 1. @@ -247,8 +255,10 @@ def vectorized_random_choice( """ - cumsum = np.expand_dims(p.cumsum(axis=-1), 0) - rand = rng.random((samples,) + p.shape[:-1]) - idx = vsearchsorted(cumsum, rand) + cumsum = torch.unsqueeze(p.cumsum(axis=-1), 0) + rand = torch.rand( + (samples,) + p.shape[:-1] + (1,), generator=rng, device=rng.device + ) + idx = (cumsum < rand).sum(axis=-1) return idx diff --git a/outlines/text/masks.py b/outlines/text/masks.py index c5762573..035e0a6d 100644 --- a/outlines/text/masks.py +++ b/outlines/text/masks.py @@ -1,7 +1,7 @@ import re from typing import Dict, Iterable -import numpy as np +import torch __all__ = [ "create_char_set_mask", @@ -11,7 +11,7 @@ ] -def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> np.ndarray: +def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> torch.BoolTensor: """Create a token mask from a regex. 
Parameters @@ -25,7 +25,7 @@ def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> np.ndarray """ program = re.compile(regex) - mask = np.zeros(len(vocabulary), dtype=np.bool_) + mask = torch.zeros(len(vocabulary), dtype=torch.bool) for token, token_id in vocabulary.items(): if program.match(token) is not None: mask[token_id] = True @@ -33,14 +33,14 @@ def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> np.ndarray return mask -def create_int_mask(vocabulary: Dict[str, int]) -> np.ndarray: +def create_int_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: """Create a mask to generate integers.""" mask = create_mask_from_regex(vocabulary, "^[0-9]+$") return mask -def create_float_mask(vocabulary: Dict[str, int]) -> np.ndarray: +def create_float_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: """Create a mask to generate floating point numbers.""" mask = create_mask_from_regex(vocabulary, r"^(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)$") @@ -49,7 +49,7 @@ def create_float_mask(vocabulary: Dict[str, int]) -> np.ndarray: def create_char_set_mask( vocabulary: Dict[str, int], char_set: Iterable[str] -) -> np.ndarray: +) -> torch.BoolTensor: """Create a mask to only generate characters in a given set. Parameters diff --git a/pyproject.toml b/pyproject.toml index 1eb52a9c..d1bbffdb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,7 @@ dependencies = [ "pydantic", "scipy", "tenacity", + "torch", ] dynamic = ["version"] @@ -40,7 +41,6 @@ test = [ "pre-commit", "pytest", "pytest-cov", - "torch", "transformers", "coverage[toml]>=5.1", "diff-cover", diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 1d7bcb40..b5e9ae44 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -1,6 +1,5 @@ -import numpy as np import pytest -from numpy.testing import assert_array_equal +import torch from transformers.models.gpt2 import GPT2TokenizerFast from outlines.models.transformers import TransformersTokenizer, transformers @@ -17,23 +16,23 @@ def test_tokenizer(): token_ids, attention_mask = tokenizer.encode("Test") assert token_ids.ndim == 2 assert token_ids.shape[0] == 1 - assert isinstance(token_ids, np.ndarray) + assert isinstance(token_ids, torch.LongTensor) assert token_ids.shape == attention_mask.shape token_ids, attention_mask = tokenizer.encode(["Test", "Test"]) assert token_ids.ndim == 2 assert token_ids.shape[0] == 2 - assert isinstance(token_ids, np.ndarray) + assert isinstance(token_ids, torch.LongTensor) assert token_ids.shape == attention_mask.shape token_ids, attention_mask = tokenizer.encode(["Test", "A long sentence"]) assert token_ids.shape == attention_mask.shape assert attention_mask[0][0] == tokenizer.pad_token_id - text = tokenizer.decode(np.array([[0, 1, 2]])) + text = tokenizer.decode(torch.tensor([[0, 1, 2]])) isinstance(text, str) - text = tokenizer.decode(np.array([[0, 1, 2], [3, 4, 5]])) + text = tokenizer.decode(torch.tensor([[0, 1, 2], [3, 4, 5]])) isinstance(text, list) isinstance(text[0], str) isinstance(text[1], str) @@ -47,21 +46,21 @@ def test_model(): assert isinstance(model.tokenizer, TransformersTokenizer) assert model.device == "cpu" - input_ids = np.array([[0, 1, 2]]) - logits = model(input_ids, np.ones_like(input_ids)) - assert isinstance(logits, np.ndarray) + input_ids = torch.tensor([[0, 1, 2]]) + logits = model(input_ids, torch.ones_like(input_ids)) + assert logits.type() == "torch.FloatTensor" assert logits.ndim == 2 assert logits.shape[0] == 1 - 
input_ids = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) - logits = model(input_ids, np.ones_like(input_ids)) - assert isinstance(logits, np.ndarray) + input_ids = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) + logits = model(input_ids, torch.ones_like(input_ids)) + assert logits.type() == "torch.FloatTensor" assert logits.ndim == 2 assert logits.shape[0] == 3 - input_ids = np.array([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) - logits = model(input_ids, np.ones_like(input_ids)) + input_ids = torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) + logits = model(input_ids, torch.ones_like(input_ids)) assert logits.ndim == 3 assert logits.shape[0] == 2 assert logits.shape[1] == 2 - assert_array_equal(logits[0][0], logits[1][1]) + assert torch.equal(logits[0][0], logits[1][1]) diff --git a/tests/text/generate/test_continuation.py b/tests/text/generate/test_continuation.py index aaf01749..0f35cd24 100644 --- a/tests/text/generate/test_continuation.py +++ b/tests/text/generate/test_continuation.py @@ -7,11 +7,12 @@ class Tokenizer: eos_token = "<EOS>" eos_token_id = 0 - pad_token_ids = -1 + pad_token_id = -1 class Model: tokenizer = Tokenizer() + device = "cpu" def test_continuation_is_finished(): diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 55bbde96..f303500c 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -1,19 +1,20 @@ -import numpy as np +import torch import outlines.models as models from outlines.text.generate.continuation import continuation -def test_transformers_integration_completion(): - rng = np.random.default_rng(0) +def test_transformers_integration_continuation(): + rng = torch.Generator() + rng.manual_seed(10000) # Chosen so <EOS> is generated model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") - sequence = continuation(model)("prompt", rng=rng) + sequence = continuation(model)("Write a short sentence", rng=rng) assert isinstance(sequence, str) assert model.tokenizer.eos_token not in sequence - sequence = continuation(model, max_tokens=10)("prompt", rng=rng) + sequence = continuation(model, max_tokens=10)("Write a short sentence", rng=rng) assert isinstance(sequence, str) diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 9659e8d6..3d9629c5 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -1,53 +1,96 @@ from typing import Dict, List, Union -import numpy as np import pytest -from numpy.testing import assert_array_equal +import torch from outlines.text.generate.sequence import Sequence, vectorized_random_choice +class MockModel: + def __init__(self, tokenizer, logits): + self.tokenizer = tokenizer + self.logits = logits + self.iteration_idx = 0 + self.device = "cpu" + + def __call__(self, input_ids, *_): + import math + + batch_shape = input_ids.shape[:-1] + vocab_shape = (self.logits.shape[-1],) + shaped_logits = torch.tile( + self.logits[self.iteration_idx], (math.prod(batch_shape), 1) + ) + self.iteration_idx += 1 + + return shaped_logits.reshape(batch_shape + vocab_shape) + + +class MockTokenizer: + def __init__(self, vocabulary: Dict[str, int]): + self.vocabulary = vocabulary + self.pad_token_id = -1 + + def encode(self, prompts: Union[str, List[str]]): + if isinstance(prompts, str): + prompts = [prompts] + + token_ids = torch.tensor([[self.vocabulary[prompt]] 
for prompt in prompts]) + attention_mask = torch.ones_like(token_ids) + + return token_ids, attention_mask + + def decode(self, token_ids): + return token_ids + + def test_vectorized_random_choice(): - rng = np.random.default_rng(0) + rng = torch.Generator() + rng.manual_seed(0) - probs = np.array([[1, 0, 0, 0]]) + probs = torch.tensor([[1, 0, 0, 0]]) sample = vectorized_random_choice(rng, probs) assert sample.shape == (1, 1) - assert_array_equal(sample, np.zeros((1, 1))) + assert torch.equal(sample, torch.zeros((1, 1))) - probs = np.array([[1, 0, 0, 0]]) + probs = torch.tensor([[1, 0, 0, 0]]) sample = vectorized_random_choice(rng, probs, samples=3) assert sample.shape == (3, 1) - assert_array_equal(sample, np.zeros((3, 1))) + assert torch.equal(sample, torch.zeros((3, 1))) - probs = np.tile(np.array([[1, 0, 0, 0]]), (2, 1)) + probs = torch.tile(torch.tensor([[1, 0, 0, 0]]), (2, 1)) sample = vectorized_random_choice(rng, probs) assert sample.shape == (1, 2) - assert_array_equal(sample, np.zeros((1, 2))) + assert torch.equal(sample, torch.zeros((1, 2))) - probs = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) + probs = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]]) sample = vectorized_random_choice(rng, probs, samples=3) assert sample.shape == (3, 2) - assert_array_equal(sample, [[0, 1], [0, 1], [0, 1]]) + assert torch.equal(sample, torch.tensor([[0, 1], [0, 1], [0, 1]])) - probs = np.array([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) + probs = torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) sample = vectorized_random_choice(rng, probs, samples=3) assert sample.shape == (3, 2, 2) - assert_array_equal(sample, [[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 3]]]) + assert torch.equal( + sample, torch.tensor([[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 3]]]) + ) def test_sequence_error(): with pytest.raises(NotImplementedError, match="must be implemented"): - sequence = Sequence(None) - sequence.is_finished(np.array([1])) + sequence = Sequence(MockModel(MockTokenizer(None), None)) + sequence.is_finished(torch.tensor([1])) -def ModelStep(logits): +class ModelStep: """Mock model to test `Sequence.step`""" - logits = np.array([logits]) + def __init__(self, tokenizer, logits): + self.device = "cpu" + self.logits = logits + self.tokenizer = tokenizer - def call(input_ids, *_): + def __call__(self, input_ids, *_): """Call the model. 
We first repeat the logits `num_sequences` times, and then @@ -57,121 +100,130 @@ def call(input_ids, *_): import math batch_shape = input_ids.shape[:-1] - vocab_shape = (logits.shape[-1],) - shaped_logits = np.tile(logits, (math.prod(batch_shape), 1)) + vocab_shape = (self.logits.shape[-1],) + shaped_logits = torch.tile(self.logits, (math.prod(batch_shape), 1)) return shaped_logits.reshape(batch_shape + vocab_shape) - return call - def test_sequence_step(): - rng = np.random.default_rng(0) + rng = torch.Generator() + rng.manual_seed(0) - logits = np.array([0, 1, 0, 0]) - model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2]]) - token_ids, probs = sequence.step(rng, input_ids, np.ones((1, 2))) - assert_array_equal(token_ids, [[1, 2, 1]]) + input_ids = torch.tensor([[1, 2]]) + token_ids, probs = sequence.step(rng, input_ids, torch.ones((1, 2))) + assert torch.equal(token_ids, torch.tensor([[1, 2, 1]])) assert probs.shape == (1, 4) def test_sequence_step_batch(): - rng = np.random.default_rng(0) + rng = torch.Generator() + rng.manual_seed(0) - logits = np.array([0, 1, 0, 0]) - model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2], [3, 4]]) - token_ids, probs = sequence.step(rng, input_ids, np.ones((2, 2))) - assert_array_equal(token_ids, [[1, 2, 1], [3, 4, 1]]) + input_ids = torch.tensor([[1, 2], [3, 4]]) + token_ids, probs = sequence.step(rng, input_ids, torch.ones((2, 2))) + assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [3, 4, 1]])) assert probs.shape == (2, 4) def test_sequence_step_sample(): - rng = np.random.default_rng(0) + rng = torch.Generator() + rng.manual_seed(0) - logits = np.array([0, 1, 0, 0]) - model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2]]) - token_ids, probs = sequence.step(rng, input_ids, np.ones((1, 2)), samples=3) - assert_array_equal(token_ids, [[1, 2, 1], [1, 2, 1], [1, 2, 1]]) + input_ids = torch.tensor([[1, 2]]) + token_ids, probs = sequence.step(rng, input_ids, torch.ones((1, 2)), samples=3) + assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [1, 2, 1], [1, 2, 1]])) assert probs.shape == (3, 4) -def test_sequence_sample_batch(): - rng = np.random.default_rng(0) +def test_sequence_step_sample_batch(): + rng = torch.Generator() + rng.manual_seed(0) - logits = np.array([0, 1, 0, 0]) - model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2, 1], [3, 4, 1]]) - token_ids, probs = sequence.step(rng, input_ids, np.ones((2, 3)), samples=3) - assert_array_equal( + input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) + token_ids, probs = sequence.step(rng, input_ids, torch.ones((2, 3)), samples=3) + assert torch.equal( token_ids, - [ - [[1, 2, 1, 1], [3, 4, 1, 1]], - [[1, 2, 1, 1], [3, 4, 1, 1]], - [[1, 2, 1, 1], [3, 4, 1, 1]], - ], + torch.tensor( + [ + [[1, 2, 1, 1], [3, 4, 1, 1]], + [[1, 2, 1, 1], [3, 4, 1, 1]], + [[1, 2, 1, 1], [3, 4, 1, 1]], + ] + ), ) assert probs.shape == (3, 2, 4) def test_sequence_step_loop(): """Make sure that we can feed `step`'s output back as an input.""" + rng = torch.Generator() + rng.manual_seed(0) - rng = np.random.default_rng(0) - - logits = np.array([0, 1, 0, 0]) - 
model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2]]) - token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 2))) - token_ids, probs = sequence.step(rng, token_ids, np.ones((1, 3))) - assert_array_equal(token_ids, [[1, 2, 1, 1]]) + input_ids = torch.tensor([[1, 2]]) + token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 2))) + token_ids, probs = sequence.step(rng, token_ids, torch.ones((1, 3))) + assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1]])) assert probs.shape == (1, 4) - input_ids = np.array([[1, 2], [3, 4]]) - token_ids, _ = sequence.step(rng, input_ids, np.ones((2, 2))) - token_ids, probs = sequence.step(rng, token_ids, np.ones((2, 3))) - assert_array_equal(token_ids, [[1, 2, 1, 1], [3, 4, 1, 1]]) + input_ids = torch.tensor([[1, 2], [3, 4]]) + token_ids, _ = sequence.step(rng, input_ids, torch.ones((2, 2))) + token_ids, probs = sequence.step(rng, token_ids, torch.ones((2, 3))) + assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1], [3, 4, 1, 1]])) assert probs.shape == (2, 4) # The number of samples becomes the batch size at the next iteration. - input_ids = np.array([[1, 2]]) - token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 2)), samples=3) - token_ids, probs = sequence.step(rng, token_ids, np.ones((3, 3))) - assert_array_equal(token_ids, [[1, 2, 1, 1], [1, 2, 1, 1], [1, 2, 1, 1]]) + input_ids = torch.tensor([[1, 2]]) + token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 2)), samples=3) + token_ids, probs = sequence.step(rng, token_ids, torch.ones((3, 3))) + assert torch.equal( + token_ids, torch.tensor([[1, 2, 1, 1], [1, 2, 1, 1], [1, 2, 1, 1]]) + ) assert probs.shape == (3, 4) def test_sequence_step_loop_general(): - rng = np.random.default_rng(0) + rng = torch.Generator() + rng.manual_seed(0) - logits = np.array([0, 1, 0, 0]) - model = ModelStep(logits) + logits = torch.tensor([0, 1, 0, 0]) + model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) - input_ids = np.array([[1, 2, 1], [3, 4, 1]]) - token_ids, _ = sequence.step(rng, input_ids, np.ones((1, 3)), samples=3) - result, _ = sequence.step(rng, token_ids, np.ones((3, 4))) + input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) + token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 3)), samples=3) + result, _ = sequence.step(rng, token_ids, torch.ones((3, 4))) assert result.shape == (3, 2, 5) - assert_array_equal( + assert torch.equal( result, - [ - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], - ], + torch.tensor( + [ + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + ] + ), ) @@ -181,82 +233,33 @@ class TokenizerUpdateTokens: class ModelUpdateTokens: tokenizer = TokenizerUpdateTokens() + device = "cpu" def test_update_token_ids_all_unfinished(): sequence = Sequence(ModelUpdateTokens()) - previous_token_ids = np.array([[1, 1], [1, 1]]) - is_finished = np.array([False, False]) - token_ids_unfinished = np.array([[1, 1, 1], [1, 1, 1]]) + previous_token_ids = torch.tensor([[1, 1], [1, 1]]) + is_finished = torch.tensor([False, False]) + token_ids_unfinished = torch.tensor([[1, 1, 1], [1, 1, 1]]) result = sequence.update_token_ids( is_finished, previous_token_ids, token_ids_unfinished ) - assert_array_equal(result, [[1, 1, 1], [1, 1, 1]]) + assert torch.equal(result, torch.tensor([[1, 1, 1], [1, 1, 1]])) def 
test_update_token_ids_some_unfinished(): "Makes sure that the pad token is appended to finished sequences." sequence = Sequence(ModelUpdateTokens()) - previous_token_ids = np.array([[1, 1], [1, 1]]) - token_ids_unfinished = np.array([[1, 1, 1]]) - is_finished = np.array([True, False]) + previous_token_ids = torch.tensor([[1, 1], [1, 1]]) + token_ids_unfinished = torch.tensor([[1, 1, 1]]) + is_finished = torch.tensor([True, False]) result = sequence.update_token_ids( is_finished, previous_token_ids, token_ids_unfinished ) - assert_array_equal(result, [[1, 1, -1], [1, 1, 1]]) - - -@pytest.mark.xfail -def test_update_token_ids_larger_dimensions(): - sequence = Sequence(ModelUpdateTokens()) - - previous_token_ids = np.array([[1, 1], [1, 1]]) - is_finished = np.array([False, False]) - token_ids_unfinished = np.array([[1, 1, 1], [1, 1, 1]]) - result = sequence.update_token_ids( - is_finished, previous_token_ids, token_ids_unfinished - ) - assert_array_equal(result, [[1, 1, -1], [1, 1, 1]]) - - -class MockModel: - def __init__(self, tokenizer, logits): - self.tokenizer = tokenizer - self.logits = np.array(logits) - self.iteration_idx = 0 - - def __call__(self, input_ids, *_): - import math - - batch_shape = input_ids.shape[:-1] - vocab_shape = (self.logits.shape[-1],) - shaped_logits = np.tile( - self.logits[self.iteration_idx], (math.prod(batch_shape), 1) - ) - self.iteration_idx += 1 - - return shaped_logits.reshape(batch_shape + vocab_shape) - - -class MockTokenizer: - def __init__(self, vocabulary: Dict[str, int]): - self.vocabulary = vocabulary - self.pad_token_id = -1 - - def encode(self, prompts: Union[str, List[str]]): - if isinstance(prompts, str): - prompts = [prompts] - - token_ids = np.array([[self.vocabulary[prompt]] for prompt in prompts]) - attention_mask = np.ones_like(token_ids) - - return token_ids, attention_mask - - def decode(self, token_ids): - return token_ids + assert torch.equal(result, torch.tensor([[1, 1, -1], [1, 1, 1]])) def test_call_single_prompt(): @@ -269,16 +272,16 @@ def is_finished(self, token_ids): """Finish generating the sequence after two iterations""" if self.iteration_idx == 0: self.iteration_idx += 1 - return np.array([False]) + return torch.tensor([False]) else: - return np.array([True]) + return torch.tensor([True]) tokenizer = MockTokenizer({"Test": 0, "a": 1, "b": 2}) - model = MockModel(tokenizer, [[1, 0, 0], [0, 1, 0]]) + model = MockModel(tokenizer, torch.tensor([[1.0, 0, 0], [0, 1.0, 0]])) sequence = FinishAfterTwo(model) result = sequence("Test") - assert_array_equal(result, [0, 0, 1]) + assert torch.equal(result, torch.tensor([0, 0, 1])) def test_call_prompt_list(): @@ -289,7 +292,9 @@ def __init__(self, vocabulary: Dict[str, int]): def __call__(self, prompts: List[str], **_): return { - "input_ids": np.array([[self.vocabulary[prompt]] for prompt in prompts]) + "input_ids": torch.tensor( + [[self.vocabulary[prompt]] for prompt in prompts] + ) } def batch_decode(self, token_ids): @@ -307,24 +312,28 @@ def is_finished(self, token_ids): """ if self.iteration_idx == 0: self.iteration_idx += 1 - return np.array([False, False, False]) + return torch.tensor([False, False, False]) elif self.iteration_idx == 1: self.iteration_idx += 1 - return np.array([True, False, True]) + return torch.tensor([True, False, True]) else: - return np.array([True]) # We only consider the unfinished sequences + return torch.tensor([True]) # We only consider the unfinished sequences tokenizer = MockTokenizer( {"Test1": 0, "Test2": 1, "a": 2, "b": 3, "c": 4, "Test3": 5} ) 
model = MockModel( tokenizer, - [[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0]], + torch.tensor( + [[0, 0, 1.0, 0, 0, 0], [0, 0, 0, 1.0, 0, 0], [0, 0, 0, 0, 1.0, 0]] + ), ) sequence = FinishAfterThree(model) result = sequence(["Test1", "Test2", "Test3"]) - assert_array_equal(result, [[0, 2, 3, -1], [1, 2, 3, 4], [5, 2, 3, -1]]) + assert torch.equal( + result, torch.tensor([[0, 2, 3, -1], [1, 2, 3, 4], [5, 2, 3, -1]]) + ) def test_call_single_prompt_samples(): @@ -336,28 +345,28 @@ def __init__(self, model): def is_finished(self, token_ids): if self.iteration_idx == 0: self.iteration_idx += 1 - return np.array([False, False, False]) + return torch.tensor([False, False, False]) else: - return np.array([True, True, True]) + return torch.tensor([True, True, True]) tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 2, "Test": 4}) - model = MockModel(tokenizer, [[1, 0, 0, 0], [0, 1, 0, 0]]) + model = MockModel(tokenizer, torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]])) sequence = FinishAfterTwo(model) result = sequence("Test", samples=3) - assert_array_equal(result, [[4, 0, 1], [4, 0, 1], [4, 0, 1]]) + assert torch.equal(result, torch.tensor([[4, 0, 1], [4, 0, 1], [4, 0, 1]])) class FinishAfterOne(Sequence): def __init__(self, model): super().__init__(model) def is_finished(self, token_ids): - return np.array([True, True, True]) + return torch.tensor([True, True, True]) tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 3, "Test": 4}) - model = MockModel(tokenizer, [[1, 0, 0, 0], [0, 1, 0, 0]]) + model = MockModel(tokenizer, torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]])) sequence = FinishAfterOne(model) result = sequence("Test", samples=3) - assert_array_equal(result, [[4, 0], [4, 0], [4, 0]]) + assert torch.equal(result, torch.tensor([[4, 0], [4, 0], [4, 0]])) def test_call_prompt_list_samples(): @@ -370,24 +379,28 @@ def is_finished(self, token_ids): if self.iteration_idx == 0: self.iteration_idx += 1 batch_shape = token_ids.shape[:-1] - return np.zeros(batch_shape, dtype=np.bool_) + return torch.zeros(batch_shape, dtype=torch.bool) elif self.iteration_idx == 1: self.iteration_idx += 1 - return np.array( + return torch.tensor( [[True, False, True], [True, False, True], [True, False, True]] ) else: - return np.array([True, True, True]) + return torch.tensor([True, True, True]) tokenizer = MockTokenizer( {"a": 0, "b": 1, "c": 2, "Test1": 3, "Test2": 4, "Test3": 5} ) model = MockModel( - tokenizer, [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]] + tokenizer, + torch.tensor([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]]), ) sequence = FinishAfterThree(model) result = sequence(["Test1", "Test2", "Test3"], samples=3) - assert_array_equal( - result, np.tile([[3, 0, 1, -1], [4, 0, 1, 2], [5, 0, 1, -1]], (3, 1, 1)) + assert torch.equal( + result, + torch.tile( + torch.tensor([[3, 0, 1, -1], [4, 0, 1, 2], [5, 0, 1, -1]]), (3, 1, 1) + ), ) diff --git a/tests/text/test_masks.py b/tests/text/test_masks.py index 3c0dc782..2a94cf46 100644 --- a/tests/text/test_masks.py +++ b/tests/text/test_masks.py @@ -1,8 +1,7 @@ import random -import numpy as np import pytest -from numpy.testing import assert_array_equal +import torch from outlines.text.masks import create_char_set_mask, create_float_mask, create_int_mask @@ -11,7 +10,7 @@ def test_int_mask(): vocabulary = {"1": 0, "12": 1, "12a": 2, "a1": 3, "1.3": 4} mask = create_int_mask(vocabulary) - assert_array_equal(mask, np.array([True, True, False, False, False])) + assert torch.equal(mask, torch.tensor([True, True, False, False, 
False])) def test_float_mask(): @@ -28,8 +27,8 @@ def test_float_mask(): } mask = create_float_mask(vocabulary) - assert_array_equal( - mask, np.array([True, True, False, False, True, True, True, False, True]) + assert torch.equal( + mask, torch.tensor([True, True, False, False, True, True, True, False, True]) ) @@ -40,7 +39,7 @@ def test_char_set_mask(): vocabulary = {"a": 0, "ab": 1, "abc": 2, "1": 3, "1_a": 4} mask = create_char_set_mask(vocabulary, ["a", "b", "1", "_"]) - assert_array_equal(mask, np.array([True, True, False, True, True])) + assert torch.equal(mask, torch.tensor([True, True, False, True, True])) vocabulary = { "\\": 0, @@ -61,10 +60,10 @@ def test_char_set_mask(): random.shuffle(char_set) mask = create_char_set_mask(vocabulary, char_set) - assert_array_equal(mask, np.ones(12, dtype=np.bool_)) + assert torch.equal(mask, torch.ones(12, dtype=torch.bool)) mask = create_char_set_mask(vocabulary, ["a"]) - assert_array_equal(mask, np.zeros(12, dtype=np.bool_)) + assert torch.equal(mask, torch.zeros(12, dtype=torch.bool)) mask = create_char_set_mask(vocabulary, ["\n", "\r", "\t"]) - assert_array_equal(mask, np.zeros(12, dtype=np.bool_)) + assert torch.equal(mask, torch.zeros(12, dtype=torch.bool)) From 48b91ea94e9e7268e64035ece05271eee57e03aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 29 Jun 2023 13:57:49 +0200 Subject: [PATCH 170/734] Add `Integer` sequence generation method --- outlines/models/tokenizer.py | 3 +- outlines/models/transformers.py | 9 +- outlines/text/generate/__init__.py | 1 + outlines/text/generate/continuation.py | 9 +- outlines/text/generate/integer.py | 96 +++++++++++++++++++ outlines/text/generate/sequence.py | 21 +++- tests/text/generate/test_integer.py | 70 ++++++++++++++ .../generate/test_integration_transfomers.py | 47 +++++++++ tests/text/generate/test_sequence.py | 79 ++++++++++----- 9 files changed, 300 insertions(+), 35 deletions(-) create mode 100644 outlines/text/generate/integer.py create mode 100644 tests/text/generate/test_integer.py diff --git a/outlines/models/tokenizer.py b/outlines/models/tokenizer.py index 84c317dd..7aeefccd 100644 --- a/outlines/models/tokenizer.py +++ b/outlines/models/tokenizer.py @@ -1,5 +1,5 @@ from abc import abstractmethod -from typing import List, Protocol, Tuple, Union +from typing import Dict, List, Protocol, Tuple, Union import numpy as np from numpy.typing import NDArray @@ -9,6 +9,7 @@ class Tokenizer(Protocol): eos_token: str eos_token_id: int pad_token_id: int + vocabulary: Dict[str, int] @abstractmethod def encode( diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index d965816d..d639c202 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -34,6 +34,7 @@ def __call__( batch_shape = input_ids.shape[:-1] num_tokens = input_ids.shape[-1] input_ids = input_ids.reshape(math.prod(batch_shape), num_tokens) + output = self.model( input_ids, attention_mask=attention_mask, @@ -42,12 +43,10 @@ def __call__( output_hidden_states=False, ) next_token_logits = output.logits[:, -1, :] - probs = torch.nn.functional.softmax(next_token_logits, dim=-1).squeeze() - probs = torch.atleast_2d(probs) - probs = probs.reshape(batch_shape + (-1,)) + next_token_logits = next_token_logits.reshape(batch_shape + (-1,)) - return probs + return next_token_logits class TransformersTokenizer(Tokenizer): @@ -68,6 +67,8 @@ def __init__(self, model_name: str, **kwargs): self.pad_token_id = self.tokenizer.pad_token_id self.pad_token = 
self.tokenizer.pad_token + self.vocabulary = self.tokenizer.get_vocab() + def encode( self, prompt: Union[str, List[str]], **kwargs ) -> Tuple[torch.LongTensor, torch.LongTensor]: diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py index 3176b9b4..dc39d81b 100644 --- a/outlines/text/generate/__init__.py +++ b/outlines/text/generate/__init__.py @@ -1 +1,2 @@ from .continuation import continuation +from .integer import integer diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index 1ebca7f0..4141e45b 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -8,11 +8,12 @@ class Continuation(Sequence): """Represents a completion generation model. - `Completion` instances are unconstrained generation models that stop when an EOS token - has been found or when the maximum number of tokens has been reached. + `Continuation` instances are unconstrained generation models that stop when + an EOS token has been found or when the maximum number of tokens has been + reached. - >> import outlines.text as text - >> sequence = text.sequence(model)("Say something") + >>> import outlines.text as text + >>> sequence = text.generate.continuation(model)("Say something") """ diff --git a/outlines/text/generate/integer.py b/outlines/text/generate/integer.py new file mode 100644 index 00000000..f138b047 --- /dev/null +++ b/outlines/text/generate/integer.py @@ -0,0 +1,96 @@ +import math +from typing import List, Optional, Tuple + +import interegular +import torch + +from outlines.text.generate.continuation import Continuation +from outlines.text.parsing import find_partial_matches, map_partial_states_to_vocab + + +class Integer(Continuation): + """Represents an integer generation model. + + `Integer` instances are constrained generation models that only + generate integer values. Leading zeros are forbidden. EOS tokens + are only allowed after at least one digit has been generated. + + >>> import outlines.text as text + >>> sequence = text.generate.integer(model)("Return an integer between 0 and 10") + + """ + + def __init__(self, model, max_tokens: Optional[int]): + super().__init__(model, max_tokens) + + vocabulary = model.tokenizer.vocabulary + sorted_vocabulary = [ + k for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) + ] + + int_regex_string = r"(0|[1-9][0-9]*)" + int_regex_pattern = interegular.parse_pattern(int_regex_string) + self.int_regex_fsm = int_regex_pattern.simplify().to_fsm() + + def partial_match_filter(string, end_idx, state_seq): + if end_idx is not None and end_idx < len(string) - 1: + return False + return True + + pstate_to_vocab = map_partial_states_to_vocab( + list(sorted_vocabulary), + {"INT": self.int_regex_fsm}, + True, + partial_match_filter, + ) + self.pstate_to_vocab = {k: list(v) for k, v in pstate_to_vocab.items()} + + def create_proposal( + self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor + ) -> torch.DoubleTensor: + """Modify the next-token logits so that only integers can be generated. + + Parameters + ---------- + generated_token_ids + The token ids generated so far. + logits + The next-token logits. 
+ + """ + if generated_token_ids.shape[-1] > 0: + # TODO Make this work for `generated_token_ids` of arbitrary shape + sampled_sequences = self.model.tokenizer.decode(generated_token_ids) + if isinstance(sampled_sequences, str): + sampled_sequences = [sampled_sequences] + partial_matches = [ + find_partial_matches(self.int_regex_fsm, sequence) + for sequence in sampled_sequences + ] + pmatches = [ + max(partial_match, key=lambda x: x[0] if x[0] is not None else -1) + for partial_match in partial_matches + ] + self.pstates: List[Tuple[str, int]] = [ + (self.pstates[0][0], pmatch[1][-1]) for pmatch in pmatches + ] + else: + self.pstates = [ + ("INT", self.int_regex_fsm.initial) + for _ in range(generated_token_ids.shape[0]) + ] + + masks = [] + for pstate in self.pstates: + next_support = self.pstate_to_vocab[pstate] + mask = torch.full((len(self.model.tokenizer.vocabulary),), -math.inf) + mask[next_support] = 0 + masks.append(mask.unsqueeze(0)) + + mask = torch.concatenate(masks, dim=0) + + return logits + mask + + +def integer(model, max_tokens: Optional[int] = None): + return Integer(model, max_tokens) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 5e63881a..12de2ecb 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -25,6 +25,12 @@ def __init__(self, model, max_tokens: Optional[int] = None): model.tokenizer.pad_token_id, device=model.device ) + def create_proposal( + self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor + ) -> torch.DoubleTensor: + """Create a new proposal from the next-token logits.""" + return logits + def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor: """Determine whether we should stop the generation.""" raise NotImplementedError( @@ -37,6 +43,7 @@ def postprocess_completions(self, completions: List[str]) -> List[str]: def step( self, rng: torch.Generator, + num_prompt_tokens: int, token_ids: torch.LongTensor, attention_mask: torch.LongTensor, samples: int = 1, @@ -51,6 +58,8 @@ def step( ---------- rng NumPy random number Generator instance + num_prompt_tokens + The number of tokens in the prompt. token_ids The token ids passed as an input to the model, of shape `batch_shape + (num_tokens,)`, where `num_tokens` is the sequences' length. 
@@ -70,6 +79,8 @@ def step( """ num_input_dims = token_ids.ndim probs = self.model(token_ids, attention_mask) + probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs) + probs = torch.nn.functional.softmax(probs, dim=-1) # Sample `samples`-many new tokens next_token_ids = vectorized_random_choice(rng, probs, samples) @@ -192,7 +203,9 @@ def __call__( num_prompt_tokens = token_ids.shape[-1] if samples > 1: - token_ids, _ = self.step(rng, token_ids, attention_mask, samples) + token_ids, _ = self.step( + rng, num_prompt_tokens, token_ids, attention_mask, samples + ) is_finished = self.is_finished(token_ids) num_batch_dims = token_ids.ndim - 1 @@ -209,7 +222,10 @@ def __call__( break updated_token_ids, _ = self.step( - rng, token_ids[~is_finished], attention_mask[~is_finished] + rng, + num_prompt_tokens, + token_ids[~is_finished], + attention_mask[~is_finished], ) token_ids = self.update_token_ids(is_finished, token_ids, updated_token_ids) attention_mask = self.expand_attention_mask(attention_mask) @@ -254,7 +270,6 @@ def vectorized_random_choice( An array of shape `(num_samples, batch_size)` """ - cumsum = torch.unsqueeze(p.cumsum(axis=-1), 0) rand = torch.rand( (samples,) + p.shape[:-1] + (1,), generator=rng, device=rng.device diff --git a/tests/text/generate/test_integer.py b/tests/text/generate/test_integer.py new file mode 100644 index 00000000..d5ae7548 --- /dev/null +++ b/tests/text/generate/test_integer.py @@ -0,0 +1,70 @@ +import math + +import torch + +from outlines.text.generate.integer import integer + + +class Tokenizer: + eos_token = "<EOS>" + eos_token_id = 0 + pad_token_id = -1 + vocabulary = {"<EOS>": 0, "00": 1, "1": 2, "0.": 3, "431": 4, "a": 5} + tokens = list(vocabulary.keys()) + + def decode(self, token_ids): + decoded = [] + for i in range(token_ids.shape[0]): + decoded.append("".join([self.tokens[idx] for idx in token_ids[i]])) + + return decoded + + +class Model: + tokenizer = Tokenizer() + device = "cpu" + + +def test_integer_proposal(): + model = Model() + generator = integer(model) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[]]), logits) + assert torch.equal( + result, torch.tensor([[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf]]) + ) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[2]]), logits) + assert torch.equal( + result, torch.tensor([[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf]]) + ) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[4]]), logits) + assert torch.equal( + result, torch.tensor([[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf]]) + ) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[4], [2]]), logits) + assert torch.equal( + result, + torch.tensor( + [ + [-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf], + [-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf], + ] + ), + ) + + logits = torch.ones((4, len(model.tokenizer.vocabulary))) + result = generator.create_proposal(torch.tensor([[]]), logits) + assert torch.equal( + result, + torch.tile( + torch.tensor([[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf]]), + (4, 1), + ), + ) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index f303500c..307da6d4 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ 
-1,7 +1,9 @@ +import pytest import torch import outlines.models as models from outlines.text.generate.continuation import continuation +from outlines.text.generate.integer import integer def test_transformers_integration_continuation(): @@ -17,6 +19,51 @@ def test_transformers_integration_continuation(): sequence = continuation(model, max_tokens=10)("Write a short sentence", rng=rng) assert isinstance(sequence, str) + prompts = ["Write a short sentence", "And another one"] + sequence = continuation(model, max_tokens=10)(prompts, rng=rng) + assert isinstance(sequence, list) + assert len(sequence) == 2 + assert isinstance(sequence[0], str) + + +@pytest.mark.xfail +def test_transformers_integration_continuation_array_samples(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompts = ["Write a short sentence", "And another one"] + _ = continuation(model, max_tokens=10)(prompts, rng=rng, samples=3) + + +def test_transformers_integration_integer(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Write a short sentence" + sequence = integer(model, max_tokens=10)(prompt, rng=rng) + + generated = sequence[len(prompt) :] + assert generated[0] != "0" + int(generated) + + +def test_transformers_integration_integer_array(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompts = ["Give me a number", "And another one"] + sequence = integer(model, max_tokens=10)(prompts, rng=rng) + assert isinstance(sequence, list) + assert len(sequence) == 2 + int(sequence[0][len(prompts[0]) :]) + int(sequence[1][len(prompts[1]) :]) + def test_transformers_integration_with_pad_token(): model_name = "hf-internal-testing/tiny-random-XLMRobertaXLForCausalLM" diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 3d9629c5..3833a9fd 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -1,3 +1,4 @@ +import math from typing import Dict, List, Union import pytest @@ -109,13 +110,13 @@ def test_sequence_step(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, probs = sequence.step(rng, input_ids, torch.ones((1, 2))) + token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) assert torch.equal(token_ids, torch.tensor([[1, 2, 1]])) assert probs.shape == (1, 4) @@ -124,13 +125,13 @@ def test_sequence_step_batch(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, probs = sequence.step(rng, input_ids, torch.ones((2, 2))) + token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [3, 4, 1]])) assert probs.shape == (2, 4) @@ -139,12 +140,12 @@ def test_sequence_step_sample(): rng = torch.Generator() rng.manual_seed(0) - 
logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, probs = sequence.step(rng, input_ids, torch.ones((1, 2)), samples=3) + token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [1, 2, 1], [1, 2, 1]])) assert probs.shape == (3, 4) @@ -153,12 +154,12 @@ def test_sequence_step_sample_batch(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, probs = sequence.step(rng, input_ids, torch.ones((2, 3)), samples=3) + token_ids, probs = sequence.step(rng, 3, input_ids, torch.ones((2, 3)), samples=3) assert torch.equal( token_ids, torch.tensor( @@ -177,26 +178,26 @@ def test_sequence_step_loop(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 2))) - token_ids, probs = sequence.step(rng, token_ids, torch.ones((1, 3))) + token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) + token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((1, 3))) assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1]])) assert probs.shape == (1, 4) input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, _ = sequence.step(rng, input_ids, torch.ones((2, 2))) - token_ids, probs = sequence.step(rng, token_ids, torch.ones((2, 3))) + token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) + token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((2, 3))) assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1], [3, 4, 1, 1]])) assert probs.shape == (2, 4) # The number of samples becomes the batch size at the next iteration. 
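# Concretely, in the checks below (shapes taken from the test itself): step()
# with samples=3 on a (1, 2) input returns token_ids of shape (3, 3); feeding
# those back in treats the three samples as a batch, so the next step returns
# token_ids of shape (3, 4).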
input_ids = torch.tensor([[1, 2]]) - token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 2)), samples=3) - token_ids, probs = sequence.step(rng, token_ids, torch.ones((3, 3))) + token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) + token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((3, 3))) assert torch.equal( token_ids, torch.tensor([[1, 2, 1, 1], [1, 2, 1, 1], [1, 2, 1, 1]]) ) @@ -207,13 +208,13 @@ def test_sequence_step_loop_general(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([0, 1, 0, 0]) + logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, _ = sequence.step(rng, input_ids, torch.ones((1, 3)), samples=3) - result, _ = sequence.step(rng, token_ids, torch.ones((3, 4))) + token_ids, _ = sequence.step(rng, 3, input_ids, torch.ones((1, 3)), samples=3) + result, _ = sequence.step(rng, 3, token_ids, torch.ones((3, 4))) assert result.shape == (3, 2, 5) assert torch.equal( result, @@ -277,7 +278,10 @@ def is_finished(self, token_ids): return torch.tensor([True]) tokenizer = MockTokenizer({"Test": 0, "a": 1, "b": 2}) - model = MockModel(tokenizer, torch.tensor([[1.0, 0, 0], [0, 1.0, 0]])) + model = MockModel( + tokenizer, + torch.tensor([[1.0, -math.inf, -math.inf], [-math.inf, 1.0, -math.inf]]), + ) sequence = FinishAfterTwo(model) result = sequence("Test") @@ -325,7 +329,11 @@ def is_finished(self, token_ids): model = MockModel( tokenizer, torch.tensor( - [[0, 0, 1.0, 0, 0, 0], [0, 0, 0, 1.0, 0, 0], [0, 0, 0, 0, 1.0, 0]] + [ + [-math.inf, -math.inf, 1.0, -math.inf, -math.inf, -math.inf], + [-math.inf, -math.inf, -math.inf, 1.0, -math.inf, -math.inf], + [-math.inf, -math.inf, -math.inf, -math.inf, 1.0, -math.inf], + ] ), ) sequence = FinishAfterThree(model) @@ -350,7 +358,16 @@ def is_finished(self, token_ids): return torch.tensor([True, True, True]) tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 2, "Test": 4}) - model = MockModel(tokenizer, torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]])) + model = MockModel( + tokenizer, + torch.tensor( + [ + [1, -math.inf, -math.inf, -math.inf], + [-math.inf, 1, -math.inf, -math.inf], + ], + dtype=torch.double, + ), + ) sequence = FinishAfterTwo(model) result = sequence("Test", samples=3) assert torch.equal(result, torch.tensor([[4, 0, 1], [4, 0, 1], [4, 0, 1]])) @@ -363,7 +380,16 @@ def is_finished(self, token_ids): return torch.tensor([True, True, True]) tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 3, "Test": 4}) - model = MockModel(tokenizer, torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]])) + model = MockModel( + tokenizer, + torch.tensor( + [ + [1, -math.inf, -math.inf, -math.inf], + [-math.inf, 1, -math.inf, -math.inf], + ], + dtype=torch.double, + ), + ) sequence = FinishAfterOne(model) result = sequence("Test", samples=3) assert torch.equal(result, torch.tensor([[4, 0], [4, 0], [4, 0]])) @@ -393,7 +419,14 @@ def is_finished(self, token_ids): ) model = MockModel( tokenizer, - torch.tensor([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]]), + torch.tensor( + [ + [1, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf], + [-math.inf, 1, -math.inf, -math.inf, -math.inf, -math.inf], + [-math.inf, -math.inf, 1, -math.inf, -math.inf, -math.inf], + ], + dtype=torch.double, + ), ) sequence = FinishAfterThree(model) From 0ab5e2298dd2d8a94cfc08aca1da74bd2751ba10 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 12 Jul 2023 19:17:31 -0500 Subject: [PATCH 171/734] Fix float and int masks --- outlines/text/masks.py | 8 ++++---- tests/text/test_masks.py | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/outlines/text/masks.py b/outlines/text/masks.py index 035e0a6d..8dc7b3d3 100644 --- a/outlines/text/masks.py +++ b/outlines/text/masks.py @@ -34,15 +34,15 @@ def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> torch.Bool def create_int_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: - """Create a mask to generate integers.""" - mask = create_mask_from_regex(vocabulary, "^[0-9]+$") + """Create a mask to generate signed integers.""" + mask = create_mask_from_regex(vocabulary, r"^[-+]?\d+$") return mask def create_float_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: - """Create a mask to generate floating point numbers.""" - mask = create_mask_from_regex(vocabulary, r"^(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)$") + """Create a mask to generate signed floating point numbers.""" + mask = create_mask_from_regex(vocabulary, r"^[-+]?([0-9]+(\.[0-9]*)?|\.[0-9]+)$") return mask diff --git a/tests/text/test_masks.py b/tests/text/test_masks.py index 2a94cf46..36e3eff7 100644 --- a/tests/text/test_masks.py +++ b/tests/text/test_masks.py @@ -24,11 +24,13 @@ def test_float_mask(): "0.": 6, "1.2.3": 7, ".": 8, + ".0": 9, } mask = create_float_mask(vocabulary) assert torch.equal( - mask, torch.tensor([True, True, False, False, True, True, True, False, True]) + mask, + torch.tensor([True, True, False, False, True, True, True, False, False, True]), ) From 3263b008aec31ce8d26ee536e58a9eaf3d7d6bd6 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Fri, 7 Jul 2023 15:25:39 -0500 Subject: [PATCH 172/734] Add start_state option to find_partial_matches --- outlines/text/parsing.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 385bc63d..d830479d 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -261,10 +261,25 @@ def parse_to_end(parser_state: ParserState) -> Tuple[ParserState, Set[str]]: def find_partial_matches( - fsm: FSM, input_string: str + fsm: FSM, input_string: str, start_state: Optional[int] = None ) -> Set[Tuple[Optional[int], Tuple[int, ...]]]: """Find the states in the finite state machine `fsm` that accept `input_string`. + This will consider all possible states in the finite state machine (FSM) + that accept the beginning of `input_string` as starting points, unless a + specific `start_state` is provided. + + Parameters + ---------- + fsm + The finite state machine. + input_string + The string for which we generate partial matches. + start_state + A single fixed starting state to consider. For example, if this value + is set to `fsm.initial`, it attempt to read `input_string` from the + beginning of the FSM/regular expression. + Returns ------- A set of tuples corresponding to each valid starting state in the FSM. @@ -281,7 +296,9 @@ def find_partial_matches( # TODO: We could probably memoize this easily (i.e. no need to recompute # paths shared by different starting states) - def _partial_match(trans: int) -> Optional[Tuple[Optional[int], Tuple[int, ...]]]: + def _partial_match( + trans: Dict[int, int] + ) -> Optional[Tuple[Optional[int], Tuple[int, ...]]]: fsm_map = ChainMap({fsm.initial: trans}, fsm.map) state = fsm.initial accepted_states: Tuple[int, ...] 
= () @@ -309,7 +326,10 @@ def _partial_match(trans: int) -> Optional[Tuple[Optional[int], Tuple[int, ...]] return None if not terminated else i, accepted_states res = set() - for s_now, trans in fsm.map.items(): + transition_maps = ( + fsm.map.values() if start_state is None else [fsm.map[start_state]] + ) + for trans in transition_maps: if trans_key in trans: path = _partial_match(trans) if path is not None: From 1343f6677a278def916dd9acfd8b9b2bbecbc608 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Fri, 7 Jul 2023 15:26:07 -0500 Subject: [PATCH 173/734] Add final_state_string option to map_partial_states_to_vocab This allows one to add EOS transitions to the partial-parse-state-to-vocabulary maps produced by map_partial_states_to_vocab. --- outlines/text/parsing.py | 24 +++++++-- tests/text/test_parsing.py | 107 +++++++++++++++++++++++++------------ 2 files changed, 95 insertions(+), 36 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index d830479d..252b74c4 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -362,8 +362,12 @@ def map_partial_states_to_vocab( partial_match_filter: Callable[ [str, Optional[int], Tuple[int, ...]], bool ] = lambda *args: True, + final_state_string: Optional[str] = None, ) -> DefaultDict[PartialParseState, Set[int]]: - """Construct a map from partial parse states to the vocabulary elements that start in those states. + """Construct a map from partial parse states to subsets of `vocabulary`. + + The subsets of `vocabulary` consist of elements that are accepted by--or + transition to--the corresponding partial parse states. Parameters ---------- @@ -379,11 +383,19 @@ def map_partial_states_to_vocab( A callable that determines which partial matches to keep. The first argument is the string being match, the rest are the unpacked partial match return values of `find_partial_matches`. + final_state_string + A string from `vocabulary` that is to be added to all the final states + in the FSM. """ + final_state_string_idx = None + # Partial parse states to the subsets of the vocabulary that accept them pstate_to_vocab = defaultdict(set) for symbol_name, fsm in terminals_to_fsms_map.items(): for i, vocab_string in enumerate(vocabulary): + if vocab_string == final_state_string: + final_state_string_idx = i + for end_idx, state_seq in find_partial_matches(fsm, vocab_string): if partial_match_filter(vocab_string, end_idx, state_seq): pstate_to_vocab[(symbol_name, state_seq[0])].add(i) @@ -391,7 +403,7 @@ def map_partial_states_to_vocab( if not map_to_antecedents: return pstate_to_vocab - # Partially parsed states to next/transition states (for the same terminal symbol) + # Partial parse states to their valid next/transition states ts_pstate_to_substates = dict( chain.from_iterable( [ @@ -402,7 +414,7 @@ def map_partial_states_to_vocab( ) ) - # Reverse the map + # Reverse the state transitions map # TODO: We could construct this more directly. 
rev_ts_pstate_to_substates = defaultdict(set) for pstate, to_pstates in ts_pstate_to_substates.items(): @@ -416,6 +428,12 @@ def map_partial_states_to_vocab( for next_pstate in rev_ts_pstate_to_substates[pstate]: _pstate_to_vocab[next_pstate] |= vocab + if final_state_string_idx is not None: + # Allow transitions to EOS from all terminals FSM states + for symbol_name, fsm in terminals_to_fsms_map.items(): + for state in fsm.finals: + _pstate_to_vocab[(symbol_name, state)].add(final_state_string_idx) + return _pstate_to_vocab diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 097d8c17..911f95dd 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -206,6 +206,24 @@ def test_map_partial_states_to_vocab_python(): ("DEF", 2): {3}, } + vocabulary = list(vocabulary) + [""] + pstate_to_vocab = map_partial_states_to_vocab( + vocabulary, symbol_names_and_fsms, True, final_state_string="" + ) + + assert dict(pstate_to_vocab) == { + ("__IGNORE_0", 1): {4, 5}, + ("__IGNORE_0", 2): {4, 5}, + ("__IGNORE_0", 0): {4}, + ("NAME", 1): {0, 1, 2, 3, 5}, + ("NAME", 2): {0, 1, 2, 3, 5}, + ("NAME", 0): {0, 1, 2, 3}, + ("DEF", 0): {0}, + ("DEF", 1): {1, 2}, + ("DEF", 2): {3}, + ("DEF", 3): {5}, + } + def test_parse_from_partial_match(): """Make sure we can continue parsing from an FSM-based partial match.""" @@ -266,11 +284,25 @@ def test_parse_from_partial_match(): def test_map_partial_states_to_vocab_regex(): - regex_string = r"(([0-9]+)?([.]([0-9]*)?)?|[.][0-9]+)" + regex_string = r"([0-9]+([.][0-9]*)?|[.][0-9]+)" regex_pattern = interegular.parse_pattern(regex_string) regex_fsm = regex_pattern.simplify().to_fsm() - vocabulary = ["1.", "2", "3.", ".", ".80", "42", "1a", " ", "0", "a", "b", "$"] + vocabulary = [ + "1.", + "2", + "3.", + ".", + ".80", + "42", + "1a", + " ", + "0", + "a", + "b", + "$", + "", + ] # We want the vocabulary strings to entirely match the regex--not just the # prefixes of the vocabulary strings @@ -280,50 +312,59 @@ def partial_match_filter(string, end_idx, state_seq): return True pstate_to_vocab = map_partial_states_to_vocab( - vocabulary, {"FLOAT": regex_fsm}, True, partial_match_filter + vocabulary, {"FLOAT": regex_fsm}, True, partial_match_filter, "" ) - assert dict(pstate_to_vocab) == { - ("FLOAT", 0): {0, 1, 2, 3, 4, 5, 8}, - ("FLOAT", 3): {0, 1, 2, 3, 4, 5, 8}, - ("FLOAT", 1): {0, 1, 2, 3, 4, 5, 8}, - ("FLOAT", 5): {1, 5, 8}, - ("FLOAT", 7): {1, 5, 8}, - ("FLOAT", 4): {1, 5, 8}, - ("FLOAT", 6): {1, 5, 8}, - ("FLOAT", 2): {1, 5, 8}, - } + assert tuple(pstate_to_vocab.values()) == ( + {0, 1, 2, 3, 4, 5, 8}, + {0, 1, 2, 3, 4, 5, 8, 12}, + {0, 1, 2, 3, 4, 5, 8, 12}, + {1, 5, 8, 12}, + {1, 5, 8, 12}, + {1, 5, 8, 12}, + {1, 5, 8, 12}, + {1, 5, 8}, + ) pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} random.seed(24080) - # Start at the initial state - pstate = ("FLOAT", regex_fsm.initial) + for n in range(50): + # Start at the initial state + pstate = ("FLOAT", regex_fsm.initial) + + sample_seq = "" + + for i in range(5): + next_support = pstate_to_vocab[pstate] - sample_seq = "" + (next_sample_idx,) = random.sample(next_support, 1) - for i in range(10): - next_support = pstate_to_vocab[pstate] + next_sample = vocabulary[next_sample_idx] - (next_sample_idx,) = random.sample(next_support, 1) + if next_sample == "": + break - next_sample = vocabulary[next_sample_idx] - sample_seq += next_sample + sample_seq += next_sample - # Parse the entire sampled sequence/string - # TODO: We could continue from the previous parse state, 
but this is - # easier for now and only for demonstration purposes. - partial_matches = find_partial_matches(regex_fsm, sample_seq) + # Parse the entire sampled sequence/string + # TODO: We could continue from the previous parse state, but this is + # easier for now and only for demonstration purposes. + partial_matches = find_partial_matches( + regex_fsm, sample_seq, start_state=regex_fsm.initial + ) - # Use the/a longest match - pmatch = max(partial_matches, key=lambda x: x[0] if x[0] is not None else -1) + # Use the/a longest match + pmatch = max( + partial_matches, key=lambda x: x[0] if x[0] is not None else -1 + ) - # Create the next state - pstate = (pstate[0], pmatch[1][-1]) + # Create the next state + pstate = (pstate[0], pmatch[1][-1]) - # TODO: We could check if the FSM is done (i.e. in an final/accept - # state) and end the sampling loop + # TODO: We could check if the FSM is done (i.e. in an final/accept + # state) and end the sampling loop - # Make sure the whole thing matches the regex - assert re.fullmatch(regex_string, sample_seq) is not None + # Make sure the whole thing matches the regex + assert re.fullmatch(regex_string, sample_seq) is not None From 05f3f33baa42ff7362396a13c1ae419af7604e02 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Wed, 12 Jul 2023 18:13:51 -0500 Subject: [PATCH 174/734] Refactor find_partial_matches so that it returns full sequences This refactoring also removed the need for the antecedent mapping option in `map_partial_states_to_vocab`. --- outlines/text/parsing.py | 66 +++++-------------- tests/text/test_parsing.py | 128 ++++++++++++++++++------------------- 2 files changed, 80 insertions(+), 114 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 252b74c4..164ed8c5 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,6 +1,5 @@ from collections import ChainMap, defaultdict from copy import copy -from itertools import chain from typing import ( TYPE_CHECKING, Any, @@ -282,11 +281,11 @@ def find_partial_matches( Returns ------- - A set of tuples corresponding to each valid starting state in the FSM. - The first element of each tuple contains either ``None`` or an integer + A set of tuples corresponding to each valid starting state in the FSM. The + first element of each tuple contains either ``None`` or an integer indicating the position in `input_string` at which the FSM terminated. The - second element is a tuple of the states visited during execution of the - FSM. + second element is the tuple of states visited during execution of the FSM + plus the next, unvisited transition state. """ if len(input_string) == 0 or input_string[0] not in fsm.alphabet: @@ -294,11 +293,11 @@ def find_partial_matches( trans_key = fsm.alphabet[input_string[0]] - # TODO: We could probably memoize this easily (i.e. no need to recompute - # paths shared by different starting states) + # TODO: We could probably reuse parts of the computed paths when computing + # results for multiple starting points. def _partial_match( trans: Dict[int, int] - ) -> Optional[Tuple[Optional[int], Tuple[int, ...]]]: + ) -> Tuple[Optional[int], Optional[Tuple[int, ...]]]: fsm_map = ChainMap({fsm.initial: trans}, fsm.map) state = fsm.initial accepted_states: Tuple[int, ...] 
= () @@ -313,7 +312,7 @@ def _partial_match( if state in fsm.finals: i -= 1 break - return None + return None, None state = fsm_map[state][trans_key] @@ -321,19 +320,19 @@ def _partial_match( terminated = state in fsm.finals if not terminated and state == fsm.initial: - return None + return None, None return None if not terminated else i, accepted_states res = set() transition_maps = ( - fsm.map.values() if start_state is None else [fsm.map[start_state]] + fsm.map if start_state is None else {start_state: fsm.map[start_state]} ) - for trans in transition_maps: + for state, trans in transition_maps.items(): if trans_key in trans: - path = _partial_match(trans) + n_matched, path = _partial_match(trans) if path is not None: - res.add(path) + res.add((n_matched, (state,) + path)) return res @@ -346,7 +345,7 @@ def terminals_to_fsms(lp: Lark) -> Dict[str, FSM]: pattern = interegular.parse_pattern(terminal.pattern.to_regexp()) # TODO: Use `pyparser.terminals[0].pattern.flags`? try: - fsm = pattern.to_fsm() + fsm = pattern.to_fsm().reduce() except Unsupported: fsm = None @@ -358,7 +357,6 @@ def terminals_to_fsms(lp: Lark) -> Dict[str, FSM]: def map_partial_states_to_vocab( vocabulary: Iterable[str], terminals_to_fsms_map: Dict[str, FSM], - map_to_antecedents: bool = False, partial_match_filter: Callable[ [str, Optional[int], Tuple[int, ...]], bool ] = lambda *args: True, @@ -375,10 +373,6 @@ def map_partial_states_to_vocab( The vocabulary composed of strings. terminals_to_fsms_map Terminal symbol names mapped to FSMs, as provided by `terminals_to_fsms`. - map_to_antecedents - When ``True``, return a map with keys that are the antecedent partial - parse states. In other words, this is a map that can be used to - determine valid next tokens given a parse state. partial_match_filter A callable that determines which partial matches to keep. The first argument is the string being match, the rest are the unpacked partial @@ -400,41 +394,13 @@ def map_partial_states_to_vocab( if partial_match_filter(vocab_string, end_idx, state_seq): pstate_to_vocab[(symbol_name, state_seq[0])].add(i) - if not map_to_antecedents: - return pstate_to_vocab - - # Partial parse states to their valid next/transition states - ts_pstate_to_substates = dict( - chain.from_iterable( - [ - ((symbol_name, s), {(symbol_name, v) for v in ts.values()}) - for s, ts in fsm.map.items() - ] - for symbol_name, fsm in terminals_to_fsms_map.items() - ) - ) - - # Reverse the state transitions map - # TODO: We could construct this more directly. - rev_ts_pstate_to_substates = defaultdict(set) - for pstate, to_pstates in ts_pstate_to_substates.items(): - for to_pstate in to_pstates: - rev_ts_pstate_to_substates[to_pstate].add(pstate) - - # A version of `pstate_to_vocab` that is keyed on states that *transition to* - # the original keys of `pstate_to_vocab`. 
- _pstate_to_vocab: DefaultDict[PartialParseState, Set[int]] = defaultdict(set) - for pstate, vocab in pstate_to_vocab.items(): - for next_pstate in rev_ts_pstate_to_substates[pstate]: - _pstate_to_vocab[next_pstate] |= vocab - if final_state_string_idx is not None: # Allow transitions to EOS from all terminals FSM states for symbol_name, fsm in terminals_to_fsms_map.items(): for state in fsm.finals: - _pstate_to_vocab[(symbol_name, state)].add(final_state_string_idx) + pstate_to_vocab[(symbol_name, state)].add(final_state_string_idx) - return _pstate_to_vocab + return pstate_to_vocab def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 911f95dd..eb7c67d8 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -129,34 +129,54 @@ def test_sequential_parse_example(): def test_partial_match(): name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") - name_fsm = name_pattern.to_fsm() + name_fsm = name_pattern.to_fsm().reduce() + assert name_fsm.initial == 0 def_pattern = interegular.parse_pattern("def") - def_fsm = def_pattern.to_fsm() + def_fsm = def_pattern.to_fsm().reduce() + assert def_fsm.initial == 0 - assert find_partial_matches(def_fsm, "def") == {(2, (1, 2, 3))} - assert find_partial_matches(def_fsm, "de") == {(None, (1, 2))} - assert find_partial_matches(def_fsm, "d") == {(None, (1,))} + assert find_partial_matches(def_fsm, "def") == {(2, (0, 1, 2, 3))} + assert find_partial_matches(def_fsm, "de") == {(None, (0, 1, 2))} + assert find_partial_matches(def_fsm, "d") == {(None, (0, 1))} assert find_partial_matches(def_fsm, "") == set() assert find_partial_matches(def_fsm, "df") == set() - assert find_partial_matches(def_fsm, "ef") == {(1, (2, 3))} - assert find_partial_matches(def_fsm, "e") == {(None, (2,))} - assert find_partial_matches(def_fsm, "f") == {(0, (3,))} - assert find_partial_matches(def_fsm, "ef foo") == {(1, (2, 3))} + assert find_partial_matches(def_fsm, "ef") == {(1, (1, 2, 3))} + assert find_partial_matches(def_fsm, "e") == {(None, (1, 2))} + assert find_partial_matches(def_fsm, "f") == {(0, (2, 3))} + assert find_partial_matches(def_fsm, "ef foo") == {(1, (1, 2, 3))} # This string has a `DEF` token in it, but should ultimately not lex one - assert find_partial_matches(def_fsm, "defb") == {(2, (1, 2, 3))} + assert find_partial_matches(def_fsm, "defb") == {(2, (0, 1, 2, 3))} # `NAME` can have multiple start states for this input - assert find_partial_matches(name_fsm, "d") == {(0, (1,)), (0, (2,))} + assert find_partial_matches(name_fsm, "d") == { + (0, (0, 1)), + (0, (1, 1)), + } # Not this case - assert find_partial_matches(name_fsm, "1d") == {(1, (2, 2))} + assert find_partial_matches(name_fsm, "1d") == {(1, (1, 1, 1))} assert find_partial_matches(name_fsm, "blah") == { - (3, (1, 2, 2, 2)), - (3, (2, 2, 2, 2)), + (3, (0, 1, 1, 1, 1)), + (3, (1, 1, 1, 1, 1)), } + float_pattern = interegular.parse_pattern( + r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))" + ) + float_fsm = float_pattern.to_fsm().reduce() + + # XXX: It look like there's a lot of set/frozenset usage that prevents us + # from adequately reproducing the exact state sequences in this case. + # It seems to stem from `_CharGroup`s and the FSM map construction process. 
+ res = find_partial_matches(float_fsm, ".") + assert {v[0] for v in res} == {0, 0, None} + # Make sure that the terminated sequences actually end in final states + assert all(v[1][-1] in float_fsm.finals for v in res if v[0] == 0) + # Make sure that the non-terminated sequences don't end in final states + assert all(v[1][-1] not in float_fsm.finals for v in res if v[0] != 0) + def test_map_partial_states_to_vocab_python(): pyparser = Lark.open_from_package( @@ -174,54 +194,45 @@ def test_map_partial_states_to_vocab_python(): k: v for k, v in symbol_names_and_fsms.items() if k in test_symbols } - vocabulary = ["d", "e", "ef foo", "f ", " "] + assert len(symbol_names_and_fsms["DEF"].states) == 4 + assert len(symbol_names_and_fsms["NAME"].states) == 2 + assert len(symbol_names_and_fsms["__IGNORE_0"].states) == 2 - pstate_to_vocab = map_partial_states_to_vocab( - vocabulary, symbol_names_and_fsms, False - ) + vocabulary = ["d", "e", "ef foo", "f ", " ", "1d", ""] - assert dict(pstate_to_vocab) == { - ("__IGNORE_0", 2): {4}, - ("__IGNORE_0", 1): {4}, - ("NAME", 2): {0, 1, 2, 3}, - ("NAME", 1): {0, 1, 2, 3}, - ("DEF", 1): {0}, - ("DEF", 2): {1, 2}, - ("DEF", 3): {3}, - } - - pstate_to_vocab = map_partial_states_to_vocab( - vocabulary, symbol_names_and_fsms, True - ) + pstate_to_vocab = map_partial_states_to_vocab(vocabulary, symbol_names_and_fsms) assert dict(pstate_to_vocab) == { - ("__IGNORE_0", 1): {4}, - ("__IGNORE_0", 2): {4}, ("__IGNORE_0", 0): {4}, - ("NAME", 1): {0, 1, 2, 3}, - ("NAME", 2): {0, 1, 2, 3}, + ("__IGNORE_0", 1): {4}, ("NAME", 0): {0, 1, 2, 3}, + ("NAME", 1): {0, 1, 2, 3, 5}, ("DEF", 0): {0}, ("DEF", 1): {1, 2}, ("DEF", 2): {3}, } - vocabulary = list(vocabulary) + [""] pstate_to_vocab = map_partial_states_to_vocab( - vocabulary, symbol_names_and_fsms, True, final_state_string="" + vocabulary, symbol_names_and_fsms, final_state_string="" ) assert dict(pstate_to_vocab) == { - ("__IGNORE_0", 1): {4, 5}, - ("__IGNORE_0", 2): {4, 5}, - ("__IGNORE_0", 0): {4}, - ("NAME", 1): {0, 1, 2, 3, 5}, - ("NAME", 2): {0, 1, 2, 3, 5}, + ("__IGNORE_0", 0): { + 4, + }, + ("__IGNORE_0", 1): {4, 6}, ("NAME", 0): {0, 1, 2, 3}, - ("DEF", 0): {0}, + ("NAME", 1): {0, 1, 2, 3, 5, 6}, + ("DEF", 0): { + 0, + }, ("DEF", 1): {1, 2}, - ("DEF", 2): {3}, - ("DEF", 3): {5}, + ("DEF", 2): { + 3, + }, + ("DEF", 3): { + 6, + }, } @@ -286,7 +297,7 @@ def test_parse_from_partial_match(): def test_map_partial_states_to_vocab_regex(): regex_string = r"([0-9]+([.][0-9]*)?|[.][0-9]+)" regex_pattern = interegular.parse_pattern(regex_string) - regex_fsm = regex_pattern.simplify().to_fsm() + regex_fsm = regex_pattern.to_fsm().reduce() vocabulary = [ "1.", @@ -312,19 +323,15 @@ def partial_match_filter(string, end_idx, state_seq): return True pstate_to_vocab = map_partial_states_to_vocab( - vocabulary, {"FLOAT": regex_fsm}, True, partial_match_filter, "" + vocabulary, {"FLOAT": regex_fsm}, partial_match_filter, "" ) - assert tuple(pstate_to_vocab.values()) == ( - {0, 1, 2, 3, 4, 5, 8}, - {0, 1, 2, 3, 4, 5, 8, 12}, + assert sorted(pstate_to_vocab.values(), key=lambda x: -len(x)) == [ {0, 1, 2, 3, 4, 5, 8, 12}, - {1, 5, 8, 12}, - {1, 5, 8, 12}, - {1, 5, 8, 12}, + {0, 1, 2, 3, 4, 5, 8}, {1, 5, 8, 12}, {1, 5, 8}, - ) + ] pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} @@ -348,16 +355,9 @@ def partial_match_filter(string, end_idx, state_seq): sample_seq += next_sample - # Parse the entire sampled sequence/string - # TODO: We could continue from the previous parse state, but this is - # easier for now and 
only for demonstration purposes. - partial_matches = find_partial_matches( - regex_fsm, sample_seq, start_state=regex_fsm.initial - ) - - # Use the/a longest match - pmatch = max( - partial_matches, key=lambda x: x[0] if x[0] is not None else -1 + # Continue matching from where we left off + (pmatch,) = find_partial_matches( + regex_fsm, next_sample, start_state=pstate[-1] ) # Create the next state From 34bc2fb10d302cdfb21492b575a15bff925462fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 7 Jul 2023 17:42:44 +0200 Subject: [PATCH 175/734] Add `Regex` generation method --- outlines/text/generate/__init__.py | 2 +- outlines/text/generate/integer.py | 96 ----------- outlines/text/generate/regex.py | 160 ++++++++++++++++++ tests/text/generate/test_integer.py | 70 -------- .../generate/test_integration_transfomers.py | 45 ++++- tests/text/generate/test_regex.py | 122 +++++++++++++ 6 files changed, 320 insertions(+), 175 deletions(-) delete mode 100644 outlines/text/generate/integer.py create mode 100644 outlines/text/generate/regex.py delete mode 100644 tests/text/generate/test_integer.py create mode 100644 tests/text/generate/test_regex.py diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py index dc39d81b..bb752264 100644 --- a/outlines/text/generate/__init__.py +++ b/outlines/text/generate/__init__.py @@ -1,2 +1,2 @@ from .continuation import continuation -from .integer import integer +from .regex import float, integer, regex diff --git a/outlines/text/generate/integer.py b/outlines/text/generate/integer.py deleted file mode 100644 index f138b047..00000000 --- a/outlines/text/generate/integer.py +++ /dev/null @@ -1,96 +0,0 @@ -import math -from typing import List, Optional, Tuple - -import interegular -import torch - -from outlines.text.generate.continuation import Continuation -from outlines.text.parsing import find_partial_matches, map_partial_states_to_vocab - - -class Integer(Continuation): - """Represents a integer generation model. - - `Integer` instances are constrained generation models that only - generate integer values. Leading zeros are fobidden. EOS tokens - are only allowed after at least one digit has been generated. - - >>> import outlines.text as text - >>> sequence = text.generate.integer(model)("Return an integer between 0 and 10") - - """ - - def __init__(self, model, max_tokens: Optional[int]): - super().__init__(model, max_tokens) - - vocabulary = model.tokenizer.vocabulary - sorted_vocabulary = [ - k for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) - ] - - int_regex_string = r"(0|[1-9][0-9]+)" - int_regex_pattern = interegular.parse_pattern(int_regex_string) - self.int_regex_fsm = int_regex_pattern.simplify().to_fsm() - - def partial_match_filter(string, end_idx, state_seq): - if end_idx is not None and end_idx < len(string) - 1: - return False - return True - - pstate_to_vocab = map_partial_states_to_vocab( - list(sorted_vocabulary), - {"INT": self.int_regex_fsm}, - True, - partial_match_filter, - ) - self.pstate_to_vocab = {k: list(v) for k, v in pstate_to_vocab.items()} - - def create_proposal( - self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor - ) -> torch.DoubleTensor: - """Modify the next-token logits so that only integers can be generated. - - Parameters - ---------- - generated_token_ids - The token ids generated so far. - logits - The next-token logits. 
- - """ - if generated_token_ids.shape[-1] > 0: - # TODO Make this work for `generated_token_ids` of arbitrary shape - sampled_sequences = self.model.tokenizer.decode(generated_token_ids) - if isinstance(sampled_sequences, str): - sampled_sequences = [sampled_sequences] - partial_matches = [ - find_partial_matches(self.int_regex_fsm, sequence) - for sequence in sampled_sequences - ] - pmatches = [ - max(partial_match, key=lambda x: x[0] if x[0] is not None else -1) - for partial_match in partial_matches - ] - self.pstates: List[Tuple[str, int]] = [ - (self.pstates[0][0], pmatch[1][-1]) for pmatch in pmatches - ] - else: - self.pstates = [ - ("INT", self.int_regex_fsm.initial) - for _ in range(generated_token_ids.shape[0]) - ] - - masks = [] - for pstate in self.pstates: - next_support = self.pstate_to_vocab[pstate] - mask = torch.full((len(self.model.tokenizer.vocabulary),), -math.inf) - mask[next_support] = 0 - masks.append(mask.unsqueeze(0)) - - mask = torch.concatenate(masks, dim=0) - - return logits + mask - - -def integer(model, max_tokens: Optional[int] = None): - return Integer(model, max_tokens) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py new file mode 100644 index 00000000..0bcb86ec --- /dev/null +++ b/outlines/text/generate/regex.py @@ -0,0 +1,160 @@ +import math +from typing import List, Optional, Tuple + +import interegular +import torch + +from outlines.text.generate.continuation import Continuation +from outlines.text.parsing import find_partial_matches, map_partial_states_to_vocab + + +class Regex(Continuation): + """Represents a regex-based generation model. + + `Regex` instances are constrained generation models that only generate + sequences that match an input regex. We assume that the sequence can be + terminated (but not necessarily) when the finite state machine corresponding + to the regex is in an accepting state. + + >>> import outlines.text as text + >>> sequence = text.generate.regex(model, "(0|[1-9][0-9]+)")("Return an integer between 0 and 10") + + """ + + def __init__(self, model, regex_string: str, max_tokens: Optional[int]): + super().__init__(model, max_tokens) + + vocabulary = model.tokenizer.vocabulary + sorted_vocabulary = [ + k for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) + ] + + regex_pattern = interegular.parse_pattern(regex_string) + self.regex_fsm = regex_pattern.to_fsm().reduce() + + def partial_match_filter(string, end_idx, state_seq): + if end_idx is not None and end_idx < len(string) - 1: + return False + return True + + pstate_to_vocab = map_partial_states_to_vocab( + list(sorted_vocabulary), + {"REGEX": self.regex_fsm}, + partial_match_filter, + final_state_string=model.tokenizer.eos_token, + ) + + # TODO: This check might be a little too strict, because I think that + # while some states are made unreachable by a vocabulary (and will not + # be present in the following set difference), there could still be + # paths to terminal states emanating from the states that are reachable. + states_with_transition = {x[1] for x in pstate_to_vocab.keys()} + if len(self.regex_fsm.states.difference(states_with_transition)) > 0: + raise ValueError( + "The vocabulary does not allow us to build a sequence that matches the input regex" + ) + + self.pstate_to_vocab = {k: list(v) for k, v in pstate_to_vocab.items()} + # These tuples are comprised of the FSM name, last FSM state, and + # number of processed tokens. + # When an EOS is observed, the last FSM state becomes `-1`. 
+        self.pstates: List[Tuple[str, int, int]] = []
+
+    def create_proposal(
+        self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor
+    ) -> torch.DoubleTensor:
+        """Modify the next-token logits so that only sequences that match the
+        regex can be generated.
+
+        Parameters
+        ----------
+        generated_token_ids
+            The token ids generated so far.
+        logits
+            The next-token logits.
+
+        """
+
+        if len(self.pstates) == 0:
+            self.pstates = [
+                ("REGEX", self.regex_fsm.initial, 0)
+                for _ in range(generated_token_ids.shape[0])
+            ]
+
+        if generated_token_ids.shape[-1] > 0:
+            new_pstates = []
+            for token_seq, (_, last_fsm_state, last_token_idx) in zip(
+                generated_token_ids,
+                self.pstates,
+            ):
+                # Get the tokens we haven't already processed
+                readable_tokens = token_seq[last_token_idx:]
+                # excluding any EOS tokens
+                not_eos_mask = [
+                    tk != self.model.tokenizer.eos_token_id for tk in readable_tokens
+                ]
+                readable_tokens = readable_tokens[not_eos_mask]
+                if len(readable_tokens) > 0:
+                    # If we previously ended with an EOS, we shouldn't be
+                    # getting/sampling any more non-EOS tokens
+                    assert last_fsm_state > -1
+
+                    sequence = self.model.tokenizer.decode(readable_tokens)
+
+                    ((_, state_seq),) = find_partial_matches(
+                        self.regex_fsm,
+                        "".join(sequence),
+                        start_state=last_fsm_state,
+                    )
+                    pstate = (
+                        "REGEX",
+                        state_seq[-1],
+                        last_token_idx + len(sequence),
+                    )
+                else:
+                    pstate = ("REGEX", -1, last_token_idx)
+
+                new_pstates.append(pstate)
+
+            self.pstates = new_pstates
+
+        masks = []
+        for pstate in self.pstates:
+            mask = torch.full((len(self.model.tokenizer.vocabulary),), -math.inf)
+
+            if pstate[1] > -1:
+                next_support = self.pstate_to_vocab[pstate[:2]]
+            else:
+                next_support = [self.model.tokenizer.eos_token_id]
+
+            mask[next_support] = 0
+            masks.append(mask.unsqueeze(0))
+
+        mask = torch.concatenate(masks, dim=0)
+
+        return logits + mask
+
+
+def regex(model, regex_string: str, max_tokens: Optional[int] = None):
+    return Regex(model, regex_string, max_tokens)
+
+
+def integer(model, max_tokens: Optional[int] = None):
+    """Generate integers.
+
+    The regex used to constrain the generation optionally matches plus or minus
+    signs in front of the digits. Note that it does not forbid leading zeros.
+
+    """
+    return Regex(model, r"[-+]?\d+", max_tokens)
+
+
+def float(model, max_tokens: Optional[int] = None):
+    """Generate floating-point numbers.
+
+    The regex used to constrain the generation optionally matches plus or minus
+    signs, and forbids leading zeros (even if the `float` function in Python
+    allows them).
+ + """ + return Regex(model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens) diff --git a/tests/text/generate/test_integer.py b/tests/text/generate/test_integer.py deleted file mode 100644 index d5ae7548..00000000 --- a/tests/text/generate/test_integer.py +++ /dev/null @@ -1,70 +0,0 @@ -import math - -import torch - -from outlines.text.generate.integer import integer - - -class Tokenizer: - eos_token = "" - eos_token_id = 0 - pad_token_id = -1 - vocabulary = {"": 0, "00": 1, "1": 2, "0.": 3, "431": 4, "a": 5} - tokens = list(vocabulary.keys()) - - def decode(self, token_ids): - decoded = [] - for i in range(token_ids.shape[0]): - decoded.append("".join([self.tokens[idx] for idx in token_ids[i]])) - - return decoded - - -class Model: - tokenizer = Tokenizer() - device = "cpu" - - -def test_integer_proposal(): - model = Model() - generator = integer(model) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[]]), logits) - assert torch.equal( - result, torch.tensor([[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf]]) - ) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[2]]), logits) - assert torch.equal( - result, torch.tensor([[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf]]) - ) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[4]]), logits) - assert torch.equal( - result, torch.tensor([[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf]]) - ) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[4], [2]]), logits) - assert torch.equal( - result, - torch.tensor( - [ - [-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf], - [-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf], - ] - ), - ) - - logits = torch.ones((4, len(model.tokenizer.vocabulary))) - result = generator.create_proposal(torch.tensor([[]]), logits) - assert torch.equal( - result, - torch.tile( - torch.tensor([[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf]]), - (4, 1), - ), - ) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 307da6d4..5c055bc4 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -1,9 +1,10 @@ +import re + import pytest import torch import outlines.models as models -from outlines.text.generate.continuation import continuation -from outlines.text.generate.integer import integer +import outlines.text.generate as generate def test_transformers_integration_continuation(): @@ -12,15 +13,17 @@ def test_transformers_integration_continuation(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") - sequence = continuation(model)("Write a short sentence", rng=rng) + sequence = generate.continuation(model)("Write a short sentence", rng=rng) assert isinstance(sequence, str) assert model.tokenizer.eos_token not in sequence - sequence = continuation(model, max_tokens=10)("Write a short sentence", rng=rng) + sequence = generate.continuation(model, max_tokens=10)( + "Write a short sentence", rng=rng + ) assert isinstance(sequence, str) prompts = ["Write a short sentence", "And another one"] - sequence = continuation(model, max_tokens=10)(prompts, rng=rng) + sequence = generate.continuation(model, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert 
len(sequence) == 2 assert isinstance(sequence[0], str) @@ -34,7 +37,19 @@ def test_transformers_integration_continuation_array_samples(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") prompts = ["Write a short sentence", "And another one"] - _ = continuation(model, max_tokens=10)(prompts, rng=rng, samples=3) + _ = generate.continuation(model, max_tokens=10)(prompts, rng=rng, samples=3) + + +def test_transformers_various_regexes(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Write an email address" + regex_str = r"([a-z]{10})@([a-z]{5})\.([a-z]{3})" + sequence = generate.regex(model, regex_str)(prompt, rng=rng) + assert re.fullmatch(regex_str, sequence[len(prompt) :]) is not None def test_transformers_integration_integer(): @@ -44,7 +59,7 @@ def test_transformers_integration_integer(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") prompt = "Write a short sentence" - sequence = integer(model, max_tokens=10)(prompt, rng=rng) + sequence = generate.integer(model, max_tokens=10)(prompt, rng=rng) generated = sequence[len(prompt) :] assert generated[0] != 0 @@ -58,13 +73,27 @@ def test_transformers_integration_integer_array(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") prompts = ["Give me a number", "And another one"] - sequence = integer(model, max_tokens=10)(prompts, rng=rng) + sequence = generate.integer(model, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert len(sequence) == 2 int(sequence[0][len(prompts[0]) :]) int(sequence[1][len(prompts[1]) :]) +def test_transformers_integration_float(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Write a short sentence" + sequence = generate.float(model, max_tokens=10)(prompt, rng=rng) + + generated = sequence[len(prompt) :] + assert generated[0] != 0 + float(generated) + + def test_transformers_integration_with_pad_token(): model_name = "hf-internal-testing/tiny-random-XLMRobertaXLForCausalLM" model = models.transformers(model_name, device="cpu") diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py new file mode 100644 index 00000000..5d4ede3a --- /dev/null +++ b/tests/text/generate/test_regex.py @@ -0,0 +1,122 @@ +import math + +import pytest +import torch + +import outlines.text.generate as generate + + +class Tokenizer: + eos_token = "" + pad_token = None + eos_token_id = 0 + pad_token_id = -1 + vocabulary = {"": 0, "-": 1, "1": 2, "0.": 3, "431": 4, "a": 5, "A": 6} + tokens = list(vocabulary.keys()) + + def decode(self, token_ids): + decoded = [] + for i in range(token_ids.shape[0]): + decoded.append("".join([self.tokens[idx] for idx in token_ids[i]])) + + return decoded + + +class Model: + tokenizer = Tokenizer() + device = "cpu" + + +@pytest.mark.parametrize( + "regex_string, valid_first_token, proposal", + [ + ( + r"[A-Z]+", + 6, + [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0], + ), + ( + r"[a-z]+", + 5, + [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, -math.inf], + ), + ( + r"(a|A)", + 6, + [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, 1.0], + ), + (r"\d+", 
2, [-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]), + (r"\d+\.", 3, [-math.inf, -math.inf, 1.0, 1.0, 1.0, -math.inf, -math.inf]), + ], +) +def test_regex_proposal(regex_string, valid_first_token, proposal): + model = Model() + generator = generate.regex(model, regex_string) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[]]), logits) + assert torch.equal(result.squeeze(), torch.tensor(proposal)) + assert result.squeeze()[0] == -math.inf + + # The EOS token can be generated once the FSM is in an accept state + result = generator.create_proposal(torch.tensor([[valid_first_token]]), logits) + assert result.squeeze()[0] == 1 + + +def test_regex_no_valid_transition(): + model = Model() + with pytest.raises(ValueError, match="The vocabulary does not allow"): + generate.regex(model, "aw") + + +@pytest.mark.parametrize( + "input_ids, proposal", + [ + ([[]], [[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), + ([[1]], [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), + ([[4]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), + ( + [[4], [2]], + [ + [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], + [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], + ], + ), + ( + [[4, 0], [1, 2]], + [ + [1.0, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf], + [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], + ], + ), + ], +) +def test_integer_proposal(input_ids, proposal): + model = Model() + generator = generate.integer(model) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor(input_ids), logits) + assert torch.equal( + result, + torch.tensor(proposal), + ) + + +@pytest.mark.parametrize( + "input_ids, proposal", + [ + ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf]]), + ([[3]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), + ], +) +def test_float_proposal(input_ids, proposal): + model = Model() + generator = generate.float(model) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor(input_ids), logits) + assert torch.equal( + result, + torch.tensor(proposal), + ) From 0bdcc56e4d5b3de941d11fe79ff04a2b376201a1 Mon Sep 17 00:00:00 2001 From: Marc Lelarge Date: Wed, 12 Jul 2023 21:59:50 +0200 Subject: [PATCH 176/734] Fix 'Hello world' example in README --- docs/source/overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/overview.rst b/docs/source/overview.rst index 224971fe..3a084a38 100644 --- a/docs/source/overview.rst +++ b/docs/source/overview.rst @@ -18,7 +18,7 @@ Here is a simple Outlines program that highlights some of its key features: hello_world = where_from("Hello world") foobar = where_from("Foo Bar") - answer = complete([hello_world, foobar], num_samples=3, stop_at=["."]) + answer = complete([hello_world, foobar], samples=3, stop_at=["."]) - **Prompt management**. You can use functions with the ``@outlines.text.prompt`` decorator. "Prompt functions" use the `Jinja templating language `_ to render the prompt written in the docstring. We also added a few filters to help with common worflows, like building agents. Of course, for simple prompts, you can also use Python strings directly. 
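A minimal sketch of the prompt-function pattern the overview describes, assuming only what the snippet above states: the ``@outlines.text.prompt`` decorator renders the function's docstring as a Jinja template using the call's arguments. The body of `where_from` is illustrative, not taken from the overview:

    import outlines.text

    @outlines.text.prompt
    def where_from(expression):
        """What's the origin of '{{ expression }}'?"""

    # Renders the docstring template with the given argument; the result
    # can then be passed to e.g. complete([...], samples=3, stop_at=["."])
    hello_world = where_from("Hello world")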
From bfa0e943db033af042408812a491d3a9b69bfb10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 13 Jul 2023 13:18:37 +0200
Subject: [PATCH 177/734] Stop generation with `Continuation` when a specific
 string was generated

---
 outlines/text/generate/continuation.py   | 75 ++++++++++++++++---
 outlines/text/generate/regex.py          | 30 ++++++++
 outlines/text/generate/sequence.py       |  4 +-
 tests/text/generate/test_continuation.py | 75 ++++++++++++++++---
 .../generate/test_integration_transfomers.py | 10 ++-
 5 files changed, 167 insertions(+), 27 deletions(-)

diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py
index 4141e45b..a2762211 100644
--- a/outlines/text/generate/continuation.py
+++ b/outlines/text/generate/continuation.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 
 import torch
 
@@ -17,20 +17,27 @@ class Continuation(Sequence):
 
     """
 
-    def __init__(self, model, max_tokens: Optional[int]):
+    def __init__(
+        self, model, max_tokens: Optional[int] = None, stop: Union[str, List[str]] = []
+    ):
         super().__init__(model, max_tokens)
         self.eos_token_id = torch.tensor(
             [self.model.tokenizer.eos_token_id], device=self.device
         )
 
+        if isinstance(stop, str):
+            stop = [stop]
+
+        self.stop_sequences = stop
+
     def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor:
-        """Determine whether the sequences reached maximum length or end with an EOS token.
 
-        In practice, `Sequence`'s `__call__` methods only passed the `token_ids`
-        of the sequences that haven't been marked as finished already, which is
-        why we only need to look for the EOS token in the last element rather
-        than in the whole sequence.
+        We only need to look for the EOS token in the last element rather than
+        in the whole sequence. Indeed, (1) EOS is a single token and (2)
+        `Sequence`'s `__call__` method is only passed the `token_ids` of the
+        sequences that haven't been marked as finished already.
 
         Parameters
         ----------
@@ -38,15 +45,61 @@ def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor:
         The input sequences.
 
         """
-        return token_ids[:, -1] == self.model.tokenizer.eos_token_id
+
+        sequences = self.model.tokenizer.decode(token_ids)
+        contains_stop_sequence = []
+        for sequence in sequences:
+            found = False
+            for stop_str in self.stop_sequences:
+                if stop_str in sequence:
+                    found = True
+
+            contains_stop_sequence.append(found)
+
+        contains_stop_sequence = torch.tensor(contains_stop_sequence, dtype=torch.bool)
+        contains_eos = token_ids[:, -1] == self.model.tokenizer.eos_token_id
+
+        return torch.logical_or(contains_eos, contains_stop_sequence)
 
     def postprocess_completions(self, completions: List[str]) -> List[str]:
-        """Remove the EOS token from the completion."""
-        return [
+        """Remove the EOS token from the completion.
+
+        Sequences in `stop` take precedence over EOS. For instance, if
+        `stop=["\n"]` and the generated sequence is `One\nTwo`,
+        `Continuation.postprocess_completions` will return `One`.
+ + """ + completions_without_eos = [ completion.replace(self.model.tokenizer.eos_token, "") for completion in completions ] + completions_without_stop = [] + for completion in completions_without_eos: + for stop_str in self.stop_sequences: + idx = completion.rfind(stop_str) # ignore the prompt + if idx > 0: + completion = completion[:idx] + + completions_without_stop.append(completion) + + return completions_without_stop + -def continuation(model, max_tokens: Optional[int] = None): - return Continuation(model, max_tokens) +def continuation( + model, max_tokens: Optional[int] = None, *, stop: Union[str, List[str]] = [] +): + """Generate text sequences. + + Parameters + ---------- + model + The model to use to computes the next-token logits. + max_tokens + The maximum number of tokens to generate. + stop + A string or list of strings which, when generated, stops + the generation for this sequence. + + """ + return Continuation(model, max_tokens, stop) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 0bcb86ec..9da1f83c 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -135,6 +135,18 @@ def create_proposal( def regex(model, regex_string: str, max_tokens: Optional[int] = None): + """Generate text sequences that match the input regex. + + Parameters + ---------- + model + The model to use to computes the next-token logits. + regex + The regular expression generated expressions must match. + max_tokens + The maximum number of tokens to generate. + + """ return Regex(model, regex_string, max_tokens) @@ -145,6 +157,15 @@ def integer(model, max_tokens: Optional[int] = None): signs and forbids leading zeros (even if the `int` function in Python allows them). + Parameters + ---------- + model + The model to use to computes the next-token logits. + regex + The regular expression generated expressions must match. + max_tokens + The maximum number of tokens to generate. + """ return Regex(model, r"[-+]?\d+", max_tokens) @@ -156,5 +177,14 @@ def float(model, max_tokens: Optional[int] = None): signs, and forbids leading zeros (even if the `float` function in Python allows them). + Parameters + ---------- + model + The model to use to computes the next-token logits. + regex + The regular expression generated expressions must match. + max_tokens + The maximum number of tokens to generate. 
+ """ return Regex(model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 12de2ecb..b5691359 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -229,7 +229,9 @@ def __call__( ) token_ids = self.update_token_ids(is_finished, token_ids, updated_token_ids) attention_mask = self.expand_attention_mask(attention_mask) - is_finished[~is_finished] = self.is_finished(updated_token_ids).flatten() + is_finished[~is_finished] = self.is_finished( + updated_token_ids[:, num_prompt_tokens:] + ).flatten() result = self.model.tokenizer.decode(token_ids) result = self.postprocess_completions(result) diff --git a/tests/text/generate/test_continuation.py b/tests/text/generate/test_continuation.py index 0f35cd24..78a3e34a 100644 --- a/tests/text/generate/test_continuation.py +++ b/tests/text/generate/test_continuation.py @@ -1,5 +1,4 @@ -import numpy as np -from numpy.testing import assert_array_equal +import torch from outlines.text.generate.continuation import Continuation, continuation @@ -9,31 +8,34 @@ class Tokenizer: eos_token_id = 0 pad_token_id = -1 + def decode(self, token_ids): + return ["Test"] * token_ids.shape[0] + class Model: tokenizer = Tokenizer() device = "cpu" -def test_continuation_is_finished(): - model = continuation(Model(), 10) +def test_continuation_eos_is_finished(): + model = continuation(Model()) assert isinstance(model, Continuation) - token_ids = np.array([[3, 2]]) + token_ids = torch.tensor([[3, 2]]) result = model.is_finished(token_ids) - assert_array_equal(result, [False]) + assert torch.equal(result, torch.tensor([False])) - token_ids = np.array([[3, 2, 0]]) + token_ids = torch.tensor([[3, 2, 0]]) result = model.is_finished(token_ids) - assert_array_equal(result, [True]) + assert torch.equal(result, torch.tensor([True])) - token_ids = np.array([[3, 2, 1], [3, 2, 0]]) + token_ids = torch.tensor([[3, 2, 1], [3, 2, 0]]) result = model.is_finished(token_ids) - assert_array_equal(result, [False, True]) + assert torch.equal(result, torch.tensor([False, True])) - token_ids = np.array([[3, 2, 1, 0], [3, 2, 0, -1]]) + token_ids = torch.tensor([[3, 2, 1, 0], [3, 2, 0, -1]]) result = model.is_finished(token_ids) - assert_array_equal(result, [True, False]) + assert torch.equal(result, torch.tensor([True, False])) def test_continuation_postprocess(): @@ -41,3 +43,52 @@ def test_continuation_postprocess(): result = model.postprocess_completions(["Here"]) assert len(result) == 1 assert result[0] == "Here" + + +def test_continuation_stop_is_finished(): + tokenizer = Tokenizer() + tokenizer.decode = lambda x: ["finished \n", "not_finished"] + model = Model() + model.tokenizer = tokenizer + + model = continuation(model, stop=["\n"]) + + token_ids = torch.tensor([[2, 3]]) + result = model.is_finished(token_ids) + assert torch.equal(result, torch.tensor([True, False])) + + +def test_continuation_stop_postprocess(): + model = Continuation(Model(), stop="\n") + result = model.postprocess_completions(["Stop\n"]) + assert len(result) == 1 + assert result[0] == "Stop" + + model = Continuation(Model(), stop=["\n", ","]) + result = model.postprocess_completions(["Stop"]) + assert len(result) == 1 + assert result[0] == "Stop" + + result = model.postprocess_completions(["Stop\n"]) + assert len(result) == 1 + assert result[0] == "Stop" + + result = model.postprocess_completions(["Stop\naaa"]) + assert len(result) == 1 + assert result[0] == "Stop" + + result = 
model.postprocess_completions(["Stop,aa\naaa"]) + assert len(result) == 1 + assert result[0] == "Stop" + + result = model.postprocess_completions(["Stop\naa,a"]) + assert len(result) == 1 + assert result[0] == "Stop" + + result = model.postprocess_completions(["Stop\n", "Nonstop"]) + assert len(result) == 2 + assert result == ["Stop", "Nonstop"] + + result = model.postprocess_completions(["StopHere\nNoHere"]) + assert len(result) == 1 + assert result[0] == "StopHere" diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 5c055bc4..fad0c2d0 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -13,21 +13,25 @@ def test_transformers_integration_continuation(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") - sequence = generate.continuation(model)("Write a short sentence", rng=rng) + sequence = generate.continuation(model)("Write a short sentence ", rng=rng) assert isinstance(sequence, str) assert model.tokenizer.eos_token not in sequence sequence = generate.continuation(model, max_tokens=10)( - "Write a short sentence", rng=rng + "Write a short sentence ", rng=rng ) assert isinstance(sequence, str) - prompts = ["Write a short sentence", "And another one"] + prompts = ["Write a short sentence ", "And another one "] sequence = generate.continuation(model, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert len(sequence) == 2 assert isinstance(sequence[0], str) + prompt = "Write a short sentence " + sequence = generate.continuation(model, stop="a")(prompt, rng=rng) + assert sequence[len(prompt) :].find("a") == -1 + @pytest.mark.xfail def test_transformers_integration_continuation_array_samples(): From b46433494ccecc1e21dc1b119aac1ad716aab054 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Tue, 18 Jul 2023 20:08:51 -0500 Subject: [PATCH 178/734] Make sure Continuation and Regex use the correct device --- outlines/text/generate/continuation.py | 4 +++- outlines/text/generate/regex.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index a2762211..b6ff3efc 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -56,7 +56,9 @@ def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor: contains_stop_sequence.append(found) - contains_stop_sequence = torch.tensor(contains_stop_sequence, dtype=torch.bool) + contains_stop_sequence = torch.tensor( + contains_stop_sequence, dtype=torch.bool, device=self.model.device + ) contains_eos = token_ids[:, -1] == self.model.tokenizer.eos_token_id return torch.logical_or(contains_eos, contains_stop_sequence) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 9da1f83c..72408c92 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -119,7 +119,11 @@ def create_proposal( masks = [] for pstate in self.pstates: - mask = torch.full((len(self.model.tokenizer.vocabulary),), -math.inf) + mask = torch.full( + (len(self.model.tokenizer.vocabulary),), + -math.inf, + device=self.model.device, + ) if pstate[1] > -1: next_support = self.pstate_to_vocab[pstate[:2]] From 2b807535d6657cd6c67d06a163d5bdb86caee1da Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 6 Jul 2023 15:20:27 -0500 Subject: [PATCH 179/734] Generalize the Lark parse state cloning extensions --- outlines/text/parsing.py | 41 ++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 164ed8c5..665ef43e 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -24,7 +24,7 @@ UnexpectedToken, ) from lark.indenter import PythonIndenter -from lark.lexer import BasicLexer, LexerState, Scanner +from lark.lexer import BasicLexer, ContextualLexer, LexerState, Scanner from lark.parsers.lalr_analysis import Shift from lark.parsers.lalr_interactive_parser import InteractiveParser from lark.parsers.lalr_parser import ParseConf, ParserState @@ -205,29 +205,34 @@ def copy_lexer_thread(lexer_thread: "LexerThread") -> "LexerThread": res = copy(lexer_thread) res.lexer = copy(res.lexer) - if ( - res.lexer.postlexer - and isinstance(res.lexer.postlexer, PythonIndenter) - and not isinstance(res.lexer.postlexer, PartialPythonIndenter) - ): - # Patch these methods so that the post lexer keeps its state - # XXX: This won't really work in generality. - postlexer = PartialPythonIndenter() - postlexer.paren_level = res.lexer.postlexer.paren_level - postlexer.indent_level = res.lexer.postlexer.indent_level - res.lexer.postlexer = postlexer + if getattr(res.lexer, "postlexer", None): + if isinstance(res.lexer.postlexer, PythonIndenter) and not isinstance( + res.lexer.postlexer, PartialPythonIndenter + ): + # Patch these methods so that the post lexer keeps its state + # XXX: This won't really work in generality. + postlexer = PartialPythonIndenter() + postlexer.paren_level = res.lexer.postlexer.paren_level + postlexer.indent_level = res.lexer.postlexer.indent_level + res.lexer.postlexer = postlexer + else: + res.lexer.postlexer = copy(res.lexer.postlexer) # Patch/replace the lexer objects so that they support partial matches - lexer = res.lexer.lexer - if not isinstance(lexer.root_lexer, PartialBasicLexer): - lexer.root_lexer = PartialBasicLexer(lexer.root_lexer) + context_lexer = res.lexer + + if not isinstance(context_lexer, ContextualLexer): + # XXX: The layouts change with the grammars + context_lexer = context_lexer.lexer + assert isinstance(context_lexer, ContextualLexer) - basic_lexers = res.lexer.lexer.lexers + if not isinstance(context_lexer.root_lexer, PartialBasicLexer): + context_lexer.root_lexer = PartialBasicLexer(context_lexer.root_lexer) + + basic_lexers = context_lexer.lexers for idx, lexer in basic_lexers.items(): basic_lexers[idx] = PartialBasicLexer(lexer) - res.lexer.postlexer = copy(res.lexer.postlexer) - return res From 580d1137c35014c40d9eeaae2b2aaae3922613bb Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 6 Jul 2023 15:20:49 -0500 Subject: [PATCH 180/734] Extend the parsing example to include SQL-guided generation --- examples/parsing.py | 49 ++++++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/examples/parsing.py b/examples/parsing.py index 3f070c47..de8efb60 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -1,6 +1,7 @@ """An example illustrating parser-based masking.""" import math import time +import urllib.request import torch from lark import Lark @@ -26,23 +27,33 @@ checkpoint, trust_remote_code=True, revision=revision ).to(device) -input_text = "def " -inputs = tokenizer.encode(input_text, return_tensors="pt").to(device) +sql_grammar_url = "https://github.com/zbrookle/sql_to_ibis/raw/0e9226da42065940ce21439d490f9fcacadc7f92/sql_to_ibis/grammar/sql.lark" +sql_grammar = "".join( + [line.decode("utf-8") for line in urllib.request.urlopen(sql_grammar_url)] +) +with open("sql_grammar.lark", "w") as f: + f.write(sql_grammar) + +sqlparser = Lark.open( + "sql_grammar.lark", + parser="lalr", +) + +pyparser = Lark.open_from_package( + "lark", + "python.lark", + ["grammars"], + parser="lalr", + postlex=PartialPythonIndenter(), + start="file_input", +) class ParserLogitsProcessor(LogitsProcessor): """Bias invalid token scores according to a running parse state.""" - def __init__(self): - pyparser = Lark.open_from_package( - "lark", - "python.lark", - ["grammars"], - parser="lalr", - postlex=PartialPythonIndenter(), - start="file_input", - ) - ip = pyparser.parse_interactive("") + def __init__(self, parser): + ip = parser.parse_interactive("") self.parser_state = ip.parser_state self.states_stack = [self.parser_state] self.token_seq = None @@ -64,10 +75,8 @@ def __call__( self.parser_state, partial_tokens = parse_to_end(self.parser_state) - print("Parsed:\n") - print(self.token_seq) - - print(partial_tokens) + print(f'parsed:"{self.token_seq}"') + print(f"partial_tokens: {partial_tokens}") mask = torch.full_like(scores, -math.inf) @@ -90,18 +99,22 @@ def __call__( except (UnexpectedToken, UnexpectedCharacters, DedentError): pass - print(f"Next token masking duration: {time.perf_counter() - t0}") + print(f"next token masking duration: {time.perf_counter() - t0}") return scores + mask set_seed(20399) +parser = sqlparser +input_text = "select " +inputs = tokenizer.encode(input_text, return_tensors="pt").to(device) + outputs = model.generate( inputs, max_length=100, temperature=0.1, - logits_processor=LogitsProcessorList([ParserLogitsProcessor()]), + logits_processor=LogitsProcessorList([ParserLogitsProcessor(parser)]), renormalize_logits=True, ) From bb9617915466caff513d8d463bc6915acf8a32bd Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 6 Jul 2023 17:37:38 -0500 Subject: [PATCH 181/734] Require cloning and patching before calling parse_to_end This just makes it easier to cut down on unnecessary copying. 
--- outlines/text/parsing.py | 9 +++++++-- tests/text/test_parsing.py | 22 ++++++++++++++++------ 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 665ef43e..c394e1fa 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -250,9 +250,14 @@ def copy_ip(ip: "InteractiveParser") -> "InteractiveParser": def parse_to_end(parser_state: ParserState) -> Tuple[ParserState, Set[str]]: - """Continue parsing from the current parse state and return partial next tokens.""" + """Continue parsing from the current parse state and return partial next tokens. - parser_state = copy_parser_state(parser_state) + .. warning:: + The parse state `parser_state` is updated in-place and must be patched + to work with this function. Either patch it manually or use + `copy_parser_state` before calling this. + + """ expected_next_tokens: Set[str] = set() try: diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index eb7c67d8..d8ee52ad 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -30,27 +30,32 @@ def test_parse_to_end(): ) ip = pyparser.parse_interactive("x") - parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + parser_state = copy_parser_state(ip.parser_state) + parser_state, expected_next_tokens = parse_to_end(parser_state) assert not parser_state.value_stack assert expected_next_tokens == {"NAME"} ip = pyparser.parse_interactive("x = '") - parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + parser_state = copy_parser_state(ip.parser_state) + parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "EQUAL" assert expected_next_tokens == {"LONG_STRING", "STRING"} ip = pyparser.parse_interactive("x = 'hi") - parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + parser_state = copy_parser_state(ip.parser_state) + parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "EQUAL" assert expected_next_tokens == {"STRING"} ip = pyparser.parse_interactive("x = ('hi") - parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + parser_state = copy_parser_state(ip.parser_state) + parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "LPAR" assert expected_next_tokens == {"STRING"} ip = pyparser.parse_interactive("def") - parser_state, expected_next_tokens = parse_to_end(ip.parser_state) + parser_state = copy_parser_state(ip.parser_state) + parser_state, expected_next_tokens = parse_to_end(parser_state) assert not parser_state.value_stack assert expected_next_tokens == {"NAME", "DEF"} @@ -97,7 +102,7 @@ def test_sequential_parse_example(): start="file_input", ) ip = pyparser.parse_interactive("") - parser_state = ip.parser_state + parser_state = copy_parser_state(ip.parser_state) token_seq = "" for i, token in enumerate(input_tokens): @@ -272,6 +277,9 @@ def test_parse_from_partial_match(): (parser_state,) = create_pmatch_parser_states( lp, terminals_to_states, term_type, ptoken, first_pmatch ) + # These copies also patch the lexers in the parse state, which is now + # needed for use with `parse_to_end` + parser_state = copy_parser_state(parser_state) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert expected_next_tokens == {"NAME"} @@ -281,6 +289,7 @@ def test_parse_from_partial_match(): (parser_state,) = create_pmatch_parser_states( lp, terminals_to_states, 
term_type, ptoken, first_pmatch ) + parser_state = copy_parser_state(parser_state) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert not expected_next_tokens @@ -290,6 +299,7 @@ (parser_state,) = create_pmatch_parser_states( lp, terminals_to_states, term_type, ptoken, first_pmatch ) + parser_state = copy_parser_state(parser_state) with pytest.raises(UnexpectedToken): parse_to_end(parser_state)
From 2d3bb54fb36add6a6437c88d3ba1110d28e0efc0 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 6 Jul 2023 17:37:38 -0500 Subject: [PATCH 182/734] Clone and patch initial parse state in the parser example --- examples/parsing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/parsing.py b/examples/parsing.py index de8efb60..f3c78988 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -54,7 +54,7 @@ class ParserLogitsProcessor(LogitsProcessor): def __init__(self, parser): ip = parser.parse_interactive("") - self.parser_state = ip.parser_state + self.parser_state = copy_parser_state(ip.parser_state) self.states_stack = [self.parser_state] self.token_seq = None self.token_idx = 0
From 7a7458d519113c4038890b759d8a445d9fd5bb5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 19 Jul 2023 21:21:10 +0200 Subject: [PATCH 183/734] Add missing requirements --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/pyproject.toml b/pyproject.toml index d1bbffdb..29659ff4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,11 +24,14 @@ classifiers = [ "Topic :: Scientific/Engineering :: Artificial Intelligence", ] dependencies = [ + "interegular", "jinja2", + "lark", "numpy", "pillow", "perscache", "pydantic", + "regex", "scipy", "tenacity", "torch",
From a4a0868291d9c6198e027feb33cba3b7fcef6432 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 17 Jul 2023 16:36:16 +0200 Subject: [PATCH 184/734] Convert tokens to strings before partial matching BPE tokenizers encode whitespaces as a special character, and it is possible that another tokenizer may encode one or several strings as special character(s). We thus add a `convert_token_to_string` method that converts these special characters back to the strings they correspond to, so we can partially match the corresponding tokens with the regex. --- outlines/models/tokenizer.py | 11 +++++++++++ outlines/models/transformers.py | 4 ++++ outlines/text/generate/regex.py | 3 ++- tests/text/generate/test_regex.py | 3 +++ 4 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/outlines/models/tokenizer.py b/outlines/models/tokenizer.py index 7aeefccd..bffcc517 100644 --- a/outlines/models/tokenizer.py +++ b/outlines/models/tokenizer.py @@ -22,3 +22,14 @@ def encode( def decode(self, token_ids: NDArray[np.int64]) -> List[str]: """Translate an array of token ids to a string or list of strings.""" ... + + @abstractmethod + def convert_token_to_string(self, token: str) -> str: + """Convert a token to its equivalent string. + + This is for instance useful for BPE tokenizers where whitespaces are + represented by the special character `Ġ`. This prevents matching a raw + token that includes `Ġ` with a string. + + """ + ...
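As an illustration of the problem (a sketch, not part of the patch; it assumes the `gpt2` checkpoint from the Hugging Face Hub is available):

``` python
# GPT-2's BPE tokenizer represents the leading whitespace of " world" with the
# special character `Ġ`, so the raw token cannot be matched against a regex
# that expects a plain space.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
token = tokenizer.tokenize(" world")[0]  # 'Ġworld'
string = tokenizer.convert_tokens_to_string([token])  # ' world'
assert token != string
```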
diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index d639c202..42cef7a3 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -81,6 +81,10 @@ def decode(self, token_ids: torch.LongTensor) -> List[str]: text = self.tokenizer.batch_decode(token_ids) return text + def convert_token_to_string(self, token: str) -> str: + string = self.tokenizer.convert_tokens_to_string([token]) + return string + def transformers(model_name: str, device: Optional[str] = None, **model_kwargs): from transformers import AutoModelForCausalLM
diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 72408c92..764ff62d 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -26,7 +26,8 @@ def __init__(self, model, regex_string: str, max_tokens: Optional[int]): vocabulary = model.tokenizer.vocabulary sorted_vocabulary = [ - k for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) + model.tokenizer.convert_token_to_string(k) + for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) ] regex_pattern = interegular.parse_pattern(regex_string)
diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py index 5d4ede3a..ee616cc6 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/text/generate/test_regex.py @@ -21,6 +21,9 @@ def decode(self, token_ids): return decoded + def convert_token_to_string(self, token): + return token + class Model: tokenizer = Tokenizer()
From 4a3126bc3530831a0a187bbe2308632e044c5605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 15 Jul 2023 18:32:10 +0200 Subject: [PATCH 185/734] Check whether terminal paths exist with vocabulary If the vocabulary does not allow a transition from the FSM's initial state to one of its terminal states, it will be impossible to generate a sequence that matches the input regex. We therefore raise an exception when no such transition exists. --- outlines/text/generate/regex.py | 21 ++++++++++++++------- outlines/text/parsing.py | 12 ++++++++++-- tests/text/test_parsing.py | 15 ++++++++++++--- 3 files changed, 36 insertions(+), 12 deletions(-)
diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 764ff62d..90d573fc 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -1,3 +1,4 @@ +import collections import math from typing import List, Optional, Tuple @@ -38,19 +39,25 @@ def partial_match_filter(string, end_idx, state_seq): return False return True - pstate_to_vocab = map_partial_states_to_vocab( + pstate_to_vocab, paths = map_partial_states_to_vocab( list(sorted_vocabulary), {"REGEX": self.regex_fsm}, partial_match_filter, final_state_string=model.tokenizer.eos_token, ) - # TODO: This check might be a little too strict, because I think that - # while some states are made unreachable by a vocabulary (and will not - # be present in the following set difference), there could still be - # paths to terminal states emanating from the states that are reachable. - states_with_transition = {x[1] for x in pstate_to_vocab.keys()} - if len(self.regex_fsm.states.difference(states_with_transition)) > 0: + # Check whether a terminal path (from the initial state of the FSM to + # one of its terminal states) exists, raise an exception otherwise.
+ traversed_states = set() + queue = collections.deque([self.regex_fsm.initial]) + while queue: + symbol = queue.popleft() + for prev_state in paths["REGEX"][symbol]: + if prev_state not in traversed_states: + traversed_states.add(prev_state) + queue.append(prev_state) + + if traversed_states.intersection(self.regex_fsm.finals) == set(): raise ValueError( "The vocabulary does not allow us to build a sequence that matches the input regex" ) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index c394e1fa..2daff0ab 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -371,7 +371,9 @@ def map_partial_states_to_vocab( [str, Optional[int], Tuple[int, ...]], bool ] = lambda *args: True, final_state_string: Optional[str] = None, -) -> DefaultDict[PartialParseState, Set[int]]: +) -> Tuple[ + DefaultDict[PartialParseState, Set[int]], Dict[str, DefaultDict[int, Set[int]]] +]: """Construct a map from partial parse states to subsets of `vocabulary`. The subsets of `vocabulary` consist of elements that are accepted by--or @@ -393,24 +395,30 @@ def map_partial_states_to_vocab( """ final_state_string_idx = None + # Partial parse states to the subsets of the vocabulary that accept them pstate_to_vocab = defaultdict(set) + possible_paths = {} for symbol_name, fsm in terminals_to_fsms_map.items(): + terminal_possible_paths = defaultdict(set) for i, vocab_string in enumerate(vocabulary): if vocab_string == final_state_string: final_state_string_idx = i for end_idx, state_seq in find_partial_matches(fsm, vocab_string): if partial_match_filter(vocab_string, end_idx, state_seq): + terminal_possible_paths[state_seq[0]].add(state_seq[-1]) pstate_to_vocab[(symbol_name, state_seq[0])].add(i) + possible_paths[symbol_name] = terminal_possible_paths + if final_state_string_idx is not None: # Allow transitions to EOS from all terminals FSM states for symbol_name, fsm in terminals_to_fsms_map.items(): for state in fsm.finals: pstate_to_vocab[(symbol_name, state)].add(final_state_string_idx) - return pstate_to_vocab + return pstate_to_vocab, possible_paths def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index d8ee52ad..eb9350fb 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -205,7 +205,9 @@ def test_map_partial_states_to_vocab_python(): vocabulary = ["d", "e", "ef foo", "f ", " ", "1d", ""] - pstate_to_vocab = map_partial_states_to_vocab(vocabulary, symbol_names_and_fsms) + pstate_to_vocab, possible_paths = map_partial_states_to_vocab( + vocabulary, symbol_names_and_fsms + ) assert dict(pstate_to_vocab) == { ("__IGNORE_0", 0): {4}, @@ -216,8 +218,11 @@ def test_map_partial_states_to_vocab_python(): ("DEF", 1): {1, 2}, ("DEF", 2): {3}, } + assert possible_paths["__IGNORE_0"] == {0: {1}, 1: {1}} + assert possible_paths["NAME"] == {0: {1}, 1: {1}} + assert possible_paths["DEF"] == {0: {1}, 1: {2, 3}, 2: {3}} - pstate_to_vocab = map_partial_states_to_vocab( + pstate_to_vocab, possible_paths = map_partial_states_to_vocab( vocabulary, symbol_names_and_fsms, final_state_string="" ) @@ -239,6 +244,9 @@ def test_map_partial_states_to_vocab_python(): 6, }, } + assert possible_paths["__IGNORE_0"] == {0: {1}, 1: {1}} + assert possible_paths["NAME"] == {0: {1}, 1: {1}} + assert possible_paths["DEF"] == {0: {1}, 1: {2, 3}, 2: {3}} def test_parse_from_partial_match(): @@ -332,7 +340,7 @@ def partial_match_filter(string, end_idx, state_seq): return False return True - pstate_to_vocab = 
map_partial_states_to_vocab( + pstate_to_vocab, possible_paths = map_partial_states_to_vocab( vocabulary, {"FLOAT": regex_fsm}, partial_match_filter, "" ) @@ -342,6 +350,7 @@ def partial_match_filter(string, end_idx, state_seq): {1, 5, 8, 12}, {1, 5, 8}, ] + assert possible_paths["FLOAT"] == {0: {1, 2, 3}, 1: {1, 3}, 2: {3}, 3: {3}} pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} From cfeb812927b18ceeb1d2c764701132edca4df456 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 13 Jul 2023 16:50:19 +0200 Subject: [PATCH 186/734] Generate a choice between different strings --- outlines/text/generate/__init__.py | 2 +- outlines/text/generate/regex.py | 6 ++++ .../generate/test_integration_transfomers.py | 13 +++++++++ tests/text/generate/test_regex.py | 29 +++++++++++++++++++ 4 files changed, 49 insertions(+), 1 deletion(-) diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py index bb752264..beca6f56 100644 --- a/outlines/text/generate/__init__.py +++ b/outlines/text/generate/__init__.py @@ -1,2 +1,2 @@ from .continuation import continuation -from .regex import float, integer, regex +from .regex import choice, float, integer, regex diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 90d573fc..c18eb18f 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -200,3 +200,9 @@ def float(model, max_tokens: Optional[int] = None): """ return Regex(model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens) + + +def choice(model, choices: List[str], max_tokens: Optional[int] = None): + """Choose between different sequences.""" + regex_str = r"(" + r"|".join(choices) + r")" + return Regex(model, regex_str, max_tokens) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index fad0c2d0..46562a40 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -98,6 +98,19 @@ def test_transformers_integration_float(): float(generated) +def test_transformers_integration_choice(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Write a short sentence " + sequence = generate.choice(model, ["test", "choice"])(prompt, rng=rng) + + generated = sequence[len(prompt) :] + assert generated == "test" or generated == "choice" + + def test_transformers_integration_with_pad_token(): model_name = "hf-internal-testing/tiny-random-XLMRobertaXLForCausalLM" model = models.transformers(model_name, device="cpu") diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py index ee616cc6..6bdac68a 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/text/generate/test_regex.py @@ -106,6 +106,35 @@ def test_integer_proposal(input_ids, proposal): ) +def test_choice_proposal(): + model = Model() + generator = generate.choice(model, ["1", "431a", "431A-"]) + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[]]), logits) + assert torch.equal( + result, + torch.tensor( + [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]] + ), + ) + + result = generator.create_proposal(torch.tensor([[4]]), logits) + assert torch.equal( + result, + torch.tensor( + [[-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, 1.0]] + ), + ) + + result = 
generator.create_proposal(torch.tensor([[4, 6]]), logits) + assert torch.equal( + result, + torch.tensor( + [[-math.inf, 1.0, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf]] + ), + ) + + @pytest.mark.parametrize( "input_ids, proposal", [ From a9cd45b719b5f6ce084018f8ba58abaeb6d7565e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sat, 15 Jul 2023 18:50:09 +0200 Subject: [PATCH 187/734] Create mask on device --- outlines/text/generate/regex.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index c18eb18f..4f21f72f 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -128,9 +128,7 @@ def create_proposal( masks = [] for pstate in self.pstates: mask = torch.full( - (len(self.model.tokenizer.vocabulary),), - -math.inf, - device=self.model.device, + (len(self.model.tokenizer.vocabulary),), -math.inf, device=self.device ) if pstate[1] > -1: From de8c2e5d1d02bab78adf15ea1dab1f24b6d9aa72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 23 Jul 2023 15:38:24 +0200 Subject: [PATCH 188/734] Update the README --- README.md | 237 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 142 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index e8dc351f..72ab8829 100644 --- a/README.md +++ b/README.md @@ -2,33 +2,55 @@ Outlines Logo -# Outlines +# Outlines 〰️ -Build _reliable_ workflows based on interactions with generative models. +Fast and reliable neural text generation. -[Prompting](#prompting) • -[Controlled generation](#controlled-generation) • -[Agents](#agents-example) • -[Sampling](#sampling-uncertainty-simulation-based-inference) • -[Parallel execution](#vectorization-and-parallel-execution) • -[Examples](#examples) +[Install](#installation) • +[Prompting primitives](#prompting) • +[Guided generation](#guided-generation) • +[Examples](#examples) • +[Stay tuned](#stay-tuned-for) -**Outlines** allows you to control and diagnose interactions with LLMs more effectively. Modern language models are powerful and versatile, but the way they interface with existing systems [can be very brittle](https://github.com/Significant-Gravitas/Auto-GPT/labels/invalid_json), their outputs [can be unreliable](https://arxiv.org/abs/2302.04023), and complex workflows (agents) can introduce a lot of error-prone code duplication. Outlines provides robust prompting primitives that separate the prompting from the execution logic and lead to simple implementations of few-shot generations, ReAct, meta-prompting, agents, etc. Outlines helps developers control text generation and produce predictable outputs that make the interaction with user code more robust. Its sampling-first approach allows one to diagnose issues with model-generated output more easily, and implement more robust generation methods such as [self-consistency](https://arxiv.org/abs/2203.11171) or [DiVeRSe](https://arxiv.org/abs/2206.02336). +**Outlines** 〰 is a library for neural text generation. You can think of it as a +more flexible replacement for the `generate` method in the +[transformers](https://github.com/huggingface/transformers) library. -**Outlines** is designed as a library that integrates well with the broader Python environment. 
Generation can be interleaved with control flow or custom function calls, prompts can be imported from other modules or libraries. +**Outlines** 〰 helps developers *guide text generation* to build robust +interfaces with external systems. +**Outlines** 〰 provides *robust prompting primitives* that separate the prompting +from the execution logic and lead to simple implementations of few-shot +generations, ReAct, meta-prompting, agents, etc. + +**Outlines** 〰 is designed as a *library* that is meant to be compatible with +the broader ecosystem, not to replace it. We use as few abstractions as possible, +and generation can be interleaved with control flow, conditionals, custom Python +functions and calls to other libraries. + +**Outlines** 〰 is *compatible with all models*. It only interfaces with models +via the next-token logits. It can be used with API-based models as well. ## Features -- [x] Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/). -- [x] Interleave completions with loops, conditionals, and custom Python functions -- [x] Caching of generations -- [x] Integration with OpenAI and HuggingFace models -- [x] Controlled generation, including multiple choice, type constraints and dynamic stopping -- [x] Sampling of multiple sequences -- [x] Vectorized execution +- [x] 🖍️ Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/) +- [x] 🚄 Guided generation, including multiple choice, type constraints and dynamic stopping +- [x] ⚡ Fast regex-guided generation +- [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions +- [x] 💾 Caching of generations +- [x] 🤗 Integration with HuggingFace's `transformers` models + + +## Stay tuned for + +- Context-Free Grammar guided generation ([#178](https://github.com/normal-computing/outlines/pull/178)); +- Generate JSON with a defined structure ([#140](https://github.com/normal-computing/outlines/pull/140)) +- Prompt-token alignment so you don't have to think about tokenization details ([#201](https://github.com/normal-computing/outlines/pull/201)) +- An infilling DSL ([#182](https://github.com/normal-computing/outlines/issues/182)) + +You can follow [@NormalComputing](https://twitter.com/NormalComputing), [@remilouf](https://twitter.com/remilouf) or [@BrandonTWillard](https://twitter.com/BrandonTWillard) for regular updates! ## Installation @@ -75,36 +97,18 @@ def labelling(to_label, examples): {{ to_label }} // """ -complete = models.text_completion.openai("text-davinci-003") +model = models.transformers("gpt2") prompt = labelling("Just awesome", examples) -answer = complete(prompt) +answer = text.generate.continuation(model, max_tokens=100)(prompt) ``` -## Chaining with loops and conditionals ([example](https://github.com/normal-computing/outlines/blob/readme/examples/react.py)) - -**Outlines** comes with very few abstractions, and is designed to blend into existing code and integrate with the rest of the ecosystem. - -``` python -reviews = ["Just awesome", "Avoid", "Will come back"] - -def send_notification(review): - """This function sends a notification with the review's content.""" - ...
- -for review in reviews: - prompt = labelling(review, examples) - answer = model(prompt) - if answer == "Positive": - send_notification(review) ``` -## Agents ([example](https://github.com/normal-computing/outlines/blob/readme/examples/babyagi.py)) - -**Outlines** makes building agents like [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT), [BabyAGI](https://github.com/yoheinakajima/babyagi), [ViperGPT](https://viper.cs.columbia.edu/) or [Transformers Agent](https://huggingface.co/docs/transformers/transformers_agents) easier by removing boilerplate prompting code. - ### Tools -We can teach language models to call external functions to get additional informations or perform tasks, by encoding the functions' description in the prompt. To avoid duplicating information between the function definition and the description passed to the prompt, we define custom Jinja filters that can extract the function's name, description, signature and source: +We can teach language models to call external functions to get additional +information or perform tasks, by encoding the functions' description in the +prompt. To avoid duplicating information between the function definition and the +description passed to the prompt, we define custom Jinja filters that can +extract the function's name, description, signature and source: ``` python @@ -139,7 +143,10 @@ prompt = my_commands([google_search, wikipedia_search]) ### Response models -We can instruct models to return their output in a pre-defined format, often JSON. To avoid duplicating information between the function definition and the description passed to the prompt we define a custom Jinja filter that can extract the expected response's schema: +We can instruct models to return their output in a pre-defined format, often +JSON. To avoid duplicating information between the function definition and the +description passed to the prompt we define a custom Jinja filter that can +extract the expected response's schema: ``` python from pydantic import BaseModel @@ -170,104 +177,132 @@ joke_ppt(Joke) # } ``` -## Controlled generation +With these prompting primitives **Outlines** makes building agents like +[AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT), +[BabyAGI](https://github.com/yoheinakajima/babyagi), +[ViperGPT](https://viper.cs.columbia.edu/) or [Transformers +Agent](https://huggingface.co/docs/transformers/transformers_agents) easier by +removing boilerplate prompting code. -The first step towards reliability of systems that include large language models is to ensure that there is a well-defined interface between their output and user-defined code. **Outlines** provides ways to control the generation of language models to make their output more predictable. +## Guided generation + +The first step towards reliability of systems that include large language models +is to ensure that there is a well-defined interface between their output and +user-defined code. **Outlines** provides ways to control the generation of +language models to make their output more predictable.
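A sketch of what such an interface looks like in practice (not part of the diff; the model name and prompt are illustrative), using the `generate.choice` primitive introduced earlier in this series:

``` python
# Because the completion is constrained to one of the given choices, its value
# can feed plain Python control flow with no parsing or validation step.
import outlines.models as models
import outlines.text.generate as generate

model = models.transformers("gpt2")
label = generate.choice(model, ["Positive", "Negative"])(
    "Review: Just awesome\nSentiment: "
)
if label == "Positive":
    print("flag this review for follow-up")
```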
+ +### Early stopping You can stop the generation after a given sequence has been found: ``` python -answer = model("Tell me a one-sentence joke.", stop_at=["."]) +import outlines.text.generate as generate +import outlines.models as models + +model = models.transformers("gpt2") +answer = generate.continuation(model, stop=["."])("Tell me a one-sentence joke.") ``` +### Multiple choices + You can reduce the completion to a choice between multiple possibilities: ``` python +import outlines.text.generate as generate +import outlines.models as models + +model = models.transformers("gpt2") + prompt = labelling("Just awesome", examples) -answer = model(prompt, is_in=["Positive", "Negative"]) +answer = generate.choice(model, ["Positive", "Negative"])(prompt) ``` +### Type constraint + +You can instruct the model to only return integers or floats: -You can require the generated sequence to be an int or a float: ``` python +import outlines.text.generate as generate import outlines.models as models +model = models.transformers("gpt2") -model = models.text_completion.hf("sshleifer/tiny-gpt2") -answer = model("2 + 2 = ", type="int") -print(answer) -# 4 +prompt = "1+1=" +answer = generate.integer(model)(prompt) -model = models.text_completion.hf("sshleifer/tiny-gpt2") -answer = model("1.7 + 3.2 = ", type="float") -print(answer) -# 4.9 +prompt = "sqrt(2)=" +answer = generate.float(model)(prompt) ``` +### Efficient regex-guided generation -## Sampling ([uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb), [simulation-based inference](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb)) - -Outlines is strictly sampling based, and focused on using methods such as [self-consistency](https://arxiv.org/abs/2203.11171), [adaptive consistency](https://arxiv.org/abs/2305.11860), [DiVeRSe](https://arxiv.org/abs/2206.02336), [Tree of thoughts](https://arxiv.org/abs/2305.10601), [lattice sampling](https://arxiv.org/abs/2112.07660), etc. Several samples can be obtained using the `num_samples` keyword argument: +Outlines also comes with fast regex-guided generation. In fact, the `choice`, +`integer` and `float` functions above all use regex-guided generation under the +hood: ``` python import outlines.models as models +import outlines.text.generate as generate -model = models.text_completion.hf("sshleifer/tiny-gpt2") -answer = model("2 + 2 = ", num_samples=5) -print(answer) -# [4, 5, 4, 4, 4] -``` +model = models.transformers("gpt2-medium") -The focus on sampling allows us to explore different ideas, such as [using the diversity of answers to evaluate the model's uncertainty](https://github.com/normal-computing/outlines/blob/readme/examples/sampling.ipynb), or [simulation-based inference to optimize the prompt](https://github.com/normal-computing/outlines/blob/readme/examples/simulation_based_inference.ipynb). +prompt = "Is 1+1=2? " +unguided = generate.continuation(model, max_tokens=30)(prompt) +guided = generate.regex(model, r"\s*([Yy]es|[Nn]o|[Nn]ever|[Aa]lways)", max_tokens=30)( + prompt +) -## Vectorization and parallel execution +print(unguided) +# Is 1+1=2? +# +# This is probably the most perplexing question. 
+ +# As I said in one of my articles describing how +# I call 2 and 1, there isn't -You can pass prompts in a NumPy array to Outlines models: +print(guided) +# Is 1+1=2? Always +``` ``` python -import numpy as np import outlines.models as models +import outlines.text.generate as generate -model = models.text_completion.openai("text-davinci-003") -prompts = [ - ["Translate 'Hello' in Italian", "Translate 'Hello' in French"], - ["Translate 'Hello' in Spanish", "Translate 'Hello' in German"], -] -answers = model(prompts) +model = models.transformers("gpt2-medium") -print(answers.shape) -# (2, 2) -``` +prompt = "What is the IP address of the Google DNS servers? " +unguided = generate.continuation(model, max_tokens=30)(prompt) +guided = generate.regex( + model, + r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)", + max_tokens=30, +)(prompt) -Outlines also provide a `outlines.vectorize` decorator that will vectorize any function. If the function is async the requests will be run concurrently: +print(unguided) +# What is the IP address of the Google DNS servers? +# +# Passive DNS servers are at DNS servers that are private. +# In other words, both IP servers are private. The database +# does not contain Chelsea Manning -``` python -import aiohttp -import numpy as np -import outlines - -@outlines.vectorize -async def wikipedia_search(query): - url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={query}&origin=*" - async with aiohttp.ClientSession() as session: - async with session.get(url) as response: - return await response.text() - -results = wikipedia_search([["Cat", "Dog"],["Bird", "Horse"]]) -print(results.shape) -# (2, 2) +print(guided) +# What is the IP address of the Google DNS servers? +# 2.2.6.1 ``` -This feature allows you to run multiple workflows in parallel, for instance to avoid overfitting when iterating over a workflow or in production to run workflows over several different inputs. +Unlike other libraries, regex-guided generation in Outlines is almost as fast +as non-guided generation. ## Contributing ### What contributions? -We curently only accept bug fixes and documentation contributions. If you have a feature request, please start a new [discussions](https://github.com/normal-computing/outlines/discussions). The issue tracker is only intended for actionable items. +We currently only accept bug fixes and documentation contributions. If you have a +feature request, please start a new +[discussion](https://github.com/normal-computing/outlines/discussions). The +issue tracker is only intended for actionable items. ### How to contribute?
@@ -286,3 +321,15 @@ Do not hesitate to open a draft PR before your contribution is ready, especially - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) - [Uncertainty](https://github.com/normal-computing/outlines/blob/main/examples/sampling.ipynb) - [Simulation-based inference](https://github.com/normal-computing/outlines/blob/main/examples/simulation_based_inference.ipynb) + + +## Cite Outlines + +``` +@article{willard2023efficient, + title={Efficient Guided Generation for LLMs}, + author={Willard, Brandon T and Louf, R{\'e}mi}, + journal={arXiv preprint arXiv:2307.09702}, + year={2023} +} +``` From 3aefe07705f2afcf9ac4e0c9b775a67f749ef83b Mon Sep 17 00:00:00 2001 From: mondaychen Date: Thu, 10 Aug 2023 16:17:48 -0400 Subject: [PATCH 189/734] Fix error when using type="float" on OpenAI model Resolves #211 --- outlines/models/openai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 0a5541f4..aad5ca75 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -276,7 +276,7 @@ def create_float_mask(encoder): # TODO: This is a hack because OpenAI's API does not # allow more than 300 entries for `logit_bias` - special_tokens = encoder._special_tokens.values() + special_tokens = encoder._special_tokens mask = {special_tokens["<|endoftext|>"]: 100} mask.update({int_token_ids[i]: 100 for i in range(300 - len(special_tokens))}) From c7ece19fb6140d88105c1fbf5fce94e1961a757c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 1 Aug 2023 13:20:41 +0200 Subject: [PATCH 190/734] Only return the generation when generating sequences --- outlines/text/generate/sequence.py | 2 +- .../generate/test_integration_transfomers.py | 19 ++++++++----------- tests/text/generate/test_sequence.py | 14 +++++--------- 3 files changed, 14 insertions(+), 21 deletions(-) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index b5691359..594f107a 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -233,7 +233,7 @@ def __call__( updated_token_ids[:, num_prompt_tokens:] ).flatten() - result = self.model.tokenizer.decode(token_ids) + result = self.model.tokenizer.decode(token_ids[..., num_prompt_tokens:]) result = self.postprocess_completions(result) if len(result) == 1: diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 46562a40..5643a894 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -53,7 +53,7 @@ def test_transformers_various_regexes(): prompt = "Write an email address" regex_str = r"([a-z]{10})@([a-z]{5})\.([a-z]{3})" sequence = generate.regex(model, regex_str)(prompt, rng=rng) - assert re.fullmatch(regex_str, sequence[len(prompt) :]) is not None + assert re.fullmatch(regex_str, sequence) is not None def test_transformers_integration_integer(): @@ -65,9 +65,8 @@ def test_transformers_integration_integer(): prompt = "Write a short sentence" sequence = generate.integer(model, max_tokens=10)(prompt, rng=rng) - generated = sequence[len(prompt) :] - assert generated[0] != 0 - int(generated) + assert sequence[0] != 0 + int(sequence) def test_transformers_integration_integer_array(): @@ -80,8 +79,8 @@ def test_transformers_integration_integer_array(): sequence = generate.integer(model, 
max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert len(sequence) == 2 - int(sequence[0][len(prompts[0]) :]) - int(sequence[1][len(prompts[1]) :]) + int(sequence[0]) + int(sequence[1]) def test_transformers_integration_float(): @@ -93,9 +92,8 @@ def test_transformers_integration_float(): prompt = "Write a short sentence" sequence = generate.float(model, max_tokens=10)(prompt, rng=rng) - generated = sequence[len(prompt) :] - assert generated[0] != 0 - float(generated) + assert sequence[0] != 0 + float(sequence) def test_transformers_integration_choice(): @@ -107,8 +105,7 @@ def test_transformers_integration_choice(): prompt = "Write a short sentence " sequence = generate.choice(model, ["test", "choice"])(prompt, rng=rng) - generated = sequence[len(prompt) :] - assert generated == "test" or generated == "choice" + assert sequence == "test" or sequence == "choice" def test_transformers_integration_with_pad_token(): diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 3833a9fd..8e5f3dd1 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -285,7 +285,7 @@ def is_finished(self, token_ids): sequence = FinishAfterTwo(model) result = sequence("Test") - assert torch.equal(result, torch.tensor([0, 0, 1])) + assert torch.equal(result, torch.tensor([0, 1])) def test_call_prompt_list(): @@ -339,9 +339,7 @@ def is_finished(self, token_ids): sequence = FinishAfterThree(model) result = sequence(["Test1", "Test2", "Test3"]) - assert torch.equal( - result, torch.tensor([[0, 2, 3, -1], [1, 2, 3, 4], [5, 2, 3, -1]]) - ) + assert torch.equal(result, torch.tensor([[2, 3, -1], [2, 3, 4], [2, 3, -1]])) def test_call_single_prompt_samples(): @@ -370,7 +368,7 @@ def is_finished(self, token_ids): ) sequence = FinishAfterTwo(model) result = sequence("Test", samples=3) - assert torch.equal(result, torch.tensor([[4, 0, 1], [4, 0, 1], [4, 0, 1]])) + assert torch.equal(result, torch.tensor([[0, 1], [0, 1], [0, 1]])) class FinishAfterOne(Sequence): def __init__(self, model): @@ -392,7 +390,7 @@ def is_finished(self, token_ids): ) sequence = FinishAfterOne(model) result = sequence("Test", samples=3) - assert torch.equal(result, torch.tensor([[4, 0], [4, 0], [4, 0]])) + assert torch.equal(result, torch.tensor([[0], [0], [0]])) def test_call_prompt_list_samples(): @@ -433,7 +431,5 @@ def is_finished(self, token_ids): result = sequence(["Test1", "Test2", "Test3"], samples=3) assert torch.equal( result, - torch.tile( - torch.tensor([[3, 0, 1, -1], [4, 0, 1, 2], [5, 0, 1, -1]]), (3, 1, 1) - ), + torch.tile(torch.tensor([[0, 1, -1], [0, 1, 2], [0, 1, -1]]), (3, 1, 1)), ) From 12b672be963b98dc81a50f1cec6b753f94d7350e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 9 Jun 2023 16:27:50 +0200 Subject: [PATCH 191/734] Parse JSON schema into a generation schedule --- .github/workflows/build_documentation.yml | 4 +- .github/workflows/publish_documentation.yml | 4 +- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 4 +- environment.yml | 2 +- outlines/text/json_schema.py | 239 +++++++++++++ pyproject.toml | 1 + tests/text/test_json_schema.py | 356 ++++++++++++++++++++ 8 files changed, 604 insertions(+), 8 deletions(-) create mode 100644 outlines/text/json_schema.py create mode 100644 tests/text/test_json_schema.py diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml index 1a70a4df..50de5b2d 100644 --- a/.github/workflows/build_documentation.yml 
+++ b/.github/workflows/build_documentation.yml @@ -14,10 +14,10 @@ jobs: with: persist-credentials: false - - name: Set up Python 3.9 + - name: Set up Python 3.10 uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: "3.10" - name: Build the documentation with Sphinx run: |
diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index 57c93a5f..6542ac70 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -15,10 +15,10 @@ jobs: - name: Checkout the branch uses: actions/checkout@v2.3.1 - - name: Set up Python 3.9 + - name: Set up Python 3.10 uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: "3.10" - name: Build the documentation with Sphinx run: |
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a9c826b3..e6bf1b11 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: 3.9 + python-version: "3.10" - name: Build sdist and wheel run: | python -m pip install -U pip
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1554bef3..dab938cb 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -14,7 +14,7 @@ - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: 3.9 + python-version: "3.10" - uses: pre-commit/action@v3.0.0 tests: @@ -24,7 +24,7 @@ - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: 3.9 + python-version: "3.10" - name: Set up test environment run: | python -m pip install --upgrade pip
diff --git a/environment.yml b/environment.yml index d9c3f34e..8f7eae92 100644 --- a/environment.yml +++ b/environment.yml @@ -8,7 +8,7 @@ channels: - conda-forge - huggingface dependencies: - - python<3.11.0 + - python==3.10.0 - jinja2 - numpy - pillow
diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py new file mode 100644 index 00000000..8b96c9dc --- /dev/null +++ b/outlines/text/json_schema.py @@ -0,0 +1,239 @@ +import itertools +import json +from typing import Dict + +STRING = r'".*"' +INTEGER = r"(0|[1-9][0-9]*)" +NUMBER = rf"(-)?({INTEGER})(\.[0-9]+)?([eE][+-][0-9]+)?" +BOOLEAN = r"(true|false)" +NULL = r"null" + +type_to_regex = { + "string": STRING, + "integer": INTEGER, + "number": NUMBER, + "boolean": BOOLEAN, + "null": NULL, +} + + +def build_regex_from_schema(schema: str): + """Turn a JSON schema into a regex that matches any JSON object that follows + this schema. + + Parameters + ---------- + schema + A string that contains the JSON schema. + + Returns + ------- + A string that contains a regular expression that matches any JSON object that + follows the schema. + + """ + schedule = build_schedule_from_schema(schema) + + regex = "" + for step in schedule: + regex += match_step_to_regex(step) + + return regex + + +def build_schedule_from_schema(schema: str): + """Turn a JSON schema into a generation schedule. + + JSON Schema is a declarative language that allows one to annotate JSON documents + with types and descriptions. These schemas can be generated from any Python + data structure that has type annotations: namedtuples, dataclasses, Pydantic + models. By ensuring that the generation respects the schema, we ensure + that the output can be parsed into these objects.
+ This function parses the provided schema and builds a generation schedule which + mixes deterministic generation (fixed strings), and sampling with constraints. + + Parameters + ---------- + schema + A string that represents a JSON Schema. + + Returns + ------- + A generation schedule. A list of strings that represent the JSON + schema's structure and regular expressions that define the structure of + the fields. + + References + ---------- + .. [0] JSON Schema. https://json-schema.org/ + + """ + schema = json.loads(schema) + + # Find object definitions in the schema, if any + definitions = {} + if "$defs" in schema: + for definition, annotation in schema["$defs"].items(): + definitions[f"#/$defs/{definition}"] = annotation + + schema = expand_json_schema(schema, definitions) + schedule = build_schedule_from_instance(schema) + + # Concatenate adjacent strings + reduced_schedule = [ + x + for cls, grp in itertools.groupby(schedule, type) + for x in (("".join(grp),) if cls is str else grp) + ] + + return reduced_schedule + + +def expand_json_schema(raw_schema: Dict, definitions: Dict): + """Replace references by their value in the JSON Schema. + + This recursively follows the references to other schemas in case + of nested models. Other schemas are stored under the "definitions" + key in the schema of the top-level model. + + Parameters + ---------- + raw_schema + The raw JSON schema as a Python dictionary, possibly with definitions + and references. + definitions + The currently known definitions. + + Returns + ------- + A dictionary that represents the flattened equivalent of the input + JSON schema. + + """ + expanded_properties = {} + + if "properties" in raw_schema: + for name, value in raw_schema["properties"].items(): + if "$ref" in value: # if item is a single element + expanded_properties[name] = expand_json_schema( + definitions[value["$ref"]], definitions + ) + elif "type" in value and value["type"] == "array": # if item is a list + expanded_properties[name] = value + if "$ref" in value["items"]: + expanded_properties[name]["items"] = expand_json_schema( + definitions[value["items"]["$ref"]], definitions + ) + else: + expanded_properties[name]["items"] = value["items"] + else: + expanded_properties[name] = value + + return { + "title": raw_schema["title"], + "type": raw_schema["type"], + "properties": expanded_properties, + } + + else: + return raw_schema + + +def build_schedule_from_instance(instance: Dict, indent: int = 0): + """Build a generation schedule from an instance. + + This recursively follows the references to other instances. + + Parameters + ---------- + instance + An instance, can be the JSON schema itself. + indent + The current indentation level. + + Returns + ------- + A generation schedule for the instance, a list of strings that represent + the structure of the JSON schema and dictionaries that contain the + instance definition.
+ + """ + schedule = [] + if "properties" in instance: + schedule.append("{\n") + schedule += build_schedule_from_instance(instance["properties"], indent + 2) + if indent > 0: + schedule.append(" " * indent) + schedule.append("}") + else: + for i, (name, annotation) in enumerate(instance.items()): + schedule.append(" " * indent) + schedule.append(f'"{name}": ') + if "anyOf" in annotation: + schedule.append(annotation) + elif annotation["type"] == "object": + schedule += build_schedule_from_instance(annotation, indent) + else: + schedule.append(annotation) + + # We cannot add commas after the last key-value pair in JSON + if i == len(instance) - 1: + schedule.append("\n") + else: + schedule.append(",\n") + + return schedule + + +def match_step_to_regex(step): + """Translate an element of a JSON schema to a regex that defines its content. + + Parameters + ---------- + step: + A string that represents the schema's structure, or a dictionnary + that represents a field in the schema. + + Returns + ------- + A string that represents a regular expression that defines the value of the + schedule's step. + + """ + match step: + case str() as step: + return step + + case {"enum": choices, "type": "string"}: + choices = [f'"{choice}"' for choice in choices] + return f"({'|'.join(choices)})" + case {"enum": choices}: + choices = [str(choice) for choice in choices] + return f"({'|'.join(choices)})" + + case {"type": "array", "items": items}: + item_regexes = match_step_to_regex(items) + return rf"\[({item_regexes})(,({item_regexes}))*\]" + + case {"type": "object"} as object: + steps = build_schedule_from_schema(json.dumps(object)) + regex_str = "" + for step in steps: + regex_str += match_step_to_regex(step) + return regex_str + + case {"type": "string", "maxLength": max_length}: + return f'".{{,{max_length}}}"' + case {"type": "string", "minLength": min_length}: + return f'".{{{min_length},}}"' + + case {"type": field_type}: + return type_to_regex[field_type] + + case {"anyOf": choices}: + regexes = [match_step_to_regex(choice) for choice in choices] + return rf"({'|'.join(regexes)})" + + case _: + raise NotImplementedError diff --git a/pyproject.toml b/pyproject.toml index 29659ff4..dd560892 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ dynamic = ["version"] test = [ "diffusers", "pre-commit", + "pydantic>=2.0", "pytest", "pytest-cov", "transformers", diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py new file mode 100644 index 00000000..4e3018e0 --- /dev/null +++ b/tests/text/test_json_schema.py @@ -0,0 +1,356 @@ +import json +import re +from enum import Enum +from typing import List, Optional, Union + +import pytest +from pydantic import BaseModel, constr + +from outlines.text.json_schema import ( + BOOLEAN, + INTEGER, + NULL, + NUMBER, + STRING, + build_schedule_from_schema, + match_step_to_regex, +) + + +def test_pydantic_basic(): + class User(BaseModel): + user_id: int + name: str + maxlength_name: constr(max_length=10) + minlength_name: constr(min_length=10) + value: float + is_true: bool + + schema = json.dumps(User.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "user_id": ', + {"title": "User Id", "type": "integer"}, + ',\n "name": ', + {"title": "Name", "type": "string"}, + ',\n "maxlength_name": ', + {"title": "Maxlength Name", "type": "string", "maxLength": 10}, + ',\n "minlength_name": ', + {"title": "Minlength Name", "type": "string", "minLength": 10}, + ',\n "value": ', + {"title": 
"Value", "type": "number"}, + ',\n "is_true": ', + {"title": "Is True", "type": "boolean"}, + "\n}", + ] + + +def test_pydantic_optional(): + class Foo(BaseModel): + bar: Optional[str] + + schema = json.dumps(Foo.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "bar": ', + {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bar"}, + "\n}", + ] + + +def test_pydantic_array(): + class User(BaseModel): + user_id: int + value: List[float] + + schema = json.dumps(User.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "user_id": ', + {"title": "User Id", "type": "integer"}, + ',\n "value": ', + {"title": "Value", "type": "array", "items": {"type": "number"}}, + "\n}", + ] + + +def test_pydantic_enum(): + class Name(str, Enum): + john = "John" + marc = "Marc" + michel = "Michel" + + class User(BaseModel): + user_id: int + name: Name + + schema = json.dumps(User.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "user_id": ', + {"title": "User Id", "type": "integer"}, + ',\n "name": ', + { + "title": "Name", + "enum": ["John", "Marc", "Michel"], + "type": "string", + }, + "\n}", + ] + + +def test_pydantic_nested(): + """Arbitrarily nested schema.""" + + class Fizz(BaseModel): + buzz: str + + class Foo(BaseModel): + count: int + size: Fizz + + class Bar(BaseModel): + apple: str + banana: str + + class Spam(BaseModel): + foo: Foo + bars: Bar + + # We need to a recursive function to parse nested schemas + schema = json.dumps(Spam.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "foo": {\n "count": ', + {"title": "Count", "type": "integer"}, + ',\n "size": {\n "buzz": ', + {"title": "Buzz", "type": "string"}, + '\n }\n },\n "bars": {\n "apple": ', + {"title": "Apple", "type": "string"}, + ',\n "banana": ', + {"title": "Banana", "type": "string"}, + "\n }\n}", + ] + + +def test_pydantic_list_object(): + class Foo(BaseModel): + count: int + + class Spam(BaseModel): + foo: List[Foo] + + # We need to a recursive function to parse nested schemas + schema = json.dumps(Spam.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "foo": ', + { + "items": { + "title": "Foo", + "type": "object", + "properties": {"count": {"title": "Count", "type": "integer"}}, + }, + "title": "Foo", + "type": "array", + }, + "\n}", + ] + + +def test_pydantic_union(): + """Schemas with Union types.""" + + class Spam(BaseModel): + foo: int + bar: Union[float, str] + + schema = json.dumps(Spam.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "foo": ', + {"title": "Foo", "type": "integer"}, + ',\n "bar": ', + {"title": "Bar", "anyOf": [{"type": "number"}, {"type": "string"}]}, + "\n}", + ] + + +def test_json_schema(): + schema = '{"title": "User", "type": "object", "properties": {"user_id": {"title": "User Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["user_id", "name"]}' + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '{\n "user_id": ', + {"title": "User Id", "type": "integer"}, + ',\n "name": ', + {"title": "Name", "type": "string"}, + "\n}", + ] + + +class MockTokenizer: + pad_token_id = 0 + eos_token_id = 0 + + +class MockModel: + tokenizer = MockTokenizer() + device = "cpu" + + +@pytest.mark.parametrize( + "pattern,does_match", + [ + ("0", True), + ("1", True), + ("-1", 
False), + ("01", False), + ("1.3", False), + ("t", False), + ], +) +def test_match_integer(pattern, does_match): + step = {"title": "Foo", "type": "integer"} + regex = match_step_to_regex(step) + assert regex == INTEGER + + match = re.fullmatch(regex, pattern) + if does_match: + assert match[0] == pattern + assert match.span() == (0, len(pattern)) + else: + assert match is None + + +@pytest.mark.parametrize( + "pattern,does_match", + [ + ("1", True), + ("0", True), + ("01", False), + (".3", False), + ("1.3", True), + ("-1.3", True), + ("1.3e9", False), + ("1.3e+9", True), + ], +) +def test_match_number(pattern, does_match): + step = {"title": "Foo", "type": "number"} + regex = match_step_to_regex(step) + assert regex == NUMBER + + match = re.fullmatch(regex, pattern) + if does_match: + assert match[0] == pattern + assert match.span() == (0, len(pattern)) + else: + assert match is None + + +@pytest.mark.parametrize( + "step,regex,examples", + [ + ( + {"title": "Foo", "type": "string"}, + STRING, + [("unquotedstring", False), ('"quoted_string"', True)], + ), + ( + {"title": "Foo", "type": "string", "maxLength": 3}, + '".{,3}"', + [('"ab"', True), ('"abcd"', False)], + ), + ( + {"title": "Foo", "type": "string", "minLength": 3}, + '".{3,}"', + [('"ab"', False), ('"abcd"', True)], + ), + ( + {"title": "Foo", "type": "boolean"}, + BOOLEAN, + [ + ("true", True), + ("false", True), + ("null", False), + ("0", False), + ], + ), + ( + {"title": "Foo", "type": "null"}, + NULL, + [ + ("null", True), + ("true", False), + ("0", False), + ], + ), + ( + {"title": "Foo", "anyOf": [{"type": "string"}, {"type": "number"}]}, + f"({STRING}|{NUMBER})", + [ + ('"string"', True), + ("1000", True), + ("true", False), + ], + ), + ( + {"title": "Foo", "enum": ["Marc", "Jean"], "type": "string"}, + '("Marc"|"Jean")', + [('"Marc"', True), ('"Jean"', True), ('"John"', False)], + ), + ( + {"title": "Foo", "enum": [0, 1], "type": "integer"}, + "(0|1)", + [("0", True), ("1", True), ("a", False)], + ), + ( + { + "title": "Foo", + "type": "object", + "properties": {"count": {"title": "Count", "type": "integer"}}, + }, + '{\n "count": ' + INTEGER + "\n}", + [('{\n "count": 100\n}', True)], + ), + ( + {"title": "Foo", "type": "array", "items": {"type": "number"}}, + rf"\[({NUMBER})(,({NUMBER}))*\]", + [("[1e+9,1.3]", True)], + ), + ( + { + "title": "Foo", + "type": "array", + "items": {"anyOf": [{"type": "boolean"}, {"type": "null"}]}, + }, + r"\[(((true|false)|null))(,(((true|false)|null)))*\]", + [("[true,null,false]", True)], + ), + ( + { + "title": "Bar", + "type": "object", + "properties": { + "fuzz": { + "title": "Foo", + "type": "object", + "properties": {"spam": {"title": "Spam", "type": "integer"}}, + } + }, + }, + '{\n "fuzz": {\n "spam": ' + INTEGER + "\n }\n}", + [('{\n "fuzz": {\n "spam": 100\n }\n}', True)], + ), + ], +) +def test_match(step, regex, examples): + assert match_step_to_regex(step) == regex + + for string, does_match in examples: + match = re.fullmatch(regex, string) + if does_match: + assert match[0] == string + assert match.span() == (0, len(string)) + else: + assert match is None From ded269003ca79f57b790653fe81c3c300fd7e027 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 31 Jul 2023 17:36:05 +0200 Subject: [PATCH 192/734] Generate from JSON schema with `JSON` class --- outlines/text/generate/__init__.py | 2 +- outlines/text/generate/regex.py | 26 +++- .../generate/test_integration_transfomers.py | 112 ++++++++++++++++++ 3 files changed, 138 insertions(+), 2 deletions(-) 
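The diff below wires the JSON-schema machinery into a new `generate.json` entry point. For reference, a minimal usage sketch of that entry point, assuming only the API introduced in this patch (the model name mirrors the one used in the tests; the prompt is illustrative):

```python
# Sketch of the `generate.json` API added in this patch; the model name
# and prompt are illustrative, not prescribed by the API itself.
from pydantic import BaseModel

import outlines.models as models
import outlines.text.generate as generate


class User(BaseModel):
    user_id: int
    name: str


model = models.transformers("hf-internal-testing/tiny-random-GPTJForCausalLM")
generator = generate.json(model, User, max_tokens=100)

# The completion is guaranteed to parse against the User schema.
sequence = generator("Output some JSON ")
```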
diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py index beca6f56..359b7f0b 100644 --- a/outlines/text/generate/__init__.py +++ b/outlines/text/generate/__init__.py @@ -1,2 +1,2 @@ from .continuation import continuation -from .regex import choice, float, integer, regex +from .regex import choice, float, integer, json, regex diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 4f21f72f..74137607 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -1,11 +1,14 @@ import collections import math -from typing import List, Optional, Tuple +from json import dumps +from typing import List, Optional, Tuple, Union import interegular import torch +from pydantic import BaseModel from outlines.text.generate.continuation import Continuation +from outlines.text.json_schema import build_regex_from_schema from outlines.text.parsing import find_partial_matches, map_partial_states_to_vocab @@ -204,3 +207,24 @@ def choice(model, choices: List[str], max_tokens: Optional[int] = None): """Choose between different sequences.""" regex_str = r"(" + r"|".join(choices) + r")" return Regex(model, regex_str, max_tokens) + + +def json(model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None): + """Generate a text sequence that follows a JSON schema. + + Parameters + --------- + model + The model to use to computes the next-token logits. + schema + The JSON schema, or Pydantic model, that guides the generation. + max_tokens + The maximum number of tokens to generate at each step. + + """ + if isinstance(schema, type(BaseModel)): + schema = dumps(schema.model_json_schema()) + + regex_str = build_regex_from_schema(schema) + + return Regex(model, regex_str, max_tokens) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 5643a894..70bdbb0b 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -1,7 +1,11 @@ +import json import re +from enum import Enum +from typing import List, Union import pytest import torch +from pydantic import BaseModel, constr import outlines.models as models import outlines.text.generate as generate @@ -113,3 +117,111 @@ def test_transformers_integration_with_pad_token(): model = models.transformers(model_name, device="cpu") assert model.tokenizer.pad_token_id == 1 assert model.tokenizer.pad_token == "" + + +def test_transformers_json_basic(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Output some JSON " + + class Spam(BaseModel): + foo: int + bar: float + spam: constr(max_length=10) + fuzz: bool + + rng = torch.Generator() + rng.manual_seed(0) # make sure that `bar` is not an int + + sequence = generate.json(model, Spam, max_tokens=1000)(prompt, rng=rng) + parsed = json.loads(sequence) + assert isinstance(parsed["foo"], int) + assert isinstance(parsed["bar"], float) + assert isinstance(parsed["spam"], str) + assert isinstance(parsed["fuzz"], bool) + assert len(parsed["spam"]) == 10 + + +def test_transformers_json_str_enum(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Output some JSON " + + rng = torch.Generator() + rng.manual_seed(0) + + class Name(str, Enum): + john = "John" + marc = "Marc" + michel = "Michel" + + class User(BaseModel): + user_id: int + name: Name + + 
sequence = generate.json(model, User)(prompt, rng=rng)
+    parsed = json.loads(sequence)
+    assert isinstance(parsed["user_id"], int)
+    assert parsed["name"] in ["John", "Marc", "Michel"]
+
+
+def test_transformers_json_int_enum():
+    model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM"
+    model = models.transformers(model_name, device="cpu")
+    prompt = "Output some JSON "
+
+    rng = torch.Generator()
+    rng.manual_seed(0)
+
+    class Id(int, Enum):
+        one = 1
+        two = 2
+
+    class User(BaseModel):
+        user_id: Id
+
+    sequence = generate.json(model, User)(prompt, rng=rng)
+    parsed = json.loads(sequence)
+    assert isinstance(parsed["user_id"], int)
+    assert parsed["user_id"] in [1, 2]
+
+
+def test_transformers_json_array():
+    model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM"
+    model = models.transformers(model_name, device="cpu")
+    prompt = "Output some JSON "
+
+    class User(BaseModel):
+        user_id: int
+        value: List[float]
+
+    rng = torch.Generator()
+    rng.manual_seed(0)
+
+    sequence = generate.json(model, User)(prompt, rng=rng)
+    parsed = json.loads(sequence)
+    assert isinstance(parsed["user_id"], int)
+    assert isinstance(parsed["value"], list)
+    for value in parsed["value"]:
+        assert isinstance(value, float) or isinstance(value, int)
+
+
+def test_transformers_json_union():
+    model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM"
+    model = models.transformers(model_name, device="cpu")
+    prompt = "Output some JSON "
+
+    class Spam(BaseModel):
+        foo: int
+        bar: Union[constr(max_length=10), float]
+
+    rng = torch.Generator()
+    rng.manual_seed(4)
+
+    sequence = generate.json(model, Spam, max_tokens=100)(prompt, rng=rng)
+    parsed = json.loads(sequence)
+    assert (
+        isinstance(parsed["bar"], int)
+        or isinstance(parsed["bar"], float)
+        or isinstance(parsed["bar"], str)
+    )

From df993c416625808f2914dab1cce49d9392b03e90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 14 Aug 2023 20:09:47 +0200
Subject: [PATCH 193/734] Update the README

---
 README.md | 292 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 177 insertions(+), 115 deletions(-)

diff --git a/README.md b/README.md
index 72ab8829..f23d89fe 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@ Fast and reliable neural text generation.
 
 [Install](#installation) •
-[Prompting primitives](#prompting) •
 [Guided generation](#guided-generation) •
+[Prompting primitives](#prompting) •
 [Examples](#examples) •
 [Stay tuned](#stay-tuned-for)
 
@@ -19,7 +19,9 @@ more flexible replacement for the `generate` method in the
 [transformers](https://github.com/huggingface/transformers) library.
 
 **Outlines** 〰 helps developers *guide text generation* to build robust
-interfaces with external systems.
+interfaces with external systems. It provides generation methods that
+guarantee that the output will match a regular expression or follow
+a JSON schema.
 
 **Outlines** 〰 provides *robust prompting primitives* that separate the
 prompting from the execution logic and lead to simple implementations of few-shot
@@ -37,16 +39,17 @@ via the next-token logits. It can be used with API-based models as well.
- [x] 🖍️Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/) - [x] 🚄 Guided generation, including multiple choice, type constraints and dynamic stopping -- [x] ⚡ Fast regex-guided generation +- [x] ⚡ Fast [regex-guided generation](#efficient-regex-guided-generation) +- [x] 🔥 Fast [JSON generation](#efficient-json-generation-following-a-pydantic-model) following a JSON schema or a Pydantic model - [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions - [x] 💾 Caching of generations - [x] 🤗 Integration with HuggingFace's `transformers` models +Outlines 〰 has new releases and features coming every week! Make sure to ⭐ star and 👀 watch this repository to stay up to date. ## Stay tuned for - Context-Free Grammar guided generation ([#178](https://github.com/normal-computing/outlines/pull/178)); -- Generate JSON with a defined structure ([#140](https://github.com/normal-computing/outlines/pull/140)) - Prompt-token alignment so you don't have to think about tokenization details ([#201](https://github.com/normal-computing/outlines/pull/201)) - An infilling DSL ([#182](https://github.com/normal-computing/outlines/issues/182)) @@ -61,6 +64,172 @@ You can follow [@NormalComputing](https://twitter.com/NormalComputing), [@remilo pip install outlines ``` + +## Guided generation + +The first step towards reliability of systems that include large language models +is to ensure that there is a well-defined interface between their output and +user-defined code. **Outlines** provides ways to control the generation of +language models to make their output more predictable. + +### Early stopping + +You can stop the generation after a given sequence has been found: + +``` python +import outlines.text.generate as generate +import outlines.models as models + +model = models.transformers("gpt2") +answer = generate.continuation(model, stop=["."])("Tell me a one-sentence joke.") +``` + +### Multiple choices + +You can reduce the completion to a choice between multiple possibilities: + +``` python +import outlines.text.generate as generate +import outlines.models as models + +model = models.transformers("gpt2") + +prompt = labelling("Just awesome", examples) +answer = generate.choice(model, ["Positive", "Negative"])(prompt) +``` + +### Type constraint + +You can instruct the model to only return integers or floats: + + +``` python +import outlines.text.generate as generate +import outlines.models as models + +model = models.transformers("gpt2") + +prompt = "1+1=" +answer = generate.integer(model)(prompt) + +prompt = "sqrt(2)=" +answer = generate.float(model)(prompt) +``` + +### Efficient regex-guided generation + +Outlines also comes with fast regex-guided generation. In fact, the `choice`, +`integer` and `float` functions above all use regex-guided generation under the +hood: + +``` python +import outlines.models as models +import outlines.text.generate as generate + + +model = models.transformers("gpt2-medium") + +prompt = "Is 1+1=2? " +unguided = generate.continuation(model, max_tokens=30)(prompt) +guided = generate.regex(model, r"\s*([Yy]es|[Nn]o|[Nn]ever|[Aa]lways)", max_tokens=30)( + prompt +) + +print(unguided) +# Is 1+1=2? +# +# This is probably the most perplexing question. +# As I said in one of my articles describing how +# I call 2 and 1, there isn't + +print(guided) +# Is 1+1=2? 
Always
+```
+
+``` python
+import outlines.models as models
+import outlines.text.generate as generate
+
+
+model = models.transformers("gpt2-medium")
+
+prompt = "What is the IP address of the Google DNS servers? "
+unguided = generate.continuation(model, max_tokens=30)(prompt)
+guided = generate.regex(
+    model,
+    r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
+    max_tokens=30,
+)(prompt)
+
+print(unguided)
+# What is the IP address of the Google DNS servers?
+#
+# Passive DNS servers are at DNS servers that are private.
+# In other words, both IP servers are private. The database
+# does not contain Chelsea Manning
+
+print(guided)
+# What is the IP address of the Google DNS servers?
+# 2.2.6.1
+```
+
+Unlike other libraries, regex-guided generation in Outlines is almost as fast
+as non-guided generation.
+
+### Efficient JSON generation following a Pydantic model
+
+Outlines 〰 allows you to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/):
+
+```python
+from typing import List
+from enum import Enum
+from pydantic import BaseModel, constr
+
+import outlines.models as models
+import outlines.text.generate as generate
+
+
+class Weapon(str, Enum):
+    sword = "sword"
+    axe = "axe"
+    mace = "mace"
+    spear = "spear"
+    bow = "bow"
+    crossbow = "crossbow"
+
+
+class Armor(str, Enum):
+    leather = "leather"
+    chainmail = "chainmail"
+    plate = "plate"
+
+
+class Character(BaseModel):
+    name: constr(max_length=10)
+    age: int
+    armor: Armor
+    weapon: Weapon
+    strength: int
+
+
+model = models.transformers("gpt2")
+sequence = generate.json(model, Character)("Give me a character description")
+print(sequence)
+# {
+#   "name": "ranbelt",
+#   "age": 26,
+#   "armor": "chainmail",
+#   "weapon": "bow",
+#   "strength": 5
+# }
+
+parsed = Character.model_validate_json(sequence)
+print(parsed)
+# name='ranbelt' age=26 armor=<Armor.chainmail: 'chainmail'> weapon=<Weapon.bow: 'bow'> strength=5
+```
+
+The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/normal-computing/outlines/issues/215), but everything else should work.
+
 ## Prompting
 
 Writing prompts by concatenating strings in pure Python quickly becomes
@@ -184,117 +353,6 @@ With these prompting primitives **Outlines** makes building agents like
 Agent](https://huggingface.co/docs/transformers/transformers_agents) easier by
 removing boilerplate prompting code.
 
-## Guided generation
-
-The first step towards reliability of systems that include large language models
-is to ensure that there is a well-defined interface between their output and
-user-defined code. **Outlines** provides ways to control the generation of
-language models to make their output more predictable.
- -### Early stopping - -You can stop the generation after a given sequence has been found: - -``` python -import outlines.text.generate as generate -import outlines.models as models - -model = models.transformers("gpt2") -answer = generate.continuation(model, stop=["."])("Tell me a one-sentence joke.") -``` - -### Multiple choices - -You can reduce the completion to a choice between multiple possibilities: - -``` python -import outlines.text.generate as generate -import outlines.models as models - -model = models.transformers("gpt2") - -prompt = labelling("Just awesome", examples) -answer = generate.choice(model, ["Positive", "Negative"])(prompt) -``` - -### Type constraint - -You can instruct the model to only return integers or floats: - - -``` python -import outlines.text.generate as generate -import outlines.models as models - -model = models.transformers("gpt2") - -prompt = "1+1=" -answer = generate.integer(model)(prompt) - -prompt = "sqrt(2)=" -answer = generate.float(model)(prompt) -``` - -### Efficient regex-guided generation - -Outlines also comes with fast regex-guided generation. In fact, the `choice`, -`integer` and `float` functions above all use regex-guided generation under the -hood: - -``` python -import outlines.models as models -import outlines.text.generate as generate - - -model = models.transformers("gpt2-medium") - -prompt = "Is 1+1=2? " -unguided = generate.continuation(model, max_tokens=30)(prompt) -guided = generate.regex(model, r"\s*([Yy]es|[Nn]o|[Nn]ever|[Aa]lways)", max_tokens=30)( - prompt -) - -print(unguided) -# Is 1+1=2? -# -# This is probably the most perplexing question. -# As I said in one of my articles describing how -# I call 2 and 1, there isn't - -print(guided) -# Is 1+1=2? Always -``` - -``` python -import outlines.models as models -import outlines.text.generate as generate - - -model = models.transformers("gpt2-medium") - -prompt = "What is the IP address of the Google DNS servers? " -unguided = generate.continuation(model, max_tokens=30)(prompt) -guided = generate.regex( - model, - r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)", - max_tokens=30, -)(prompt) - -print(unguided) -# What is the IP address of the Google DNS servers? -# -# Passive DNS servers are at DNS servers that are private. -# In other words, both IP servers are private. The database -# does not contain Chelsea Manning - -print(guided) -# What is the IP address of the Google DNS servers? -# 2.2.6.1 -``` - -Unlike other libraries, regex-guided generation in Outlines is almost as fast -as non-guided generation. - ## Contributing ### What contributions? @@ -333,3 +391,7 @@ Do not hesitate to open a draft PR before your contribution is ready, especially year={2023} } ``` + +## License + +Outlines is open-source and licensed under the [Apache License 2.0](LICENSE). 
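The README's `Character` example above relies on enums and a length-constrained string; union and optional fields work the same way. A small sketch, assuming the same `generate.json` API (model name and prompt are illustrative, and the fields are handled through the schema's `anyOf` branches as exercised by the tests added in patch 192):

```python
# Sketch only: a schema mixing a union field and an optional field.
from typing import Optional, Union

from pydantic import BaseModel

import outlines.models as models
import outlines.text.generate as generate


class Item(BaseModel):
    price: Union[int, float]  # compiled to an `anyOf` branch in the schema
    note: Optional[str]       # may be generated as a JSON `null`


model = models.transformers("gpt2")
sequence = generate.json(model, Item, max_tokens=100)("Describe an item as JSON ")
```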
From 8cdd72c1cb132d9c940051f5f9f76cc8a877ad28 Mon Sep 17 00:00:00 2001 From: Luke Stanley Date: Tue, 15 Aug 2023 02:00:39 +0100 Subject: [PATCH 194/734] Fix ReAct example link path --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f23d89fe..5d88956c 100644 --- a/README.md +++ b/README.md @@ -374,7 +374,7 @@ Do not hesitate to open a draft PR before your contribution is ready, especially - [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) - [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) -- [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) +- [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/react.py) - [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py) - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) - [Uncertainty](https://github.com/normal-computing/outlines/blob/main/examples/sampling.ipynb) From a9496bd8188f5add9d5feabcabc3b28e107542bf Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Thu, 17 Aug 2023 00:10:54 +0900 Subject: [PATCH 195/734] Fix typo in README.md curently -> currently --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5d88956c..b23eeba6 100644 --- a/README.md +++ b/README.md @@ -357,7 +357,7 @@ removing boilerplate prompting code. ### What contributions? -We curently only accept bug fixes and documentation contributions. If you have a +We currently only accept bug fixes and documentation contributions. If you have a feature request, please start a new [discussion](https://github.com/normal-computing/outlines/discussions). The issue tracker is only intended for actionable items. From 054e6c61ce5941b43d14b123a7c115e8e62828bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 16 Aug 2023 22:44:32 +0200 Subject: [PATCH 196/734] Fix Python version in `pyproject.toml` --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index dd560892..e0873ac7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta" name = "outlines" authors= [{name = "Normal Computing", email = "support@normalcomputing.com"}] description = "Probabilistic Generative Model Programming" -requires-python = ">=3.7" +requires-python = ">=3.10" keywords=[ "normal computing", "machine learning", From 7700d4d2522d1c45950f8c59f1d581d5debf7784 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 16 Aug 2023 22:52:11 +0200 Subject: [PATCH 197/734] Raise informative exception when `transformers` missing --- README.md | 4 ++++ outlines/models/transformers.py | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b23eeba6..2037573c 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,10 @@ You can follow [@NormalComputing](https://twitter.com/NormalComputing), [@remilo pip install outlines ``` +The dependencies needed to use models are not installed by default. 
You will need to run: + +- `pip install openai` to be able to use OpenAI [models](https://platform.openai.com/docs/api-reference). +- `pip install transformers` to be able to use HuggingFace `transformers` [models](https://huggingface.co/models?pipeline_tag=text-generation). ## Guided generation diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 42cef7a3..1ae61603 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -87,7 +87,12 @@ def convert_token_to_string(self, token: str) -> str: def transformers(model_name: str, device: Optional[str] = None, **model_kwargs): - from transformers import AutoModelForCausalLM + try: + from transformers import AutoModelForCausalLM + except ImportError: + raise ImportError( + "The `transformers` library needs to be installed in order to use `transformers` models." + ) model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs) tokenizer = TransformersTokenizer(model_name) From 4c2eb4e68711d44ce9cfc525ff4991abbf1efc3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 16 Aug 2023 22:58:22 +0200 Subject: [PATCH 198/734] Fix unclear example in README --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2037573c..b6dfe2bf 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,11 @@ import outlines.models as models model = models.transformers("gpt2") -prompt = labelling("Just awesome", examples) +prompt = """You are a sentiment-labelling assistant. +Is the following review positive or negative? + +Review: This restaurant is just awesome! +""" answer = generate.choice(model, ["Positive", "Negative"])(prompt) ``` From 91f95d8d818b21806075a0394a65aba406e42c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 17 Aug 2023 00:35:34 +0200 Subject: [PATCH 199/734] Explicitly list the packages in modules --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index e0873ac7..788e64e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,6 +62,9 @@ repository = "https://github.com/normal-computing/outlines" file="README.md" content-type = "text/markdown" +[tool.setuptools] +packages = ["outlines"] + [tool.setuptools_scm] write_to = "outlines/_version.py" From ef90f557be62fad19337eeb72db2b6ec837ac386 Mon Sep 17 00:00:00 2001 From: John <51100181+jwmza@users.noreply.github.com> Date: Sun, 20 Aug 2023 17:47:03 -0400 Subject: [PATCH 200/734] Fix typo in models index --- outlines/models/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 53653f0e..e26df934 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -1,4 +1,4 @@ -"""Module that contains all the models integated in outlines. +"""Module that contains all the models integrated in outlines. We group the models in submodules by provider instead of theme (completion, chat completion, diffusers, etc.) 
and use routing functions everywhere else in the From 798c9c7f765d9d03c885e3e400b09d8331d53e40 Mon Sep 17 00:00:00 2001 From: John <51100181+jwmza@users.noreply.github.com> Date: Sun, 20 Aug 2023 17:53:09 -0400 Subject: [PATCH 201/734] Use "Hugging Face" instead of "HugginFace" in docs/comments --- README.md | 4 ++-- docs/source/index.rst | 4 ++-- docs/source/installation.rst | 6 +++--- docs/source/overview.rst | 2 +- docs/source/reference/multimodel.rst | 4 ++-- outlines/models/hf_diffusers.py | 4 ++-- outlines/models/hf_transformers.py | 6 +++--- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index b6dfe2bf..0318461c 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ via the next-token logits. It can be used with API-based models as well. - [x] 🔥 Fast [JSON generation](#efficient-json-generation-following-a-pydantic-model) following a JSON schema or a Pydantic model - [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions - [x] 💾 Caching of generations -- [x] 🤗 Integration with HuggingFace's `transformers` models +- [x] 🤗 Integration with Hugging Face's `transformers` models Outlines 〰 has new releases and features coming every week! Make sure to ⭐ star and 👀 watch this repository to stay up to date. @@ -67,7 +67,7 @@ pip install outlines The dependencies needed to use models are not installed by default. You will need to run: - `pip install openai` to be able to use OpenAI [models](https://platform.openai.com/docs/api-reference). -- `pip install transformers` to be able to use HuggingFace `transformers` [models](https://huggingface.co/models?pipeline_tag=text-generation). +- `pip install transformers` to be able to use Hugging Face `transformers` [models](https://huggingface.co/models?pipeline_tag=text-generation). ## Guided generation diff --git a/docs/source/index.rst b/docs/source/index.rst index b5e62dc7..b62d0392 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -83,12 +83,12 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: 📜 Features ----------- Simple and powerful prompting primitives based on the Jinja templating engine. - Integration with OpenAI and HuggingFace models + Integration with OpenAI and Hugging Face models - A powerful domain-specific language to write and render prompts; - Interleave completions with loops, conditionals, and custom Python functions; - OpenAI integration: language models, embeddings and Dall-E; -- HuggingFace integration: ``transformers`` and ``diffusers``; +- Hugging Face integration: ``transformers`` and ``diffusers``; - Caching; - Sampling multiple sequences; - Controlled generation, including multiple choice, type constraints and dynamic stopping. diff --git a/docs/source/installation.rst b/docs/source/installation.rst index e7d6d099..d2b417f7 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -24,10 +24,10 @@ To use OpenAI models you first have to run: You also need to set your API credentials by defining the ``OPENAI_API_KEY`` environment variable. -HuggingFace +Hugging Face ----------- -To use the integrations with HuggingFace's `transformers `_ and `diffusers `_ libraries you first need to run: +To use the integrations with Hugging Face's `transformers `_ and `diffusers `_ libraries you first need to run: .. code:: @@ -36,7 +36,7 @@ To use the integrations with HuggingFace's `transformers `_ versions of the models. 
Please refer to the `PyTorch documentation `_ for questions related to **GPU support**. + Hugging Face models are run locally. Outlines uses the `PyTorch `_ versions of the models. Please refer to the `PyTorch documentation `_ for questions related to **GPU support**. The integration is fairly basic for now, and if you have specific performance needs please `open an issue `_ diff --git a/docs/source/overview.rst b/docs/source/overview.rst index 3a084a38..d2d0416b 100644 --- a/docs/source/overview.rst +++ b/docs/source/overview.rst @@ -22,7 +22,7 @@ Here is a simple Outlines program that highlights some of its key features: - **Prompt management**. You can use functions with the ``@outlines.text.prompt`` decorator. "Prompt functions" use the `Jinja templating language `_ to render the prompt written in the docstring. We also added a few filters to help with common worflows, like building agents. Of course, for simple prompts, you can also use Python strings directly. -- **Generative model integration**. You can use text completion models from OpenAI and HuggingFace, but models are not limited to text. +- **Generative model integration**. You can use text completion models from OpenAI and Hugging Face, but models are not limited to text. - **Controlled generation**. The ``stop_at`` keyword arguments allows to define when the generation should be stopped. Outlines includes more options to control the generation; these happen on a token basis, saving time and costs. - **Sampling**. Outlines exclusively generates sequences using sampling. You can generate many samples with one call. - **Batching**. Models can take a list of prompt as input and generate completions in parallel. diff --git a/docs/source/reference/multimodel.rst b/docs/source/reference/multimodel.rst index ab3453a9..6ebef3f0 100644 --- a/docs/source/reference/multimodel.rst +++ b/docs/source/reference/multimodel.rst @@ -24,10 +24,10 @@ It is also possible to use DALL-E to generate images: generate = models.image_generation.openai("dall-e") -HuggingFace +Hugging Face ----------- -Outlines can call models from HuggingFace's `transformers` and `diffusers` libraries. The models are then run locally. +Outlines can call models from Hugging Face's `transformers` and `diffusers` libraries. The models are then run locally. .. code:: diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py index 33f1c90f..e7ebb851 100644 --- a/outlines/models/hf_diffusers.py +++ b/outlines/models/hf_diffusers.py @@ -1,4 +1,4 @@ -"""Integration with HuggingFace's `diffusers` library.""" +"""Integration with Hugging Face's `diffusers` library.""" import functools from typing import List, Union @@ -14,7 +14,7 @@ def HuggingFaceDiffuser(model_name: str) -> PILImage: Parameters ---------- model_name: str - The name of the model as listed on HuggingFace's models page. + The name of the model as listed on Hugging Face's models page. """ diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index 8df842a6..aeceff24 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -1,4 +1,4 @@ -"""Integration with HuggingFace's `transformers` library.""" +"""Integration with Hugging Face's `transformers` library.""" import functools from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union @@ -21,7 +21,7 @@ def HuggingFaceCompletion( You should have the `torch` and `transformers` packages installed. 
First execution may take a while since the pre-trained weights will be downloaded. - Available models are listed on `HuggingFace's model page `_. + Available models are listed on `Hugging Face's model page `_. Note ---- @@ -33,7 +33,7 @@ def HuggingFaceCompletion( Parameters ---------- model_name: str - The name of the model as listed on HuggingFace's models page. + The name of the model as listed on Hugging Face's models page. max_tokens The maximum number of tokens to generate. temperature From 009609dc0ca10b1a84600f40f7b8dba9d0a4172b Mon Sep 17 00:00:00 2001 From: John <51100181+jwmza@users.noreply.github.com> Date: Sun, 20 Aug 2023 17:56:04 -0400 Subject: [PATCH 202/734] Fix `hf_transformers` typo --- outlines/models/hf_transformers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py index aeceff24..8cadc8ab 100644 --- a/outlines/models/hf_transformers.py +++ b/outlines/models/hf_transformers.py @@ -26,7 +26,7 @@ def HuggingFaceCompletion( Note ---- - To my knowledge `tranformers` does not simply allow to stop the generation + To my knowledge `transformers` does not simply allow to stop the generation after a given sequence has been generated. We will need to implement this manually for this integration to have the same features as `OpenAICompletion`. From 5a1ff6e5bdb883a0934b620dd7320ccfd8986e80 Mon Sep 17 00:00:00 2001 From: John <51100181+jwmza@users.noreply.github.com> Date: Sun, 20 Aug 2023 17:58:19 -0400 Subject: [PATCH 203/734] Fix `cosine_similarity` language mismatch --- outlines/vectors/retrieval.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/outlines/vectors/retrieval.py b/outlines/vectors/retrieval.py index aed2fbb4..ac060b61 100644 --- a/outlines/vectors/retrieval.py +++ b/outlines/vectors/retrieval.py @@ -7,7 +7,7 @@ def cosine_similarity( vectors: Sequence[np.ndarray], query: np.ndarray, k: int = 1 ) -> List[np.ndarray]: - """Use cosine similarity to retrieve the `top_n` closest vectors to the query. + """Use cosine similarity to retrieve the top `k` closest vectors to the query. Be mindful that Scipy computes the cosine distance, defined as one minus the cosine similarity. @@ -23,5 +23,5 @@ def cosine_similarity( """ similarities = [spatial.distance.cosine(v, query) for v in vectors] - top_n_indices = np.argsort(similarities)[:k] - return top_n_indices + top_k_indices = np.argsort(similarities)[:k] + return top_k_indices From ee941fd60749d88ea92746935462edc253d53cd6 Mon Sep 17 00:00:00 2001 From: Martin Krasser Date: Sun, 20 Aug 2023 06:22:57 +0200 Subject: [PATCH 204/734] Fix non-matching function definition and call Rename `agent` function to `my_commands`. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0318461c..e6d964e2 100644 --- a/README.md +++ b/README.md @@ -304,7 +304,7 @@ def wikipedia_search(query: str): @text.prompt -def agent(tools: List[Callable]): +def my_commands(tools: List[Callable]): """AVAILABLE COMMANDS: {% for tool in tools %} From 172235a5ff15ba61a652ef859ea796bedfb12469 Mon Sep 17 00:00:00 2001 From: Alvaro Bartolome Date: Sat, 2 Sep 2023 01:41:14 +0200 Subject: [PATCH 205/734] Remove unused `from typing import List` in `README.md` (#256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hi to whoever is reading this! 🤗 ## What's in this PR? This is a simple PR to just remove an import in the `README.md` which is not needed. Thanks! 
--- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index e6d964e2..f7f240d8 100644 --- a/README.md +++ b/README.md @@ -189,7 +189,6 @@ as non-guided generation. Outlines 〰 allows to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/): ```python -from typing import List from enum import Enum from pydantic import BaseModel, constr From 8b324ead75a4c2e1db4b19061b890ffc81f9296c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=E2=80=94Rysana=2Ecom?= <51100181+jrysana@users.noreply.github.com> Date: Fri, 1 Sep 2023 21:55:40 -0400 Subject: [PATCH 206/734] Fix some typos/language in `regex` and `sequence` (#257) Please correct me if I'm wrong about any of the below! But I noticed a few more things that seem mismatched in comments. - [x] Fixed a typo in `regex/create_proposal` implying that this method is only for integer generation, whereas it seems clear that this method is used for all schemas. - [x] Fixed some typos where argument names were mismatched or nonexistent in the actual function code. - [x] Fixed some typos for overall language (e.g. "used to compute" instead of "used to computes") and to clarify potentially confusing comments. - [x] Fixed a broken link in `README.md` --- README.md | 2 +- outlines/text/generate/continuation.py | 2 +- outlines/text/generate/regex.py | 30 +++++++++++--------------- outlines/text/generate/sequence.py | 6 +++--- 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index f7f240d8..d91dc135 100644 --- a/README.md +++ b/README.md @@ -382,7 +382,7 @@ Do not hesitate to open a draft PR before your contribution is ready, especially - [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) - [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) - [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/react.py) -- [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/dust/math-generate-code.py) +- [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/math_generate_code.py) - [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) - [Uncertainty](https://github.com/normal-computing/outlines/blob/main/examples/sampling.ipynb) - [Simulation-based inference](https://github.com/normal-computing/outlines/blob/main/examples/simulation_based_inference.ipynb) diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index b6ff3efc..b45fb0e1 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -96,7 +96,7 @@ def continuation( Parameters ---------- model - The model to use to computes the next-token logits. + The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. 
stop diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 74137607..76a0a4c3 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -74,7 +74,7 @@ def partial_match_filter(string, end_idx, state_seq): def create_proposal( self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor ) -> torch.DoubleTensor: - """Modify the next-token logits so that only integers can be generated. + """Modify the next-token logits so that only valid tokens can be generated. Parameters ---------- @@ -97,16 +97,16 @@ def create_proposal( generated_token_ids, self.pstates, ): - # Get the tokens we haven't already processed + # Get the tokens we haven't already processed, readable_tokens = token_seq[last_token_idx:] - # excluding any EOS tokens + # excluding any EOS tokens. not_eos_mask = [ tk != self.model.tokenizer.eos_token_id for tk in readable_tokens ] readable_tokens = readable_tokens[not_eos_mask] if len(readable_tokens) > 0: # If we previously ended with an EOS, we shouldn't be - # getting/sampling any more non-EOS tokens + # getting/sampling any more non-EOS tokens. assert last_fsm_state > -1 sequence = self.model.tokenizer.decode(readable_tokens) @@ -153,9 +153,9 @@ def regex(model, regex_string: str, max_tokens: Optional[int] = None): Parameters ---------- model - The model to use to computes the next-token logits. - regex - The regular expression generated expressions must match. + The language model to use to compute the next-token logits. + regex_string + The regular expression that generated expressions must match. max_tokens The maximum number of tokens to generate. @@ -173,9 +173,7 @@ def integer(model, max_tokens: Optional[int] = None): Parameters ---------- model - The model to use to computes the next-token logits. - regex - The regular expression generated expressions must match. + The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. @@ -193,9 +191,7 @@ def float(model, max_tokens: Optional[int] = None): Parameters ---------- model - The model to use to computes the next-token logits. - regex - The regular expression generated expressions must match. + The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. @@ -210,16 +206,16 @@ def choice(model, choices: List[str], max_tokens: Optional[int] = None): def json(model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None): - """Generate a text sequence that follows a JSON schema. + """Generate a text sequence that follows a JSON schema or Pydantic model. Parameters --------- model - The model to use to computes the next-token logits. + The language model to use to compute the next-token logits. schema - The JSON schema, or Pydantic model, that guides the generation. + The JSON schema or Pydantic model that guides the generation. max_tokens - The maximum number of tokens to generate at each step. + The maximum number of tokens to generate. """ if isinstance(schema, type(BaseModel)): diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 594f107a..77edcfc0 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -57,7 +57,7 @@ def step( Parameters ---------- rng - NumPy random number Generator instance + NumPy random number Generator instance. num_prompt_tokens The number of tokens in the prompt. 
token_ids
@@ -82,10 +82,10 @@
         probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs)
         probs = torch.nn.functional.softmax(probs, dim=-1)
 
-        # Sample `samples`-many new tokens
+        # Sample `samples`-many new tokens.
         next_token_ids = vectorized_random_choice(rng, probs, samples)
 
-        # Add the missing `num_tokens` and `num_sample` dimensions
+        # Add the missing `num_tokens` and `num_sample` dimensions.
         next_token_ids = torch.unsqueeze(next_token_ids, -1)
         token_ids = torch.unsqueeze(token_ids, 0)

From ce0fad4a77a7d42ea4917ba22300f8d24e8adb1e Mon Sep 17 00:00:00 2001
From: Bram Vanroy <2779410+BramVanroy@users.noreply.github.com>
Date: Sat, 2 Sep 2023 12:23:47 +0200
Subject: [PATCH 207/734] Allow user to choose device for models

---
 outlines/models/transformers.py                    | 39 +++++++++++++++----
 pyproject.toml                                     |  1 +
 tests/models/test_transformers.py                  |  4 +-
 .../generate/test_integration_transfomers.py       |  2 +-
 4 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py
index 1ae61603..81649438 100644
--- a/outlines/models/transformers.py
+++ b/outlines/models/transformers.py
@@ -8,7 +8,6 @@
 if TYPE_CHECKING:
     from transformers import PreTrainedModel, PreTrainedTokenizer
 
-
 __all__ = ["transformers"]
 
 
@@ -19,10 +18,9 @@ def __init__(
         self,
         model: "PreTrainedModel",
         tokenizer: "PreTrainedTokenizer",
-        device: Optional[str] = None,
     ):
-        self.device = device if device is not None else "cpu"
-        self.model = model.to(self.device)
+        self.device = model.device
+        self.model = model
         self.tokenizer = tokenizer
 
     def __call__(
@@ -86,7 +84,33 @@ def convert_token_to_string(self, token: str) -> str:
         return string
 
 
-def transformers(model_name: str, device: Optional[str] = None, **model_kwargs):
+def transformers(
+    model_name: str,
+    device: Optional[str] = None,
+    model_kwargs: dict = {},
+    tokenizer_kwargs: dict = {},
+):
+    """Instantiate a model from the `transformers` library and its tokenizer.
+
+    Parameters
+    ----------
+    model_name
+        The name of the model as listed on Hugging Face's model page.
+    device
+        The device(s) on which the model should be loaded. This overrides
+        the value passed for `device_map` in `model_kwargs`.
+    model_kwargs
+        A dictionary that contains the keyword arguments to pass to the
+        `from_pretrained` method when loading the model.
+    tokenizer_kwargs
+        A dictionary that contains the keyword arguments to pass to the
+        `from_pretrained` method when loading the tokenizer.
+
+    Returns
+    -------
+    A `Transformers` model instance.
+
+    """
     try:
         from transformers import AutoModelForCausalLM
     except ImportError:
         raise ImportError(
             "The `transformers` library needs to be installed in order to use `transformers` models."
) + model_kwargs["device_map"] = device model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs) - tokenizer = TransformersTokenizer(model_name) + tokenizer = TransformersTokenizer(model_name, **tokenizer_kwargs) - return Transformers(model, tokenizer, device) + return Transformers(model, tokenizer) diff --git a/pyproject.toml b/pyproject.toml index 788e64e4..f0eb4b42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dependencies = [ "scipy", "tenacity", "torch", + "accelerate", ] dynamic = ["version"] diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index b5e9ae44..100ec33a 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -39,12 +39,12 @@ def test_tokenizer(): def test_model(): - with pytest.raises(RuntimeError, match="Expected one of cpu, cuda"): + with pytest.raises(ValueError, match="When passing device_map as a string"): transformers(TEST_MODEL, device="non_existent") model = transformers(TEST_MODEL, device="cpu") assert isinstance(model.tokenizer, TransformersTokenizer) - assert model.device == "cpu" + assert model.device.type == "cpu" input_ids = torch.tensor([[0, 1, 2]]) logits = model(input_ids, torch.ones_like(input_ids)) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 70bdbb0b..99344a07 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -114,7 +114,7 @@ def test_transformers_integration_choice(): def test_transformers_integration_with_pad_token(): model_name = "hf-internal-testing/tiny-random-XLMRobertaXLForCausalLM" - model = models.transformers(model_name, device="cpu") + model = models.transformers(model_name, device="meta") assert model.tokenizer.pad_token_id == 1 assert model.tokenizer.pad_token == "" From 23299e07923548ce14a0e5d8f6b0213b252ef8d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 17 Aug 2023 00:32:35 +0200 Subject: [PATCH 208/734] Escape special characters in JSON structure --- outlines/text/json_schema.py | 9 +++++---- tests/text/test_json_schema.py | 17 +++++++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 8b96c9dc..ae917a32 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -1,5 +1,6 @@ import itertools import json +import re from typing import Dict STRING = r'".*"' @@ -192,7 +193,7 @@ def match_step_to_regex(step): Parameters ---------- step: - A string that represents the schema's structure, or a dictionnary + A string that represents the schema's structure, or a dictionary that represents a field in the schema. 
Returns @@ -203,13 +204,13 @@ def match_step_to_regex(step): """ match step: case str() as step: - return step + return re.escape(step) case {"enum": choices, "type": "string"}: - choices = [f'"{choice}"' for choice in choices] + choices = [f'"{re.escape(choice)}"' for choice in choices] return f"({'|'.join(choices)})" case {"enum": choices}: - choices = [str(choice) for choice in choices] + choices = [re.escape(str(choice)) for choice in choices] return f"({'|'.join(choices)})" case {"type": "array", "items": items}: diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 4e3018e0..1d46cdf6 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -299,6 +299,11 @@ def test_match_number(pattern, does_match): '("Marc"|"Jean")', [('"Marc"', True), ('"Jean"', True), ('"John"', False)], ), + ( + {"title": "Foo", "enum": [".*", r"\s*"], "type": "string"}, + r'("\.\*"|"\\s\*")', + [('".*"', True), (r'"\s*"', True), (r'"\.\*"', False)], + ), ( {"title": "Foo", "enum": [0, 1], "type": "integer"}, "(0|1)", @@ -310,7 +315,7 @@ def test_match_number(pattern, does_match): "type": "object", "properties": {"count": {"title": "Count", "type": "integer"}}, }, - '{\n "count": ' + INTEGER + "\n}", + '\\{\\\n\\ \\ "count":\\ ' + INTEGER + "\\\n\\}", [('{\n "count": 100\n}', True)], ), ( @@ -339,16 +344,20 @@ def test_match_number(pattern, does_match): } }, }, - '{\n "fuzz": {\n "spam": ' + INTEGER + "\n }\n}", + '\\{\\\n\\ \\ "fuzz":\\ \\{\\\n\\ \\ \\ \\ "spam":\\ ' + + INTEGER + + "\\\n\\ \\ \\}\\\n\\}", [('{\n "fuzz": {\n "spam": 100\n }\n}', True)], ), ], ) def test_match(step, regex, examples): - assert match_step_to_regex(step) == regex + test_regex = match_step_to_regex(step) + + assert test_regex == regex for string, does_match in examples: - match = re.fullmatch(regex, string) + match = re.fullmatch(test_regex, string) if does_match: assert match[0] == string assert match.span() == (0, len(string)) From 3bf5bd0f5f0a739eef6edd308e980e437f0d972c Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sat, 9 Sep 2023 20:23:12 -0500 Subject: [PATCH 209/734] Fix unescaped string case in JSON string regex --- outlines/text/json_schema.py | 7 ++++--- tests/text/test_json_schema.py | 10 ++++++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index ae917a32..c076a2e4 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -3,7 +3,8 @@ import re from typing import Dict -STRING = r'".*"' +STRING_INNER = r'(?:[^"\\]|\\.)' +STRING = f'"{STRING_INNER}*"' INTEGER = r"(0|[1-9][0-9]*)" NUMBER = rf"(-)?({INTEGER})(\.[0-9]+)?([eE][+-][0-9]+)?" 
BOOLEAN = r"(true|false)" @@ -225,9 +226,9 @@ def match_step_to_regex(step): return regex_str case {"type": "string", "maxLength": max_length}: - return f'".{{,{max_length}}}"' + return f'"{STRING_INNER}{{,{max_length}}}"' case {"type": "string", "minLength": min_length}: - return f'".{{{min_length},}}"' + return f'"{STRING_INNER}{{{min_length},}}"' case {"type": field_type}: return type_to_regex[field_type] diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 1d46cdf6..f8814aeb 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -12,6 +12,7 @@ NULL, NUMBER, STRING, + STRING_INNER, build_schedule_from_schema, match_step_to_regex, ) @@ -258,13 +259,13 @@ def test_match_number(pattern, does_match): ), ( {"title": "Foo", "type": "string", "maxLength": 3}, - '".{,3}"', - [('"ab"', True), ('"abcd"', False)], + f'"{STRING_INNER}{{,3}}"', + [('"ab"', True), ('"a""', False), ('"abcd"', False)], ), ( {"title": "Foo", "type": "string", "minLength": 3}, - '".{3,}"', - [('"ab"', False), ('"abcd"', True)], + f'"{STRING_INNER}{{3,}}"', + [('"ab"', False), ('"abcd"', True), ('"abc""', False)], ), ( {"title": "Foo", "type": "boolean"}, @@ -290,6 +291,7 @@ def test_match_number(pattern, does_match): f"({STRING}|{NUMBER})", [ ('"string"', True), + ('"st"ring"', False), ("1000", True), ("true", False), ], From e83cd73ed654ed7706905629d7acbc664110f54b Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Mon, 11 Sep 2023 15:51:32 -0500 Subject: [PATCH 210/734] Reset FSM states in Regex after completion --- outlines/text/generate/regex.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 76a0a4c3..15e28152 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -146,6 +146,10 @@ def create_proposal( return logits + mask + def postprocess_completions(self, completions: List[str]) -> List[str]: + self.pstates.clear() + return super().postprocess_completions(completions) + def regex(model, regex_string: str, max_tokens: Optional[int] = None): """Generate text sequences that match the input regex. 
From cbc7c7f87ffa9a617274003b2777c96d4102d9d0 Mon Sep 17 00:00:00 2001
From: mondaychen
Date: Tue, 15 Aug 2023 12:29:49 -0400
Subject: [PATCH 211/734] Use random seeds in Sequence

---
 README.md                          | 34 ++++++++++++++++++++++++------
 outlines/text/generate/sequence.py |  1 +
 2 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index d91dc135..3c90b75b 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,8 @@ from pydantic import BaseModel, constr
 import outlines.models as models
 import outlines.text.generate as generate
 
+import torch
+
 
 class Weapon(str, Enum):
     sword = "sword"
@@ -219,20 +221,38 @@ class Character(BaseModel):
     strength: int
 
 
-model = models.transformers("gpt2")
-sequence = generate.json(model, Character)("Give me a character description")
+model = models.transformers("gpt2", device="cuda")
+
+# Construct guided sequence generator
+generator = generate.json(model, Character, max_tokens=100)
+
+# Draw a sample
+rng = torch.Generator(device="cuda")
+rng.manual_seed(789001)
+
+sequence = generator("Give me a character description", rng=rng)
+print(sequence)
+# {
+#   "name": "clerame",
+#   "age": 7,
+#   "armor": "plate",
+#   "weapon": "mace",
+#   "strength": 4171
+# }
+
+sequence = generator("Give me an interesting character description", rng=rng)
 print(sequence)
 # {
-#   "name": "ranbelt",
-#   "age": 26,
+#   "name": "piggyback",
+#   "age": 23,
 #   "armor": "chainmail",
-#   "weapon": "bow",
-#   "strength": 5
+#   "weapon": "sword",
+#   "strength": 0
 # }
 
 parsed = Character.model_validate_json(sequence)
 print(parsed)
-# name='ranbelt' age=26 armor=<Armor.chainmail: 'chainmail'> weapon=<Weapon.bow: 'bow'> strength=5
+# name='piggyback' age=23 armor=<Armor.chainmail: 'chainmail'> weapon=<Weapon.sword: 'sword'> strength=0
 ```
 
 The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/normal-computing/outlines/issues/215), but everything else should work.
diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py
index 77edcfc0..699f12c5 100644
--- a/outlines/text/generate/sequence.py
+++ b/outlines/text/generate/sequence.py
@@ -199,6 +199,7 @@ def __call__(
 
         if rng is None:
             rng = torch.Generator(device=self.device)
+            rng.seed()
 
         num_prompt_tokens = token_ids.shape[-1]

From 93b32e73fd950da8b1dbdca7313093d3530f46ac Mon Sep 17 00:00:00 2001
From: "Brandon T.
Willard" Date: Wed, 26 Jul 2023 18:39:07 -0500 Subject: [PATCH 212/734] Use custom Lark objects --- examples/parsing.py | 12 +- outlines/text/parsing.py | 355 ++++++++++++++++++++++++++----------- tests/text/test_parsing.py | 34 ++-- 3 files changed, 278 insertions(+), 123 deletions(-) diff --git a/examples/parsing.py b/examples/parsing.py index f3c78988..ee17d3d9 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -2,9 +2,9 @@ import math import time import urllib.request +from copy import copy import torch -from lark import Lark from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken from transformers import ( @@ -15,7 +15,7 @@ set_seed, ) -from outlines.text.parsing import PartialPythonIndenter, copy_parser_state, parse_to_end +from outlines.text.parsing import PartialLark, PartialPythonIndenter, parse_to_end revision = None checkpoint = "Salesforce/codegen-350M-mono" @@ -34,12 +34,12 @@ with open("sql_grammar.lark", "w") as f: f.write(sql_grammar) -sqlparser = Lark.open( +sqlparser = PartialLark.open( "sql_grammar.lark", parser="lalr", ) -pyparser = Lark.open_from_package( +pyparser = PartialLark.open_from_package( "lark", "python.lark", ["grammars"], @@ -54,7 +54,7 @@ class ParserLogitsProcessor(LogitsProcessor): def __init__(self, parser): ip = parser.parse_interactive("") - self.parser_state = copy_parser_state(ip.parser_state) + self.parser_state = ip.parser_state self.states_stack = [self.parser_state] self.token_seq = None self.token_idx = 0 @@ -88,7 +88,7 @@ def __call__( # those should dramatically reduce the amount of work done here. t0 = time.perf_counter() for test_token, token_id in tokenizer.vocab.items(): - ps = copy_parser_state(self.parser_state) + ps = copy(self.parser_state) ls = ps.lexer.state ls.text = self.token_seq + test_token diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 2daff0ab..80732d4a 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,22 +1,13 @@ from collections import ChainMap, defaultdict -from copy import copy -from typing import ( - TYPE_CHECKING, - Any, - Callable, - DefaultDict, - Dict, - Iterable, - Optional, - Set, - Tuple, -) +from copy import copy, deepcopy +from typing import Any, Callable, DefaultDict, Dict, Iterable, Optional, Set, Tuple import interegular import regex from interegular.fsm import FSM, anything_else from interegular.patterns import Unsupported from lark import Lark, Token +from lark.common import LexerConf, ParserConf from lark.exceptions import ( LexError, UnexpectedCharacters, @@ -24,16 +15,25 @@ UnexpectedToken, ) from lark.indenter import PythonIndenter -from lark.lexer import BasicLexer, ContextualLexer, LexerState, Scanner -from lark.parsers.lalr_analysis import Shift +from lark.lexer import ( + BasicLexer, + CallChain, + ContextualLexer, + LexerState, + LexerThread, + Scanner, + _create_unless, +) +from lark.parser_frontends import ( + ParsingFrontend, + PostLexConnector, + _validate_frontend_args, +) +from lark.parsers.lalr_analysis import IntParseTable, LALR_Analyzer, ParseTable, Shift from lark.parsers.lalr_interactive_parser import InteractiveParser -from lark.parsers.lalr_parser import ParseConf, ParserState +from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser from lark.utils import get_regexp_width -if TYPE_CHECKING: - from lark.lexer import LexerThread - - PartialParseState = Tuple[str, int] @@ -41,15 +41,185 @@ class PartialTokenEOF(UnexpectedEOF): pass +class 
PartialParserConf(ParserConf): + __serialize_fields__ = "rules", "start", "parser_type", "deterministic" + + def __init__(self, rules, callbacks, start, deterministic): + super().__init__(rules, callbacks, start) + self.deterministic = deterministic + + +class PartialLark(Lark): + __serialize_fields__ = "parser", "rules", "options", "deterministic" + + def __init__(self, grammar, **options): + # TODO: Could've extended `LarkOptions`, but all these extensions are + # already way too much (and brittle). This library really needs a + # complete refactoring. + self.deterministic = options.pop("deterministic", False) + options["regex"] = True + super().__init__(grammar, **options) + assert self.options.parser == "lalr" + + def _build_lexer(self, dont_ignore: bool = False) -> "PartialBasicLexer": + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + + return PartialBasicLexer(lexer_conf) + + def _build_parser(self) -> "PartialParsingFrontend": + self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) + parser_conf = PartialParserConf( + self.rules, self._callbacks, self.options.start, self.deterministic + ) + + # This is `_construct_parsing_frontend` expanded/inlined + parser_type = self.options.parser + lexer_type = self.options.lexer + lexer_conf = self.lexer_conf + + assert isinstance(lexer_conf, LexerConf) + assert isinstance(parser_conf, ParserConf) + parser_conf.parser_type = parser_type + self.lexer_conf.lexer_type = lexer_type + return PartialParsingFrontend(lexer_conf, parser_conf, self.options) + + def __repr__(self): + return "{}(open({!r}), parser={!r}, lexer={!r}, ...)".format( + type(self).__name__, + self.source_path, + self.options.parser, + self.options.lexer, + ) + + +class PartialLexerThread(LexerThread): + def __copy__(self): + return type(self)(copy(self.lexer), copy(self.state)) + + def __repr__(self): + return f"{type(self).__name__}(lexer={self.lexer!r}, state={self.state!r})" + + +class PartialPostLexConnector(PostLexConnector): + def __copy__(self): + return type(self)(self.lexer, copy(self.postlexer)) + + def __repr__(self): + return ( + f"{type(self).__name__}(lexer={self.lexer!r}, postlexer={self.postlexer!r})" + ) + + +class PartialParsingFrontend(ParsingFrontend): + def __init__(self, lexer_conf, parser_conf, options, parser=None): + assert parser_conf.parser_type == "lalr" + + options._plugins["LALR_Parser"] = PartialLALRParser + options._plugins["BasicLexer"] = PartialBasicLexer + options._plugins["ContextualLexer"] = PartialContextualLexer + options._plugins["LexerThread"] = PartialLexerThread + + super().__init__(lexer_conf, parser_conf, options, parser=parser) + + if lexer_conf.postlex: + self.lexer = PartialPostLexConnector(self.lexer.lexer, lexer_conf.postlex) + + +class PartialLALRParser(LALR_Parser): + def __init__(self, parser_conf, debug=False, strict=False): + analysis = LALR_Analyzer( + parser_conf, debug=debug if not parser_conf.deterministic else True + ) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self.parser_conf = parser_conf + self._parse_table = analysis.parse_table + + if parser_conf.deterministic: + old_to_new = {} + + def to_tuple(v): + new = old_to_new.get(v) + if new is None: + new = tuple(sorted(v, key=lambda y: str(y))) + old_to_new[v] = new + return new + + enum = sorted( + self._parse_table.states.keys(), + key=lambda x: str(sorted(x, key=lambda y: str(y))), + ) + + new_states = {} + for s in enum: + 
transitions = { + term: op if op[0] is not Shift else (op[0], to_tuple(op[1])) + for term, op in self._parse_table.states[s].items() + } + new_states[to_tuple(s)] = transitions + + self._parse_table = type(self._parse_table)( + new_states, + {k: to_tuple(v) for k, v in self._parse_table.start_states.items()}, + {k: to_tuple(v) for k, v in self._parse_table.end_states.items()}, + ) + + if not debug: + self._parse_table = IntParseTable.from_ParseTable(self._parse_table) + self.states_to_rulesets = dict( + zip(self._parse_table.states.keys(), new_states.keys()) + ) + + self.parser = PartialParser(self._parse_table, callbacks, debug) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = ParseTable.deserialize(data, memo) + inst.parser = PartialParser(inst._parse_table, callbacks, debug) + return inst + + +class PartialParserState(ParserState): + def __copy__(self): + return type(self)( + self.parse_conf, + copy(self.lexer), + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def __repr__(self): + return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})" + + +class PartialParser(_Parser): + def parse( + self, lexer, start, value_stack=None, state_stack=None, start_interactive=False + ): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = PartialParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + class PartialScanner(Scanner): - def __init__(self, scanner: Scanner): - self.terminals = scanner.terminals - self.g_regex_flags = scanner.g_regex_flags + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + self.g_regex_flags = g_regex_flags self.re_ = regex - self.use_bytes = scanner.use_bytes - self.match_whole = scanner.match_whole - self.allowed_types = scanner.allowed_types - self._mres = scanner._mres + self.use_bytes = use_bytes + self.match_whole = match_whole + self.allowed_types = {t.name for t in self.terminals} + self._mres = self._build_mres(terminals, len(terminals)) def match(self, text, pos) -> Optional[Tuple[str, Optional[str], bool]]: for mre in self._mres: @@ -59,24 +229,38 @@ def match(self, text, pos) -> Optional[Tuple[str, Optional[str], bool]]: return None +class PartialContextualLexer(ContextualLexer): + def __init__(self, conf: "LexerConf", states, always_accept=()): + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + lexer_by_tokens: Dict = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) + try: + lexer = lexer_by_tokens[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [ + terminals_by_name[n] for n in accepts if n in terminals_by_name + ] + lexer = PartialBasicLexer(lexer_conf) + lexer_by_tokens[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + self.root_lexer = PartialBasicLexer(trad_conf) + + class PartialBasicLexer(BasicLexer): - def __init__(self, basic_lexer: BasicLexer): - self.re = regex - self.newline_types = basic_lexer.newline_types - self.ignore_types = basic_lexer.ignore_types - self.terminals = basic_lexer.terminals - self.user_callbacks = 
basic_lexer.user_callbacks - self.g_regex_flags = basic_lexer.g_regex_flags - self.use_bytes = basic_lexer.use_bytes - self.terminals_by_name = basic_lexer.terminals_by_name - self.callback = getattr(basic_lexer, "callback", None) - - if basic_lexer._scanner is not None: - self._scanner: Optional[PartialScanner] = PartialScanner( - basic_lexer._scanner - ) - else: - self._scanner = None + def __init__(self, conf: "LexerConf"): + super().__init__(conf) # This is used to determine the token type for partial matches self.terminal_to_regex = {} @@ -86,8 +270,23 @@ def __init__(self, basic_lexer: BasicLexer): ) def _build_scanner(self): - super()._build_scanner() - self._scanner = PartialScanner(self._scanner) + terminals, self.callback = _create_unless( + self.terminals, self.g_regex_flags, self.re, self.use_bytes + ) + assert all(self.callback.values()) + + for type_, f in self.user_callbacks.items(): + if type_ in self.callback: + # Already a callback there, probably UnlessCallback + self.callback[type_] = CallChain( + self.callback[type_], f, lambda t: t.type == type_ + ) + else: + self.callback[type_] = f + + self._scanner = PartialScanner( + terminals, self.g_regex_flags, self.re, self.use_bytes + ) def partial_matches(self, value, type_): partial_matches = set() @@ -200,62 +399,18 @@ def __copy__(self): res.indent_level = copy(self.indent_level) return res - -def copy_lexer_thread(lexer_thread: "LexerThread") -> "LexerThread": - res = copy(lexer_thread) - res.lexer = copy(res.lexer) - - if getattr(res.lexer, "postlexer", None): - if isinstance(res.lexer.postlexer, PythonIndenter) and not isinstance( - res.lexer.postlexer, PartialPythonIndenter - ): - # Patch these methods so that the post lexer keeps its state - # XXX: This won't really work in generality. - postlexer = PartialPythonIndenter() - postlexer.paren_level = res.lexer.postlexer.paren_level - postlexer.indent_level = res.lexer.postlexer.indent_level - res.lexer.postlexer = postlexer - else: - res.lexer.postlexer = copy(res.lexer.postlexer) - - # Patch/replace the lexer objects so that they support partial matches - context_lexer = res.lexer - - if not isinstance(context_lexer, ContextualLexer): - # XXX: The layouts change with the grammars - context_lexer = context_lexer.lexer - assert isinstance(context_lexer, ContextualLexer) - - if not isinstance(context_lexer.root_lexer, PartialBasicLexer): - context_lexer.root_lexer = PartialBasicLexer(context_lexer.root_lexer) - - basic_lexers = context_lexer.lexers - for idx, lexer in basic_lexers.items(): - basic_lexers[idx] = PartialBasicLexer(lexer) - - return res - - -def copy_parser_state(parser_state: ParserState) -> ParserState: - res = copy(parser_state) - res.lexer = copy_lexer_thread(res.lexer) - - return res - - -def copy_ip(ip: "InteractiveParser") -> "InteractiveParser": - res = copy(ip) - res.lexer_thread = copy_lexer_thread(res.lexer_thread) - return res + def __repr__(self): + return f"{type(self).__name__}(paren_level={self.paren_level!r}, indent_level={self.indent_level!r})" -def parse_to_end(parser_state: ParserState) -> Tuple[ParserState, Set[str]]: +def parse_to_end( + parser_state: PartialParserState, +) -> Tuple[PartialParserState, Set[str]]: """Continue parsing from the current parse state and return partial next tokens. .. warning:: The parse state `parser_state` is updated in-place and must be patched - to work with this function. Either patch it manually or use - `copy_parser_state` before calling this. + to work with this function. 
""" @@ -347,7 +502,7 @@ def _partial_match( return res -def terminals_to_fsms(lp: Lark) -> Dict[str, FSM]: +def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]: """Construct a ``dict`` mapping terminal symbol names to their finite state machines.""" symbol_names_and_fsms = {} @@ -421,7 +576,7 @@ def map_partial_states_to_vocab( return pstate_to_vocab, possible_paths -def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: +def terminals_to_lalr_states(lp: PartialLark) -> DefaultDict[str, Set[int]]: terminals_to_states = defaultdict(set) parse_table = lp.parser.parser.parser.parse_table for state, tokens_to_ops in parse_table.states.items(): @@ -434,12 +589,12 @@ def terminals_to_lalr_states(lp: Lark) -> DefaultDict[str, Set[int]]: def create_pmatch_parser_states( - lp: Lark, + lp: PartialLark, terminals_to_states: Dict[str, Set[int]], term_type: str, ptoken: str, pmatch: Tuple[int, Tuple[int, ...]], -) -> Tuple[ParserState, ...]: +) -> Tuple[PartialParserState, ...]: parse_table = lp.parser.parser.parser.parse_table # TODO: We need to effectively disable the callbacks that build the @@ -455,7 +610,7 @@ def noop(*args, **kwargs): lexer_state.line_ctr.char_pos = pmatch[0] + 1 lexer_state.last_token = Token(term_type, "") res = tuple( - ParserState(parse_conf, lexer_thread, [state], None) + PartialParserState(parse_conf, lexer_thread, [state], None) for state in terminals_to_states[term_type] ) return res diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index eb9350fb..1cc87649 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -1,15 +1,15 @@ import random import re +from copy import copy import interegular import pytest -from lark import Lark from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken from outlines.text.parsing import ( + PartialLark, PartialPythonIndenter, - copy_parser_state, create_pmatch_parser_states, find_partial_matches, map_partial_states_to_vocab, @@ -20,7 +20,7 @@ def test_parse_to_end(): - pyparser = Lark.open_from_package( + pyparser = PartialLark.open_from_package( "lark", "python.lark", ["grammars"], @@ -30,37 +30,37 @@ def test_parse_to_end(): ) ip = pyparser.parse_interactive("x") - parser_state = copy_parser_state(ip.parser_state) + parser_state = copy(ip.parser_state) parser_state, expected_next_tokens = parse_to_end(parser_state) assert not parser_state.value_stack assert expected_next_tokens == {"NAME"} ip = pyparser.parse_interactive("x = '") - parser_state = copy_parser_state(ip.parser_state) + parser_state = copy(ip.parser_state) parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "EQUAL" assert expected_next_tokens == {"LONG_STRING", "STRING"} ip = pyparser.parse_interactive("x = 'hi") - parser_state = copy_parser_state(ip.parser_state) + parser_state = copy(ip.parser_state) parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "EQUAL" assert expected_next_tokens == {"STRING"} ip = pyparser.parse_interactive("x = ('hi") - parser_state = copy_parser_state(ip.parser_state) + parser_state = copy(ip.parser_state) parser_state, expected_next_tokens = parse_to_end(parser_state) assert parser_state.value_stack[-1].type == "LPAR" assert expected_next_tokens == {"STRING"} ip = pyparser.parse_interactive("def") - parser_state = copy_parser_state(ip.parser_state) + parser_state = copy(ip.parser_state) parser_state, expected_next_tokens = 
parse_to_end(parser_state) assert not parser_state.value_stack assert expected_next_tokens == {"NAME", "DEF"} # Now, try something incremental - parser_state = copy_parser_state(parser_state) + parser_state = copy(parser_state) last_lexer_state = parser_state.lexer.state last_lexer_state.text = "def blah()" @@ -93,7 +93,7 @@ def test_sequential_parse_example(): ] vocab = set(input_tokens) - pyparser = Lark.open_from_package( + pyparser = PartialLark.open_from_package( "lark", "python.lark", ["grammars"], @@ -102,7 +102,7 @@ def test_sequential_parse_example(): start="file_input", ) ip = pyparser.parse_interactive("") - parser_state = copy_parser_state(ip.parser_state) + parser_state = ip.parser_state token_seq = "" for i, token in enumerate(input_tokens): @@ -115,7 +115,7 @@ def test_sequential_parse_example(): next_vocab = set() for test_token in vocab: - ps = copy_parser_state(parser_state) + ps = copy(parser_state) ls = ps.lexer.state ls.text = token_seq + test_token @@ -184,7 +184,7 @@ def test_partial_match(): def test_map_partial_states_to_vocab_python(): - pyparser = Lark.open_from_package( + pyparser = PartialLark.open_from_package( "lark", "python.lark", ["grammars"], @@ -251,7 +251,7 @@ def test_map_partial_states_to_vocab_python(): def test_parse_from_partial_match(): """Make sure we can continue parsing from an FSM-based partial match.""" - lp = Lark( + lp = PartialLark( r""" start: funcdef @@ -287,7 +287,7 @@ def test_parse_from_partial_match(): ) # These copies also patch the lexers in the parse state, which is now # needed for use with `parse_to_end` - parser_state = copy_parser_state(parser_state) + parser_state = copy(parser_state) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert expected_next_tokens == {"NAME"} @@ -297,7 +297,7 @@ def test_parse_from_partial_match(): (parser_state,) = create_pmatch_parser_states( lp, terminals_to_states, term_type, ptoken, first_pmatch ) - parser_state = copy_parser_state(parser_state) + parser_state = copy(parser_state) new_parser_state, expected_next_tokens = parse_to_end(parser_state) assert not expected_next_tokens @@ -307,7 +307,7 @@ def test_parse_from_partial_match(): (parser_state,) = create_pmatch_parser_states( lp, terminals_to_states, term_type, ptoken, first_pmatch ) - parser_state = copy_parser_state(parser_state) + parser_state = copy(parser_state) with pytest.raises(UnexpectedToken): parse_to_end(parser_state) From 8f783f8426c2ffe61a2fa7db20cefff94cbdee45 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Sun, 30 Jul 2023 18:40:43 -0500 Subject: [PATCH 213/734] Use deterministic FSM state labels --- outlines/text/generate/regex.py | 10 +++- outlines/text/parsing.py | 98 +++++++++++++++++++++++++++++---- tests/text/test_parsing.py | 22 +++----- 3 files changed, 104 insertions(+), 26 deletions(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 15e28152..ba1cc731 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -9,7 +9,11 @@ from outlines.text.generate.continuation import Continuation from outlines.text.json_schema import build_regex_from_schema -from outlines.text.parsing import find_partial_matches, map_partial_states_to_vocab +from outlines.text.parsing import ( + find_partial_matches, + make_deterministic_fsm, + map_partial_states_to_vocab, +) class Regex(Continuation): @@ -35,7 +39,7 @@ def __init__(self, model, regex_string: str, max_tokens: Optional[int]): ] regex_pattern = interegular.parse_pattern(regex_string) - self.regex_fsm = regex_pattern.to_fsm().reduce() + self.regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) def partial_match_filter(string, end_idx, state_seq): if end_idx is not None and end_idx < len(string) - 1: @@ -55,7 +59,7 @@ def partial_match_filter(string, end_idx, state_seq): queue = collections.deque([self.regex_fsm.initial]) while queue: symbol = queue.popleft() - for prev_state in paths["REGEX"][symbol]: + for prev_state in paths["REGEX"].get(symbol, ()): if prev_state not in traversed_states: traversed_states.add(prev_state) queue.append(prev_state) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 80732d4a..5972ac1b 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -4,7 +4,7 @@ import interegular import regex -from interegular.fsm import FSM, anything_else +from interegular.fsm import FSM, Alphabet, anything_else from interegular.patterns import Unsupported from lark import Lark, Token from lark.common import LexerConf, ParserConf @@ -37,6 +37,80 @@ PartialParseState = Tuple[str, int] +def make_deterministic_fsm(fsm: FSM) -> Tuple[FSM, Dict[int, int]]: + """Construct an equivalent FSM with deterministic state labels.""" + old_to_new_trans_keys = { + trans_key: i + for i, (trans_key, _) in enumerate( + sorted(fsm.alphabet.by_transition.items(), key=lambda x: sorted(x[1])) + ) + } + + new_symbol_mapping = { + symbol: old_to_new_trans_keys[trans_key] + for symbol, trans_key in fsm.alphabet._symbol_mapping.items() + } + + new_alphabet = Alphabet(new_symbol_mapping) + + new_map = { + from_state: { + old_to_new_trans_keys[trans_key]: to_state + for trans_key, to_state in trans_map.items() + } + for from_state, trans_map in fsm.map.items() + } + + old_to_new_states = {} + old_to_new_states[fsm.initial] = 0 + + i = 0 + seen = {fsm.initial} + old_state_queue = [fsm.initial] + while old_state_queue: + old_state = old_state_queue.pop(-1) + transitions = new_map[old_state] + sorted_transitions = sorted(transitions.items(), key=lambda v: v[0]) + for _, old_state in sorted_transitions: + if old_state not in seen: + old_state_queue.append(old_state) + seen.add(old_state) + if old_state not in old_to_new_states: + i += 1 + old_to_new_states[old_state] = i + + new_map = dict( + sorted( + ( + ( + old_to_new_states[from_state], + dict( + sorted( + ( + (trans_key, old_to_new_states[to_state]) + for trans_key, to_state in trans_map.items() + ), + key=lambda v: v[0], + ) + ), + ) + for from_state, trans_map in new_map.items() + ), + 
key=lambda v: v[0], + ) + ) + + new_initial = 0 + new_finals = frozenset( + sorted(old_to_new_states[old_state] for old_state in fsm.finals) + ) + new_states = frozenset(sorted(new_map.keys())) + + new_fsm = FSM(new_alphabet, new_states, new_initial, new_finals, new_map) + + return new_fsm, old_to_new_states + + class PartialTokenEOF(UnexpectedEOF): pass @@ -510,7 +584,7 @@ def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]: pattern = interegular.parse_pattern(terminal.pattern.to_regexp()) # TODO: Use `pyparser.terminals[0].pattern.flags`? try: - fsm = pattern.to_fsm().reduce() + fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) except Unsupported: fsm = None @@ -526,9 +600,7 @@ def map_partial_states_to_vocab( [str, Optional[int], Tuple[int, ...]], bool ] = lambda *args: True, final_state_string: Optional[str] = None, -) -> Tuple[ - DefaultDict[PartialParseState, Set[int]], Dict[str, DefaultDict[int, Set[int]]] -]: +) -> Tuple[Dict[PartialParseState, Set[int]], Dict[str, Dict[int, Set[int]]]]: """Construct a map from partial parse states to subsets of `vocabulary`. The subsets of `vocabulary` consist of elements that are accepted by--or @@ -552,18 +624,22 @@ def map_partial_states_to_vocab( final_state_string_idx = None # Partial parse states to the subsets of the vocabulary that accept them - pstate_to_vocab = defaultdict(set) + pstate_to_vocab: Dict[Tuple[str, int], Set[int]] = {} possible_paths = {} for symbol_name, fsm in terminals_to_fsms_map.items(): - terminal_possible_paths = defaultdict(set) + terminal_possible_paths: Dict[int, Set[int]] = {} for i, vocab_string in enumerate(vocabulary): if vocab_string == final_state_string: final_state_string_idx = i for end_idx, state_seq in find_partial_matches(fsm, vocab_string): if partial_match_filter(vocab_string, end_idx, state_seq): - terminal_possible_paths[state_seq[0]].add(state_seq[-1]) - pstate_to_vocab[(symbol_name, state_seq[0])].add(i) + terminal_possible_paths.setdefault(state_seq[0], set()).add( + state_seq[-1] + ) + pstate_to_vocab.setdefault((symbol_name, state_seq[0]), set()).add( + i + ) possible_paths[symbol_name] = terminal_possible_paths @@ -571,7 +647,9 @@ def map_partial_states_to_vocab( # Allow transitions to EOS from all terminals FSM states for symbol_name, fsm in terminals_to_fsms_map.items(): for state in fsm.finals: - pstate_to_vocab[(symbol_name, state)].add(final_state_string_idx) + pstate_to_vocab.setdefault((symbol_name, state), set()).add( + final_state_string_idx + ) return pstate_to_vocab, possible_paths diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 1cc87649..b4277d94 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -12,6 +12,7 @@ PartialPythonIndenter, create_pmatch_parser_states, find_partial_matches, + make_deterministic_fsm, map_partial_states_to_vocab, parse_to_end, terminals_to_fsms, @@ -134,11 +135,11 @@ def test_sequential_parse_example(): def test_partial_match(): name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") - name_fsm = name_pattern.to_fsm().reduce() + name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) assert name_fsm.initial == 0 def_pattern = interegular.parse_pattern("def") - def_fsm = def_pattern.to_fsm().reduce() + def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) assert def_fsm.initial == 0 assert find_partial_matches(def_fsm, "def") == {(2, (0, 1, 2, 3))} @@ -170,17 +171,12 @@ def test_partial_match(): float_pattern = interegular.parse_pattern( 
r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))" ) - float_fsm = float_pattern.to_fsm().reduce() + float_fsm, _ = make_deterministic_fsm(float_pattern.to_fsm().reduce()) + assert 5 in float_fsm.finals + assert 2 not in float_fsm.finals - # XXX: It look like there's a lot of set/frozenset usage that prevents us - # from adequately reproducing the exact state sequences in this case. - # It seems to stem from `_CharGroup`s and the FSM map construction process. res = find_partial_matches(float_fsm, ".") - assert {v[0] for v in res} == {0, 0, None} - # Make sure that the terminated sequences actually end in final states - assert all(v[1][-1] in float_fsm.finals for v in res if v[0] == 0) - # Make sure that the non-terminated sequences don't end in final states - assert all(v[1][-1] not in float_fsm.finals for v in res if v[0] != 0) + assert res == {(0, (3, 5)), (0, (4, 5)), (None, (0, 2))} def test_map_partial_states_to_vocab_python(): @@ -315,7 +311,7 @@ def test_parse_from_partial_match(): def test_map_partial_states_to_vocab_regex(): regex_string = r"([0-9]+([.][0-9]*)?|[.][0-9]+)" regex_pattern = interegular.parse_pattern(regex_string) - regex_fsm = regex_pattern.to_fsm().reduce() + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) vocabulary = [ "1.", @@ -350,7 +346,7 @@ def partial_match_filter(string, end_idx, state_seq): {1, 5, 8, 12}, {1, 5, 8}, ] - assert possible_paths["FLOAT"] == {0: {1, 2, 3}, 1: {1, 3}, 2: {3}, 3: {3}} + assert possible_paths["FLOAT"] == {2: {2, 3}, 0: {1, 2, 3}, 3: {3}, 1: {3}} pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} From 77e1593e68fe01b1716731efd9fee63874bc1297 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sun, 9 Jul 2023 22:33:07 -0500 Subject: [PATCH 214/734] Use FSM-based scanning All the terminal symbols regexs in each parse-state-dependent lexer are combined/unioned into a single FSM, and scanning is performed according to those combined FSMs. The function `fsm_union` was added for that purpose. Since the parser needs to know exactly which terminal symbols were matched, we now need to the ability to determine exactly which sub-FSM (i.e. one of the combined terminal symbol FSMs) accepted an input string. The function `get_sub_fsms_from_seq` serves this purpose. 
--- examples/parsing.py | 62 +-- outlines/text/parsing.py | 781 ++++++++++++++++++++++++++------- pyproject.toml | 2 - tests/text/__init__.py | 0 tests/text/partial_python.lark | 314 +++++++++++++ tests/text/test_parsing.py | 413 +++++++++++------ 6 files changed, 1245 insertions(+), 327 deletions(-) create mode 100644 tests/text/__init__.py create mode 100644 tests/text/partial_python.lark diff --git a/examples/parsing.py b/examples/parsing.py index ee17d3d9..1e6e05c4 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -1,7 +1,6 @@ """An example illustrating parser-based masking.""" import math import time -import urllib.request from copy import copy import torch @@ -15,7 +14,7 @@ set_seed, ) -from outlines.text.parsing import PartialLark, PartialPythonIndenter, parse_to_end +from outlines.text.parsing import PartialLark, PartialPythonIndenter revision = None checkpoint = "Salesforce/codegen-350M-mono" @@ -27,22 +26,27 @@ checkpoint, trust_remote_code=True, revision=revision ).to(device) -sql_grammar_url = "https://github.com/zbrookle/sql_to_ibis/raw/0e9226da42065940ce21439d490f9fcacadc7f92/sql_to_ibis/grammar/sql.lark" -sql_grammar = "".join( - [line.decode("utf-8") for line in urllib.request.urlopen(sql_grammar_url)] -) -with open("sql_grammar.lark", "w") as f: - f.write(sql_grammar) - -sqlparser = PartialLark.open( - "sql_grammar.lark", - parser="lalr", -) - -pyparser = PartialLark.open_from_package( - "lark", - "python.lark", - ["grammars"], +# import urllib.request +# +# sql_grammar_url = "https://github.com/zbrookle/sql_to_ibis/raw/0e9226da42065940ce21439d490f9fcacadc7f92/sql_to_ibis/grammar/sql.lark" +# sql_grammar = "".join( +# [line.decode("utf-8") for line in urllib.request.urlopen(sql_grammar_url)] +# ) +# with open("sql_grammar.lark", "w") as f: +# f.write(sql_grammar) +# +# TODO: `_STRING_ESC_INNER` from `%import common.ESCAPED_STRING` introduces a +# (potentially superfluous) look-back; we need to replace it or implement +# look-backs. +# parser = PartialLark.open( +# "sql_grammar.lark", +# parser="lalr", +# ) + +parser = PartialLark.open_from_package( + "tests", + "partial_python.lark", + ["text"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", @@ -53,8 +57,8 @@ class ParserLogitsProcessor(LogitsProcessor): """Bias invalid token scores according to a running parse state.""" def __init__(self, parser): - ip = parser.parse_interactive("") - self.parser_state = ip.parser_state + self.parser = parser + self.parser_state = parser.parse("") self.states_stack = [self.parser_state] self.token_seq = None self.token_idx = 0 @@ -73,10 +77,9 @@ def __call__( lex_state = self.parser_state.lexer.state lex_state.text = self.token_seq - self.parser_state, partial_tokens = parse_to_end(self.parser_state) + parser.parse_from_state(self.parser_state, is_end=False) print(f'parsed:"{self.token_seq}"') - print(f"partial_tokens: {partial_tokens}") mask = torch.full_like(scores, -math.inf) @@ -84,19 +87,19 @@ def __call__( # given the parser state. # # TODO: This is a very naive and slow approach. It could be done in - # parallel, but there are a few other approaches to try first, and - # those should dramatically reduce the amount of work done here. + # parallel, easily memoized/cached, etc., but there are a few other + # approaches to try first that will dramatically reduce the + # amount of work needed here. 
t0 = time.perf_counter() for test_token, token_id in tokenizer.vocab.items(): ps = copy(self.parser_state) ls = ps.lexer.state - ls.text = self.token_seq + test_token + ls.text = self.token_seq + tokenizer.convert_tokens_to_string([test_token]) try: - # TODO: The resulting states could possibly be reused? - parse_to_end(ps) + parser.parse_from_state(ps, is_end=False) mask[0][token_id] = 0 - except (UnexpectedToken, UnexpectedCharacters, DedentError): + except (EOFError, UnexpectedToken, UnexpectedCharacters, DedentError): pass print(f"next token masking duration: {time.perf_counter() - t0}") @@ -106,8 +109,7 @@ def __call__( set_seed(20399) -parser = sqlparser -input_text = "select " +input_text = "def " inputs = tokenizer.encode(input_text, return_tensors="pt").to(device) outputs = model.generate( diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 5972ac1b..161e76cf 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,23 +1,36 @@ -from collections import ChainMap, defaultdict +from collections import ChainMap from copy import copy, deepcopy -from typing import Any, Callable, DefaultDict, Dict, Iterable, Optional, Set, Tuple +from dataclasses import dataclass +from functools import cache +from typing import ( + Any, + Callable, + Dict, + FrozenSet, + Generator, + Iterable, + Iterator, + Optional, + Sequence, + Set, + Tuple, + Union, +) import interegular -import regex -from interegular.fsm import FSM, Alphabet, anything_else +from interegular.fsm import FSM, Alphabet, OblivionError from interegular.patterns import Unsupported from lark import Lark, Token from lark.common import LexerConf, ParserConf from lark.exceptions import ( LexError, UnexpectedCharacters, - UnexpectedEOF, + UnexpectedInput, UnexpectedToken, ) -from lark.indenter import PythonIndenter +from lark.indenter import Indenter from lark.lexer import ( BasicLexer, - CallChain, ContextualLexer, LexerState, LexerThread, @@ -29,12 +42,34 @@ PostLexConnector, _validate_frontend_args, ) -from lark.parsers.lalr_analysis import IntParseTable, LALR_Analyzer, ParseTable, Shift +from lark.parsers.lalr_analysis import ( + Action, + IntParseTable, + LALR_Analyzer, + ParseTable, + Shift, +) from lark.parsers.lalr_interactive_parser import InteractiveParser from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser -from lark.utils import get_regexp_width PartialParseState = Tuple[str, int] +ParseStateType = Union[int, FrozenSet] + + +@dataclass +class PartialTerminalInfo: + priority: int + terminal_name: str + can_transition: bool + is_final: bool + + +@dataclass +class PartialTokensInfo: + fsm_state_seq: Tuple[int, ...] + is_not_finished: bool + terminals_and_info: Tuple[PartialTerminalInfo, ...] + final_terminals_and_info: Tuple[PartialTerminalInfo, ...] 
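+# A `Token` of type "partial" carries a `PartialTokensInfo` as its value: the
+# scanner FSM states traversed so far, whether the match could still be
+# extended (or is otherwise not yet settled), and the terminals--including
+# any already in a final state--consistent with that state sequence.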
def make_deterministic_fsm(fsm: FSM) -> Tuple[FSM, Dict[int, int]]: @@ -111,10 +146,6 @@ def make_deterministic_fsm(fsm: FSM) -> Tuple[FSM, Dict[int, int]]: return new_fsm, old_to_new_states -class PartialTokenEOF(UnexpectedEOF): - pass - - class PartialParserConf(ParserConf): __serialize_fields__ = "rules", "start", "parser_type", "deterministic" @@ -171,6 +202,9 @@ def __repr__(self): self.options.lexer, ) + def parse_from_state(self, parse_state: "PartialParseState", is_end=False): + return self.parser.parser.parser.parse_from_state(parse_state, is_end=is_end) + class PartialLexerThread(LexerThread): def __copy__(self): @@ -204,6 +238,114 @@ def __init__(self, lexer_conf, parser_conf, options, parser=None): if lexer_conf.postlex: self.lexer = PartialPostLexConnector(self.lexer.lexer, lexer_conf.postlex) + self._termset_fsm_info = None + self._symbols_to_states: Optional[ + Dict[str, Set[Tuple[ParseStateType, Action]]] + ] = None + self._reverse_shifts: Optional[ + Dict[ParseStateType, Dict[str, Set[ParseStateType]]] + ] = None + # self._state_transition_map: Optional[ + # Dict[Tuple[ParseStateType, str], Set[ParseStateType]] + # ] = None + + def _compute_maps( + self, + ): + """Compute state transition and symbols-to-states maps.""" + self._reverse_shifts = {} + self._symbols_to_states = {} + + parse_table = self.parser.parser.parse_table + + for from_state, symbols_to_ops in parse_table.states.items(): + for symbol, op in symbols_to_ops.items(): + if op[0] == Shift: + symbols_to_from_states = self._reverse_shifts.setdefault(op[1], {}) + symbols_to_from_states.setdefault(symbol, set()).add(from_state) + self._symbols_to_states.setdefault(symbol, set()).add((from_state, op)) + + # # TODO: This approach is very wasteful. + # context_lexer = get_contextual_lexer(self) + # self._state_transition_map = {} + # + # for from_state, transitions in parse_table.states.items(): + # for symbol, action in transitions.items(): + # # TODO: Filter non-terminals + # if symbol not in context_lexer.root_lexer.terminals_by_name: + # continue + # + # if action[0] is Shift: + # self._state_transition_map.setdefault( + # (from_state, symbol), set() + # ).add(action[1]) + # continue + # + # antecedent_state_seqs = parse_to_terminal(self, [(from_state,)], symbol) + # + # for antecedent_state_seq in antecedent_state_seqs: + # antecedent_state = antecedent_state_seq[-1] + # self._state_transition_map.setdefault( + # (from_state, symbol), set() + # ).add(antecedent_state) + + def _compute_termset_fsm_info(self): + """Collect and return information about terminal symbol sets and their FSMs. + + Terminal symbol sets (or "termsets") are ordered sequences of terminal + symbols that are used by each parser state. Associated with each is a + collection of FSMs for each terminal and a single parse state FSM that is + the union of each terminal's FSM. + + This constructs a list of tuples containing the termset, the set of + parse states that use the termsets, parse state FSMs, and information + mapping the components of the parse state FSMs to their terminal symbol + FSMs. 
+ + """ + context_lexer = get_contextual_lexer(self) + termsets_to_fsms = {} + termsets_to_parse_states: Dict[Tuple[str, ...], Set[ParseStateType]] = {} + for parse_state, lexer in context_lexer.lexers.items(): + scanner = lexer.scanner + key = tuple(term.name for term in scanner.terminals) + termsets_to_fsms[key] = (scanner.fsm, scanner.fsms_to_trans_finals) + termsets_to_parse_states.setdefault(key, set()).add(parse_state) + + self._termset_fsm_info = [ + ( + termset, + frozenset(termsets_to_parse_states[termset]), + fsm, + fsms_to_trans_finals, + ) + for termset, (fsm, fsms_to_trans_finals) in termsets_to_fsms.items() + ] + + @property + def termset_fsm_info(self): + if self._termset_fsm_info is None: + self._compute_termset_fsm_info() + return self._termset_fsm_info + + @property + def symbols_to_states(self): + if self._symbols_to_states is None: + self._compute_maps() + return self._symbols_to_states + + @property + def reverse_shifts(self): + if self._reverse_shifts is None: + self._compute_maps() + return self._reverse_shifts + + # @property + # def state_transition_map(self): + # if self._state_transition_map is None: + # self._compute_maps() + # return self._state_transition_map + class PartialLALRParser(LALR_Parser): def __init__(self, parser_conf, debug=False, strict=False): @@ -273,34 +415,154 @@ def __copy__(self): def __repr__(self): return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})" + def feed_token(self, token, is_end=False): + if token.type == "partial": + current_state = self.state_stack[-1] + current_transitions = self.parse_conf.states[current_state] + current_lexer = get_contextual_lexer(self.lexer).lexers[current_state] + + if not any( + terminal_info.terminal_name in current_transitions + or terminal_info.terminal_name in current_lexer.ignore_types + for terminal_info in token.value.terminals_and_info + ): + # If none of the terminals can transition, we should + # know sooner than later + expected = { + s + for s in self.parse_conf.states[current_state].keys() + if s.isupper() + } + raise UnexpectedToken( + token, expected, state=self, interactive_parser=None + ) + return + + super().feed_token(token, is_end=is_end) + class PartialParser(_Parser): def parse( self, lexer, start, value_stack=None, state_stack=None, start_interactive=False ): parse_conf = ParseConf(self.parse_table, self.callbacks, start) - parser_state = PartialParserState(parse_conf, lexer, state_stack, value_stack) + parser_state = PartialParserState( + parse_conf, copy(lexer), state_stack, value_stack + ) if start_interactive: return InteractiveParser(self, parser_state, parser_state.lexer) return self.parse_from_state(parser_state) + def parse_from_state(self, state, last_token=None, is_end=False): + try: + token = last_token + for token in state.lexer.lex(state): + state.feed_token(token) + + if is_end and (not token or token.type != "partial"): + end_token = ( + Token.new_borrow_pos("$END", "", token) + if token + else Token("$END", "", 0, 1, 1) + ) + state.feed_token(end_token, True) + + return state + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print("%d)" % i, s) + print("") + + raise + class PartialScanner(Scanner): + @classmethod + @cache + def construct_terminal_fsm(cls, terminal): + # TODO: This should 
really be done at the lexer/parser level so that + # the lifetime of these objects is tied to the parser itself. + regex_str = terminal.pattern.to_regexp() + pattern = interegular.parse_pattern(regex_str) + fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) + return fsm, pattern.prefix_postfix + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): self.terminals = terminals self.g_regex_flags = g_regex_flags - self.re_ = regex self.use_bytes = use_bytes self.match_whole = match_whole self.allowed_types = {t.name for t in self.terminals} - self._mres = self._build_mres(terminals, len(terminals)) + self._mres = None + + fsms = [] + for t in self.terminals: + fsm, prefix_postfix = self.construct_terminal_fsm(t) + + # TODO FIXME: We don't support this right now. + assert prefix_postfix == (0, 0) + + fsms.append(fsm) + + self.fsm, self.fsms_to_trans_finals = fsm_union(fsms) + + def get_terminals_info( + self, fsm_state_seq + ) -> Tuple[Tuple[PartialTerminalInfo, ...], Tuple[PartialTerminalInfo, ...]]: + """Get the possible terminal symbols for an FSM state sequence.""" + terminals_and_info: Tuple[PartialTerminalInfo, ...] = () + final_terminals_and_info: Tuple[PartialTerminalInfo, ...] = () + for i, (fsm_id, fsm_reads_more, in_final) in enumerate( + get_sub_fsms_from_seq(fsm_state_seq, self.fsms_to_trans_finals) + ): + terminal_name = self.terminals[fsm_id].name + info = PartialTerminalInfo(i, terminal_name, fsm_reads_more, in_final) + terminals_and_info += (info,) + if in_final: + final_terminals_and_info += (info,) + + return terminals_and_info, final_terminals_and_info + + def match(self, text, pos, last_fsm_state_seq: Optional[Tuple[int, ...]] = None): + """Determine an FSM match over `text` starting at `pos` and continuing `last_fsm_state_seq`.""" + + start_pos = pos + + if last_fsm_state_seq: + assert len(last_fsm_state_seq) > 1 + start_pos += len(last_fsm_state_seq) - 1 + start_state = last_fsm_state_seq[-1] + else: + start_state = self.fsm.initial + + text_part = text[start_pos:] + + res = find_partial_matches( + self.fsm, + text_part, + start_state=start_state, + full_match=self.match_whole, + ) + + if len(res) == 0: + return None + + ((_, state_seq),) = res - def match(self, text, pos) -> Optional[Tuple[str, Optional[str], bool]]: - for mre in self._mres: - m = mre.match(text, pos=pos, partial=True) - if m: # and ((not m.partial) or m.endpos == len(text)): - return m.group(0), m.lastgroup, m.partial - return None + if last_fsm_state_seq: + state_seq = last_fsm_state_seq[:-1] + state_seq + + return state_seq class PartialContextualLexer(ContextualLexer): @@ -311,12 +573,12 @@ def __init__(self, conf: "LexerConf", states, always_accept=()): trad_conf = copy(conf) trad_conf.terminals = terminals - lexer_by_tokens: Dict = {} + lexer_by_symbols: Dict = {} self.lexers = {} for state, accepts in states.items(): key = frozenset(accepts) try: - lexer = lexer_by_tokens[key] + lexer = lexer_by_symbols[key] except KeyError: accepts = set(accepts) | set(conf.ignore) | set(always_accept) lexer_conf = copy(trad_conf) @@ -324,100 +586,153 @@ def __init__(self, conf: "LexerConf", states, always_accept=()): terminals_by_name[n] for n in accepts if n in terminals_by_name ] lexer = PartialBasicLexer(lexer_conf) - lexer_by_tokens[key] = lexer + lexer_by_symbols[key] = lexer self.lexers[state] = lexer assert trad_conf.terminals is terminals self.root_lexer = PartialBasicLexer(trad_conf) + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + 
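# Select the lexer that corresponds to the current LALR parse state on
+        # every iteration, since each state accepts its own set of terminals;
+        # `next_token` raises `EOFError` once the input is exhausted.
+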
try: + while True: + lexer = self.lexers[parser_state.position] + yield lexer.next_token(lexer_state, parser_state) + except EOFError: + pass + class PartialBasicLexer(BasicLexer): def __init__(self, conf: "LexerConf"): super().__init__(conf) - # This is used to determine the token type for partial matches - self.terminal_to_regex = {} - for name, terminal in self.terminals_by_name.items(): - self.terminal_to_regex[name] = self.re.compile( - terminal.pattern.to_regexp(), self.g_regex_flags - ) - def _build_scanner(self): + # This seems incredibly convoluted: `lark` creates callback-triggered + # nested scanners for regex-defined terminals that overlap with + # string-defined terminals when both types of terminals have the same + # priority. Unless I'm missing something important, why not simply + # reorder the terminals so that the string-defined ones come before the + # regex-defined ones? terminals, self.callback = _create_unless( self.terminals, self.g_regex_flags, self.re, self.use_bytes ) - assert all(self.callback.values()) - for type_, f in self.user_callbacks.items(): - if type_ in self.callback: - # Already a callback there, probably UnlessCallback - self.callback[type_] = CallChain( - self.callback[type_], f, lambda t: t.type == type_ - ) - else: - self.callback[type_] = f + # We can't let people arbitrarily mess with the scanning process. + assert not self.user_callbacks + # for type_, f in self.user_callbacks.items(): + # if type_ in self.callback: + # # Already a callback there, probably UnlessCallback + # self.callback[type_] = CallChain( + # self.callback[type_], f, lambda t: t.type == type_ + # ) + # else: + # self.callback[type_] = f + + # We used the "callback" results to reorder the terminals (see the + # comments above). + for terminal_name, callback in self.callback.items(): + terminal = self.terminals_by_name[terminal_name] + for sub_terminal in callback.scanner.terminals: + self.terminals.remove(sub_terminal) + idx = self.terminals.index(terminal) + self.terminals.insert(idx, sub_terminal) self._scanner = PartialScanner( - terminals, self.g_regex_flags, self.re, self.use_bytes + self.terminals, self.g_regex_flags, self.re, self.use_bytes ) - def partial_matches(self, value, type_): - partial_matches = set() - - # TODO: It's unfortunate that we have to do this costly search (again). - # It would be better if we could *not* short-circuit the first time we - # scan in the call to `self.match`. 
- for term_name, term_regex in self.terminal_to_regex.items(): - if term_name == type_: - # A standard lexed token result could actual indicate a partial - # match - regex_min, regex_max = get_regexp_width(term_regex.pattern) - if regex_min <= len(value) < regex_max: - partial_matches.add(term_name) - else: - m = term_regex.match(value, partial=True) - if m: - partial_matches.add(term_name) - - return partial_matches + def match(self, text, pos, last_fsm_state_seq=None): + return self.scanner.match(text, pos, last_fsm_state_seq) def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + last_token = lex_state.last_token + + last_fsm_state_seq = None + if last_token and last_token.type == "partial": + # Continue from last partial lexer state + last_fsm_state_seq = last_token.value.fsm_state_seq + line_ctr = lex_state.line_ctr - while line_ctr.char_pos < len(lex_state.text): - res = self.match(lex_state.text, line_ctr.char_pos) + end_pos = line_ctr.char_pos + ( + len(last_fsm_state_seq) - 1 if last_fsm_state_seq else 0 + ) + while end_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos, last_fsm_state_seq) if not res: - allowed = self.scanner.allowed_types - self.ignore_types - if not allowed: - allowed = {""} - raise UnexpectedCharacters( - lex_state.text, - line_ctr.char_pos, - line_ctr.line, - line_ctr.column, - allowed=allowed, - token_history=lex_state.last_token and [lex_state.last_token], - state=parser_state, - terminals_by_name=self.terminals_by_name, - ) + if ( + not last_fsm_state_seq + or last_fsm_state_seq[-1] not in self.scanner.fsm.finals + ): + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters( + lex_state.text, + line_ctr.char_pos, + line_ctr.line, + line_ctr.column, + allowed=allowed, + token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, + terminals_by_name=self.terminals_by_name, + ) + + # The partial match might be complete now + fsm_state_seq = last_token.value.fsm_state_seq + terminals_and_info = last_token.value.terminals_and_info + final_terminals_and_info = last_token.value.final_terminals_and_info + else: + fsm_state_seq = res + ( + terminals_and_info, + final_terminals_and_info, + ) = self.scanner.get_terminals_info(fsm_state_seq) + + priority_terminal_info = ( + final_terminals_and_info[0] + if final_terminals_and_info + else terminals_and_info[0] + ) - value, type_, partial = res + is_not_finished = ( + not priority_terminal_info.is_final + or priority_terminal_info.can_transition + or len(terminals_and_info) > 1 + ) - # Don't advance the lexing state if we're at the end; there could - # be ambiguous token types that aren't finished. 
- if line_ctr.char_pos + len(value) >= len(lex_state.text): - partial_matches = self.partial_matches(value, type_) - if partial_matches or partial: - raise PartialTokenEOF(partial_matches) + start_pos = line_ctr.char_pos + end_pos = start_pos + len(fsm_state_seq) - 1 + + if end_pos >= len(lex_state.text) and is_not_finished: + type_name = "partial" + token_value = PartialTokensInfo( + fsm_state_seq, + is_not_finished, + terminals_and_info, + final_terminals_and_info, + ) + # Don't update the line counter states until we've finished + value = "" + else: + type_name = priority_terminal_info.terminal_name + # The token value should contain all partial scan parts in this + # case + value = token_value = lex_state.text[start_pos:end_pos] assert isinstance(self.callback, Dict) - if type_ not in self.ignore_types: + if type_name not in self.ignore_types: t = Token( - type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column + type_name, + token_value, + line_ctr.char_pos, + line_ctr.line, + line_ctr.column, ) - line_ctr.feed(value, type_ in self.newline_types) + + line_ctr.feed(value, type_name in self.newline_types) + t.end_line = line_ctr.line t.end_column = line_ctr.column t.end_pos = line_ctr.char_pos @@ -430,18 +745,20 @@ def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: lex_state.last_token = t return t - if type_ in self.callback: + if type_name in self.callback: t2 = Token( - type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column + type_name, value, line_ctr.char_pos, line_ctr.line, line_ctr.column ) - self.callback[type_](t2) + self.callback[type_name](t2) - line_ctr.feed(value, type_ in self.newline_types) + line_ctr.feed(value, type_name in self.newline_types) + + last_fsm_state_seq = None raise EOFError(self) -class PartialPythonIndenter(PythonIndenter): +class PartialIndenter(Indenter): """An `Indenter` that doesn't reset its state every time `process` is called.""" def process(self, stream): @@ -463,10 +780,22 @@ def _process(self, stream): else: yield token + # TODO: What do we want to do here? # while len(self.indent_level) > 1: # self.indent_level.pop() # yield Token(self.DEDENT_type, "") + def accepts_token_type(self, token_type): + if token_type in self.CLOSE_PAREN_types and self.paren_level - 1 < 0: + return False + + # TODO: + # if token_type == self.NL_type and self.paren_level == 0: + # ... + # return False + + return True + def __copy__(self): res = type(self)() res.paren_level = self.paren_level @@ -477,30 +806,25 @@ def __repr__(self): return f"{type(self).__name__}(paren_level={self.paren_level!r}, indent_level={self.indent_level!r})" -def parse_to_end( - parser_state: PartialParserState, -) -> Tuple[PartialParserState, Set[str]]: - """Continue parsing from the current parse state and return partial next tokens. - - .. warning:: - The parse state `parser_state` is updated in-place and must be patched - to work with this function. 
- - """ +class PartialPythonIndenter(PartialIndenter): + NL_type = "_NEWLINE" + OPEN_PAREN_types = ["LPAR", "LSQB", "LBRACE"] + CLOSE_PAREN_types = ["RPAR", "RSQB", "RBRACE"] + INDENT_type = "_INDENT" + DEDENT_type = "_DEDENT" + tab_len = 8 - expected_next_tokens: Set[str] = set() - try: - for token in parser_state.lexer.lex(parser_state): - parser_state.feed_token(token) - except PartialTokenEOF as e: - expected_next_tokens = e.expected - return parser_state, expected_next_tokens +def get_contextual_lexer(x: Union[PartialLexerThread, PartialParsingFrontend]): + if isinstance(x.lexer, ContextualLexer): + return x.lexer + else: + return x.lexer.lexer def find_partial_matches( - fsm: FSM, input_string: str, start_state: Optional[int] = None -) -> Set[Tuple[Optional[int], Tuple[int, ...]]]: + fsm: FSM, input_string: str, start_state: Optional[int] = None, full_match=True +) -> Set[Tuple[int, Tuple[int, ...]]]: """Find the states in the finite state machine `fsm` that accept `input_string`. This will consider all possible states in the finite state machine (FSM) @@ -517,17 +841,19 @@ def find_partial_matches( A single fixed starting state to consider. For example, if this value is set to `fsm.initial`, it attempt to read `input_string` from the beginning of the FSM/regular expression. + full_match + Matches must cover the entire string. Returns ------- A set of tuples corresponding to each valid starting state in the FSM. The - first element of each tuple contains either ``None`` or an integer - indicating the position in `input_string` at which the FSM terminated. The - second element is the tuple of states visited during execution of the FSM - plus the next, unvisited transition state. + first element of each tuple contains an integer indicating the position in + `input_string` at which the FSM stopped. The second element is the tuple + of states visited during execution of the FSM plus the next, unvisited + transition state. """ - if len(input_string) == 0 or input_string[0] not in fsm.alphabet: + if len(input_string) == 0: return set() trans_key = fsm.alphabet[input_string[0]] @@ -540,20 +866,30 @@ def _partial_match( fsm_map = ChainMap({fsm.initial: trans}, fsm.map) state = fsm.initial accepted_states: Tuple[int, ...] 
= () + last_final_idx = -1 for i, symbol in enumerate(input_string): - if anything_else in fsm.alphabet and symbol not in fsm.alphabet: - symbol = anything_else - trans_key = fsm.alphabet[symbol] - if not (state in fsm_map and trans_key in fsm_map[state]): - if state in fsm.finals: - i -= 1 - break + trans_map = fsm_map.get(state) + + if trans_map is None or trans_key not in trans_map: + if full_match: + if state in fsm.finals: + i -= 1 + break + else: + if last_final_idx > -1: + i = last_final_idx + accepted_states = accepted_states[: last_final_idx + 1] + break + return None, None - state = fsm_map[state][trans_key] + state = trans_map[trans_key] + + if state in fsm.finals: + last_final_idx = i accepted_states += (state,) @@ -561,7 +897,7 @@ def _partial_match( if not terminated and state == fsm.initial: return None, None - return None if not terminated else i, accepted_states + return i, accepted_states res = set() transition_maps = ( @@ -569,9 +905,9 @@ def _partial_match( ) for state, trans in transition_maps.items(): if trans_key in trans: - n_matched, path = _partial_match(trans) - if path is not None: - res.add((n_matched, (state,) + path)) + last_match_idx, path = _partial_match(trans) + if last_match_idx is not None and path is not None: + res.add((last_match_idx, (state,) + path)) return res @@ -654,41 +990,158 @@ def map_partial_states_to_vocab( return pstate_to_vocab, possible_paths -def terminals_to_lalr_states(lp: PartialLark) -> DefaultDict[str, Set[int]]: - terminals_to_states = defaultdict(set) - parse_table = lp.parser.parser.parser.parse_table - for state, tokens_to_ops in parse_table.states.items(): - for token, op in tokens_to_ops.items(): - if op[0] == Shift: - # `op[1]` is the state we shift to when `token` is observed - terminals_to_states[token].add(op[1]) - - return terminals_to_states - - -def create_pmatch_parser_states( - lp: PartialLark, - terminals_to_states: Dict[str, Set[int]], - term_type: str, - ptoken: str, - pmatch: Tuple[int, Tuple[int, ...]], -) -> Tuple[PartialParserState, ...]: - parse_table = lp.parser.parser.parser.parse_table - - # TODO: We need to effectively disable the callbacks that build the - # trees, because we aren't actually parsing a valid state that can, say, - # be reduced - def noop(*args, **kwargs): - pass - - callbacks = {rule: noop for rule, cb in lp._callbacks.items()} - parse_conf = ParseConf(parse_table, callbacks, lp.options.start[0]) - lexer_thread = lp.parser._make_lexer_thread(ptoken) - lexer_state = lexer_thread.state - lexer_state.line_ctr.char_pos = pmatch[0] + 1 - lexer_state.last_token = Token(term_type, "") - res = tuple( - PartialParserState(parse_conf, lexer_thread, [state], None) - for state in terminals_to_states[term_type] +def fsm_union( + fsms: Sequence[FSM], +) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]: + """Construct an FSM representing the union of the FSMs in `fsms`. + + This is an updated version of `interegular.fsm.FSM.union` made to return an + extra map of component FSMs to the sets of state transitions that + correspond to them in the new FSM. 
+ + """ + + alphabet, new_to_old = Alphabet.union(*[fsm.alphabet for fsm in fsms]) + + indexed_fsms = tuple(enumerate(fsms)) + + initial = {i: fsm.initial for (i, fsm) in indexed_fsms} + + # Dedicated function accepting a "superset" and returning the next + # "superset" obtained by following this transition in the new FSM + def follow(current_state, new_transition: int): + next = {} + for i, f in indexed_fsms: + old_transition = new_to_old[i][new_transition] + if ( + i in current_state + and current_state[i] in f.map + and old_transition in f.map[current_state[i]] + ): + next[i] = f.map[current_state[i]][old_transition] + if not next: + raise OblivionError + return next + + states = [initial] + finals: Set[int] = set() + map: Dict[int, Dict[int, int]] = {} + + # Map component FSMs to their new state-to-state transitions, finals, and a + # map translating component FSM states to aggregate FSM states + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ] = {} + + i = 0 + while i < len(states): + state = states[i] + + # Add to the finals of the aggregate FSM whenever we hit a final in a + # component FSM + if any(state.get(j, -1) in fsm.finals for (j, fsm) in indexed_fsms): + finals.add(i) + + # Compute the map for this state + map[i] = {} + for transition in alphabet.by_transition: + try: + next = follow(state, transition) + except OblivionError: + # Reached an oblivion state; don't list it + continue + else: + try: + # TODO: Seems like this could--and should--be avoided + j = states.index(next) + except ValueError: + j = len(states) + states.append(next) + + map[i][transition] = j + + for fsm_id, fsm_state in next.items(): + ( + fsm_transitions, + fsm_finals, + fsm_old_to_new, + ) = fsms_to_trans_finals.setdefault(fsm_id, (set(), set(), {})) + old_from = state[fsm_id] + old_to = fsm_state + fsm_old_to_new.setdefault(old_from, set()).add(i) + fsm_old_to_new.setdefault(old_to, set()).add(j) + fsm_transitions.add((i, j)) + if fsm_state in fsms[fsm_id].finals: + fsm_finals.add(j) + + i += 1 + + fsm = FSM( + alphabet=alphabet, + states=range(len(states)), + initial=0, + finals=finals, + map=map, + __no_validation__=True, + ) + + fsm, old_to_new_states = make_deterministic_fsm(fsm) + _fsms_to_trans_finals = { + fsm_id: ( + {(old_to_new_states[s1], old_to_new_states[s2]) for s1, s2 in transitions}, + {old_to_new_states[s] for s in finals}, + { + old_state: {old_to_new_states[new_state] for new_state in new_states} + for old_state, new_states in old_to_new.items() + }, + ) + for fsm_id, (transitions, finals, old_to_new) in sorted( + fsms_to_trans_finals.items(), key=lambda x: x[0] + ) + } + + return ( + fsm, + _fsms_to_trans_finals, + ) + + +def get_sub_fsms_from_seq( + state_seq: Sequence[int], + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ], +) -> Generator[Tuple[int, bool, bool], None, None]: + """Get the indices of the sub-FSMs in `fsm` that could have matched the state sequence `state_seq`. + + Parameters + ---------- + state_seq + A state sequence. + fsms_to_trans_finals + A map from FSM indices to tuples containing sets of their state transitions + and sets of the final/accept states. + + Returns + ------- + A generator returning tuples containing each sub-FSM index (in the order + they were union-ed to construct `fsm`) and booleans indicating whether or + not there is another valid transition from the last state in the sequence + for the associated sub-FSM (i.e. 
if the FSM can continue
+    accepting/matching) and whether or not the sequence ends in a final state
+    of the sub-FSM.
+    """
+    state_seq_transitions = set(zip(state_seq[:-1], state_seq[1:]))
+    last_fsm_state = state_seq[-1]
+    yield from (
+        (
+            # The sub-FSM index
+            fsm_idx,
+            # Is there another possible transition in this sub-FSM?
+            any(last_fsm_state == from_s for (from_s, to_s) in transitions),
+            # Is this sub-FSM in a final state?
+            state_seq[-1] in finals,
+        )
+        for fsm_idx, (transitions, finals, _) in fsms_to_trans_finals.items()
+        if state_seq_transitions.issubset(transitions)
     )
-    return res

diff --git a/pyproject.toml b/pyproject.toml
index f0eb4b42..d6bb3919 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,7 +50,6 @@ test = [
     "coverage[toml]>=5.1",
     "diff-cover",
     "lark",
-    "regex",
     "interegular",
 ]

@@ -99,7 +98,6 @@ module = [
     "torch",
     "transformers.*",
     "lark.*",
-    "regex.*",
     "interegular.*",
 ]
 ignore_missing_imports = true
diff --git a/tests/text/__init__.py b/tests/text/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/text/partial_python.lark b/tests/text/partial_python.lark
new file mode 100644
index 00000000..973e5963
--- /dev/null
+++ b/tests/text/partial_python.lark
@@ -0,0 +1,314 @@
+// Python 3 grammar for Lark
+//
+// This grammar should parse all python 3.x code successfully.
+//
+// Adapted from: https://docs.python.org/3/reference/grammar.html
+//
+// This version is actually a subset of Lark's Python grammar without the
+// regex look-arounds in the string terminals.
+//
+// Start symbols for the grammar:
+//       single_input is a single interactive statement;
+//       file_input is a module or sequence of commands read from an input file;
+//       eval_input is the input for the eval() functions.
+// NB: compound_stmt in single_input is followed by extra NEWLINE!
+//
+
+single_input: _NEWLINE | simple_stmt | compound_stmt _NEWLINE
+file_input: (_NEWLINE | stmt)*
+eval_input: testlist _NEWLINE*
+
+decorator: "@" dotted_name [ "(" [arguments] ")" ] _NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: "async" funcdef
+funcdef: "def" name "(" [parameters] ")" ["->" test] ":" suite
+
+parameters: paramvalue ("," paramvalue)* ["," SLASH ("," paramvalue)*] ["," [starparams | kwparams]]
+          | starparams
+          | kwparams
+
+SLASH: "/" // Otherwise it will completely disappear and it will be indistinguishable in the result
+starparams: (starparam | starguard) poststarparams
+starparam: "*" typedparam
+starguard: "*"
+poststarparams: ("," paramvalue)* ["," kwparams]
+kwparams: "**" typedparam ","?
+
+?paramvalue: typedparam ("=" test)?
+?typedparam: name (":" test)?
+
+
+lambdef: "lambda" [lambda_params] ":" test
+lambdef_nocond: "lambda" [lambda_params] ":" test_nocond
+lambda_params: lambda_paramvalue ("," lambda_paramvalue)* ["," [lambda_starparams | lambda_kwparams]]
+             | lambda_starparams
+             | lambda_kwparams
+?lambda_paramvalue: name ("=" test)?
+lambda_starparams: "*" [name] ("," lambda_paramvalue)* ["," [lambda_kwparams]]
+lambda_kwparams: "**" name ","?
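+
+// Notation reminder: a rule whose name starts with "?" is inlined into its
+// parent whenever it matches a single child, and a rule marked with "!"
+// keeps all of its tokens in the resulting parse tree.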
+ + +?stmt: simple_stmt | compound_stmt +?simple_stmt: small_stmt (";" small_stmt)* [";"] _NEWLINE +?small_stmt: (expr_stmt | assign_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr +assign_stmt: annassign | augassign | assign + +annassign: testlist_star_expr ":" test ["=" test] +assign: testlist_star_expr ("=" (yield_expr|testlist_star_expr))+ +augassign: testlist_star_expr augassign_op (yield_expr|testlist) +!augassign_op: "+=" | "-=" | "*=" | "@=" | "/=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "**=" | "//=" +?testlist_star_expr: test_or_star_expr + | test_or_star_expr ("," test_or_star_expr)+ ","? -> tuple + | test_or_star_expr "," -> tuple + +// For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: "del" exprlist +pass_stmt: "pass" +?flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: "break" +continue_stmt: "continue" +return_stmt: "return" [testlist] +yield_stmt: yield_expr +raise_stmt: "raise" [test ["from" test]] +import_stmt: import_name | import_from +import_name: "import" dotted_as_names +// note below: the ("." | "...") is necessary because "..." is tokenized as ELLIPSIS +import_from: "from" (dots? dotted_name | dots) "import" ("*" | "(" import_as_names ")" | import_as_names) +!dots: "."+ +import_as_name: name ["as" name] +dotted_as_name: dotted_name ["as" name] +import_as_names: import_as_name ("," import_as_name)* [","] +dotted_as_names: dotted_as_name ("," dotted_as_name)* +dotted_name: name ("." name)* +global_stmt: "global" name ("," name)* +nonlocal_stmt: "nonlocal" name ("," name)* +assert_stmt: "assert" test ["," test] + +?compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | match_stmt + | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: "async" (funcdef | with_stmt | for_stmt) +if_stmt: "if" test ":" suite elifs ["else" ":" suite] +elifs: elif_* +elif_: "elif" test ":" suite +while_stmt: "while" test ":" suite ["else" ":" suite] +for_stmt: "for" exprlist "in" testlist ":" suite ["else" ":" suite] +try_stmt: "try" ":" suite except_clauses ["else" ":" suite] [finally] + | "try" ":" suite finally -> try_finally +finally: "finally" ":" suite +except_clauses: except_clause+ +except_clause: "except" [test ["as" name]] ":" suite +// NB compile.c makes sure that the default except clause is last + + +with_stmt: "with" with_items ":" suite +with_items: with_item ("," with_item)* +with_item: test ["as" name] + +match_stmt: "match" test ":" _NEWLINE _INDENT case+ _DEDENT + +case: "case" pattern ["if" test] ":" suite + +?pattern: sequence_item_pattern "," _sequence_pattern -> sequence_pattern + | as_pattern +?as_pattern: or_pattern ("as" NAME)? +?or_pattern: closed_pattern ("|" closed_pattern)* +?closed_pattern: literal_pattern + | NAME -> capture_pattern + | "_" -> any_pattern + | attr_pattern + | "(" as_pattern ")" + | "[" _sequence_pattern "]" -> sequence_pattern + | "(" (sequence_item_pattern "," _sequence_pattern)? ")" -> sequence_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ","?)?"}" -> mapping_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ",")? "**" NAME ","? "}" -> mapping_star_pattern + | class_pattern + +literal_pattern: inner_literal_pattern + +?inner_literal_pattern: "None" -> const_none + | "True" -> const_true + | "False" -> const_false + | STRING -> string + | number + +attr_pattern: NAME ("." 
NAME)+ -> value + +name_or_attr_pattern: NAME ("." NAME)* -> value + +mapping_item_pattern: (literal_pattern|attr_pattern) ":" as_pattern + +_sequence_pattern: (sequence_item_pattern ("," sequence_item_pattern)* ","?)? +?sequence_item_pattern: as_pattern + | "*" NAME -> star_pattern + +class_pattern: name_or_attr_pattern "(" [arguments_pattern ","?] ")" +arguments_pattern: pos_arg_pattern ["," keyws_arg_pattern] + | keyws_arg_pattern -> no_pos_arguments + +pos_arg_pattern: as_pattern ("," as_pattern)* +keyws_arg_pattern: keyw_arg_pattern ("," keyw_arg_pattern)* +keyw_arg_pattern: NAME "=" as_pattern + + + +suite: simple_stmt | _NEWLINE _INDENT stmt+ _DEDENT + +?test: or_test ("if" or_test "else" test)? + | lambdef + | assign_expr + +assign_expr: name ":=" test + +?test_nocond: or_test | lambdef_nocond + +?or_test: and_test ("or" and_test)* +?and_test: not_test_ ("and" not_test_)* +?not_test_: "not" not_test_ -> not_test + | comparison +?comparison: expr (comp_op expr)* +star_expr: "*" expr + +?expr: or_expr +?or_expr: xor_expr ("|" xor_expr)* +?xor_expr: and_expr ("^" and_expr)* +?and_expr: shift_expr ("&" shift_expr)* +?shift_expr: arith_expr (_shift_op arith_expr)* +?arith_expr: term (_add_op term)* +?term: factor (_mul_op factor)* +?factor: _unary_op factor | power + +!_unary_op: "+"|"-"|"~" +!_add_op: "+"|"-" +!_shift_op: "<<"|">>" +!_mul_op: "*"|"@"|"/"|"%"|"//" +// <> isn't actually a valid comparison operator in Python. It's here for the +// sake of a __future__ import described in PEP 401 (which really works :-) +!comp_op: "<"|">"|"=="|">="|"<="|"<>"|"!="|"in"|"not" "in"|"is"|"is" "not" + +?power: await_expr ("**" factor)? +?await_expr: AWAIT? atom_expr +AWAIT: "await" + +?atom_expr: atom_expr "(" [arguments] ")" -> funccall + | atom_expr "[" subscriptlist "]" -> getitem + | atom_expr "." name -> getattr + | atom + +?atom: "(" yield_expr ")" + | "(" _tuple_inner? ")" -> tuple + | "(" comprehension{test_or_star_expr} ")" -> tuple_comprehension + | "[" _exprlist? "]" -> list + | "[" comprehension{test_or_star_expr} "]" -> list_comprehension + | "{" _dict_exprlist? "}" -> dict + | "{" comprehension{key_value} "}" -> dict_comprehension + | "{" _exprlist "}" -> set + | "{" comprehension{test} "}" -> set_comprehension + | name -> var + | number + | string_concat + | "(" test ")" + | "..." -> ellipsis + | "None" -> const_none + | "True" -> const_true + | "False" -> const_false + + +?string_concat: string+ + +_tuple_inner: test_or_star_expr (("," test_or_star_expr)+ [","] | ",") + +?test_or_star_expr: test + | star_expr + +?subscriptlist: subscript + | subscript (("," subscript)+ [","] | ",") -> subscript_tuple +?subscript: test | ([test] ":" [test] [sliceop]) -> slice +sliceop: ":" [test] +?exprlist: (expr|star_expr) + | (expr|star_expr) (("," (expr|star_expr))+ [","]|",") +?testlist: test | testlist_tuple +testlist_tuple: test (("," test)+ [","] | ",") +_dict_exprlist: (key_value | "**" expr) ("," (key_value | "**" expr))* [","] + +key_value: test ":" test + +_exprlist: test_or_star_expr ("," test_or_star_expr)* [","] + +classdef: "class" name ["(" [arguments] ")"] ":" suite + + + +arguments: argvalue ("," argvalue)* ("," [ starargs | kwargs])? + | starargs + | kwargs + | comprehension{test} + +starargs: stararg ("," stararg)* ("," argvalue)* ["," kwargs] +stararg: "*" test +kwargs: "**" test ("," argvalue)* + +?argvalue: test ("=" test)? 
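+
+// comprehension{...} below is a Lark rule template; it is expanded once for
+// each argument it is instantiated with (e.g. comprehension{test}).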
+ + +comprehension{comp_result}: comp_result comp_fors [comp_if] +comp_fors: comp_for+ +comp_for: [ASYNC] "for" exprlist "in" or_test +ASYNC: "async" +?comp_if: "if" test_nocond + +// not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: name + +yield_expr: "yield" [testlist] + | "yield" "from" test -> yield_from + +number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER +string: STRING // | LONG_STRING + +// Other terminals + +_NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ + +%ignore /[\t \f]+/ // WS +%ignore /\\[\t \f]*\r?\n/ // LINE_CONT +%ignore COMMENT +%declare _INDENT _DEDENT + + +// Python terminals + +!name: NAME | "match" | "case" +NAME: /[^\W\d]\w*/ +COMMENT: /#[^\n]*/ + +// We only need a usable approximation for something like this until we fully +// implement look-arounds, or use a regex/FSM library that supports them. +// STRING: /([ubf]?r?|r[ubf])("(?!"").*?(? value - -%ignore /[\t \f]+/ // WS - -!name: NAME | "match" | "case" -NAME: /[^\W\d]\w*/ - - - """, - parser="lalr", - postlex=PartialPythonIndenter(), - ) - - terminals_to_states = terminals_to_lalr_states(lp) - symbol_names_and_fsms = terminals_to_fsms(lp) - - term_type = "DEF" - term_fsm = symbol_names_and_fsms[term_type] - - # TODO FIXME: This is broken, and it's a bug in `lark`'s Python grammar? - # ptoken = "defx" - - ptoken = "ef foo" - pmatches = find_partial_matches(term_fsm, ptoken) - first_pmatch = next(pm for pm in pmatches if pm[0] is not None) - (parser_state,) = create_pmatch_parser_states( - lp, terminals_to_states, term_type, ptoken, first_pmatch - ) - # These copies also patch the lexers in the parse state, which is now - # needed for use with `parse_to_end` - parser_state = copy(parser_state) - new_parser_state, expected_next_tokens = parse_to_end(parser_state) - assert expected_next_tokens == {"NAME"} - - ptoken = "ef foo():" - pmatches = find_partial_matches(term_fsm, ptoken) - first_pmatch = next(pm for pm in pmatches if pm[0] is not None) - (parser_state,) = create_pmatch_parser_states( - lp, terminals_to_states, term_type, ptoken, first_pmatch - ) - parser_state = copy(parser_state) - new_parser_state, expected_next_tokens = parse_to_end(parser_state) - assert not expected_next_tokens - - ptoken = "ef (" - pmatches = find_partial_matches(term_fsm, ptoken) - first_pmatch = next(pm for pm in pmatches if pm[0] is not None) - (parser_state,) = create_pmatch_parser_states( - lp, terminals_to_states, term_type, ptoken, first_pmatch - ) - parser_state = copy(parser_state) - with pytest.raises(UnexpectedToken): - parse_to_end(parser_state) - - def test_map_partial_states_to_vocab_regex(): regex_string = r"([0-9]+([.][0-9]*)?|[.][0-9]+)" regex_pattern = interegular.parse_pattern(regex_string) @@ -383,3 +384,153 @@ def partial_match_filter(string, end_idx, state_seq): # Make sure the whole thing matches the regex assert re.fullmatch(regex_string, sample_seq) is not None + + +def test_get_sub_fsms_from_seq(): + name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") + name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) + + def_pattern = interegular.parse_pattern("def") + def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) + + match_pattern = interegular.parse_pattern("match") + match_fsm, _ = make_deterministic_fsm(match_pattern.to_fsm().reduce()) + + peq_pattern = interegular.parse_pattern(r"\+=") + peq_fsm, _ = make_deterministic_fsm(peq_pattern.to_fsm().reduce()) + + plus_pattern = 
interegular.parse_pattern(r"\+") + plus_fsm, _ = make_deterministic_fsm(plus_pattern.to_fsm().reduce()) + + fsms = [def_fsm, match_fsm, name_fsm, peq_fsm, plus_fsm] + + fsm, fsms_to_trans_finals = fsm_union(fsms) + + assert fsms_to_trans_finals == { + 0: ({(0, 3), (3, 9), (9, 10)}, {10}, {0: {0}, 1: {3}, 2: {9}, 3: {10}}), + 1: ( + {(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)}, + {8}, + {0: {0}, 1: {4}, 2: {5}, 3: {6}, 4: {7}, 5: {8}}, + ), + 2: ( + { + (0, 2), + (0, 3), + (0, 4), + (2, 2), + (3, 2), + (3, 9), + (4, 2), + (4, 5), + (5, 2), + (5, 6), + (6, 2), + (6, 7), + (7, 2), + (7, 8), + (8, 2), + (9, 2), + (9, 10), + (10, 2), + }, + {2, 3, 4, 5, 6, 7, 8, 9, 10}, + {0: {0}, 1: {2, 3, 4, 5, 6, 7, 8, 9, 10}}, + ), + 3: ({(0, 1), (1, 11)}, {11}, {0: {0}, 1: {1}, 2: {11}}), + 4: ({(0, 1)}, {1}, {0: {0}, 1: {1}}), + } + + assert not fsm.accepts("1a") + assert fsm.accepts("a1") + assert fsm.accepts("def") + assert fsm.accepts("match") + assert fsm.accepts("+=") + assert fsm.accepts("+") + + ((_, state_seq),) = find_partial_matches(fsm, "def", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, False, True), (2, True, True)] + + # Make sure the old-to-new state map is correct + ((_, def_state_seq),) = find_partial_matches( + def_fsm, "def", start_state=fsm.initial + ) + def_old_to_new_states = fsms_to_trans_finals[0][2] + assert all( + new_state in def_old_to_new_states[old_state] + for old_state, new_state in zip(def_state_seq, state_seq) + ) + + ((_, state_seq),) = find_partial_matches(fsm, "ef", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(2, True, True)] + + ((_, name_state_seq),) = find_partial_matches( + name_fsm, "ef", start_state=fsm.initial + ) + name_old_to_new_states = fsms_to_trans_finals[2][2] + assert all( + new_state in name_old_to_new_states[old_state] + for old_state, new_state in zip(name_state_seq, state_seq) + ) + + ((_, state_seq),) = find_partial_matches(fsm, "match", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(1, False, True), (2, True, True)] + + ((_, match_state_seq),) = find_partial_matches( + match_fsm, "match", start_state=fsm.initial + ) + match_old_to_new_states = fsms_to_trans_finals[1][2] + assert all( + new_state in match_old_to_new_states[old_state] + for old_state, new_state in zip(match_state_seq, state_seq) + ) + + ((_, state_seq),) = find_partial_matches(fsm, "defa", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(2, True, True)] + + ((_, state_seq),) = find_partial_matches(fsm, "de", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, True, False), (2, True, True)] + + ((_, state_seq),) = find_partial_matches(fsm, "+", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(3, True, False), (4, False, True)] + + ((_, state_seq),) = find_partial_matches(fsm, "+=", start_state=fsm.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(3, False, True)] + + # Test some overlapping patterns + join_fsms = [ + interegular.parse_pattern(r"JOIN").to_fsm().reduce(), + interegular.parse_pattern(r"JOIN LEFT").to_fsm().reduce(), + ] + fsm, fsms_to_trans_finals = fsm_union(join_fsms) + ((_, state_seq),) = find_partial_matches( + fsm, "OI", 
start_state=None, full_match=False + ) + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, True, False), (1, True, False)] + + ((_, state_seq),) = find_partial_matches( + fsm, "N", start_state=None, full_match=False + ) + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, False, True), (1, True, False)] + + ((_, state_seq),) = find_partial_matches( + fsm, " ", start_state=None, full_match=False + ) + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(1, True, False)] From e6ff5834e263928835e40607ba522f163ab77b39 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Wed, 2 Aug 2023 14:06:53 -0500 Subject: [PATCH 215/734] Make parse tree/value computations optional --- outlines/text/parsing.py | 120 ++++++++++++++++++++++++++++++++----- tests/text/test_parsing.py | 23 +++++++ 2 files changed, 127 insertions(+), 16 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 161e76cf..00a7a8a2 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -147,21 +147,35 @@ def make_deterministic_fsm(fsm: FSM) -> Tuple[FSM, Dict[int, int]]: class PartialParserConf(ParserConf): - __serialize_fields__ = "rules", "start", "parser_type", "deterministic" + __serialize_fields__ = ( + "rules", + "start", + "parser_type", + "deterministic", + "use_value_stack", + ) - def __init__(self, rules, callbacks, start, deterministic): + def __init__(self, rules, callbacks, start, deterministic, use_value_stack): super().__init__(rules, callbacks, start) self.deterministic = deterministic + self.use_value_stack = use_value_stack class PartialLark(Lark): - __serialize_fields__ = "parser", "rules", "options", "deterministic" + __serialize_fields__ = ( + "parser", + "rules", + "options", + "deterministic", + "use_value_stack", + ) def __init__(self, grammar, **options): # TODO: Could've extended `LarkOptions`, but all these extensions are # already way too much (and brittle). This library really needs a # complete refactoring. 
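+        # `use_value_stack` re-enables Lark's value-stack computations; by
+        # default `PartialParserState` tracks only the state stack and skips
+        # building parse-tree values.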
self.deterministic = options.pop("deterministic", False) + self.use_value_stack = options.pop("use_value_stack", False) options["regex"] = True super().__init__(grammar, **options) assert self.options.parser == "lalr" @@ -180,7 +194,11 @@ def _build_parser(self) -> "PartialParsingFrontend": self._prepare_callbacks() _validate_frontend_args(self.options.parser, self.options.lexer) parser_conf = PartialParserConf( - self.rules, self._callbacks, self.options.start, self.deterministic + self.rules, + self._callbacks, + self.options.start, + self.deterministic, + self.use_value_stack, ) # This is `_construct_parsing_frontend` expanded/inlined @@ -393,7 +411,12 @@ def to_tuple(v): zip(self._parse_table.states.keys(), new_states.keys()) ) - self.parser = PartialParser(self._parse_table, callbacks, debug) + self.parser = PartialParser( + self._parse_table, + callbacks, + debug, + use_value_stack=parser_conf.use_value_stack, + ) @classmethod def deserialize(cls, data, memo, callbacks, debug=False): @@ -404,16 +427,20 @@ def deserialize(cls, data, memo, callbacks, debug=False): class PartialParserState(ParserState): - def __copy__(self): - return type(self)( - self.parse_conf, - copy(self.lexer), - copy(self.state_stack), - deepcopy(self.value_stack), - ) + __slots__ = "use_value_stack" - def __repr__(self): - return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})" + def __init__( + self, + parse_conf, + lexer, + state_stack=None, + value_stack=None, + use_value_stack=False, + ): + super().__init__( + parse_conf, lexer, state_stack=state_stack, value_stack=value_stack + ) + self.use_value_stack = use_value_stack def feed_token(self, token, is_end=False): if token.type == "partial": @@ -438,16 +465,77 @@ def feed_token(self, token, is_end=False): ) return - super().feed_token(token, is_end=is_end) + if self.use_value_stack: + super().feed_token(token, is_end=is_end) + else: + self.feed_token_no_stack(token, is_end=is_end) + + def feed_token_no_stack(self, token, is_end=False): + """ + This is a copy of `ParserState.feed_token` with all the value stack + steps removed. Since we're not exactly parsing in order to obtain a + CST or anything similar, we can avoid the growing expense of tracking + the parse tree. 
+ """ + state_stack = self.state_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken( + token, expected, state=self, interactive_parser=None + ) + + assert arg != end_state + + if action is Shift: + # shift once and return + assert not is_end + state_stack.append(arg) + return + else: + # reduce+shift as many times as necessary + rule = arg + size = len(rule.expansion) + if size: + del state_stack[-size:] + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert _action is Shift + state_stack.append(new_state) + + if is_end and state_stack[-1] == end_state: + return + + def __copy__(self): + return type(self)( + self.parse_conf, + copy(self.lexer), + copy(self.state_stack), + deepcopy(self.value_stack), + use_value_stack=self.use_value_stack, + ) + + def __repr__(self): + return f"{type(self).__name__}(lexer={self.lexer!r}, state_stack={self.state_stack!r})" class PartialParser(_Parser): + def __init__(self, parse_table, callbacks, debug=False, use_value_stack=False): + super().__init__(parse_table, callbacks, debug=debug) + self.use_value_stack = use_value_stack + def parse( self, lexer, start, value_stack=None, state_stack=None, start_interactive=False ): parse_conf = ParseConf(self.parse_table, self.callbacks, start) parser_state = PartialParserState( - parse_conf, copy(lexer), state_stack, value_stack + parse_conf, copy(lexer), state_stack, value_stack, self.use_value_stack ) if start_interactive: return InteractiveParser(self, parser_state, parser_state.lexer) diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index fb13452f..f4a08dd3 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -37,6 +37,7 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 15) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack # End with an ignored token parser_state = lp.parse("x ") @@ -45,6 +46,7 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 1) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack # Could be a complete `=` or the start of a `==` parser_state = lp.parse("x =") @@ -55,6 +57,7 @@ def test_partial_parsing(): term_info.terminal_name == "EQUAL" for term_info in last_token.value.terminals_and_info ) + assert not parser_state.value_stack parser_state = lp.parse("x = '") assert parser_state.state_stack == [0, 58, 59] @@ -62,6 +65,7 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 6) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack parser_state = lp.parse("x = 'hi") assert parser_state.state_stack == [0, 58, 59] @@ -69,6 +73,7 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 6, 6, 6) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack parser_state = lp.parse("x = ('hi") assert parser_state.state_stack == [0, 58, 59, 254] @@ -76,6 +81,7 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 6, 6, 6) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack 
parser_state = lp.parse("def") assert parser_state.state_stack == [0] @@ -83,16 +89,19 @@ def test_partial_parsing(): assert last_token.type == "partial" assert last_token.value.fsm_state_seq == (0, 26, 99, 100) assert last_token.value.is_not_finished is True + assert not parser_state.value_stack # Now, try something incremental last_lexer_state = parser_state.lexer.state last_lexer_state.text += " blah()" lp.parse_from_state(parser_state, is_end=False) last_token = parser_state.lexer.state.last_token + assert not parser_state.value_stack last_lexer_state = parser_state.lexer.state last_valid_token = last_lexer_state.last_token assert last_valid_token.type == "RPAR" + assert not parser_state.value_stack # Something incremental and a little more complicated parser_state = lp.parse("x = 1\ndef foo(x):\n ") @@ -120,6 +129,20 @@ def test_partial_parsing(): with pytest.raises(UnexpectedToken): lp.parse("def \n") + lp = PartialLark.open_from_package( + "tests", + "partial_python.lark", + ["text"], + parser="lalr", + postlex=PartialPythonIndenter(), + start="file_input", + use_value_stack=True, + ) + parser_state = lp.parse("x = ('hi") + lp.parse_from_state(parser_state, is_end=False) + assert len(parser_state.state_stack) == 4 + assert parser_state.value_stack[-1].type == "LPAR" + def test_sequential_parse_example(): input_tokens = [ From d46a792a6d5ef20a9a5871758761aceab520bf19 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 16 Aug 2023 21:13:25 +0530 Subject: [PATCH 216/734] Use logits length for mask shape --- outlines/text/generate/regex.py | 5 ++--- .../generate/test_integration_transfomers.py | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index ba1cc731..77cdbeb7 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -133,10 +133,9 @@ def create_proposal( self.pstates = new_pstates masks = [] + mask_shape = (logits.shape[-1],) for pstate in self.pstates: - mask = torch.full( - (len(self.model.tokenizer.vocabulary),), -math.inf, device=self.device - ) + mask = torch.full(mask_shape, -math.inf, device=self.device) if pstate[1] > -1: next_support = self.pstate_to_vocab[pstate[:2]] diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 99344a07..6d73e3a8 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -225,3 +225,23 @@ class Spam(BaseModel): or isinstance(parsed["bar"], float) or isinstance(parsed["bar"], str) ) + + +def test_transformers_logits_vocab_size(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + + # Artificially increase the weights/logits size relative + # to the vocabulary + model.model.resize_token_embeddings(pad_to_multiple_of=2) + + assert len(model.tokenizer.vocabulary) == 1024 + assert model.model.base_model.wte.weight.shape[0] == 1026 + + generator = generate.choice(model, ["True", "False"]) + + rng = torch.Generator() + rng.manual_seed(4) + + masked_logits = generator("blah", rng=rng) + assert masked_logits == "True" From ae1f1c96cdf5fdc3333ade6b5446b902ccdbf4d5 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Tue, 12 Sep 2023 16:27:17 -0500 Subject: [PATCH 217/734] Rename test_prompt.py to test_prompts.py --- tests/text/{test_prompt.py => test_prompts.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/text/{test_prompt.py => test_prompts.py} (100%) diff --git a/tests/text/test_prompt.py b/tests/text/test_prompts.py similarity index 100% rename from tests/text/test_prompt.py rename to tests/text/test_prompts.py From 05098467ffc3f31a4bd50e9084aa240d720564a7 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Tue, 12 Sep 2023 16:28:39 -0500 Subject: [PATCH 218/734] Update Pydantic dependency version to match testing --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d6bb3919..b2671937 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ dependencies = [ "numpy", "pillow", "perscache", - "pydantic", + "pydantic>=2.0", "regex", "scipy", "tenacity", From 6dccd0d30348f5f2121651fbea9dc098638826b0 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Tue, 12 Sep 2023 16:43:42 -0500 Subject: [PATCH 219/734] Update organization links --- README.md | 24 ++++++++++++------------ docs/source/conf.py | 4 ++-- docs/source/installation.rst | 2 +- docs/source/reference/batching.rst | 2 +- docs/source/reference/multimodel.rst | 4 ++-- pyproject.toml | 8 ++++---- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 3c90b75b..0f698a40 100644 --- a/README.md +++ b/README.md @@ -49,9 +49,9 @@ Outlines 〰 has new releases and features coming every week! Make sure to ⭐ s ## Stay tuned for -- Context-Free Grammar guided generation ([#178](https://github.com/normal-computing/outlines/pull/178)); -- Prompt-token alignment so you don't have to think about tokenization details ([#201](https://github.com/normal-computing/outlines/pull/201)) -- An infilling DSL ([#182](https://github.com/normal-computing/outlines/issues/182)) +- Context-Free Grammar guided generation ([#178](https://github.com/outlines-dev/outlines/pull/178)); +- Prompt-token alignment so you don't have to think about tokenization details ([#201](https://github.com/outlines-dev/outlines/pull/201)) +- An infilling DSL ([#182](https://github.com/outlines-dev/outlines/issues/182)) You can follow [@NormalComputing](https://twitter.com/NormalComputing), [@remilouf](https://twitter.com/remilouf) or [@BrandonTWillard](https://twitter.com/BrandonTWillard) for regular updates! @@ -255,7 +255,7 @@ print(parsed) # name='piggyback' age=23 armor= weapon= strength=0 ``` -The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/normal-computing/outlines/issues/215), but everything else should work. +The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/outlines-dev/outlines/issues/215), but everything else should work. ## Prompting @@ -386,7 +386,7 @@ removing boilerplate prompting code. We currently only accept bug fixes and documentation contributions. 
If you have a feature request, please start a new -[discussion](https://github.com/normal-computing/outlines/discussions). The +[discussion](https://github.com/outlines-dev/outlines/discussions). The issue tracker is only intended for actionable items. ### How to contribute? @@ -399,13 +399,13 @@ Do not hesitate to open a draft PR before your contribution is ready, especially ## Examples -- [Pick the odd one out](https://github.com/normal-computing/outlines/blob/main/examples/pick_odd_one_out.py) -- [Meta prompting](https://github.com/normal-computing/outlines/blob/main/examples/meta_prompting.py) -- [ReAct](https://github.com/normal-computing/outlines/blob/main/examples/react.py) -- [Generate code to solve math problems](https://github.com/normal-computing/outlines/blob/main/examples/math_generate_code.py) -- [BabyAGI](https://github.com/normal-computing/outlines/blob/main/examples/babyagi.py) -- [Uncertainty](https://github.com/normal-computing/outlines/blob/main/examples/sampling.ipynb) -- [Simulation-based inference](https://github.com/normal-computing/outlines/blob/main/examples/simulation_based_inference.ipynb) +- [Pick the odd one out](https://github.com/outlines-dev/outlines/blob/main/examples/pick_odd_one_out.py) +- [Meta prompting](https://github.com/outlines-dev/outlines/blob/main/examples/meta_prompting.py) +- [ReAct](https://github.com/outlines-dev/outlines/blob/main/examples/react.py) +- [Generate code to solve math problems](https://github.com/outlines-dev/outlines/blob/main/examples/math_generate_code.py) +- [BabyAGI](https://github.com/outlines-dev/outlines/blob/main/examples/babyagi.py) +- [Uncertainty](https://github.com/outlines-dev/outlines/blob/main/examples/sampling.ipynb) +- [Simulation-based inference](https://github.com/outlines-dev/outlines/blob/main/examples/simulation_based_inference.ipynb) ## Cite Outlines diff --git a/docs/source/conf.py b/docs/source/conf.py index 3587d226..51175d19 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -7,7 +7,7 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "Outlines" -copyright = "2023, Normal Computing" +copyright = "2023, Normal Computing, Outlines Developers" author = "Remi Louf" release = "0.1" @@ -34,7 +34,7 @@ "icon_links": [ { "name": "GitHub", - "url": "https://github.com/normal-computing/outlines", # required + "url": "https://github.com/outlines-dev/outlines", # required "icon": "fa-brands fa-square-github", "type": "fontawesome", }, diff --git a/docs/source/installation.rst b/docs/source/installation.rst index d2b417f7..6d18cce1 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -38,7 +38,7 @@ To use the integrations with Hugging Face's `transformers `_ versions of the models. Please refer to the `PyTorch documentation `_ for questions related to **GPU support**. 
-The integration is fairly basic for now, and if you have specific performance needs please `open an issue `_ +The integration is fairly basic for now, and if you have specific performance needs please `open an issue `_ Other integrations ------------------ diff --git a/docs/source/reference/batching.rst b/docs/source/reference/batching.rst index 6182539b..20bee192 100644 --- a/docs/source/reference/batching.rst +++ b/docs/source/reference/batching.rst @@ -13,7 +13,7 @@ Outlines is sampling-first, and is built to generate several samples from the sa samples=10 ) -This will enable probabilistic applications down the line, stay tuned for more updates. In the meantime you can take a look at the `self-consistency example `_. +This will enable probabilistic applications down the line, stay tuned for more updates. In the meantime you can take a look at the `self-consistency example `_. Batching diff --git a/docs/source/reference/multimodel.rst b/docs/source/reference/multimodel.rst index 6ebef3f0..c44ef53c 100644 --- a/docs/source/reference/multimodel.rst +++ b/docs/source/reference/multimodel.rst @@ -39,7 +39,7 @@ Outlines can call models from Hugging Face's `transformers` and `diffusers` libr .. note:: - Outlines call the PyTorch version of models by default. The generation process also runs with defaults, please `open an issue `_ if you have more specific needs. + Outlines call the PyTorch version of models by default. The generation process also runs with defaults, please `open an issue `_ if you have more specific needs. Bring Your Own Model @@ -47,7 +47,7 @@ Bring Your Own Model Outlines models are currently simple functions that return a text or an image given a prompt, you can thus easily use any model. We will soon provide a more comprehensive integration that handles controlled generation for any model. -If you think the model you are using could be useful to others, `open an issue `_ 😊 +If you think the model you are using could be useful to others, `open an issue `_ 😊 Coming soon diff --git a/pyproject.toml b/pyproject.toml index b2671937..f77c13b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "outlines" -authors= [{name = "Normal Computing", email = "support@normalcomputing.com"}] +authors= [{name = "Outlines Developers"}] description = "Probabilistic Generative Model Programming" requires-python = ">=3.10" keywords=[ @@ -54,9 +54,9 @@ test = [ ] [project.urls] -homepage = "https://github.com/normal-computing/outlines" -documentation = "https://normal-computing.github.io/outlines/" -repository = "https://github.com/normal-computing/outlines" +homepage = "https://github.com/outlines-dev/outlines" +documentation = "https://outlines-dev.github.io/outlines/" +repository = "https://github.com/outlines-dev/outlines" [project.readme] file="README.md" From 702bbe7a4d892daa2296a9841a29e7e6c516ae3b Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 13 Sep 2023 18:11:54 -0500 Subject: [PATCH 220/734] Fix missing spaces in Tokenizer.convert_token_to_string --- outlines/models/transformers.py | 26 ++++++++++++++++++++++++++ tests/models/test_transformers.py | 10 ++++++++++ 2 files changed, 36 insertions(+) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 81649438..6b443a8f 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -2,6 +2,23 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union import torch +from transformers.file_utils import SPIECE_UNDERLINE + +try: + from transformers.models.llama.tokenization_llama import LlamaTokenizer +except ImportError: + + class LlamaTokenizer: # type: ignore + pass + + +try: + from transformers.models.llama.tokenization_llama_fast import LlamaTokenizerFast +except ImportError: + + class LlamaTokenizerFast: # type: ignore + pass + from outlines.models.tokenizer import Tokenizer @@ -66,6 +83,9 @@ def __init__(self, model_name: str, **kwargs): self.pad_token = self.tokenizer.pad_token self.vocabulary = self.tokenizer.get_vocab() + self.is_sentencepiece = isinstance( + self.tokenizer, (LlamaTokenizerFast, LlamaTokenizer) + ) def encode( self, prompt: Union[str, List[str]], **kwargs @@ -81,6 +101,12 @@ def decode(self, token_ids: torch.LongTensor) -> List[str]: def convert_token_to_string(self, token: str) -> str: string = self.tokenizer.convert_tokens_to_string([token]) + + if self.is_sentencepiece: + # A hack to handle missing spaces from SentencePiece tokenizers + if token.startswith(SPIECE_UNDERLINE): + return " " + string + return string diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 100ec33a..f44aca4c 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -38,6 +38,16 @@ def test_tokenizer(): isinstance(text[1], str) +def test_llama_tokenizer(): + tokenizer = TransformersTokenizer("hf-internal-testing/llama-tokenizer") + + # Broken + assert tokenizer.tokenizer.convert_tokens_to_string(["▁baz"]) == "baz" + + # Not broken + assert tokenizer.convert_token_to_string("▁baz") == " baz" + + def test_model(): with pytest.raises(ValueError, match="When passing device_map as a string"): transformers(TEST_MODEL, device="non_existent") From 9229a5d53a253613a0b020e65e6d8ff6dc7c6360 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Fri, 15 Sep 2023 23:47:49 -0500 Subject: [PATCH 221/734] More HF Llama tokenizer space fixes --- outlines/models/transformers.py | 64 +++++++++++++++++++++---------- tests/models/test_transformers.py | 4 ++ 2 files changed, 48 insertions(+), 20 deletions(-) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 6b443a8f..80ee813b 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -4,28 +4,54 @@ import torch from transformers.file_utils import SPIECE_UNDERLINE -try: - from transformers.models.llama.tokenization_llama import LlamaTokenizer -except ImportError: +from outlines.models.tokenizer import Tokenizer - class LlamaTokenizer: # type: ignore - pass +if TYPE_CHECKING: + from transformers import PreTrainedModel, PreTrainedTokenizer +__all__ = ["transformers"] -try: - from transformers.models.llama.tokenization_llama_fast import LlamaTokenizerFast -except ImportError: - class LlamaTokenizerFast: # type: ignore - pass +def get_llama_tokenizer_types(): + """Get all the Llama tokenizer types/classes that need work-arounds. 
+ When they can't be imported, a dummy class is created. -from outlines.models.tokenizer import Tokenizer + """ + try: + from transformers.models.llama import LlamaTokenizer + except ImportError: -if TYPE_CHECKING: - from transformers import PreTrainedModel, PreTrainedTokenizer + class LlamaTokenizer: # type: ignore + pass -__all__ = ["transformers"] + try: + from transformers.models.llama import LlamaTokenizerFast + except ImportError: + + class LlamaTokenizerFast: # type: ignore + pass + + try: + from transformers.models.code_llama import CodeLlamaTokenizer + except ImportError: + + class CodeLlamaTokenizer: # type: ignore + pass + + try: + from transformers.models.code_llama import CodeLlamaTokenizerFast + except ImportError: + + class CodeLlamaTokenizerFast: # type: ignore + pass + + return ( + LlamaTokenizer, + LlamaTokenizerFast, + CodeLlamaTokenizer, + CodeLlamaTokenizerFast, + ) class Transformers: @@ -83,9 +109,7 @@ def __init__(self, model_name: str, **kwargs): self.pad_token = self.tokenizer.pad_token self.vocabulary = self.tokenizer.get_vocab() - self.is_sentencepiece = isinstance( - self.tokenizer, (LlamaTokenizerFast, LlamaTokenizer) - ) + self.is_llama = isinstance(self.tokenizer, get_llama_tokenizer_types()) def encode( self, prompt: Union[str, List[str]], **kwargs @@ -102,9 +126,9 @@ def decode(self, token_ids: torch.LongTensor) -> List[str]: def convert_token_to_string(self, token: str) -> str: string = self.tokenizer.convert_tokens_to_string([token]) - if self.is_sentencepiece: - # A hack to handle missing spaces from SentencePiece tokenizers - if token.startswith(SPIECE_UNDERLINE): + if self.is_llama: + # A hack to handle missing spaces to HF's Llama tokenizers + if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": return " " + string return string diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index f44aca4c..a7e9f984 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -43,9 +43,13 @@ def test_llama_tokenizer(): # Broken assert tokenizer.tokenizer.convert_tokens_to_string(["▁baz"]) == "baz" + assert tokenizer.tokenizer.convert_tokens_to_string(["<0x20>"]) == "" + assert tokenizer.tokenizer.convert_tokens_to_string(["▁▁▁"]) == " " # Not broken assert tokenizer.convert_token_to_string("▁baz") == " baz" + assert tokenizer.convert_token_to_string("<0x20>") == " " + assert tokenizer.convert_token_to_string("▁▁▁") == " " def test_model(): From b5c22412f6bda82ebebefe532435a95da44034cf Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sat, 16 Sep 2023 01:18:43 -0500 Subject: [PATCH 222/734] Update dependencies and pin beartype in testing --- pyproject.toml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f77c13b5..1426b524 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,11 +31,9 @@ dependencies = [ "pillow", "perscache", "pydantic>=2.0", - "regex", "scipy", "tenacity", "torch", - "accelerate", ] dynamic = ["version"] @@ -43,14 +41,13 @@ dynamic = ["version"] test = [ "diffusers", "pre-commit", - "pydantic>=2.0", "pytest", "pytest-cov", "transformers", "coverage[toml]>=5.1", "diff-cover", - "lark", - "interegular", + "accelerate", + "beartype<0.16.0", ] [project.urls] From ff4ebb3d995909df15e0c4b98fa6c9e5475360d0 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Sat, 16 Sep 2023 01:05:09 -0500 Subject: [PATCH 223/734] Fix whitespace and control character handling in JSON guidance --- outlines/text/json_schema.py | 25 ++++---- .../generate/test_integration_transfomers.py | 2 +- tests/text/test_json_schema.py | 62 +++++++++---------- 3 files changed, 43 insertions(+), 46 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index c076a2e4..7fffa0fa 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -3,7 +3,7 @@ import re from typing import Dict -STRING_INNER = r'(?:[^"\\]|\\.)' +STRING_INNER = r'(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)' STRING = f'"{STRING_INNER}*"' INTEGER = r"(0|[1-9][0-9]*)" NUMBER = rf"(-)?({INTEGER})(\.[0-9]+)?([eE][+-][0-9]+)?" @@ -142,7 +142,7 @@ def expand_json_schema(raw_schema: Dict, definitions: Dict): return raw_schema -def build_schedule_from_instance(instance: Dict, indent: int = 0): +def build_schedule_from_instance(instance: Dict): """Build a generation schedule from a instance. This recursively follows the references to other instances. @@ -163,27 +163,26 @@ def build_schedule_from_instance(instance: Dict, indent: int = 0): """ schedule = [] if "properties" in instance: - schedule.append("{\n") - schedule += build_schedule_from_instance(instance["properties"], indent + 2) - if indent > 0: - schedule.append(" " * indent) - schedule.append("}") + schedule.append(r"\{") + schedule += build_schedule_from_instance(instance["properties"]) + schedule.append(r"\}") else: for i, (name, annotation) in enumerate(instance.items()): - schedule.append(" " * indent) - schedule.append(f'"{name}": ') + whitespace = r"[\n ]*" + schedule.append(f'{whitespace}"{name}"{whitespace}:{whitespace}') + if "anyOf" in annotation: schedule.append(annotation) elif annotation["type"] == "object": - schedule += build_schedule_from_instance(annotation, indent) + schedule += build_schedule_from_instance(annotation) else: schedule.append(annotation) # We cannot add commas after the last key-value pair in JSON if i == len(instance) - 1: - schedule.append("\n") + schedule.append(whitespace) else: - schedule.append(",\n") + schedule.append(f"{whitespace},") return schedule @@ -205,7 +204,7 @@ def match_step_to_regex(step): """ match step: case str() as step: - return re.escape(step) + return step case {"enum": choices, "type": "string"}: choices = [f'"{re.escape(choice)}"' for choice in choices] diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 6d73e3a8..912e4775 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -136,7 +136,7 @@ class Spam(BaseModel): sequence = generate.json(model, Spam, max_tokens=1000)(prompt, rng=rng) parsed = json.loads(sequence) assert isinstance(parsed["foo"], int) - assert isinstance(parsed["bar"], float) + assert isinstance(parsed["bar"], int) assert isinstance(parsed["spam"], str) assert isinstance(parsed["fuzz"], bool) assert len(parsed["spam"]) == 10 diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index f8814aeb..68737788 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -30,19 +30,19 @@ class User(BaseModel): schema = json.dumps(User.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "user_id": ', + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', {"title": "User Id", "type": "integer"}, - ',\n "name": ', + '[\\n 
]*,[\\n ]*"name"[\\n ]*:[\\n ]*', {"title": "Name", "type": "string"}, - ',\n "maxlength_name": ', + '[\\n ]*,[\\n ]*"maxlength_name"[\\n ]*:[\\n ]*', {"title": "Maxlength Name", "type": "string", "maxLength": 10}, - ',\n "minlength_name": ', + '[\\n ]*,[\\n ]*"minlength_name"[\\n ]*:[\\n ]*', {"title": "Minlength Name", "type": "string", "minLength": 10}, - ',\n "value": ', + '[\\n ]*,[\\n ]*"value"[\\n ]*:[\\n ]*', {"title": "Value", "type": "number"}, - ',\n "is_true": ', + '[\\n ]*,[\\n ]*"is_true"[\\n ]*:[\\n ]*', {"title": "Is True", "type": "boolean"}, - "\n}", + "[\\n ]*\\}", ] @@ -53,9 +53,9 @@ class Foo(BaseModel): schema = json.dumps(Foo.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "bar": ', + '\\{[\\n ]*"bar"[\\n ]*:[\\n ]*', {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bar"}, - "\n}", + "[\\n ]*\\}", ] @@ -67,11 +67,11 @@ class User(BaseModel): schema = json.dumps(User.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "user_id": ', + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', {"title": "User Id", "type": "integer"}, - ',\n "value": ', + '[\\n ]*,[\\n ]*"value"[\\n ]*:[\\n ]*', {"title": "Value", "type": "array", "items": {"type": "number"}}, - "\n}", + "[\\n ]*\\}", ] @@ -88,15 +88,15 @@ class User(BaseModel): schema = json.dumps(User.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "user_id": ', + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', {"title": "User Id", "type": "integer"}, - ',\n "name": ', + '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', { "title": "Name", "enum": ["John", "Marc", "Michel"], "type": "string", }, - "\n}", + "[\\n ]*\\}", ] @@ -122,15 +122,15 @@ class Spam(BaseModel): schema = json.dumps(Spam.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "foo": {\n "count": ', + '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*\\{[\\n ]*"count"[\\n ]*:[\\n ]*', {"title": "Count", "type": "integer"}, - ',\n "size": {\n "buzz": ', + '[\\n ]*,[\\n ]*"size"[\\n ]*:[\\n ]*\\{[\\n ]*"buzz"[\\n ]*:[\\n ]*', {"title": "Buzz", "type": "string"}, - '\n }\n },\n "bars": {\n "apple": ', + '[\\n ]*\\}[\\n ]*\\}[\\n ]*,[\\n ]*"bars"[\\n ]*:[\\n ]*\\{[\\n ]*"apple"[\\n ]*:[\\n ]*', {"title": "Apple", "type": "string"}, - ',\n "banana": ', + '[\\n ]*,[\\n ]*"banana"[\\n ]*:[\\n ]*', {"title": "Banana", "type": "string"}, - "\n }\n}", + "[\\n ]*\\}[\\n ]*\\}", ] @@ -145,7 +145,7 @@ class Spam(BaseModel): schema = json.dumps(Spam.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "foo": ', + '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*', { "items": { "title": "Foo", @@ -155,7 +155,7 @@ class Spam(BaseModel): "title": "Foo", "type": "array", }, - "\n}", + "[\\n ]*\\}", ] @@ -169,11 +169,11 @@ class Spam(BaseModel): schema = json.dumps(Spam.model_json_schema()) schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "foo": ', + '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*', {"title": "Foo", "type": "integer"}, - ',\n "bar": ', + '[\\n ]*,[\\n ]*"bar"[\\n ]*:[\\n ]*', {"title": "Bar", "anyOf": [{"type": "number"}, {"type": "string"}]}, - "\n}", + "[\\n ]*\\}", ] @@ -181,11 +181,11 @@ def test_json_schema(): schema = '{"title": "User", "type": "object", "properties": {"user_id": {"title": "User Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["user_id", "name"]}' schedule = build_schedule_from_schema(schema) assert schedule == [ - '{\n "user_id": ', + 
'\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', {"title": "User Id", "type": "integer"}, - ',\n "name": ', + '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', {"title": "Name", "type": "string"}, - "\n}", + "[\\n ]*\\}", ] @@ -317,7 +317,7 @@ def test_match_number(pattern, does_match): "type": "object", "properties": {"count": {"title": "Count", "type": "integer"}}, }, - '\\{\\\n\\ \\ "count":\\ ' + INTEGER + "\\\n\\}", + '\\{[\\n ]*"count"[\\n ]*:[\\n ]*(0|[1-9][0-9]*)[\\n ]*\\}', [('{\n "count": 100\n}', True)], ), ( @@ -346,9 +346,7 @@ def test_match_number(pattern, does_match): } }, }, - '\\{\\\n\\ \\ "fuzz":\\ \\{\\\n\\ \\ \\ \\ "spam":\\ ' - + INTEGER - + "\\\n\\ \\ \\}\\\n\\}", + f'\\{{[\\n ]*"fuzz"[\\n ]*:[\\n ]*\\{{[\\n ]*"spam"[\\n ]*:[\\n ]*{INTEGER}[\\n ]*\\}}[\\n ]*\\}}', [('{\n "fuzz": {\n "spam": 100\n }\n}', True)], ), ], From 67238abbd6bd845bb119cd97e4430c3aecfb919d Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sat, 23 Sep 2023 18:17:30 -0500 Subject: [PATCH 224/734] Update the package keywords --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1426b524..d98162ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,11 +8,10 @@ authors= [{name = "Outlines Developers"}] description = "Probabilistic Generative Model Programming" requires-python = ">=3.10" keywords=[ - "normal computing", "machine learning", "deep learning", "language models", - "diffusion models", + "guided generation", ] classifiers = [ "Development Status :: 5 - Production/Stable", From 6c5b2fbf463802923bcd5198838f16a164fe820c Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sat, 23 Sep 2023 18:15:49 -0500 Subject: [PATCH 225/734] Add py.typed and package data information Closes #293 --- outlines/py.typed | 0 pyproject.toml | 3 +++ 2 files changed, 3 insertions(+) create mode 100644 outlines/py.typed diff --git a/outlines/py.typed b/outlines/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/pyproject.toml b/pyproject.toml index d98162ac..2b7212ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,6 +61,9 @@ content-type = "text/markdown" [tool.setuptools] packages = ["outlines"] +[tool.setuptools.package-data] +"outlines" = ["py.typed"] + [tool.setuptools_scm] write_to = "outlines/_version.py" From 52b105e340afb916d8e99e4293daa7f1c4b95721 Mon Sep 17 00:00:00 2001 From: Vibhor Kumar Date: Sun, 24 Sep 2023 08:28:41 -0700 Subject: [PATCH 226/734] Add to generate dating app profiles- combines prompt templating with JSON generation (#287) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let me know if there's any additional information you'd like me to add. I figure this practical example will be useful to demonstrate more complex JSON generation alongside prompt templating (which is more useful the more complex the JSON structure becomes). 
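The core pattern the example exercises is, in sketch form (the model name, prompt, and fields below are illustrative stand-ins, not the example's exact contents):

    from pydantic import BaseModel

    import outlines.models as models
    import outlines.text as text


    class Profile(BaseModel):
        bio: str
        interests: list[str]


    @text.prompt
    def profile_prompt(description: str):
        """Write a dating profile for: {{ description }}"""


    model = models.transformers("gpt2")  # stand-in; the example uses MPT-7B-8k-instruct
    prompt = profile_prompt("an author and former soccer player")
    profile_json = text.generate.json(model, Profile)(prompt)
    profile = Profile.model_validate_json(profile_json)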
---------

Co-authored-by: Rémi Louf
---
 examples/dating_profile.py | 179 +++++++++++++++++++++++++++++++++++++
 1 file changed, 179 insertions(+)
 create mode 100644 examples/dating_profile.py

diff --git a/examples/dating_profile.py b/examples/dating_profile.py
new file mode 100644
index 00000000..485dfa7d
--- /dev/null
+++ b/examples/dating_profile.py
@@ -0,0 +1,179 @@
+from dataclasses import dataclass
+from enum import Enum
+
+import torch
+import transformers
+from pydantic import BaseModel, conlist
+
+import outlines.models as models
+import outlines.text as text
+
+
+class QuestionChoice(str, Enum):
+    A = "The key to my heart is"
+    B = "The first item on my bucket list is"
+    C = "Perks of dating me"
+    D = "Message me if you also love"
+    E = "People would describe me as"
+    F = "I can beat you in a game of"
+
+
+@dataclass
+class QuestionAnswer:
+    question: QuestionChoice
+    answer: str
+
+
+class DatingProfile(BaseModel):
+    # It is possible to put length constraints on these strings using constr; however, this appears to dramatically increase the generation time
+    # This may be resolved in the future with this PR: https://github.com/outlines-dev/outlines/pull/272
+    bio: str
+    job: str
+    # Ignore mypy checks here because it still doesn't support conlist or constr: https://github.com/pydantic/pydantic/issues/975
+    interests: conlist(str, min_length=1, max_length=5)  # type: ignore
+    qna1: QuestionAnswer
+    qna2: QuestionAnswer
+
+
+@dataclass
+class Example:
+    description: str
+    profile: DatingProfile
+
+
+@text.prompt
+def dating_profile_prompt(description: str, examples: list[Example]):
+    """
+    You are a world-renowned matchmaker who understands the modern dating market. Your job is to generate dating app profiles for male clients interested in women based on a provided description. The profiles should be authentic, show off their strengths, and maximize their likelihood of getting matches on dating apps.
+    Here are some examples of past clients that you have successfully created profiles for:
+    {% for example in examples %}
+    Description:
+    {{ example.description }}
+    Profile:
+    {{ example.profile }}
+    {% endfor %}
+    Here is the new client who you need to create a profile for:
+    Description: {{ description }}
+    Profile:
+    """
+
+
+samples: list[Example] = [
+    Example(
+        description="I'm an author and former professional soccer player living in Seattle who publishes popular fiction books. A typical day for me starts by hanging out with my cat, drinking a coffee, and reading as much as I can in a few hours. Then, I'll prepare a quick smoothie before starting to write for a few hours, take a break with soccer or running a few miles, and finally meet friends for dinner at a new, hip restaurant in the evening. Sometimes we go axe-throwing afterwards, or play poker, or watch a comedy show, or visit a dive bar. On my vacations, I travel extensively to countries in South America, Europe, and Asia, with the goal of visiting them all!",
+        profile=DatingProfile(
+            bio="Adventurer, dreamer, author, and soccer enthusiast. Life’s too short to waste time so I make the most of each day by exploring new places and playing with my friends on the pitch. What’s your favorite way to get out and have fun?",
+            job="Famous Soccer Player -> Famous Author",
+            interests=["Soccer", "Travel", "Friends", "Books", "Fluffy Animals"],
+            qna1=QuestionAnswer(
+                question=QuestionChoice.B, answer="swim in all seven oceans!"
+ ), + qna2=QuestionAnswer( + question=QuestionChoice.E, + answer="fun-loving, adventurous, and a little bit crazy", + ), + ), + ), + Example( + description="I run my company and build houses for a living. I'm a big fan of the outdoors and love to go hiking, camping, and fishing. I don't like video games, but do like to watch movies. My love language is home-cooked food, and I'm looking for someone who isn't afraid to get their hands dirty.", + profile=DatingProfile( + bio="If you're looking for a Montana man who loves to get outdoors and hunt, and who's in-tune with his masculinity then I'm your guy!", + job="House Construction Manager / Entrepreneur", + interests=["Hunting", "Hiking", "The outdoors", "Home-cooked food"], + qna1=QuestionAnswer(question=QuestionChoice.A, answer="food made at home"), + qna2=QuestionAnswer( + question=QuestionChoice.C, + answer="having a man in your life who can fix anything", + ), + ), + ), + Example( + description="I run my own Youtube channel with 10M subscribers. I love working with kids, and my audience skews pretty young too. In my free time, I play Fortnite and Roblox. I'm looking for someone who is also a gamer and likes to have fun. I'm learning Japanese in my free time as well as how to cook.", + profile=DatingProfile( + bio="Easy on the eyes (find me on Youtube!) and great with kids. What more do you need?", + job="Youtuber 10M+ subscribers", + interests=["Kids", "Gaming", "Japanese"], + qna1=QuestionAnswer(question=QuestionChoice.D, answer="anime and gaming!"), + qna2=QuestionAnswer(question=QuestionChoice.F, answer="Fortnite, gg ez"), + ), + ), +] + + +# Below requires ~13GB of GPU memory +# https://huggingface.co/mosaicml/mpt-7b-8k-instruct +# Motivation: Reasonably large model that fits on a single GPU and has been fine-tuned for a larger context window +config = transformers.AutoConfig.from_pretrained( + "mosaicml/mpt-7b-8k-instruct", trust_remote_code=True +) +config.init_device = "meta" +model = models.transformers( + model_name="mosaicml/mpt-7b-8k-instruct", + device="cuda", + model_kwargs={ + "config": config, + "trust_remote_code": True, + "torch_dtype": torch.bfloat16, + "device_map": {"": 0}, + }, +) + +new_description = "I'm a laid-back lawyer who spends a lot of his free-time gaming. I work in a corporate office, but ended up here after the start-up I cofounded got acquired, so still play ping pong with my cool coworkers every day. I have a bar at home where I make cocktails, which is great for entertaining friends. I secretly like to wear suits and get a new one tailored every few months. I also like weddings because I get to wear those suits, and it's a good excuse for a date. I watch the latest series because I'm paying, with my hard-earned money, for every streaming service." + +prompt = dating_profile_prompt(description=new_description, examples=samples) +profile = text.generate.json(model, DatingProfile)(prompt) +print(profile) + +parsed_profile = DatingProfile.model_validate_json(profile) +print(parsed_profile) + +# Sample generated profiles +""" +{ + "bio": "I'm an ambitious lawyer with a casual and fashionable style. I love games and sports, but my true passion is preparing refreshing cocktails at home and dressing to the nines at weddings. I'm currently looking for a woman to show a good time to and get a kiss on the opulent suit I just had made. 
Send resumà € to this inbox.", + "job": "Lawyer", + "interests": + [ + "Stylish guys", + "Gaming", + "Ping pong", + "Cocktails", + "Weddings" + ], + "qna1": + { + "question": "The first item on my bucket list is", + "answer": "be married and have a family." + }, + "qna2": + { + "question": "People would describe me as", + "answer": "charming, stylish, and funny." + } +} +""" + +""" +{ + "bio": "I’m a sexy lawyer with time on my hands. I love to game and play ping pong, but the real reason you should swipe to the right is because I look great in a suit. Who doesn’t love a man in a suit? Just saying. Send me a message if you think it’s time to take your dating life to the next level.", + "job": "Lawyer", + "interests": + [ + "Gaming", + "Ping Pong", + "Tailored Suits", + "Weddings", + "Streaming Services" + ], + "qna1": + { + "question": "The first item on my bucket list is", + "answer": "simulate space but stay alive for as long as possible" + }, + "qna2": + { + "question": "People would describe me as", + "answer": "easy-going, a little nerdy but with a mature essence" + } +} +""" From 7d7abcc7ae55444016bcbb8408a2d39327a08411 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" <971601+brandonwillard@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:04:50 -0500 Subject: [PATCH 227/734] Refactor for Python 3.8-3.9 compatibility (#295) --- .github/workflows/tests.yml | 8 ++++-- outlines/text/json_schema.py | 49 ++++++++++++++++++++++-------------- outlines/text/parsing.py | 4 +-- outlines/text/prompts.py | 4 +-- pyproject.toml | 2 +- 5 files changed, 41 insertions(+), 26 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index dab938cb..b8d4208a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,11 +20,15 @@ jobs: tests: name: Run the tests runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.8", "3.10"] steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: ${{ matrix.python-version }} - name: Set up test environment run: | python -m pip install --upgrade pip diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 7fffa0fa..6e537e21 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -202,39 +202,50 @@ def match_step_to_regex(step): schedule's step. 
""" - match step: - case str() as step: - return step + if isinstance(step, str): + return step - case {"enum": choices, "type": "string"}: - choices = [f'"{re.escape(choice)}"' for choice in choices] + if isinstance(step, dict): + keys = set(step.keys()) + + if all(key in keys for key in ("enum", "type")) and step["type"] == "string": + choices = [f'"{re.escape(choice)}"' for choice in step["enum"]] return f"({'|'.join(choices)})" - case {"enum": choices}: - choices = [re.escape(str(choice)) for choice in choices] + + elif "enum" in keys: + choices = [re.escape(str(choice)) for choice in step["enum"]] return f"({'|'.join(choices)})" - case {"type": "array", "items": items}: - item_regexes = match_step_to_regex(items) + elif all(key in keys for key in ("type", "items")) and step["type"] == "array": + item_regexes = match_step_to_regex(step["items"]) return rf"\[({item_regexes})(,({item_regexes}))*\]" - case {"type": "object"} as object: - steps = build_schedule_from_schema(json.dumps(object)) + elif "type" in keys and step["type"] == "object": + steps = build_schedule_from_schema(json.dumps(step)) regex_str = "" for step in steps: regex_str += match_step_to_regex(step) return regex_str - case {"type": "string", "maxLength": max_length}: + elif ( + all(key in keys for key in ("type", "maxLength")) + and step["type"] == "string" + ): + max_length = step["maxLength"] return f'"{STRING_INNER}{{,{max_length}}}"' - case {"type": "string", "minLength": min_length}: + + elif ( + all(key in keys for key in ("type", "minLength")) + and step["type"] == "string" + ): + min_length = step["minLength"] return f'"{STRING_INNER}{{{min_length},}}"' - case {"type": field_type}: - return type_to_regex[field_type] + elif "type" in keys: + return type_to_regex[step["type"]] - case {"anyOf": choices}: - regexes = [match_step_to_regex(choice) for choice in choices] + elif "anyOf" in keys: + regexes = [match_step_to_regex(choice) for choice in step["anyOf"]] return rf"({'|'.join(regexes)})" - case _: - raise NotImplementedError + raise NotImplementedError diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 00a7a8a2..e1fd2e3f 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,7 +1,7 @@ from collections import ChainMap from copy import copy, deepcopy from dataclasses import dataclass -from functools import cache +from functools import lru_cache from typing import ( Any, Callable, @@ -576,7 +576,7 @@ def parse_from_state(self, state, last_token=None, is_end=False): class PartialScanner(Scanner): @classmethod - @cache + @lru_cache def construct_terminal_fsm(cls, terminal): # TODO: This should really be done at the lexer/parser level so that # the lifetime of these objects is tied to the parser itself. 
diff --git a/outlines/text/prompts.py b/outlines/text/prompts.py index 8b3d5eba..e58954ee 100644 --- a/outlines/text/prompts.py +++ b/outlines/text/prompts.py @@ -4,7 +4,7 @@ import re import textwrap from dataclasses import dataclass -from typing import Any, Callable, Dict, List, Optional, cast +from typing import Any, Callable, Dict, List, Optional, Type, cast from jinja2 import Environment, StrictUndefined from pydantic import BaseModel @@ -284,7 +284,7 @@ def get_schema_dict(model: Dict): @get_schema.register(type(BaseModel)) -def get_schema_pydantic(model: type[BaseModel]): +def get_schema_pydantic(model: Type[BaseModel]): """Return the schema of a Pydantic model.""" if not type(model) == type(BaseModel): raise TypeError("The `schema` filter only applies to Pydantic models.") diff --git a/pyproject.toml b/pyproject.toml index 2b7212ff..4da15828 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta" name = "outlines" authors= [{name = "Outlines Developers"}] description = "Probabilistic Generative Model Programming" -requires-python = ">=3.10" +requires-python = ">=3.8" keywords=[ "machine learning", "deep learning", From cae6a534f7b4aa8ea9b64aae2bd9f4335a5f24fc Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Mon, 25 Sep 2023 14:42:20 -0500 Subject: [PATCH 228/734] Add missing colon from agent example --- docs/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index b62d0392..6b995942 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -65,7 +65,7 @@ A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: """ - def agent(objective, goals, tools) + def agent(objective, goals, tools): complete = models.text_completion.hf("sshleifer/tiny-gpt2") prompt = agent_prompt(objective, goals, tools , command_response) answer = complete(prompt) From 37847847fe6b37bc9a6eb4e089ea596b84302fc6 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Sat, 9 Sep 2023 22:36:52 -0500 Subject: [PATCH 229/734] Add special_tokens to Tokenizer interface --- outlines/models/tokenizer.py | 3 ++- outlines/models/transformers.py | 2 ++ tests/text/generate/test_regex.py | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/outlines/models/tokenizer.py b/outlines/models/tokenizer.py index bffcc517..209bc947 100644 --- a/outlines/models/tokenizer.py +++ b/outlines/models/tokenizer.py @@ -1,5 +1,5 @@ from abc import abstractmethod -from typing import Dict, List, Protocol, Tuple, Union +from typing import Dict, List, Protocol, Set, Tuple, Union import numpy as np from numpy.typing import NDArray @@ -10,6 +10,7 @@ class Tokenizer(Protocol): eos_token_id: int pad_token_id: int vocabulary: Dict[str, int] + special_tokens: Set[int] @abstractmethod def encode( diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 80ee813b..d20cc67c 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -108,6 +108,8 @@ def __init__(self, model_name: str, **kwargs): self.pad_token_id = self.tokenizer.pad_token_id self.pad_token = self.tokenizer.pad_token + self.special_tokens = set(self.tokenizer.special_tokens_map.values()) + self.vocabulary = self.tokenizer.get_vocab() self.is_llama = isinstance(self.tokenizer, get_llama_tokenizer_types()) diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py index 6bdac68a..5ef3afcd 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/text/generate/test_regex.py @@ -13,6 +13,7 @@ class Tokenizer: pad_token_id = -1 vocabulary = {"": 0, "-": 1, "1": 2, "0.": 3, "431": 4, "a": 5, "A": 6} tokens = list(vocabulary.keys()) + special_tokens = {""} def decode(self, token_ids): decoded = [] From 062c126d2361ec96d956b88e2dde0448e175d143 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 27 Sep 2023 13:35:09 -0500 Subject: [PATCH 230/734] Make tokenizers hashable --- outlines/models/tokenizer.py | 4 ++-- outlines/models/transformers.py | 11 +++++++++++ pyproject.toml | 3 +++ tests/models/test_transformers.py | 7 +++++++ 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/outlines/models/tokenizer.py b/outlines/models/tokenizer.py index 209bc947..72bdae0f 100644 --- a/outlines/models/tokenizer.py +++ b/outlines/models/tokenizer.py @@ -1,11 +1,11 @@ from abc import abstractmethod -from typing import Dict, List, Protocol, Set, Tuple, Union +from typing import Dict, Hashable, List, Protocol, Set, Tuple, Union import numpy as np from numpy.typing import NDArray -class Tokenizer(Protocol): +class Tokenizer(Protocol, Hashable): eos_token: str eos_token_id: int pad_token_id: int diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index d20cc67c..64242388 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -2,6 +2,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union import torch +from datasets.fingerprint import Hasher from transformers.file_utils import SPIECE_UNDERLINE from outlines.models.tokenizer import Tokenizer @@ -97,6 +98,8 @@ def __init__(self, model_name: str, **kwargs): from transformers import AutoTokenizer kwargs.setdefault("padding_side", "left") + self.model_name = model_name + self.kwargs = frozenset(kwargs.items()) self.tokenizer = AutoTokenizer.from_pretrained(model_name, **kwargs) self.eos_token_id = self.tokenizer.eos_token_id self.eos_token = self.tokenizer.eos_token @@ -135,6 +138,14 @@ def convert_token_to_string(self, token: str) -> str: return string + def __eq__(self, other): + if isinstance(other, type(self)): + return other.model_name == self.model_name and other.kwargs == self.kwargs + return NotImplemented + + def __hash__(self): + return hash(Hasher.hash(self.tokenizer)) + def transformers( model_name: str, diff --git a/pyproject.toml b/pyproject.toml index 4da15828..75db171c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ test = [ "diff-cover", "accelerate", "beartype<0.16.0", + "datasets", ] [project.urls] @@ -98,6 +99,8 @@ module = [ "transformers.*", "lark.*", "interegular.*", + "numba.*", + "datasets.*", ] ignore_missing_imports = true diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index a7e9f984..8357fc59 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -78,3 +78,10 @@ def test_model(): assert logits.shape[0] == 2 assert logits.shape[1] == 2 assert torch.equal(logits[0][0], logits[1][1]) + + +def test_tokenizer_eq_hash(): + tokenizer = TransformersTokenizer("gpt2") + tokenizer2 = TransformersTokenizer("gpt2") + assert tokenizer == tokenizer2 + assert hash(tokenizer) == hash(tokenizer2) From 38b0b1060047b3ec03613b2ff5b503ea4156d4b9 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 6 Sep 2023 16:47:03 -0500 Subject: [PATCH 231/734] Refactor Regex and introduce Numba-based FSM utilities --- examples/parsing.py | 17 - outlines/text/fsm.py | 701 ++++++++++++++++++ outlines/text/generate/regex.py | 267 ++++--- outlines/text/parsing.py | 428 +---------- pyproject.toml | 7 +- .../generate/test_integration_transfomers.py | 15 + tests/text/generate/test_regex.py | 81 ++ tests/text/test_fsm.py | 419 +++++++++++ tests/text/test_parsing.py | 367 +-------- 9 files changed, 1414 insertions(+), 888 deletions(-) create mode 100644 outlines/text/fsm.py create mode 100644 tests/text/test_fsm.py diff --git a/examples/parsing.py b/examples/parsing.py index 1e6e05c4..f1b71cba 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -26,23 +26,6 @@ checkpoint, trust_remote_code=True, revision=revision ).to(device) -# import urllib.request -# -# sql_grammar_url = "https://github.com/zbrookle/sql_to_ibis/raw/0e9226da42065940ce21439d490f9fcacadc7f92/sql_to_ibis/grammar/sql.lark" -# sql_grammar = "".join( -# [line.decode("utf-8") for line in urllib.request.urlopen(sql_grammar_url)] -# ) -# with open("sql_grammar.lark", "w") as f: -# f.write(sql_grammar) -# -# TODO: `_STRING_ESC_INNER` from `%import common.ESCAPED_STRING` introduces a -# (potentially superfluous) look-back; we need to replace it or implement -# look-backs. -# parser = PartialLark.open( -# "sql_grammar.lark", -# parser="lalr", -# ) - parser = PartialLark.open_from_package( "tests", "partial_python.lark", diff --git a/outlines/text/fsm.py b/outlines/text/fsm.py new file mode 100644 index 00000000..d531a4e6 --- /dev/null +++ b/outlines/text/fsm.py @@ -0,0 +1,701 @@ +from collections import namedtuple +from functools import lru_cache +from itertools import chain +from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Sequence, Set, Tuple + +import numba +import numpy as np +from interegular.fsm import FSM, Alphabet, OblivionError, anything_else +from joblib import Parallel, delayed +from numba.typed.typedobjectutils import _nonoptional + +if TYPE_CHECKING: + from outlines.models.tokenizer import Tokenizer + + +class BetterAlphabet(Alphabet): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert anything_else in self._symbol_mapping + self.anything_value = self._symbol_mapping[anything_else] + + def __getitem__(self, item): + return self._symbol_mapping.get(item, self.anything_value) + + def copy(self): + return BetterAlphabet(self._symbol_mapping.copy()) + + +class BetterFSM(FSM): + flat_transition_map: Dict[Tuple[int, int], int] + trans_key_to_states: Dict[int, List[int]] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if not isinstance(self.alphabet, BetterAlphabet): + self.__dict__["alphabet"] = BetterAlphabet(self.alphabet._symbol_mapping) + + flat_transition_map = {} + trans_key_to_states = {} + for from_state, trans_map in self.map.items(): + for trans_key, to_state in trans_map.items(): + flat_transition_map[(from_state, trans_key)] = to_state + trans_key_to_states.setdefault(trans_key, set()).add(from_state) + + self.__dict__["trans_key_to_states"] = trans_key_to_states + self.__dict__["flat_transition_map"] = flat_transition_map + self.__dict__["_fsm_info"] = None + + def copy(self): + return BetterFSM( + alphabet=self.alphabet.copy(), + states=self.states.copy(), + initial=self.initial, + finals=self.finals.copy(), + map=self.map.copy(), + __no_validation__=True, + ) + + @property + def 
fsm_info(self): + if self._fsm_info is None: + flat_transition_map_items = np.fromiter( + ((a[0], a[1], b) for a, b in self.flat_transition_map.items()), + dtype=np.dtype("i8, i8, i8"), + ) + trans_key_to_states_items = np.fromiter( + ((k, z) for k, v in self.trans_key_to_states.items() for z in v), + dtype=np.dtype("i8, i8"), + ) + alphabet_symbol_mapping_items = np.fromiter( + ( + it + for it in self.alphabet._symbol_mapping.items() + if it[0] != anything_else + ), + dtype=np.dtype("U1, i8"), + ) + nb_finals = np.fromiter(self.finals, dtype=np.dtype("i8")) + self.__dict__["_fsm_info"] = create_fsm_info( + self.initial, + nb_finals, + flat_transition_map_items, + trans_key_to_states_items, + self.alphabet.anything_value, + alphabet_symbol_mapping_items, + ) + + return self._fsm_info + + +nb_int_list_type = numba.types.ListType(numba.int64) +nb_int_pair_type = numba.types.UniTuple(numba.int64, 2) +nb_unichar_1_type = numba.types.UnicodeCharSeq(1) + + +@numba.njit(cache=True) +def create_fsm_info( + py_initial, + py_finals, + flat_transition_map_items, + trans_key_to_states_items, + py_anything_value, + alphabet_symbol_mapping_items, +): + trans_key_to_states = numba.typed.Dict.empty(numba.int64, nb_int_list_type) + for trans_key_and_state in trans_key_to_states_items: + trans_key_to_states.setdefault( + trans_key_and_state[0], numba.typed.List.empty_list(numba.int64) + ).append(trans_key_and_state[1]) + + flat_transition_map = numba.typed.Dict.empty(nb_int_pair_type, numba.int64) + for trans_key_and_state in flat_transition_map_items: + flat_transition_map[ + (trans_key_and_state[0], trans_key_and_state[1]) + ] = trans_key_and_state[2] + + alphabet_symbol_map = numba.typed.Dict.empty(nb_unichar_1_type, numba.int64) + for symbol_and_trans_key in alphabet_symbol_mapping_items: + alphabet_symbol_map[symbol_and_trans_key[0]] = symbol_and_trans_key[1] + + initial = numba.int64(py_initial) + + finals = set() + for final in py_finals: + finals.add(final) + + anything_value = numba.int64(py_anything_value) + + return FSMInfo( + initial, + finals, + flat_transition_map, + trans_key_to_states, + anything_value, + alphabet_symbol_map, + ) + + +FSMInfo = namedtuple( + "FSMInfo", + [ + "initial", + "finals", + "transitions", + "trans_key_to_states", + "alphabet_anything_value", + "alphabet_symbol_mapping", + ], +) + +spec = [ + numba.int64, + numba.types.Set(numba.int64), + numba.types.DictType(numba.types.UniTuple(numba.int64, 2), numba.int64), + numba.types.DictType(numba.int64, numba.types.ListType(numba.int64)), + numba.optional(numba.int64), + numba.types.DictType(numba.types.string, numba.int64), +] + +FSMInfoNumbaType = numba.types.NamedTuple(spec, FSMInfo) + + +def make_deterministic_fsm(fsm: FSM) -> Tuple[BetterFSM, Dict[int, int]]: + """Construct an equivalent FSM with deterministic state labels.""" + old_to_new_trans_keys = { + trans_key: i + for i, (trans_key, _) in enumerate( + sorted(fsm.alphabet.by_transition.items(), key=lambda x: sorted(x[1])) + ) + } + + new_symbol_mapping = { + symbol: old_to_new_trans_keys[trans_key] + for symbol, trans_key in fsm.alphabet._symbol_mapping.items() + } + + new_alphabet = BetterAlphabet(new_symbol_mapping) + + new_map = { + from_state: { + old_to_new_trans_keys[trans_key]: to_state + for trans_key, to_state in trans_map.items() + } + for from_state, trans_map in fsm.map.items() + } + + old_to_new_states = {} + old_to_new_states[fsm.initial] = 0 + + i = 0 + seen = {fsm.initial} + old_state_queue = [fsm.initial] + while old_state_queue: + old_state = 
old_state_queue.pop(-1) + transitions = new_map[old_state] + sorted_transitions = sorted(transitions.items(), key=lambda v: v[0]) + for _, old_state in sorted_transitions: + if old_state not in seen: + old_state_queue.append(old_state) + seen.add(old_state) + if old_state not in old_to_new_states: + i += 1 + old_to_new_states[old_state] = i + + new_map = dict( + sorted( + ( + ( + old_to_new_states[from_state], + dict( + sorted( + ( + (trans_key, old_to_new_states[to_state]) + for trans_key, to_state in trans_map.items() + ), + key=lambda v: v[0], + ) + ), + ) + for from_state, trans_map in new_map.items() + ), + key=lambda v: v[0], + ) + ) + + new_initial = 0 + new_finals = frozenset( + sorted(old_to_new_states[old_state] for old_state in fsm.finals) + ) + new_states = frozenset(sorted(new_map.keys())) + + new_fsm = BetterFSM(new_alphabet, new_states, new_initial, new_finals, new_map) + + return new_fsm, old_to_new_states + + +@numba.njit(nogil=True, cache=True) +def _walk_fsm( + fsm_transitions: Dict[Tuple[int, int], int], + alphabet_symbol_mapping: Dict[str, int], + alphabet_anything_value: int, + fsm_initial: int, + fsm_finals: Set[int], + input_string: str, + start_state: int, + full_match: bool = True, +) -> List[int]: + state = fsm_initial + accepted_states: List[int] = numba.typed.List.empty_list(numba.int64) + last_final_idx: int = numba.uint64(0) + + for i, symbol in enumerate(input_string): + trans_key = alphabet_symbol_mapping.get(symbol, alphabet_anything_value) + + if state == fsm_initial: + new_state = fsm_transitions.get((start_state, trans_key)) + else: + new_state = fsm_transitions.get((state, trans_key)) + + if new_state is None: + if full_match: + if state in fsm_finals: + break + elif last_final_idx > 0: + accepted_states = accepted_states[:last_final_idx] + break + + return numba.typed.List.empty_list(numba.int64) + + state = new_state + + if state in fsm_finals: + last_final_idx = numba.uint64(i + 1) + + accepted_states.append(_nonoptional(state)) + + terminated = state in fsm_finals + if not terminated and state == fsm_initial: + return numba.typed.List.empty_list(numba.int64) + + return accepted_states + + +def walk_fsm( + fsm_info, + input_string: str, + start_state: int, + full_match: bool = True, +) -> List[int]: + return _walk_fsm( + fsm_info.transitions, + fsm_info.alphabet_symbol_mapping, + fsm_info.alphabet_anything_value, + fsm_info.initial, + fsm_info.finals, + input_string, + start_state, + full_match=full_match, + ) + + +# TODO FIXME: Can't cache this due to https://github.com/numba/numba/issues/9177 +@numba.njit(nogil=True) +def find_partial_matches( + fsm_info: FSMInfo, + input_string: str, + full_match: bool = True, +) -> Generator[Tuple[int, List[int]], None, None]: + """Find the states in the finite state machine `fsm_info` that accept `input_string`. + + This will consider all possible states in the finite state machine (FSM) + that accept the beginning of `input_string` as starting points, unless a + specific `start_state` is provided. + + Parameters + ---------- + fsm_info + The finite state machine. + input_string + The string for which we generate partial matches. + full_match + Matches must cover the entire string. + + Returns + ------- + A set of tuples corresponding to each valid starting state in the FSM. The + first element of each tuple contains an integer indicating the position in + `input_string` at which the FSM stopped. 
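Stripped of the Numba typing, the walker above is short: map each character to its transition key (falling back to the anything_else value), follow the flattened (state, trans_key) -> state map, and return an empty result as soon as no transition exists. A pure-Python sketch of the full-match case only; the prefix-match bookkeeping through last_final_idx is deliberately omitted:

def walk_fsm_py(transitions, symbol_map, anything_value, input_string, start_state):
    # transitions: dict mapping (state, trans_key) -> next state
    state = start_state
    visited = []
    for symbol in input_string:
        trans_key = symbol_map.get(symbol, anything_value)
        state = transitions.get((state, trans_key))
        if state is None:
            return []  # dead end: the FSM cannot consume the rest of the string
        visited.append(state)
    return visited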
The second element is the tuple + of states visited during execution of the FSM plus the next, unvisited + transition state. + + """ + + if len(input_string) == 0: + return + + trans_key = fsm_info.alphabet_symbol_mapping.get( + input_string[0], fsm_info.alphabet_anything_value + ) + + for state in fsm_info.trans_key_to_states.get( + trans_key, numba.typed.List.empty_list(numba.int64) # type: ignore + ): + path = _walk_fsm( + fsm_info.transitions, + fsm_info.alphabet_symbol_mapping, + fsm_info.alphabet_anything_value, + fsm_info.initial, + fsm_info.finals, + input_string, + state, + full_match=full_match, + ) + if path: + path.insert(0, state) + res = (len(path) - 2, path) + yield res + + +@numba.njit(nogil=True, cache=True) +def process_token_string( + fsm_info: FSMInfo, + token: str, + token_idx: int, + final_state_string: Optional[str] = None, +) -> Set[Tuple[int, int]]: + res = set() + vocab_string_len = len(token) + + for end_idx, state_seq in find_partial_matches(fsm_info, token): + if end_idx is not None and end_idx < vocab_string_len - 1: + continue + + res.add((state_seq[0], token_idx)) + + if token == final_state_string: + # Allow transitions to EOS from all terminals FSM states + for state in fsm_info.finals: + res.add((state, token_idx)) + + return res + + +def create_fsm_index( + fsm_info: FSMInfo, + vocabulary: Dict[str, int], + final_state_string: Optional[str] = None, + n_jobs=-1, +) -> Dict[int, Set[int]]: + """Construct a map from FSM states to subsets of `vocabulary`. + + The subsets of `vocabulary` consist of elements that are accepted by--or + transition to--the corresponding partial parse states. + + Parameters + ---------- + fsm + The finite-state machine. + vocabulary + The vocabulary composed of token strings mapped to token IDs. + final_state_string + A string from `vocabulary` that is to be added to all the final states + in the FSM (e.g. ``""``). + """ + + results = Parallel(backend="threading", n_jobs=n_jobs, return_as="generator")( + delayed(process_token_string)(fsm_info, token, token_idx, final_state_string) + for token, token_idx in vocabulary.items() + ) + + states_to_token_subsets: Dict[int, Set[int]] = {} + + for fsm_state, token_idx in chain.from_iterable(results): + states_to_token_subsets.setdefault(fsm_state, set()).add(token_idx) + + return states_to_token_subsets + + +def fsm_union( + fsms: Sequence[FSM], +) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]: + """Construct an FSM representing the union of the FSMs in `fsms`. + + This is an updated version of `interegular.fsm.FSM.union` made to return an + extra map of component FSMs to the sets of state transitions that + correspond to them in the new FSM. 
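Conceptually, create_fsm_index above inverts partial matching: for every vocabulary string it finds each FSM state from which the whole token can be consumed, then files the token id under that state. A toy illustration with a hand-written transition table; characters are used directly as transition keys here, whereas the real code first maps symbols to integer keys:

# Two-state FSM: 0 --"a"--> 1, 1 --"b"--> 1
transitions = {(0, "a"): 1, (1, "b"): 1}
vocabulary = {"a": 0, "ab": 1, "b": 2, "c": 3}

def consumes(token, start):
    state = start
    for ch in token:
        state = transitions.get((state, ch))
        if state is None:
            return False
    return True

index = {}
for token, token_id in vocabulary.items():
    for state in (0, 1):
        if consumes(token, state):
            index.setdefault(state, set()).add(token_id)

assert index == {0: {0, 1}, 1: {2}}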
+ + """ + + alphabet, new_to_old = Alphabet.union(*[fsm.alphabet for fsm in fsms]) + + indexed_fsms = tuple(enumerate(fsms)) + + initial = {i: fsm.initial for (i, fsm) in indexed_fsms} + + # Dedicated function accepting a "superset" and returning the next + # "superset" obtained by following this transition in the new FSM + def follow(current_state, new_transition: int): + next = {} + for i, f in indexed_fsms: + old_transition = new_to_old[i][new_transition] + if ( + i in current_state + and current_state[i] in f.map + and old_transition in f.map[current_state[i]] + ): + next[i] = f.map[current_state[i]][old_transition] + if not next: + raise OblivionError + return next + + states = [initial] + finals: Set[int] = set() + map: Dict[int, Dict[int, int]] = {} + + # Map component FSMs to their new state-to-state transitions, finals, and a + # map translating component FSM states to aggregate FSM states + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ] = {} + + i = 0 + while i < len(states): + state = states[i] + + # Add to the finals of the aggregate FSM whenever we hit a final in a + # component FSM + if any(state.get(j, -1) in fsm.finals for (j, fsm) in indexed_fsms): + finals.add(i) + + # Compute the map for this state + map[i] = {} + for transition in alphabet.by_transition: + try: + next = follow(state, transition) + except OblivionError: + # Reached an oblivion state; don't list it + continue + else: + try: + # TODO: Seems like this could--and should--be avoided + j = states.index(next) + except ValueError: + j = len(states) + states.append(next) + + map[i][transition] = j + + for fsm_id, fsm_state in next.items(): + ( + fsm_transitions, + fsm_finals, + fsm_old_to_new, + ) = fsms_to_trans_finals.setdefault(fsm_id, (set(), set(), {})) + old_from = state[fsm_id] + old_to = fsm_state + fsm_old_to_new.setdefault(old_from, set()).add(i) + fsm_old_to_new.setdefault(old_to, set()).add(j) + fsm_transitions.add((i, j)) + if fsm_state in fsms[fsm_id].finals: + fsm_finals.add(j) + + i += 1 + + fsm = FSM( + alphabet=alphabet, + states=range(len(states)), + initial=0, + finals=finals, + map=map, + __no_validation__=True, + ) + + fsm, old_to_new_states = make_deterministic_fsm(fsm) + _fsms_to_trans_finals = { + fsm_id: ( + {(old_to_new_states[s1], old_to_new_states[s2]) for s1, s2 in transitions}, + {old_to_new_states[s] for s in finals}, + { + old_state: {old_to_new_states[new_state] for new_state in new_states} + for old_state, new_states in old_to_new.items() + }, + ) + for fsm_id, (transitions, finals, old_to_new) in sorted( + fsms_to_trans_finals.items(), key=lambda x: x[0] + ) + } + + return ( + fsm, + _fsms_to_trans_finals, + ) + + +def get_sub_fsms_from_seq( + state_seq: Sequence[int], + fsms_to_trans_finals: Dict[ + int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] + ], +) -> Generator[Tuple[int, bool, bool], None, None]: + """Get the indices of the sub-FSMs in `fsm` that could have matched the state sequence `state_seq`. + + Parameters + ---------- + state_seq + A state sequence. + fsms_to_trans_finals + A map from FSM indices to tuples containing sets of their state transitions + and sets of the final/accept states. + + Returns + ------- + A generator returning tuples containing each sub-FSM index (in the order + they were union-ed to construct `fsm`) and booleans indicating whether or + not there is another valid transition from the last state in the sequence + for the associated sub-FSM (i.e. 
if the FSM can continue + accepting/matching) and whether or not the sequence ends in a final state + of the sub-FSM. + """ + state_seq_transitions = set(zip(state_seq[:-1], state_seq[1:])) + last_fsm_state = state_seq[-1] + yield from ( + ( + # The sub-FMS index + fsm_idx, + # Is there another possible transition in this sub-FSM? + any(last_fsm_state == from_s for (from_s, to_s) in transitions), + # Is this sub-FSM in a final state? + state_seq[-1] in finals, + ) + for fsm_idx, (transitions, finals, _) in fsms_to_trans_finals.items() + if state_seq_transitions.issubset(transitions) + ) + + +@numba.njit(cache=True, nogil=True) +def state_scan_tokens( + fsm_transitions: Dict[Tuple[int, int], int], + alphabet_symbol_mapping: Dict[str, int], + alphabet_anything_value: int, + fsm_initial: int, + fsm_finals: Set[int], + vocabulary: Dict[str, List[int]], + start_state: int, +) -> Set[Tuple[int, int]]: + res = set() + + for token, token_ids in vocabulary.items(): + state_seq = _walk_fsm( + fsm_transitions, + alphabet_symbol_mapping, + alphabet_anything_value, + fsm_initial, + fsm_finals, + token, + start_state, + ) + + if state_seq is not None and len(state_seq) < len(token): + continue + + for token_id in token_ids: + res.add((token_id, state_seq[-1])) + + return res + + +def create_fsm_index_end_to_end( + fsm_info: FSMInfo, + vocabulary: Dict[str, List[int]], +) -> Dict[int, Set[Tuple[int, int]]]: + """Create an FSM state-to-vocabulary map/index through end-to-end token parsing.""" + + # TODO: Consider using a `List` of `Set`s instead; that way we can JIT this + # code, too. + states_to_token_subsets: Dict[int, Set[Tuple[int, int]]] = {} + seen: Set[int] = set() + next_states = {fsm_info.initial} + + while next_states: + start_state = next_states.pop() + + token_ids_end_states = state_scan_tokens( + fsm_info.transitions, + fsm_info.alphabet_symbol_mapping, + fsm_info.alphabet_anything_value, + fsm_info.initial, + fsm_info.finals, + vocabulary, + start_state, + ) + + for token_id_and_end_state in token_ids_end_states: + states_to_token_subsets.setdefault(start_state, set()).add( + token_id_and_end_state + ) + end_state = token_id_and_end_state[1] + if end_state not in seen: + next_states.add(end_state) + + seen.add(start_state) + + return states_to_token_subsets + + +# TODO: Cannot cache typed collections to disk, yet. See +# https://github.com/numba/numba/issues/4698 +@lru_cache +def reduced_vocabulary(tokenizer: "Tokenizer"): + """Create a map from decoded vocabulary tokens to lists of equivalent token ids.""" + vocabulary = numba.typed.Dict.empty( + numba.types.string, numba.types.ListType(numba.int64) + ) + empty_token_ids = set() + for token, token_idx in tokenizer.vocabulary.items(): + if token in tokenizer.special_tokens: + continue + + token_str = tokenizer.convert_token_to_string(token) + + if token_str: + vocabulary.setdefault( + token_str, + numba.typed.List.empty_list(numba.int64), + ).append(numba.int64(token_idx)) + else: + empty_token_ids.add(numba.int64(token_idx)) + + return vocabulary, empty_token_ids + + +def create_fsm_index_tokenizer( + fsm: BetterFSM, + tokenizer: "Tokenizer", +) -> Tuple[Dict[int, Dict[int, int]], Set[int]]: + """Construct an FMS index from a tokenizer. + + This uses the end-to-end approach of `create_fsm_index_end_to_end`. + + .. warning:: + + `fsm` needs to be deterministically ordered so that future caching makes sense. 
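create_fsm_index_end_to_end above is a breadth-first traversal of the reachable FSM states: scan the entire vocabulary from the current start state, record every (token_id, end_state) pair, and queue each end state that has not been processed yet. The loop shape without the Numba layer; walk is an illustrative helper, not the library's API:

def build_index(transitions, initial, vocabulary):
    def walk(token, start):
        state = start
        for ch in token:
            state = transitions.get((state, ch))
            if state is None:
                return None
        return state

    index, seen, frontier = {}, set(), {initial}
    while frontier:
        start = frontier.pop()
        for token, token_id in vocabulary.items():
            end = walk(token, start)
            if end is not None:
                index.setdefault(start, {})[token_id] = end
                if end not in seen:
                    frontier.add(end)
        seen.add(start)
    return index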
+ + """ + vocabulary, empty_token_ids = reduced_vocabulary(tokenizer) + + states_to_token_subsets = create_fsm_index_end_to_end(fsm.fsm_info, vocabulary) + + # Allow transitions to EOS from all terminals FSM states that are + # reachable + # TODO: Do we really need this anymore? + for state in fsm.fsm_info.finals: + subset = states_to_token_subsets.get(state) + if subset is not None: + subset.add((tokenizer.eos_token_id, state)) + + # Convert to token-to-end-state maps + states_to_token_subsets = {k: dict(v) for k, v in states_to_token_subsets.items()} + + return states_to_token_subsets, empty_token_ids diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 77cdbeb7..a950e9bf 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -1,19 +1,14 @@ -import collections import math from json import dumps -from typing import List, Optional, Tuple, Union +from typing import Dict, List, Optional, Set, Tuple, Union import interegular import torch from pydantic import BaseModel +from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm from outlines.text.generate.continuation import Continuation from outlines.text.json_schema import build_regex_from_schema -from outlines.text.parsing import ( - find_partial_matches, - make_deterministic_fsm, - map_partial_states_to_vocab, -) class Regex(Continuation): @@ -29,51 +24,72 @@ class Regex(Continuation): """ - def __init__(self, model, regex_string: str, max_tokens: Optional[int]): - super().__init__(model, max_tokens) - - vocabulary = model.tokenizer.vocabulary - sorted_vocabulary = [ - model.tokenizer.convert_token_to_string(k) - for k, v in sorted(vocabulary.items(), key=lambda kv: kv[1]) - ] + def __init__( + self, + model, + regex_string: str, + max_tokens: Optional[int] = None, + allow_empty_tokens: bool = True, + initial_state: Optional[int] = None, + final_states: Optional[Set[int]] = None, + states_to_token_maps: Optional[Dict[int, Dict[int, int]]] = None, + empty_token_ids: Optional[Set[int]] = None, + ): + """ - regex_pattern = interegular.parse_pattern(regex_string) - self.regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + Parameters + ---------- + regex_string + The regex with which the token sampling process is guided/constrained. + max_tokens + The maximum number of tokens to be sampled. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. + states_to_token_maps + Pre-computed map of FSM start states to maps between token ids and their + corresponding FSM end states. + empty_token_ids + Pre-computed set of token ids for tokens that are empty strings. 
- def partial_match_filter(string, end_idx, state_seq): - if end_idx is not None and end_idx < len(string) - 1: - return False - return True + """ + super().__init__(model, max_tokens) - pstate_to_vocab, paths = map_partial_states_to_vocab( - list(sorted_vocabulary), - {"REGEX": self.regex_fsm}, - partial_match_filter, - final_state_string=model.tokenizer.eos_token, - ) + if ( + states_to_token_maps is None + or empty_token_ids is None + or initial_state is None + or final_states is None + ): + regex_pattern = interegular.parse_pattern(regex_string) + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + ( + self.states_to_token_maps, + self.empty_token_ids, + ) = create_fsm_index_tokenizer(regex_fsm, model.tokenizer) + self.initial_state = regex_fsm.initial + self.final_states = regex_fsm.finals + else: + self.initial_state = initial_state + self.final_states = final_states + self.states_to_token_maps = states_to_token_maps + self.empty_token_ids = empty_token_ids # Check whether a terminal path (from the initial state of the FSM to # one of its terminal states) exists, raise an exception otherwise. - traversed_states = set() - queue = collections.deque([self.regex_fsm.initial]) - while queue: - symbol = queue.popleft() - for prev_state in paths["REGEX"].get(symbol, ()): - if prev_state not in traversed_states: - traversed_states.add(prev_state) - queue.append(prev_state) - - if traversed_states.intersection(self.regex_fsm.finals) == set(): + if not any( + self.final_states.intersection(v.values()) + for v in self.states_to_token_maps.values() + ): raise ValueError( "The vocabulary does not allow us to build a sequence that matches the input regex" ) - self.pstate_to_vocab = {k: list(v) for k, v in pstate_to_vocab.items()} - # These tuples are comprised of the FSM name, last FSM state, and - # number of processed tokens. # When an EOS is observed, the last FSM state becomes `-1`. - self.pstates: List[Tuple[str, int, int]] = [] + self.last_fsm_states: List[int] = [] + self.mask_cache: Dict[Tuple[int, int], torch.LongTensor] = {} + self.regex_string = regex_string + self.allow_empty_tokens = allow_empty_tokens def create_proposal( self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor @@ -89,72 +105,106 @@ def create_proposal( """ - if len(self.pstates) == 0: - self.pstates = [ - ("REGEX", self.regex_fsm.initial, 0) - for _ in range(generated_token_ids.shape[0]) + assert generated_token_ids.ndim == 2 + + if len(self.last_fsm_states) == 0: + self.last_fsm_states = [ + self.initial_state for _ in range(generated_token_ids.shape[0]) ] - if generated_token_ids.shape[-1] > 0: - new_pstates = [] - for token_seq, (_, last_fsm_state, last_token_idx) in zip( + masks = [] + + for i, (token_seq, last_state) in enumerate( + zip( generated_token_ids, - self.pstates, - ): - # Get the tokens we haven't already processed, - readable_tokens = token_seq[last_token_idx:] - # excluding any EOS tokens. 
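The rewritten create_proposal reduces to one state-machine step per sequence, as the branches just below show: the last sampled token selects the next FSM state, and that state selects the token ids that may be sampled next. The decision distilled into a standalone function over plain Python values (a simplified sketch, not the class itself):

def next_allowed(last_token, last_state, states_to_token_maps, empty_token_ids,
                 eos_token_id, final_states):
    if last_token in empty_token_ids:
        # An empty-string token leaves the FSM state unchanged.
        return last_state, list(states_to_token_maps[last_state])
    if last_token == eos_token_id:
        # Once EOS has been sampled, only EOS may follow.
        return -1, [eos_token_id]
    next_state = states_to_token_maps[last_state][last_token]
    next_map = states_to_token_maps.get(next_state)
    if next_map is None:
        # No outgoing transitions: we reached a final state, emit EOS next.
        assert next_state in final_states
        return -1, [eos_token_id]
    return next_state, list(next_map)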
- not_eos_mask = [ - tk != self.model.tokenizer.eos_token_id for tk in readable_tokens - ] - readable_tokens = readable_tokens[not_eos_mask] - if len(readable_tokens) > 0: + self.last_fsm_states, + ) + ): + if token_seq.shape[0] > 0: + # Get the last token that was sampled + last_token = int(token_seq[-1]) + + if last_token in self.empty_token_ids: + # An empty token was sampled, so the FSM state hasn't changed + next_state = last_state + next_token_ids = list(self.states_to_token_maps[last_state].keys()) + + elif last_token != self.model.tokenizer.eos_token_id: # If we previously ended with an EOS, we shouldn't be # getting/sampling any more non-EOS tokens. - assert last_fsm_state > -1 + assert last_state > -1 - sequence = self.model.tokenizer.decode(readable_tokens) + last_token_to_end_state = self.states_to_token_maps[last_state] - ((_, state_seq),) = find_partial_matches( - self.regex_fsm, - "".join(sequence), - start_state=last_fsm_state, - ) - pstate = ( - "REGEX", - state_seq[-1], - last_token_idx + len(sequence), + next_state = last_token_to_end_state[last_token] + + next_tokens_to_end_states = self.states_to_token_maps.get( + next_state ) + + if next_tokens_to_end_states is None: + # If there are no transitions from the current state, + # then we must've been in a final state of the FSM. + # We produce EOS tokens from here on. + assert next_state in self.final_states + next_state = -1 + next_token_ids = [self.model.tokenizer.eos_token_id] + else: + next_token_ids = list(next_tokens_to_end_states.keys()) else: - pstate = ("REGEX", -1, last_token_idx) + # Since we already have an EOS, only sample EOS tokes from + # here on. + next_state = -1 + next_token_ids = [self.model.tokenizer.eos_token_id] + else: + # There weren't any previous tokens, so we can't update the state + next_state = last_state + next_token_ids = list(self.states_to_token_maps[last_state].keys()) - new_pstates.append(pstate) + mask = self._get_mask_for_state( + next_state, logits.shape[-1], next_token_ids + ) + masks.append(mask) + self.last_fsm_states[i] = next_state - self.pstates = new_pstates + mask = torch.concatenate(masks, dim=0) - masks = [] - mask_shape = (logits.shape[-1],) - for pstate in self.pstates: - mask = torch.full(mask_shape, -math.inf, device=self.device) + return logits + mask - if pstate[1] > -1: - next_support = self.pstate_to_vocab[pstate[:2]] - else: - next_support = [self.model.tokenizer.eos_token_id] + def _get_mask_for_state( + self, state: int, size: int, next_token_ids: List[int] + ) -> torch.LongTensor: + mask = self.mask_cache.get((state, size)) - mask[next_support] = 0 - masks.append(mask.unsqueeze(0)) + if mask is None: + mask = torch.full( + (size,), + -math.inf, + device=self.device, + ) - mask = torch.concatenate(masks, dim=0) + if self.allow_empty_tokens: + token_ids = list(self.empty_token_ids) + next_token_ids + else: + token_ids = next_token_ids - return logits + mask + mask[token_ids] = 0 + mask = mask.unsqueeze(0) + self.mask_cache[(state, size)] = mask + + return mask def postprocess_completions(self, completions: List[str]) -> List[str]: - self.pstates.clear() + self.last_fsm_states.clear() return super().postprocess_completions(completions) -def regex(model, regex_string: str, max_tokens: Optional[int] = None): +def regex( + model, + regex_string: str, + max_tokens: Optional[int] = None, + allow_empty_tokens: bool = True, +): """Generate text sequences that match the input regex. 
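The mask built above is the whole trick: -inf on forbidden ids and 0 on allowed ones, added to the logits so that softmax assigns exactly zero probability outside the allowed set; because it depends only on the FSM state, caching it per (state, vocab_size) is safe. A standalone demonstration:

import math

import torch

vocab_size = 8
allowed = [2, 5]  # token ids permitted from the current FSM state

mask = torch.full((vocab_size,), -math.inf)
mask[allowed] = 0.0

logits = torch.randn(vocab_size)
probs = torch.softmax(logits + mask, dim=-1)

forbidden = [i for i in range(vocab_size) if i not in allowed]
assert probs[forbidden].sum() == 0.0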
Parameters @@ -165,12 +215,14 @@ def regex(model, regex_string: str, max_tokens: Optional[int] = None): The regular expression that generated expressions must match. max_tokens The maximum number of tokens to generate. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, regex_string, max_tokens) + return Regex(model, regex_string, max_tokens, allow_empty_tokens) -def integer(model, max_tokens: Optional[int] = None): +def integer(model, max_tokens: Optional[int] = None, allow_empty_tokens: bool = True): """Generate integers. The regex used to constrain the generation optionally matches plus or minus @@ -183,12 +235,14 @@ def integer(model, max_tokens: Optional[int] = None): The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, r"[-+]?\d+", max_tokens) + return Regex(model, r"[-+]?\d+", max_tokens, allow_empty_tokens) -def float(model, max_tokens: Optional[int] = None): +def float(model, max_tokens: Optional[int] = None, allow_empty_tokens: bool = True): """Generate floating-point numbers. The regex used to constrain the generation optionally matches plus or minus @@ -201,18 +255,35 @@ def float(model, max_tokens: Optional[int] = None): The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens) - - -def choice(model, choices: List[str], max_tokens: Optional[int] = None): + return Regex( + model, + r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", + max_tokens, + allow_empty_tokens, + ) + + +def choice( + model, + choices: List[str], + max_tokens: Optional[int] = None, + allow_empty_tokens: bool = True, +): """Choose between different sequences.""" regex_str = r"(" + r"|".join(choices) + r")" - return Regex(model, regex_str, max_tokens) + return Regex(model, regex_str, max_tokens, allow_empty_tokens) -def json(model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None): +def json( + model, + schema: Union[str, BaseModel], + max_tokens: Optional[int] = None, + allow_empty_tokens: bool = True, +): """Generate a text sequence that follows a JSON schema or Pydantic model. Parameters @@ -223,6 +294,8 @@ def json(model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None) The JSON schema or Pydantic model that guides the generation. max_tokens The maximum number of tokens to generate. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. 
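All of these wrappers reduce to Regex with a different pattern string, so the call pattern is uniform. A hedged usage sketch, assuming the helpers are re-exported by outlines.text.generate as the integration tests' imports suggest, and that the model download succeeds; the prompts are made up:

import outlines.models as models
import outlines.text.generate as generate

model = models.transformers("gpt2")

count = generate.integer(model, max_tokens=4)("How many eggs are in a dozen? ")
drink = generate.choice(model, ["tea", "coffee"])("Pick a drink: ")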
""" if isinstance(schema, type(BaseModel)): @@ -230,4 +303,4 @@ def json(model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None) regex_str = build_regex_from_schema(schema) - return Regex(model, regex_str, max_tokens) + return Regex(model, regex_str, max_tokens, allow_empty_tokens) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index e1fd2e3f..77d79821 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -1,24 +1,10 @@ -from collections import ChainMap from copy import copy, deepcopy from dataclasses import dataclass from functools import lru_cache -from typing import ( - Any, - Callable, - Dict, - FrozenSet, - Generator, - Iterable, - Iterator, - Optional, - Sequence, - Set, - Tuple, - Union, -) +from typing import Any, Dict, FrozenSet, Iterator, Optional, Set, Tuple, Union import interegular -from interegular.fsm import FSM, Alphabet, OblivionError +from interegular.fsm import FSM from interegular.patterns import Unsupported from lark import Lark, Token from lark.common import LexerConf, ParserConf @@ -52,6 +38,13 @@ from lark.parsers.lalr_interactive_parser import InteractiveParser from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser +from outlines.text.fsm import ( + fsm_union, + get_sub_fsms_from_seq, + make_deterministic_fsm, + walk_fsm, +) + PartialParseState = Tuple[str, int] ParseStateType = Union[int, FrozenSet] @@ -72,80 +65,6 @@ class PartialTokensInfo: final_terminals_and_info: Tuple[PartialTerminalInfo, ...] -def make_deterministic_fsm(fsm: FSM) -> Tuple[FSM, Dict[int, int]]: - """Construct an equivalent FSM with deterministic state labels.""" - old_to_new_trans_keys = { - trans_key: i - for i, (trans_key, _) in enumerate( - sorted(fsm.alphabet.by_transition.items(), key=lambda x: sorted(x[1])) - ) - } - - new_symbol_mapping = { - symbol: old_to_new_trans_keys[trans_key] - for symbol, trans_key in fsm.alphabet._symbol_mapping.items() - } - - new_alphabet = Alphabet(new_symbol_mapping) - - new_map = { - from_state: { - old_to_new_trans_keys[trans_key]: to_state - for trans_key, to_state in trans_map.items() - } - for from_state, trans_map in fsm.map.items() - } - - old_to_new_states = {} - old_to_new_states[fsm.initial] = 0 - - i = 0 - seen = {fsm.initial} - old_state_queue = [fsm.initial] - while old_state_queue: - old_state = old_state_queue.pop(-1) - transitions = new_map[old_state] - sorted_transitions = sorted(transitions.items(), key=lambda v: v[0]) - for _, old_state in sorted_transitions: - if old_state not in seen: - old_state_queue.append(old_state) - seen.add(old_state) - if old_state not in old_to_new_states: - i += 1 - old_to_new_states[old_state] = i - - new_map = dict( - sorted( - ( - ( - old_to_new_states[from_state], - dict( - sorted( - ( - (trans_key, old_to_new_states[to_state]) - for trans_key, to_state in trans_map.items() - ), - key=lambda v: v[0], - ) - ), - ) - for from_state, trans_map in new_map.items() - ), - key=lambda v: v[0], - ) - ) - - new_initial = 0 - new_finals = frozenset( - sorted(old_to_new_states[old_state] for old_state in fsm.finals) - ) - new_states = frozenset(sorted(new_map.keys())) - - new_fsm = FSM(new_alphabet, new_states, new_initial, new_finals, new_map) - - return new_fsm, old_to_new_states - - class PartialParserConf(ParserConf): __serialize_fields__ = ( "rules", @@ -604,6 +523,9 @@ def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): self.fsm, self.fsms_to_trans_finals = fsm_union(fsms) + # Eagerly construct the 
`FSMInfo` object + _ = self.fsm.fsm_info + def get_terminals_info( self, fsm_state_seq ) -> Tuple[Tuple[PartialTerminalInfo, ...], Tuple[PartialTerminalInfo, ...]]: @@ -635,22 +557,22 @@ def match(self, text, pos, last_fsm_state_seq: Optional[Tuple[int, ...]] = None) text_part = text[start_pos:] - res = find_partial_matches( - self.fsm, + state_seq = walk_fsm( + self.fsm.fsm_info, text_part, - start_state=start_state, + start_state, full_match=self.match_whole, ) - if len(res) == 0: + if not state_seq: return None - ((_, state_seq),) = res - if last_fsm_state_seq: - state_seq = last_fsm_state_seq[:-1] + state_seq + res = last_fsm_state_seq + tuple(state_seq) + else: + res = (start_state,) + tuple(state_seq) - return state_seq + return res class PartialContextualLexer(ContextualLexer): @@ -693,6 +615,8 @@ def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: class PartialBasicLexer(BasicLexer): def __init__(self, conf: "LexerConf"): super().__init__(conf) + # Eagerly construct the scanner + self._build_scanner() def _build_scanner(self): # This seems incredibly convoluted: `lark` creates callback-triggered @@ -910,96 +834,6 @@ def get_contextual_lexer(x: Union[PartialLexerThread, PartialParsingFrontend]): return x.lexer.lexer -def find_partial_matches( - fsm: FSM, input_string: str, start_state: Optional[int] = None, full_match=True -) -> Set[Tuple[int, Tuple[int, ...]]]: - """Find the states in the finite state machine `fsm` that accept `input_string`. - - This will consider all possible states in the finite state machine (FSM) - that accept the beginning of `input_string` as starting points, unless a - specific `start_state` is provided. - - Parameters - ---------- - fsm - The finite state machine. - input_string - The string for which we generate partial matches. - start_state - A single fixed starting state to consider. For example, if this value - is set to `fsm.initial`, it attempt to read `input_string` from the - beginning of the FSM/regular expression. - full_match - Matches must cover the entire string. - - Returns - ------- - A set of tuples corresponding to each valid starting state in the FSM. The - first element of each tuple contains an integer indicating the position in - `input_string` at which the FSM stopped. The second element is the tuple - of states visited during execution of the FSM plus the next, unvisited - transition state. - - """ - if len(input_string) == 0: - return set() - - trans_key = fsm.alphabet[input_string[0]] - - # TODO: We could probably reuse parts of the computed paths when computing - # results for multiple starting points. - def _partial_match( - trans: Dict[int, int] - ) -> Tuple[Optional[int], Optional[Tuple[int, ...]]]: - fsm_map = ChainMap({fsm.initial: trans}, fsm.map) - state = fsm.initial - accepted_states: Tuple[int, ...] 
= () - last_final_idx = -1 - - for i, symbol in enumerate(input_string): - trans_key = fsm.alphabet[symbol] - - trans_map = fsm_map.get(state) - - if trans_map is None or trans_key not in trans_map: - if full_match: - if state in fsm.finals: - i -= 1 - break - else: - if last_final_idx > -1: - i = last_final_idx - accepted_states = accepted_states[: last_final_idx + 1] - break - - return None, None - - state = trans_map[trans_key] - - if state in fsm.finals: - last_final_idx = i - - accepted_states += (state,) - - terminated = state in fsm.finals - if not terminated and state == fsm.initial: - return None, None - - return i, accepted_states - - res = set() - transition_maps = ( - fsm.map if start_state is None else {start_state: fsm.map[start_state]} - ) - for state, trans in transition_maps.items(): - if trans_key in trans: - last_match_idx, path = _partial_match(trans) - if last_match_idx is not None and path is not None: - res.add((last_match_idx, (state,) + path)) - - return res - - def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]: """Construct a ``dict`` mapping terminal symbol names to their finite state machines.""" @@ -1015,221 +849,3 @@ def terminals_to_fsms(lp: PartialLark) -> Dict[str, FSM]: symbol_names_and_fsms[terminal.name] = fsm return symbol_names_and_fsms - - -def map_partial_states_to_vocab( - vocabulary: Iterable[str], - terminals_to_fsms_map: Dict[str, FSM], - partial_match_filter: Callable[ - [str, Optional[int], Tuple[int, ...]], bool - ] = lambda *args: True, - final_state_string: Optional[str] = None, -) -> Tuple[Dict[PartialParseState, Set[int]], Dict[str, Dict[int, Set[int]]]]: - """Construct a map from partial parse states to subsets of `vocabulary`. - - The subsets of `vocabulary` consist of elements that are accepted by--or - transition to--the corresponding partial parse states. - - Parameters - ---------- - vocabulary - The vocabulary composed of strings. - terminals_to_fsms_map - Terminal symbol names mapped to FSMs, as provided by `terminals_to_fsms`. - partial_match_filter - A callable that determines which partial matches to keep. The first - argument is the string being match, the rest are the unpacked partial - match return values of `find_partial_matches`. - final_state_string - A string from `vocabulary` that is to be added to all the final states - in the FSM. 
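terminals_to_fsms, which stays behind in parsing.py, performs the same per-pattern construction used throughout this patch: parse the terminal's regex with interegular, reduce the FSM, and relabel its states deterministically. The same three steps in isolation:

import interegular

from outlines.text.fsm import make_deterministic_fsm

pattern = interegular.parse_pattern(r"[0-9]+")
fsm, old_to_new_states = make_deterministic_fsm(pattern.to_fsm().reduce())

# State labels are now stable across runs, which makes the FSM safe to use
# as part of a cache key.
print(fsm.initial, sorted(fsm.finals))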
- """ - - final_state_string_idx = None - - # Partial parse states to the subsets of the vocabulary that accept them - pstate_to_vocab: Dict[Tuple[str, int], Set[int]] = {} - possible_paths = {} - for symbol_name, fsm in terminals_to_fsms_map.items(): - terminal_possible_paths: Dict[int, Set[int]] = {} - for i, vocab_string in enumerate(vocabulary): - if vocab_string == final_state_string: - final_state_string_idx = i - - for end_idx, state_seq in find_partial_matches(fsm, vocab_string): - if partial_match_filter(vocab_string, end_idx, state_seq): - terminal_possible_paths.setdefault(state_seq[0], set()).add( - state_seq[-1] - ) - pstate_to_vocab.setdefault((symbol_name, state_seq[0]), set()).add( - i - ) - - possible_paths[symbol_name] = terminal_possible_paths - - if final_state_string_idx is not None: - # Allow transitions to EOS from all terminals FSM states - for symbol_name, fsm in terminals_to_fsms_map.items(): - for state in fsm.finals: - pstate_to_vocab.setdefault((symbol_name, state), set()).add( - final_state_string_idx - ) - - return pstate_to_vocab, possible_paths - - -def fsm_union( - fsms: Sequence[FSM], -) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]: - """Construct an FSM representing the union of the FSMs in `fsms`. - - This is an updated version of `interegular.fsm.FSM.union` made to return an - extra map of component FSMs to the sets of state transitions that - correspond to them in the new FSM. - - """ - - alphabet, new_to_old = Alphabet.union(*[fsm.alphabet for fsm in fsms]) - - indexed_fsms = tuple(enumerate(fsms)) - - initial = {i: fsm.initial for (i, fsm) in indexed_fsms} - - # Dedicated function accepting a "superset" and returning the next - # "superset" obtained by following this transition in the new FSM - def follow(current_state, new_transition: int): - next = {} - for i, f in indexed_fsms: - old_transition = new_to_old[i][new_transition] - if ( - i in current_state - and current_state[i] in f.map - and old_transition in f.map[current_state[i]] - ): - next[i] = f.map[current_state[i]][old_transition] - if not next: - raise OblivionError - return next - - states = [initial] - finals: Set[int] = set() - map: Dict[int, Dict[int, int]] = {} - - # Map component FSMs to their new state-to-state transitions, finals, and a - # map translating component FSM states to aggregate FSM states - fsms_to_trans_finals: Dict[ - int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] - ] = {} - - i = 0 - while i < len(states): - state = states[i] - - # Add to the finals of the aggregate FSM whenever we hit a final in a - # component FSM - if any(state.get(j, -1) in fsm.finals for (j, fsm) in indexed_fsms): - finals.add(i) - - # Compute the map for this state - map[i] = {} - for transition in alphabet.by_transition: - try: - next = follow(state, transition) - except OblivionError: - # Reached an oblivion state; don't list it - continue - else: - try: - # TODO: Seems like this could--and should--be avoided - j = states.index(next) - except ValueError: - j = len(states) - states.append(next) - - map[i][transition] = j - - for fsm_id, fsm_state in next.items(): - ( - fsm_transitions, - fsm_finals, - fsm_old_to_new, - ) = fsms_to_trans_finals.setdefault(fsm_id, (set(), set(), {})) - old_from = state[fsm_id] - old_to = fsm_state - fsm_old_to_new.setdefault(old_from, set()).add(i) - fsm_old_to_new.setdefault(old_to, set()).add(j) - fsm_transitions.add((i, j)) - if fsm_state in fsms[fsm_id].finals: - fsm_finals.add(j) - - i += 1 - - fsm = 
FSM( - alphabet=alphabet, - states=range(len(states)), - initial=0, - finals=finals, - map=map, - __no_validation__=True, - ) - - fsm, old_to_new_states = make_deterministic_fsm(fsm) - _fsms_to_trans_finals = { - fsm_id: ( - {(old_to_new_states[s1], old_to_new_states[s2]) for s1, s2 in transitions}, - {old_to_new_states[s] for s in finals}, - { - old_state: {old_to_new_states[new_state] for new_state in new_states} - for old_state, new_states in old_to_new.items() - }, - ) - for fsm_id, (transitions, finals, old_to_new) in sorted( - fsms_to_trans_finals.items(), key=lambda x: x[0] - ) - } - - return ( - fsm, - _fsms_to_trans_finals, - ) - - -def get_sub_fsms_from_seq( - state_seq: Sequence[int], - fsms_to_trans_finals: Dict[ - int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]] - ], -) -> Generator[Tuple[int, bool, bool], None, None]: - """Get the indices of the sub-FSMs in `fsm` that could have matched the state sequence `state_seq`. - - Parameters - ---------- - state_seq - A state sequence. - fsms_to_trans_finals - A map from FSM indices to tuples containing sets of their state transitions - and sets of the final/accept states. - - Returns - ------- - A generator returning tuples containing each sub-FSM index (in the order - they were union-ed to construct `fsm`) and booleans indicating whether or - not there is another valid transition from the last state in the sequence - for the associated sub-FSM (i.e. if the FSM can continue - accepting/matching) and whether or not the sequence ends in a final state - of the sub-FSM. - """ - state_seq_transitions = set(zip(state_seq[:-1], state_seq[1:])) - last_fsm_state = state_seq[-1] - yield from ( - ( - # The sub-FMS index - fsm_idx, - # Is there another possible transition in this sub-FSM? - any(last_fsm_state == from_s for (from_s, to_s) in transitions), - # Is this sub-FSM in a final state? 
- state_seq[-1] in finals, - ) - for fsm_idx, (transitions, finals, _) in fsms_to_trans_finals.items() - if state_seq_transitions.issubset(transitions) - ) diff --git a/pyproject.toml b/pyproject.toml index 75db171c..39ab8207 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,8 @@ dependencies = [ "scipy", "tenacity", "torch", + "numba", + "joblib", ] dynamic = ["version"] @@ -72,6 +74,7 @@ write_to = "outlines/_version.py" testpaths = ["tests"] filterwarnings = [ "error", + "ignore::numba.core.errors.NumbaPendingDeprecationWarning", "ignore::FutureWarning:transformers.*", "ignore::FutureWarning:diffusers.*", "ignore::UserWarning:torch.cuda.*" @@ -84,7 +87,7 @@ exclude=["examples"] module = [ "diffusers", "jinja2", - "joblib", + "joblib.*", "openai", "numpy.*", "perscache.*", @@ -99,8 +102,8 @@ module = [ "transformers.*", "lark.*", "interegular.*", - "numba.*", "datasets.*", + "numba.*", ] ignore_missing_imports = true diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 912e4775..74aea7c1 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -9,6 +9,8 @@ import outlines.models as models import outlines.text.generate as generate +from outlines.models.transformers import TransformersTokenizer +from outlines.text.fsm import reduced_vocabulary def test_transformers_integration_continuation(): @@ -245,3 +247,16 @@ def test_transformers_logits_vocab_size(): masked_logits = generator("blah", rng=rng) assert masked_logits == "True" + + +def test_transformers_reduced_vocabulary_caching(): + tokenizer = TransformersTokenizer("gpt2") + tokenizer2 = TransformersTokenizer("gpt2") + + # TODO: We might actually want only one copy of a given tokenizer. 
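+    # Two separately constructed tokenizers for the same model should hit the
+    # same cache entry, so `reduced_vocabulary` is expected to return the
+    # identical object for both (see the asserts below).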
+ assert tokenizer is not tokenizer2 + + vocab = reduced_vocabulary(tokenizer) + vocab2 = reduced_vocabulary(tokenizer2) + + assert vocab2 is vocab diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py index 5ef3afcd..a32a8b85 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/text/generate/test_regex.py @@ -1,9 +1,12 @@ import math +import interegular import pytest import torch import outlines.text.generate as generate +from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm +from outlines.text.generate.regex import Regex class Tokenizer: @@ -15,6 +18,12 @@ class Tokenizer: tokens = list(vocabulary.keys()) special_tokens = {"<EOS>"} + def encode(self, tokens): + if not isinstance(tokens, (tuple, list)): + tokens = [tokens] + + return [self.vocabulary[token] for token in tokens] + def decode(self, token_ids): decoded = [] for i in range(token_ids.shape[0]): @@ -26,11 +35,21 @@ def convert_token_to_string(self, token): return token +class TokenizerWithEmpty(Tokenizer): + vocabulary = {"<EOS>": 0, "-": 1, "1": 2, "0.": 3, "431": 4, "a": 5, "A": 6, "": 7} + tokens = list(vocabulary.keys()) + + class Model: tokenizer = Tokenizer() device = "cpu" +class ModelWithEmpty: + tokenizer = TokenizerWithEmpty() + device = "cpu" + + @pytest.mark.parametrize( "regex_string, valid_first_token, proposal", [ @@ -153,3 +172,65 @@ def test_float_proposal(input_ids, proposal): result, torch.tensor(proposal), ) + + +@pytest.mark.parametrize( + "input_ids, proposal, with_empty", + [ + ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf, 1]], True), + ( + [[]], + [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf, -math.inf]], + False, + ), + ([[3]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf, 1]], True), + ( + [[3]], + [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf, -math.inf]], + False, + ), + ], +) +def test_empty_strings(input_ids, proposal, with_empty): + model = ModelWithEmpty() + generator = generate.float(model, allow_empty_tokens=with_empty) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor(input_ids), logits) + assert torch.equal( + result, + torch.tensor(proposal), + ) + + +def test_Regex_precomputed(): + model = Model() + choices = ["1", "431a", "431A-"] + regex_str = r"(" + r"|".join(choices) + r")" + + regex_pattern = interegular.parse_pattern(regex_str) + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + ( + states_to_token_maps, + empty_token_ids, + ) = create_fsm_index_tokenizer(regex_fsm, model.tokenizer) + + generator = Regex( + model, + regex_str, + max_tokens=100, + initial_state=regex_fsm.initial, + final_states=regex_fsm.finals, + states_to_token_maps=states_to_token_maps, + empty_token_ids=empty_token_ids, + ) + + logits = torch.ones(len(model.tokenizer.vocabulary)) + result = generator.create_proposal(torch.tensor([[]]), logits) + assert torch.equal( + result, + torch.tensor( + [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]] + ), + ) diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py new file mode 100644 index 00000000..616c1359 --- /dev/null +++ b/tests/text/test_fsm.py @@ -0,0 +1,419 @@ +import interegular +import numba +import pytest + +from outlines.models.transformers import TransformersTokenizer +from outlines.text.fsm import ( + create_fsm_index, + create_fsm_index_end_to_end, + create_fsm_index_tokenizer, + find_partial_matches, + fsm_union, + get_sub_fsms_from_seq, +
make_deterministic_fsm, + walk_fsm, +) + + +def test_walk_fsm(): + regex_pattern = interegular.parse_pattern("0|[1-9][2-9]*") + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + # This should fail, because state `1` reads nothing + res = tuple(walk_fsm(regex_fsm.fsm_info, "0", numba.int64(1), full_match=True)) + assert res == tuple() + + pattern = interegular.parse_pattern(r"(?:[^\W\d]\w*|[\t \x0c]+)") + fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) + + res = tuple(walk_fsm(fsm.fsm_info, "x ", fsm.fsm_info.initial, full_match=False)) + assert res == (2,) + + +def test_partial_match(): + name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") + name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) + assert name_fsm.initial == 0 + + name_fsm = name_fsm.fsm_info + + def_pattern = interegular.parse_pattern("def") + def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) + assert def_fsm.initial == 0 + + def_fsm = def_fsm.fsm_info + + def to_python(res): + return {(x, tuple(y)) for x, y in res} + + res = to_python(find_partial_matches(def_fsm, "def")) + assert res == {(2, (0, 1, 2, 3))} + res = to_python(find_partial_matches(def_fsm, "de")) + assert res == {(1, (0, 1, 2))} + res = to_python(find_partial_matches(def_fsm, "d")) + assert res == {(0, (0, 1))} + res = to_python(find_partial_matches(def_fsm, "")) + assert res == set() + res = to_python(find_partial_matches(def_fsm, "df")) + assert res == set() + res = to_python(find_partial_matches(def_fsm, "ef")) + assert res == {(1, (1, 2, 3))} + res = to_python(find_partial_matches(def_fsm, "e")) + assert res == {(0, (1, 2))} + res = to_python(find_partial_matches(def_fsm, "f")) + assert res == {(0, (2, 3))} + res = to_python(find_partial_matches(def_fsm, "ef foo")) + assert res == {(1, (1, 2, 3))} + + # This string has a `DEF` token in it, but should ultimately not lex one + res = to_python(find_partial_matches(def_fsm, "defb")) + assert res == {(2, (0, 1, 2, 3))} + + # `NAME` can have multiple start states for this input + res = to_python(find_partial_matches(name_fsm, "d")) + assert res == {(0, (0, 1)), (0, (1, 1))} + # Not this case + res = to_python(find_partial_matches(name_fsm, "1d")) + assert res == {(1, (1, 1, 1))} + + res = to_python(find_partial_matches(name_fsm, "blah")) + assert res == { + (3, (0, 1, 1, 1, 1)), + (3, (1, 1, 1, 1, 1)), + } + + float_pattern = interegular.parse_pattern( + r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))" + ) + float_fsm, _ = make_deterministic_fsm(float_pattern.to_fsm().reduce()) + assert 5 in float_fsm.finals + assert 2 not in float_fsm.finals + + float_fsm = float_fsm.fsm_info + + res = to_python(find_partial_matches(float_fsm, ".")) + assert res == {(0, (3, 5)), (0, (4, 5)), (0, (0, 2))} + + joins_fsm, _ = make_deterministic_fsm( + interegular.parse_pattern(r"(JOIN LEFT|JOIN)").to_fsm().reduce() + ) + + joins_fsm = joins_fsm.fsm_info + + res = to_python(find_partial_matches(joins_fsm, "JOIN BLAH", full_match=False)) + assert res == {(3, (0, 1, 2, 3, 4))} + + res = to_python(find_partial_matches(joins_fsm, "JOIN L", full_match=False)) + assert res == {(5, (0, 1, 2, 3, 4, 5, 6))} + + res = to_python(find_partial_matches(joins_fsm, "JOI", full_match=False)) + assert res == {(2, (0, 1, 2, 3))} + + regex_pattern = interegular.parse_pattern("0|[1-9][2-9]*") + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + # State `1` has no transitions + assert not regex_fsm.map[1] + + res = 
to_python(find_partial_matches(regex_fsm.fsm_info, "0", numba.int64(1))) + assert res == {(0, (0, 1))} + + +def test_create_fsm_index(): + regex_str = "0|[1-9][0-9]*" + + regex_pattern = interegular.parse_pattern(regex_str) + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + vocabulary = {"blah": 0, "1a": 1, "2": 2, "0": 3, "<EOS>": 4} + + res = create_fsm_index(regex_fsm.fsm_info, vocabulary) + + assert res == {0: {2, 3}, 2: {2, 3}} + + res = create_fsm_index(regex_fsm.fsm_info, vocabulary, "<EOS>") + + assert res == {0: {2, 3}, 1: {4}, 2: {2, 3, 4}} + + +def test_get_sub_fsms_from_seq(): + name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") + name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) + + def_pattern = interegular.parse_pattern("def") + def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) + + match_pattern = interegular.parse_pattern("match") + match_fsm, _ = make_deterministic_fsm(match_pattern.to_fsm().reduce()) + + peq_pattern = interegular.parse_pattern(r"\+=") + peq_fsm, _ = make_deterministic_fsm(peq_pattern.to_fsm().reduce()) + + plus_pattern = interegular.parse_pattern(r"\+") + plus_fsm, _ = make_deterministic_fsm(plus_pattern.to_fsm().reduce()) + + fsms = [def_fsm, match_fsm, name_fsm, peq_fsm, plus_fsm] + + fsm, fsms_to_trans_finals = fsm_union(fsms) + + assert fsms_to_trans_finals == { + 0: ({(0, 3), (3, 9), (9, 10)}, {10}, {0: {0}, 1: {3}, 2: {9}, 3: {10}}), + 1: ( + {(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)}, + {8}, + {0: {0}, 1: {4}, 2: {5}, 3: {6}, 4: {7}, 5: {8}}, + ), + 2: ( + { + (0, 2), + (0, 3), + (0, 4), + (2, 2), + (3, 2), + (3, 9), + (4, 2), + (4, 5), + (5, 2), + (5, 6), + (6, 2), + (6, 7), + (7, 2), + (7, 8), + (8, 2), + (9, 2), + (9, 10), + (10, 2), + }, + {2, 3, 4, 5, 6, 7, 8, 9, 10}, + {0: {0}, 1: {2, 3, 4, 5, 6, 7, 8, 9, 10}}, + ), + 3: ({(0, 1), (1, 11)}, {11}, {0: {0}, 1: {1}, 2: {11}}), + 4: ({(0, 1)}, {1}, {0: {0}, 1: {1}}), + } + + assert not fsm.accepts("1a") + assert fsm.accepts("a1") + assert fsm.accepts("def") + assert fsm.accepts("match") + assert fsm.accepts("+=") + assert fsm.accepts("+") + + state_seq = walk_fsm(fsm.fsm_info, "def", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, False, True), (2, True, True)] + + # Make sure the old-to-new state map is correct + def_state_seq = walk_fsm(def_fsm.fsm_info, "def", fsm.fsm_info.initial) + def_state_seq.insert(0, fsm.fsm_info.initial) + + def_old_to_new_states = fsms_to_trans_finals[0][2] + assert all( + new_state in def_old_to_new_states[old_state] + for old_state, new_state in zip(def_state_seq, state_seq) + ) + + state_seq = walk_fsm(fsm.fsm_info, "ef", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(2, True, True)] + + name_state_seq = walk_fsm(name_fsm.fsm_info, "ef", fsm.fsm_info.initial) + name_state_seq.insert(0, fsm.fsm_info.initial) + + name_old_to_new_states = fsms_to_trans_finals[2][2] + assert all( + new_state in name_old_to_new_states[old_state] + for old_state, new_state in zip(name_state_seq, state_seq) + ) + + state_seq = walk_fsm(fsm.fsm_info, "match", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(1, False, True), (2, True, True)] + + match_state_seq = walk_fsm(match_fsm.fsm_info, "match",
fsm.fsm_info.initial) + match_state_seq.insert(0, fsm.fsm_info.initial) + + match_old_to_new_states = fsms_to_trans_finals[1][2] + assert all( + new_state in match_old_to_new_states[old_state] + for old_state, new_state in zip(match_state_seq, state_seq) + ) + + state_seq = walk_fsm(fsm.fsm_info, "defa", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(2, True, True)] + + state_seq = walk_fsm(fsm.fsm_info, "de", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, True, False), (2, True, True)] + + state_seq = walk_fsm(fsm.fsm_info, "+", fsm.fsm_info.initial, False) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(3, True, False), (4, False, True)] + + state_seq = walk_fsm(fsm.fsm_info, "+=", fsm.fsm_info.initial) + state_seq.insert(0, fsm.fsm_info.initial) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(3, False, True)] + + # Test some overlapping patterns + join_fsms = [ + interegular.parse_pattern(r"JOIN").to_fsm().reduce(), + interegular.parse_pattern(r"JOIN LEFT").to_fsm().reduce(), + ] + fsm, fsms_to_trans_finals = fsm_union(join_fsms) + + ((_, state_seq),) = find_partial_matches(fsm.fsm_info, "OI", full_match=False) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, True, False), (1, True, False)] + + ((_, state_seq),) = find_partial_matches(fsm.fsm_info, "N", full_match=False) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(0, False, True), (1, True, False)] + + ((_, state_seq),) = find_partial_matches(fsm.fsm_info, " ", full_match=False) + + res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) + assert res == [(1, True, False)] + + +def test_create_fsm_index_end_to_end(): + regex_str = "0|[1-9][0-9]*" + + regex_pattern = interegular.parse_pattern(regex_str) + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + vocabulary = { + "blah": numba.typed.List([0]), + "1a": numba.typed.List([1]), + "2": numba.typed.List([2]), + "0": numba.typed.List([3]), + "": numba.typed.List([4]), + } + + vocabulary_nb = numba.typed.Dict.empty( + numba.types.string, numba.types.ListType(numba.int64) + ) + vocabulary_nb.update(vocabulary) + + res = create_fsm_index_end_to_end(regex_fsm.fsm_info, vocabulary_nb) + + assert res == {0: {(2, 2), (3, 1)}, 2: {(2, 2), (3, 2)}} + + +def test_create_fsm_index_tokenizer(): + # The combined regular expressions of a lexer state in a Python grammar + regex_str = "(?:(?:[0-9](?:(?:_)?[0-9])*(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*|(?:[0-9](?:(?:_)?[0-9])*\\.(?:[0-9](?:(?:_)?[0-9])*)?|\\.[0-9](?:(?:_)?[0-9])*)(?:(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*)?)|[0-9](?:(?:_)?[0-9])*)(?:J|j)|(?:[0-9](?:(?:_)?[0-9])*(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*|(?:[0-9](?:(?:_)?[0-9])*\\.(?:[0-9](?:(?:_)?[0-9])*)?|\\.[0-9](?:(?:_)?[0-9])*)(?:(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*)?)|0(?:x|X)(?:(?:_)?(?:[0-9]|[a-f]|[A-F]))+|0(?:b|B)(?:(?:_)?[0-1])+|0(?:o|O)(?:(?:_)?[0-7])+|(?:(?i:([ubf]?r?|r[ubf])('([^\\\\']|.)*?'))|(?i:([ubf]?r?|r[ubf])(\"([^\\\"]|.)*?\")))|(?:(?:\r?\n[\t ]*|#[^\n]*))+|[1-9](?:(?:_)?[0-9])*|\\\\[\t 
\x0c]*\r?\n|continue|nonlocal|assert|global|import|lambda|return|async|await|break|class|False|match|raise|while|yield|case|from|None|pass|True|with|def|del|for|not|try|if|[^\\W\\d]\\w*|#[^\n]*|[\t \x0c]+|\\.\\.\\.|@|\\{|\\(|\\[|\\-|\\+|\\*|\\~" + + regex_pattern = interegular.parse_pattern(regex_str) + # Not reduced, so that there are many states + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm()) + + num_fsm_states = len(regex_fsm.states) + assert num_fsm_states == 220 + + tokenizer = TransformersTokenizer("gpt2") + + states_to_token_subsets, empty_token_ids = create_fsm_index_tokenizer( + regex_fsm, tokenizer + ) + + assert not empty_token_ids + assert len(states_to_token_subsets) / num_fsm_states > 0.94 + + +@pytest.mark.skip(reason="Only for local profiling") +def test_regex_index_performance(): + from line_profiler import LineProfiler # type: ignore [import] + + regex_str = "(?:(?:[0-9](?:(?:_)?[0-9])*(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*|(?:[0-9](?:(?:_)?[0-9])*\\.(?:[0-9](?:(?:_)?[0-9])*)?|\\.[0-9](?:(?:_)?[0-9])*)(?:(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*)?)|[0-9](?:(?:_)?[0-9])*)(?:J|j)|(?:[0-9](?:(?:_)?[0-9])*(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*|(?:[0-9](?:(?:_)?[0-9])*\\.(?:[0-9](?:(?:_)?[0-9])*)?|\\.[0-9](?:(?:_)?[0-9])*)(?:(?:e|E)(?:(?:\\+|\\-))?[0-9](?:(?:_)?[0-9])*)?)|0(?:x|X)(?:(?:_)?(?:[0-9]|[a-f]|[A-F]))+|0(?:b|B)(?:(?:_)?[0-1])+|0(?:o|O)(?:(?:_)?[0-7])+|(?:(?i:([ubf]?r?|r[ubf])('([^\\\\']|.)*?'))|(?i:([ubf]?r?|r[ubf])(\"([^\\\"]|.)*?\")))|(?:(?:\r?\n[\t ]*|#[^\n]*))+|[1-9](?:(?:_)?[0-9])*|\\\\[\t \x0c]*\r?\n|continue|nonlocal|assert|global|import|lambda|return|async|await|break|class|False|match|raise|while|yield|case|from|None|pass|True|with|def|del|for|not|try|if|[^\\W\\d]\\w*|#[^\n]*|[\t \x0c]+|\\.\\.\\.|@|\\{|\\(|\\[|\\-|\\+|\\*|\\~" + + regex_pattern = interegular.parse_pattern(regex_str) + # Not reduced, so that there are many states + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm()) + + num_fsm_states = len(regex_fsm.states) + assert num_fsm_states == 220 + + tokenizer = TransformersTokenizer("gpt2") + + # Pre-compile Numba functions + res, _ = create_fsm_index_tokenizer(regex_fsm, tokenizer) + assert len(res) > 1 + + profiler = LineProfiler(create_fsm_index_end_to_end) + + profiler.runctx( + "create_fsm_index_tokenizer(regex_fsm, tokenizer)", + globals(), + locals(), + ) + profiler.dump_stats("line-profiler-create_fsm_index.pkl") + profiler.print_stats(output_unit=1e-3, summarize=True, stripzeros=True) + + +@pytest.mark.skip(reason="Only for local profiling") +def test_json_index_performance(): + import json + from enum import Enum + + from line_profiler import LineProfiler # type: ignore [import] + from pydantic import BaseModel, constr + + import outlines.models as models + from outlines.text.generate.regex import Regex, build_regex_from_schema + + class Weapon(str, Enum): + sword = "sword" + axe = "axe" + mace = "mace" + spear = "spear" + bow = "bow" + crossbow = "crossbow" + + class Armor(str, Enum): + leather = "leather" + chainmail = "chainmail" + plate = "plate" + + class Character(BaseModel): + name: constr(max_length=10) + # TODO: Add support for conint + age: int # conint(int, ge=18, le=100) + armor: Armor + weapon: Weapon + # TODO: Add support for conint + strength: int # conint(int, ge=0, le=100) + + model = models.transformers("gpt2", device="cuda") + json_schema = json.dumps(Character.model_json_schema()) + + def build_regex(): + regex_str = build_regex_from_schema(json_schema) + Regex(model, regex_str, 
100) + + profiler = LineProfiler(create_fsm_index_end_to_end) + profiler.add_function(create_fsm_index_tokenizer) + profiler.add_function(Regex.__init__) + + profiler.runctx( + "build_regex()", + globals(), + locals(), + ) + profiler.dump_stats("line-profiler-build-json-regex.pkl") + profiler.print_stats(output_unit=1e-3, summarize=True, stripzeros=True) diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index f4a08dd3..20b96e7d 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -1,22 +1,10 @@ -import random -import re from copy import copy -import interegular import pytest from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken -from outlines.text.parsing import ( - PartialLark, - PartialPythonIndenter, - find_partial_matches, - fsm_union, - get_sub_fsms_from_seq, - make_deterministic_fsm, - map_partial_states_to_vocab, - terminals_to_fsms, -) +from outlines.text.parsing import PartialLark, PartialPythonIndenter def test_partial_parsing(): @@ -204,356 +192,3 @@ def test_sequential_parse_example(): if i + 1 == len(input_tokens): assert all(tk in next_vocab for tk in ["\n", "\nde", " ", " + 1"]) - - -def test_find_partial_matches(): - name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") - name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) - assert name_fsm.initial == 0 - - def_pattern = interegular.parse_pattern("def") - def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) - assert def_fsm.initial == 0 - - assert find_partial_matches(def_fsm, "def") == {(2, (0, 1, 2, 3))} - assert find_partial_matches(def_fsm, "de") == {(1, (0, 1, 2))} - assert find_partial_matches(def_fsm, "d") == {(0, (0, 1))} - assert find_partial_matches(def_fsm, "") == set() - assert find_partial_matches(def_fsm, "df") == set() - assert find_partial_matches(def_fsm, "ef") == {(1, (1, 2, 3))} - assert find_partial_matches(def_fsm, "e") == {(0, (1, 2))} - assert find_partial_matches(def_fsm, "f") == {(0, (2, 3))} - assert find_partial_matches(def_fsm, "ef foo") == {(1, (1, 2, 3))} - - # This string has a `DEF` token in it, but should ultimately not lex one - assert find_partial_matches(def_fsm, "defb") == {(2, (0, 1, 2, 3))} - - # `NAME` can have multiple start states for this input - assert find_partial_matches(name_fsm, "d") == { - (0, (0, 1)), - (0, (1, 1)), - } - # Not this case - assert find_partial_matches(name_fsm, "1d") == {(1, (1, 1, 1))} - - assert find_partial_matches(name_fsm, "blah") == { - (3, (0, 1, 1, 1, 1)), - (3, (1, 1, 1, 1, 1)), - } - - float_pattern = interegular.parse_pattern( - r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))" - ) - float_fsm, _ = make_deterministic_fsm(float_pattern.to_fsm().reduce()) - assert 5 in float_fsm.finals - assert 2 not in float_fsm.finals - - res = find_partial_matches(float_fsm, ".") - assert res == {(0, (3, 5)), (0, (4, 5)), (0, (0, 2))} - - joins_fsm, _ = make_deterministic_fsm( - interegular.parse_pattern(r"(JOIN LEFT|JOIN)").to_fsm().reduce() - ) - res = find_partial_matches( - joins_fsm, "JOIN BLAH", joins_fsm.initial, full_match=False - ) - assert res == {(3, (0, 1, 2, 3, 4))} - - res = find_partial_matches(joins_fsm, "JOIN L", joins_fsm.initial, full_match=False) - assert res == {(5, (0, 1, 2, 3, 4, 5, 6))} - - res = find_partial_matches(joins_fsm, "JOI", joins_fsm.initial, full_match=False) - assert res == {(2, (0, 1, 2, 3))} - - -def test_map_partial_states_to_vocab_python(): - pyparser = PartialLark.open_from_package( - "tests", - 
"partial_python.lark", - ["text"], - parser="lalr", - postlex=PartialPythonIndenter(), - start="file_input", - ) - - symbol_names_and_fsms = terminals_to_fsms(pyparser) - test_symbols = {"DEF", "NAME", "__IGNORE_0"} - symbol_names_and_fsms = { - k: v for k, v in symbol_names_and_fsms.items() if k in test_symbols - } - - assert len(symbol_names_and_fsms["DEF"].states) == 4 - assert len(symbol_names_and_fsms["NAME"].states) == 2 - assert len(symbol_names_and_fsms["__IGNORE_0"].states) == 2 - - vocabulary = ["d", "e", "ef foo", "f ", " ", "1d", ""] - - pstate_to_vocab, possible_paths = map_partial_states_to_vocab( - vocabulary, symbol_names_and_fsms - ) - - assert dict(pstate_to_vocab) == { - ("__IGNORE_0", 0): {4}, - ("__IGNORE_0", 1): {4}, - ("NAME", 0): {0, 1, 2, 3}, - ("NAME", 1): {0, 1, 2, 3, 5}, - ("DEF", 0): {0}, - ("DEF", 1): {1, 2}, - ("DEF", 2): {3}, - } - assert possible_paths["__IGNORE_0"] == {0: {1}, 1: {1}} - assert possible_paths["NAME"] == {0: {1}, 1: {1}} - assert possible_paths["DEF"] == {0: {1}, 1: {2, 3}, 2: {3}} - - pstate_to_vocab, possible_paths = map_partial_states_to_vocab( - vocabulary, symbol_names_and_fsms, final_state_string="" - ) - - assert dict(pstate_to_vocab) == { - ("__IGNORE_0", 0): { - 4, - }, - ("__IGNORE_0", 1): {4, 6}, - ("NAME", 0): {0, 1, 2, 3}, - ("NAME", 1): {0, 1, 2, 3, 5, 6}, - ("DEF", 0): { - 0, - }, - ("DEF", 1): {1, 2}, - ("DEF", 2): { - 3, - }, - ("DEF", 3): { - 6, - }, - } - assert possible_paths["__IGNORE_0"] == {0: {1}, 1: {1}} - assert possible_paths["NAME"] == {0: {1}, 1: {1}} - assert possible_paths["DEF"] == {0: {1}, 1: {2, 3}, 2: {3}} - - -def test_map_partial_states_to_vocab_regex(): - regex_string = r"([0-9]+([.][0-9]*)?|[.][0-9]+)" - regex_pattern = interegular.parse_pattern(regex_string) - regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) - - vocabulary = [ - "1.", - "2", - "3.", - ".", - ".80", - "42", - "1a", - " ", - "0", - "a", - "b", - "$", - "", - ] - - # We want the vocabulary strings to entirely match the regex--not just the - # prefixes of the vocabulary strings - def partial_match_filter(string, end_idx, state_seq): - if end_idx is not None and end_idx < len(string) - 1: - return False - return True - - pstate_to_vocab, possible_paths = map_partial_states_to_vocab( - vocabulary, {"FLOAT": regex_fsm}, partial_match_filter, "" - ) - - assert sorted(pstate_to_vocab.values(), key=lambda x: -len(x)) == [ - {0, 1, 2, 3, 4, 5, 8, 12}, - {0, 1, 2, 3, 4, 5, 8}, - {1, 5, 8, 12}, - {1, 5, 8}, - ] - assert possible_paths["FLOAT"] == {2: {2, 3}, 0: {1, 2, 3}, 3: {3}, 1: {3}} - - pstate_to_vocab = {k: tuple(v) for k, v in pstate_to_vocab.items()} - - random.seed(24080) - - for n in range(50): - # Start at the initial state - pstate = ("FLOAT", regex_fsm.initial) - - sample_seq = "" - - for i in range(5): - next_support = pstate_to_vocab[pstate] - - (next_sample_idx,) = random.sample(next_support, 1) - - next_sample = vocabulary[next_sample_idx] - - if next_sample == "": - break - - sample_seq += next_sample - - # Continue matching from where we left off - (pmatch,) = find_partial_matches( - regex_fsm, next_sample, start_state=pstate[-1] - ) - - # Create the next state - pstate = (pstate[0], pmatch[1][-1]) - - # TODO: We could check if the FSM is done (i.e. 
in an final/accept - # state) and end the sampling loop - - # Make sure the whole thing matches the regex - assert re.fullmatch(regex_string, sample_seq) is not None - - -def test_get_sub_fsms_from_seq(): - name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") - name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) - - def_pattern = interegular.parse_pattern("def") - def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) - - match_pattern = interegular.parse_pattern("match") - match_fsm, _ = make_deterministic_fsm(match_pattern.to_fsm().reduce()) - - peq_pattern = interegular.parse_pattern(r"\+=") - peq_fsm, _ = make_deterministic_fsm(peq_pattern.to_fsm().reduce()) - - plus_pattern = interegular.parse_pattern(r"\+") - plus_fsm, _ = make_deterministic_fsm(plus_pattern.to_fsm().reduce()) - - fsms = [def_fsm, match_fsm, name_fsm, peq_fsm, plus_fsm] - - fsm, fsms_to_trans_finals = fsm_union(fsms) - - assert fsms_to_trans_finals == { - 0: ({(0, 3), (3, 9), (9, 10)}, {10}, {0: {0}, 1: {3}, 2: {9}, 3: {10}}), - 1: ( - {(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)}, - {8}, - {0: {0}, 1: {4}, 2: {5}, 3: {6}, 4: {7}, 5: {8}}, - ), - 2: ( - { - (0, 2), - (0, 3), - (0, 4), - (2, 2), - (3, 2), - (3, 9), - (4, 2), - (4, 5), - (5, 2), - (5, 6), - (6, 2), - (6, 7), - (7, 2), - (7, 8), - (8, 2), - (9, 2), - (9, 10), - (10, 2), - }, - {2, 3, 4, 5, 6, 7, 8, 9, 10}, - {0: {0}, 1: {2, 3, 4, 5, 6, 7, 8, 9, 10}}, - ), - 3: ({(0, 1), (1, 11)}, {11}, {0: {0}, 1: {1}, 2: {11}}), - 4: ({(0, 1)}, {1}, {0: {0}, 1: {1}}), - } - - assert not fsm.accepts("1a") - assert fsm.accepts("a1") - assert fsm.accepts("def") - assert fsm.accepts("match") - assert fsm.accepts("+=") - assert fsm.accepts("+") - - ((_, state_seq),) = find_partial_matches(fsm, "def", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(0, False, True), (2, True, True)] - - # Make sure the old-to-new state map is correct - ((_, def_state_seq),) = find_partial_matches( - def_fsm, "def", start_state=fsm.initial - ) - def_old_to_new_states = fsms_to_trans_finals[0][2] - assert all( - new_state in def_old_to_new_states[old_state] - for old_state, new_state in zip(def_state_seq, state_seq) - ) - - ((_, state_seq),) = find_partial_matches(fsm, "ef", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(2, True, True)] - - ((_, name_state_seq),) = find_partial_matches( - name_fsm, "ef", start_state=fsm.initial - ) - name_old_to_new_states = fsms_to_trans_finals[2][2] - assert all( - new_state in name_old_to_new_states[old_state] - for old_state, new_state in zip(name_state_seq, state_seq) - ) - - ((_, state_seq),) = find_partial_matches(fsm, "match", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(1, False, True), (2, True, True)] - - ((_, match_state_seq),) = find_partial_matches( - match_fsm, "match", start_state=fsm.initial - ) - match_old_to_new_states = fsms_to_trans_finals[1][2] - assert all( - new_state in match_old_to_new_states[old_state] - for old_state, new_state in zip(match_state_seq, state_seq) - ) - - ((_, state_seq),) = find_partial_matches(fsm, "defa", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(2, True, True)] - - ((_, state_seq),) = find_partial_matches(fsm, "de", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert 
res == [(0, True, False), (2, True, True)] - - ((_, state_seq),) = find_partial_matches(fsm, "+", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(3, True, False), (4, False, True)] - - ((_, state_seq),) = find_partial_matches(fsm, "+=", start_state=fsm.initial) - - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(3, False, True)] - - # Test some overlapping patterns - join_fsms = [ - interegular.parse_pattern(r"JOIN").to_fsm().reduce(), - interegular.parse_pattern(r"JOIN LEFT").to_fsm().reduce(), - ] - fsm, fsms_to_trans_finals = fsm_union(join_fsms) - ((_, state_seq),) = find_partial_matches( - fsm, "OI", start_state=None, full_match=False - ) - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(0, True, False), (1, True, False)] - - ((_, state_seq),) = find_partial_matches( - fsm, "N", start_state=None, full_match=False - ) - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(0, False, True), (1, True, False)] - - ((_, state_seq),) = find_partial_matches( - fsm, " ", start_state=None, full_match=False - ) - res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) - assert res == [(1, True, False)] From 34b4530d1c008157bcff0d56b50a6718d4139c31 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 28 Sep 2023 19:26:20 -0500 Subject: [PATCH 232/734] Use device_map value when device is unspecified --- outlines/models/transformers.py | 8 +++++--- tests/models/test_transformers.py | 8 ++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 64242388..34c8174d 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -159,9 +159,9 @@ def transformers( ---------- model_name The name of the model as listed on Hugging Face's model page. - device_map + device The device(s) on which the model should be loaded. This overrides - the value passed for `device_map` in `model_kwargs`. + the `device_map` entry in `model_kwargs` when provided. model_kwargs A dictionary that contains the keyword arguments to pass to the `from_pretrained` method when loading the model. @@ -181,7 +181,9 @@ def transformers( "The `transformers` library needs to be installed in order to use `transformers` models." 
) - model_kwargs["device_map"] = device + if device is not None: + model_kwargs["device_map"] = device + model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs) tokenizer = TransformersTokenizer(model_name, **tokenizer_kwargs) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 8357fc59..1262e0c0 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -60,6 +60,14 @@ def test_model(): assert isinstance(model.tokenizer, TransformersTokenizer) assert model.device.type == "cpu" + model = transformers(TEST_MODEL, model_kwargs={"device_map": "cpu"}) + assert isinstance(model.tokenizer, TransformersTokenizer) + assert model.device.type == "cpu" + + model = transformers(TEST_MODEL, device="cpu", model_kwargs={"device_map": "cuda"}) + assert isinstance(model.tokenizer, TransformersTokenizer) + assert model.device.type == "cpu" + input_ids = torch.tensor([[0, 1, 2]]) logits = model(input_ids, torch.ones_like(input_ids)) assert logits.type() == "torch.FloatTensor" From 5af26cb93905605451ffe3a2594a7c60316a7505 Mon Sep 17 00:00:00 2001 From: Barahlush Date: Sun, 1 Oct 2023 19:54:58 +0600 Subject: [PATCH 233/734] Fix typos in model usage examples --- docs/source/reference/batching.rst | 2 +- docs/source/reference/controlled_generation.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/reference/batching.rst b/docs/source/reference/batching.rst index 20bee192..beb330f8 100644 --- a/docs/source/reference/batching.rst +++ b/docs/source/reference/batching.rst @@ -8,7 +8,7 @@ Outlines is sampling-first, and is built to generate several samples from the sa import outlines.models as models sample = models.text_generation.openai("text-davinci-003") - answers = complete( + answers = sample( "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?", samples=10 ) diff --git a/docs/source/reference/controlled_generation.rst b/docs/source/reference/controlled_generation.rst index acaec852..703b9069 100644 --- a/docs/source/reference/controlled_generation.rst +++ b/docs/source/reference/controlled_generation.rst @@ -34,7 +34,7 @@ In some cases we know the output is to be chosen between different options. We c import outlines.models as models complete = models.text_completion.openai("text-davinci-002") - answer = model( + answer = complete( "Pick the odd word out: skirt, dress, pen, jacket", is_in=["skirt", "dress", "pen", "jacket"] ) @@ -52,7 +52,7 @@ We can ask completions to be restricted to `int`s or `float`s using the `type` k import outlines.models as models complete = models.text_completion.openai("text-davinci-002") - answer = model( + answer = complete( "When I was 6 my sister was half my age. 
Now I’m 70 how old is my sister?", type="int" ) From 4ce76ff8ae2e52d2e72d4c08277abe0e92439b2b Mon Sep 17 00:00:00 2001 From: Matt Kindy Date: Mon, 2 Oct 2023 16:31:57 -0500 Subject: [PATCH 234/734] Modify resolution strategy to support resolving values internal to the schema --- outlines/text/json_schema.py | 73 +++++++++++++++++----- tests/text/test_json_schema.py | 108 +++++++++++++++++++++++++++++++++ 2 files changed, 166 insertions(+), 15 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 6e537e21..4fe8da1c 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -1,7 +1,7 @@ import itertools import json import re -from typing import Dict +from typing import Callable, Dict STRING_INNER = r'(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)' STRING = f'"{STRING_INNER}*"' @@ -43,6 +43,45 @@ def build_regex_from_schema(schema: str): return regex +def _ref_resolver(schema: Dict) -> Callable[[str], Dict]: + cache: Dict[str, Dict] = dict() + + if "$id" in schema: + cache[schema["$id"]] = schema + + if "$defs" in schema: + for definition, annotation in schema["$defs"].items(): + cache[f"#/$defs/{definition}"] = annotation + + if "$id" in annotation: + cache[annotation["$id"]] = annotation + + def resolver(reference: str) -> Dict: + """Resolve a $ref reference in the context of the top-level schema.""" + + if reference in cache: + return cache[reference] + + path = reference.split("/") + + # Navigate through the top-level schema based on the path + subschema = schema + + if path[0] != "#": + raise ValueError(f"Unable to resolve reference: {reference}") + + for step in path[1:]: + if step in subschema: + subschema = subschema[step] + else: + raise ValueError(f"Unable to resolve reference: {reference}") + + cache[reference] = subschema + return subschema + + return resolver + + def build_schedule_from_schema(schema: str): """Turn a JSON schema into a regex that matches any JSON object that follows this schema. @@ -73,13 +112,7 @@ def build_schedule_from_schema(schema: str): """ schema = json.loads(schema) - # Find object definitions in the schema, if any - definitions = {} - if "$defs" in schema: - for definition, annotation in schema["$defs"].items(): - definitions[f"#/$defs/{definition}"] = annotation - - schema = expand_json_schema(schema, definitions) + schema = expand_json_schema(schema, resolver=_ref_resolver(schema)) schedule = build_schedule_from_instance(schema) # Concatenate adjacent strings @@ -92,20 +125,26 @@ def build_schedule_from_schema(schema: str): return reduced_schedule -def expand_json_schema(raw_schema: Dict, definitions: Dict): +def expand_json_schema( + raw_schema: Dict, + resolver: Callable[[str], Dict], +): """Replace references by their value in the JSON Schema. This recursively follows the references to other schemas in case - of nested models. Other schemas are stored under the "definitions" - key in the schema of the top-level model. + of nested models. Other schemas that may exist at a higher level + within the overall schema may be referenced via the `$ref` keyword + according to the JSON Schema specification. + Parameters --------- raw_schema The raw JSON schema as a Python dictionary, possibly with definitions and references. - definitions - The currently known definitions. + resolver + A function that takes a reference and returns the corresponding schema + or subschema from the currently scoped top-level schema. 
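+        For example, the resolver built by `_ref_resolver` above would map a
+        reference such as "#/$defs/address" to the corresponding sub-schema
+        of the enclosing top-level schema.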
Returns ------- @@ -116,16 +155,20 @@ def expand_json_schema(raw_schema: Dict, definitions: Dict): expanded_properties = {} if "properties" in raw_schema: + if "$id" in raw_schema: + # see https://json-schema.org/understanding-json-schema/structuring#bundling + resolver = _ref_resolver(raw_schema) + for name, value in raw_schema["properties"].items(): if "$ref" in value: # if item is a single element expanded_properties[name] = expand_json_schema( - definitions[value["$ref"]], definitions + resolver(value["$ref"]), resolver ) elif "type" in value and value["type"] == "array": # if item is a list expanded_properties[name] = value if "$ref" in value["items"]: expanded_properties[name]["items"] = expand_json_schema( - definitions[value["items"]["$ref"]], definitions + resolver(value["items"]["$ref"]), resolver ) else: expanded_properties[name]["items"] = value["items"] diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 68737788..5f19ce21 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -189,6 +189,114 @@ def test_json_schema(): ] +def test_json_schema_with_property_ref(): + schema = """{ + "title": "User", + "type": "object", + "properties": { + "user_id": {"title": "User Id", "type": "integer"}, + "name": {"title": "Name", "type": "string"}, + "a": {"$ref": "#/properties/name"}, + "b": {"$ref": "#/properties/name"}, + "c": {"$ref": "#/properties/name"} + }, + "required": ["user_id", "name"]} + """ + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', + {"title": "User Id", "type": "integer"}, + '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', + {"title": "Name", "type": "string"}, + '[\\n ]*,[\\n ]*"a"[\\n ]*:[\\n ]*', + {"title": "Name", "type": "string"}, + '[\\n ]*,[\\n ]*"b"[\\n ]*:[\\n ]*', + {"title": "Name", "type": "string"}, + '[\\n ]*,[\\n ]*"c"[\\n ]*:[\\n ]*', + {"title": "Name", "type": "string"}, + "[\\n ]*\\}", + ] + + +def test_json_schema_with_def_ref(): + schema = """{ + "title": "User", + "type": "object", + "$defs": { + "name": {"title": "Name2", "type": "string"} + }, + "properties": { + "user_id": {"title": "User Id", "type": "integer"}, + "name": {"title": "Name", "type": "string"}, + "name2": {"$ref": "#/$defs/name"} + }, + "required": ["user_id", "name"]} + """ + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', + {"title": "User Id", "type": "integer"}, + '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', + {"title": "Name", "type": "string"}, + '[\\n ]*,[\\n ]*"name2"[\\n ]*:[\\n ]*', + {"title": "Name2", "type": "string"}, + "[\\n ]*\\}", + ] + + +def test_json_schema_with_bundled_ref(): + schema = """{ + "$id": "https://example.com/schemas/customer", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Customer", + "type": "object", + "properties": { + "first_name": { "type": "string" }, + "last_name": { "type": "string" }, + "shipping_address": { "$ref": "/schemas/address" }, + "billing_address": { "$ref": "/schemas/address" } + }, + "required": ["first_name", "last_name", "shipping_address", "billing_address"], + "$defs": { + "address": { + "title": "Address", + "$id": "/schemas/address", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "street_address": { "type": "string" }, + "city": { "type": "string" }, + "state": { "$ref": "#/definitions/state" 
} + }, + "required": ["street_address", "city", "state"], + "definitions": { + "state": { "type": "object", "title": "State", "properties": { "name": { "type": "string" } }, "required": ["name"] } + } + } + } + }""" + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"first_name"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"last_name"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"shipping_address"[\\n ]*:[\\n ]*\\{[\\n ]*"street_address"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"city"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"state"[\\n ]*:[\\n ]*\\{[\\n ]*"name"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*\\}[\\n ]*\\}[\\n ]*,[\\n ]*"billing_address"[\\n ]*:[\\n ]*\\{[\\n ]*"street_address"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"city"[\\n ]*:[\\n ]*', + {"type": "string"}, + '[\\n ]*,[\\n ]*"state"[\\n ]*:[\\n ]*\\{[\\n ]*"name"[\\n ]*:[\\n ]*', + {"type": "string"}, + "[\\n ]*\\}[\\n ]*\\}[\\n ]*\\}", + ] + + class MockTokenizer: pad_token_id = 0 eos_token_id = 0 From 1171e68402a87af400f91057432b505a85b71e9b Mon Sep 17 00:00:00 2001 From: Matt Kindy Date: Mon, 2 Oct 2023 14:44:28 -0500 Subject: [PATCH 235/734] Fix JSON schema generation to accept schemas without 'title' property --- outlines/text/json_schema.py | 2 +- tests/text/test_json_schema.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 4fe8da1c..ddccde5c 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -176,7 +176,7 @@ def expand_json_schema( expanded_properties[name] = value return { - "title": raw_schema["title"], + **({"title": raw_schema["title"]} if "title" in raw_schema else {}), "type": raw_schema["type"], "properties": expanded_properties, } diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 5f19ce21..19c01c3e 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -189,6 +189,18 @@ def test_json_schema(): ] +def test_json_schema_no_titles(): + schema = '{"type": "object", "properties": {"user_id": {"type": "integer"}, "name": {"type": "string"}}, "required": ["user_id", "name"]}' + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', + {"type": "integer"}, + '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', + {"type": "string"}, + "[\\n ]*\\}", + ] + + def test_json_schema_with_property_ref(): schema = """{ "title": "User", From f43a47f3f6fbf75f6de6c331c9794dfd49b1af9b Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Tue, 3 Oct 2023 16:18:29 -0500 Subject: [PATCH 236/734] Update resize padding multiple in test_transformers_logits_vocab_size --- tests/text/generate/test_integration_transfomers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 74aea7c1..3ea93b93 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -235,7 +235,7 @@ def test_transformers_logits_vocab_size(): # Artificially increase the weights/logits size relative # to the vocabulary - model.model.resize_token_embeddings(pad_to_multiple_of=2) + model.model.resize_token_embeddings(pad_to_multiple_of=3) assert len(model.tokenizer.vocabulary) == 1024 assert model.model.base_model.wte.weight.shape[0] == 1026 From 07a9535134d5ea8d0bd90253dc74ce908bfe7802 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Tue, 3 Oct 2023 18:25:27 -0500 Subject: [PATCH 237/734] Avoid unhashable additional_special_tokens values in special_tokens_map --- outlines/models/transformers.py | 5 +++-- tests/models/test_transformers.py | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 34c8174d..af1d9a66 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -99,7 +99,8 @@ def __init__(self, model_name: str, **kwargs): kwargs.setdefault("padding_side", "left") self.model_name = model_name - self.kwargs = frozenset(kwargs.items()) + # TODO: Do something to make this hashable? + self.kwargs = kwargs self.tokenizer = AutoTokenizer.from_pretrained(model_name, **kwargs) self.eos_token_id = self.tokenizer.eos_token_id self.eos_token = self.tokenizer.eos_token @@ -111,7 +112,7 @@ def __init__(self, model_name: str, **kwargs): self.pad_token_id = self.tokenizer.pad_token_id self.pad_token = self.tokenizer.pad_token - self.special_tokens = set(self.tokenizer.special_tokens_map.values()) + self.special_tokens = set(self.tokenizer.all_special_tokens) self.vocabulary = self.tokenizer.get_vocab() self.is_llama = isinstance(self.tokenizer, get_llama_tokenizer_types()) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 1262e0c0..02360c49 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -37,6 +37,12 @@ def test_tokenizer(): isinstance(text[0], str) isinstance(text[1], str) + tokenizer = TransformersTokenizer( + TEST_MODEL, additional_special_tokens=["", ""] + ) + assert "" in tokenizer.special_tokens + assert "" in tokenizer.special_tokens + def test_llama_tokenizer(): tokenizer = TransformersTokenizer("hf-internal-testing/llama-tokenizer") From bb07150909c978e8d714bc50aabde5493dc702aa Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Sun, 1 Oct 2023 17:01:03 -0500 Subject: [PATCH 238/734] Remove unnecessary array allocations in generation process --- outlines/text/generate/continuation.py | 28 ++-- outlines/text/generate/regex.py | 4 +- outlines/text/generate/sequence.py | 157 +++++++----------- .../generate/test_integration_transfomers.py | 10 +- tests/text/generate/test_sequence.py | 154 +++++++---------- 5 files changed, 147 insertions(+), 206 deletions(-) diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index b45fb0e1..fa498143 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -46,22 +46,22 @@ def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor: """ - sequences = self.model.tokenizer.decode(token_ids) - contains_stop_sequence = [] - for sequence in sequences: - found = False - for stop_str in self.stop_sequences: - if stop_str in sequence: - found = True - - contains_stop_sequence.append(found) - - contains_stop_sequence = torch.tensor( - contains_stop_sequence, dtype=torch.bool, device=self.model.device - ) contains_eos = token_ids[:, -1] == self.model.tokenizer.eos_token_id - return torch.logical_or(contains_eos, contains_stop_sequence) + if self.stop_sequences: + sequences = self.model.tokenizer.decode(token_ids) + contains_stop_sequence = [] + for sequence in sequences: + contains_stop_sequence.append( + any(stop_str in sequence for stop_str in self.stop_sequences) + ) + contains_stop_sequence = torch.tensor( + contains_stop_sequence, dtype=torch.bool, device=self.model.device + ) + + return torch.logical_or(contains_eos, contains_stop_sequence) + else: + return contains_eos def postprocess_completions(self, completions: List[str]) -> List[str]: """Remove the EOS token from the completion. diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index a950e9bf..ca75673a 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -108,9 +108,7 @@ def create_proposal( assert generated_token_ids.ndim == 2 if len(self.last_fsm_states) == 0: - self.last_fsm_states = [ - self.initial_state for _ in range(generated_token_ids.shape[0]) - ] + self.last_fsm_states = [self.initial_state for _ in range(logits.shape[0])] masks = [] diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 699f12c5..60decfd8 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -1,3 +1,4 @@ +import math from typing import List, Optional, Tuple, Union import torch @@ -61,115 +62,55 @@ def step( num_prompt_tokens The number of tokens in the prompt. token_ids - The token ids passed as an input to the model, of shape `batch_shape - + (num_tokens,)`, where `num_tokens` is the sequences' length. + The token sequences. It has dimensions ``(n_seqs, n)`` for + some sequence length ``n <= num_prompt_tokens``. samples The number of continuations to sample from the next-token probability distribution. Returns ------- - A tuple with an array of shape `new_batch_shape + (num_tokens+1,)`that - contains the completed sequences (input token ids and generated token - ids) and an array of shape `new_batch_shape + (vocab_size,)` that - contains the next token probabilities. - `new_batch_shape` is computed by removing dimensions of size one in - `(samples,) + batch_shape`. + A tuple with an array of shape ``(samples, n_seqs, 1)`` + that contains the completed sequences (i.e. 
input token IDs and + generated token IDs) and an array of shape + ``(samples, n_seqs, vocab_size)`` that contains the next token + probabilities. """ - num_input_dims = token_ids.ndim probs = self.model(token_ids, attention_mask) probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs) probs = torch.nn.functional.softmax(probs, dim=-1) - # Sample `samples`-many new tokens. - next_token_ids = vectorized_random_choice(rng, probs, samples) + assert probs.shape[:-1] == token_ids.shape[:-1] - # Add the missing `num_tokens` and `num_sample` dimensions. - next_token_ids = torch.unsqueeze(next_token_ids, -1) - token_ids = torch.unsqueeze(token_ids, 0) + next_token_ids = vectorized_random_choice(rng, probs, samples).unsqueeze(-1) + probs = torch.broadcast_to(probs, (samples,) + probs.shape) - # Expand the input `token_ids` array to be able to concatenate several - # samples. - if samples > 1: - repetitions = (samples,) + (1,) * num_input_dims - token_ids = torch.tile(token_ids, repetitions) - probs = torch.tile(probs, repetitions) - - token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1) - - # Merge sample and batch dimensions by removing dimensions of length - # 1. The shape of the resulting arrays is `new_batch_shape + (num_tokens,)` - # and `new_batch_shape + (vocab_size,)` respectively. - token_ids = torch.atleast_2d(token_ids.squeeze()) - probs = torch.atleast_2d(probs.squeeze()) - - return token_ids, probs + return next_token_ids, probs def expand_attention_mask( self, attention_mask: torch.LongTensor ) -> torch.LongTensor: - """Expand the attention mask after the last completion.""" - batch_shape = attention_mask.shape[:-1] - attention_mask = torch.concatenate( - [ - attention_mask, - torch.broadcast_to( - torch.tensor([1], device=self.device), batch_shape + (1,) - ), - ], - axis=-1, - ) - return attention_mask - - def update_token_ids( - self, - is_finished: torch.BoolTensor, - token_ids: torch.LongTensor, - token_ids_unfinished: torch.LongTensor, - ) -> torch.LongTensor: - """Update the array of token ids after the last completion. - - We only generate new tokens for the sequences that are not finished. We thus - update the array with the new tokens, and append pad tokens to the finished - sequences. + """Expand the attention mask after the last completion. Parameters ---------- - is_finished - Boolean array that indicates which sequences are finished. - token_ids - Array that contains the sequences before the generation's last step. - token_ids_unfinished - Array that contains the sequences of the unfinished sequences - after the generation's last step. + attention_mask + An attention mask with shape ``(n_seqs, attention_mask_len)``. Returns ------- - An array that contains the updated array that contains the sequences. We append - pad tokens to the finished sequences. + A new attention mask with shape ``(n_seqs, attention_mask_len + 1)``. 
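+        The appended column is all ones, since a freshly generated token
+        should always be attended to.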
""" - batch_shape = token_ids.shape[:-1] - num_tokens = token_ids.shape[-1] - new_token_ids = torch.empty( - batch_shape + (num_tokens + 1,), dtype=torch.int64, device=self.device - ) - token_ids_finished = torch.concatenate( + attention_mask = torch.concatenate( [ - token_ids[is_finished], - torch.broadcast_to( - self.pad_token_id, - token_ids[is_finished].shape[:-1] + (1,), - ), + attention_mask, + torch.ones(attention_mask.shape[:-1] + (1,), device=self.device), ], axis=-1, ) - - new_token_ids[~is_finished] = token_ids_unfinished - new_token_ids[is_finished] = token_ids_finished - - return new_token_ids + return attention_mask @torch.inference_mode() def __call__( @@ -192,8 +133,12 @@ def __call__( The full sequence that contains the prompts and the generated string. """ + token_ids, attention_mask = self.model.tokenizer.encode(prompt) + token_ids = token_ids.squeeze(0) + attention_mask = attention_mask.squeeze(0) + token_ids = token_ids.to(self.device) attention_mask = attention_mask.to(self.device) @@ -201,40 +146,56 @@ def __call__( rng = torch.Generator(device=self.device) rng.seed() + orig_batch_shape = token_ids.shape[:-1] num_prompt_tokens = token_ids.shape[-1] - if samples > 1: - token_ids, _ = self.step( - rng, num_prompt_tokens, token_ids, attention_mask, samples - ) - is_finished = self.is_finished(token_ids) + token_ids = torch.broadcast_to(token_ids, (samples,) + token_ids.shape) + attention_mask = torch.broadcast_to( + attention_mask, (samples,) + attention_mask.shape + ) - num_batch_dims = token_ids.ndim - 1 - repetitions = (samples,) + (1,) * num_batch_dims - attention_mask = torch.tile(attention_mask, repetitions) - attention_mask = self.expand_attention_mask(attention_mask) - else: - batch_shape = token_ids.shape[:-1] - is_finished = torch.zeros(batch_shape, dtype=torch.bool, device=self.device) + # We flatten the original batch and sample dimensions so that the + # resulting shape we work in is simply `(num_of_sequences, tokens)` + batch_size = samples * math.prod(orig_batch_shape) + token_ids = token_ids.reshape((batch_size, num_prompt_tokens)) + attention_mask = attention_mask.reshape((batch_size, num_prompt_tokens)) + + is_finished = torch.zeros(batch_size, dtype=torch.bool, device=self.device) while True: num_generated_tokens = token_ids.shape[-1] - num_prompt_tokens if torch.all(is_finished) or num_generated_tokens == self.max_tokens: break - updated_token_ids, _ = self.step( + is_not_finished = ~is_finished + + # Draw samples only for the sequences that aren't finished + unfinished_token_ids = token_ids[is_not_finished] + unfinished_attention_mask = attention_mask[is_not_finished] + unfinished_next_token_ids, _ = self.step( rng, num_prompt_tokens, - token_ids[~is_finished], - attention_mask[~is_finished], + unfinished_token_ids, + unfinished_attention_mask, + ) + unfinished_next_token_ids = unfinished_next_token_ids.squeeze(0) + + # Create an array for the next tokens of every sequence, including + # the finished ones (but pad them) + next_token_ids = torch.full( + (batch_size, 1), self.pad_token_id, device=self.device ) - token_ids = self.update_token_ids(is_finished, token_ids, updated_token_ids) + next_token_ids[is_not_finished] = unfinished_next_token_ids + + token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1) + attention_mask = self.expand_attention_mask(attention_mask) - is_finished[~is_finished] = self.is_finished( - updated_token_ids[:, num_prompt_tokens:] + + is_finished[is_not_finished] = self.is_finished( + 
token_ids[is_not_finished][:, num_prompt_tokens:] ).flatten() - result = self.model.tokenizer.decode(token_ids[..., num_prompt_tokens:]) + result = self.model.tokenizer.decode(token_ids[:, num_prompt_tokens:]) result = self.postprocess_completions(result) if len(result) == 1: diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 3ea93b93..ef54dcbd 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -58,9 +58,17 @@ def test_transformers_various_regexes(): model = models.transformers(model_name, device="cpu") prompt = "Write an email address" regex_str = r"([a-z]{10})@([a-z]{5})\.([a-z]{3})" - sequence = generate.regex(model, regex_str)(prompt, rng=rng) + generator = generate.regex(model, regex_str) + + # One prompt + sequence = generator(prompt, rng=rng) assert re.fullmatch(regex_str, sequence) is not None + # Two prompts + sequence = generator([prompt, prompt], rng=rng) + assert re.fullmatch(regex_str, sequence[0]) is not None + assert re.fullmatch(regex_str, sequence[1]) is not None + def test_transformers_integration_integer(): rng = torch.Generator() diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 8e5f3dd1..f983b0c0 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -1,9 +1,11 @@ import math from typing import Dict, List, Union +import numpy as np import pytest import torch +from outlines.models.tokenizer import Tokenizer from outlines.text.generate.sequence import Sequence, vectorized_random_choice @@ -27,10 +29,12 @@ def __call__(self, input_ids, *_): return shaped_logits.reshape(batch_shape + vocab_shape) -class MockTokenizer: +class MockTokenizer(Tokenizer): def __init__(self, vocabulary: Dict[str, int]): self.vocabulary = vocabulary + self.id_to_str = {v: k for k, v in vocabulary.items()} if vocabulary else {} self.pad_token_id = -1 + self.id_to_str[self.pad_token_id] = "" def encode(self, prompts: Union[str, List[str]]): if isinstance(prompts, str): @@ -42,7 +46,22 @@ def encode(self, prompts: Union[str, List[str]]): return token_ids, attention_mask def decode(self, token_ids): - return token_ids + ndims = np.ndim(token_ids) + + assert 0 < ndims <= 2 + + if ndims == 1: + token_ids = [token_ids] + + res = ["".join(self.id_to_str[int(idx)] for idx in seq) for seq in token_ids] + + return res if ndims > 1 else res[0] + + def convert_token_to_string(self, token: str) -> str: + return token + + def __hash__(self): + return id(self) def test_vectorized_random_choice(): @@ -117,44 +136,44 @@ def test_sequence_step(): input_ids = torch.tensor([[1, 2]]) token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) - assert torch.equal(token_ids, torch.tensor([[1, 2, 1]])) - assert probs.shape == (1, 4) + assert torch.equal(token_ids, torch.tensor([[[1]]])) + assert probs.shape == (1, 1, 4) def test_sequence_step_batch(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) + logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2], [3, 4]]) token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) - assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [3, 4, 1]])) - assert probs.shape == (2, 4) + assert torch.equal(token_ids, 
torch.tensor([[[1], [2]]])) + assert probs.shape == (1, 2, 4) def test_sequence_step_sample(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) + logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) - assert torch.equal(token_ids, torch.tensor([[1, 2, 1], [1, 2, 1], [1, 2, 1]])) - assert probs.shape == (3, 4) + assert torch.equal(token_ids, torch.tensor([[[1]], [[2]], [[1]]])) + assert probs.shape == (3, 1, 4) def test_sequence_step_sample_batch(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) + logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) @@ -164,9 +183,9 @@ def test_sequence_step_sample_batch(): token_ids, torch.tensor( [ - [[1, 2, 1, 1], [3, 4, 1, 1]], - [[1, 2, 1, 1], [3, 4, 1, 1]], - [[1, 2, 1, 1], [3, 4, 1, 1]], + [[1], [2]], + [[1], [1]], + [[1], [2]], ] ), ) @@ -178,91 +197,54 @@ def test_sequence_step_loop(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) + logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) - token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((1, 3))) - assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1]])) - assert probs.shape == (1, 4) + token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(0), torch.ones((1, 3))) + assert torch.equal(token_ids, torch.tensor([[[2]]])) + assert probs.shape == (1, 1, 4) input_ids = torch.tensor([[1, 2], [3, 4]]) token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) - token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((2, 3))) - assert torch.equal(token_ids, torch.tensor([[1, 2, 1, 1], [3, 4, 1, 1]])) - assert probs.shape == (2, 4) + token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(0), torch.ones((2, 3))) + assert torch.equal(token_ids, torch.tensor([[[1], [2]]])) + assert probs.shape == (1, 2, 4) # The number of samples becomes the batch size at the next iteration. 
input_ids = torch.tensor([[1, 2]]) token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) - token_ids, probs = sequence.step(rng, 2, token_ids, torch.ones((3, 3))) - assert torch.equal( - token_ids, torch.tensor([[1, 2, 1, 1], [1, 2, 1, 1], [1, 2, 1, 1]]) - ) - assert probs.shape == (3, 4) + token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(1), torch.ones((3, 3))) + assert torch.equal(token_ids, torch.tensor([[[2], [1], [1]]])) + assert probs.shape == (1, 3, 4) def test_sequence_step_loop_general(): rng = torch.Generator() rng.manual_seed(0) - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) + logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) model = ModelStep(MockTokenizer(None), logits) sequence = Sequence(model) input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) token_ids, _ = sequence.step(rng, 3, input_ids, torch.ones((1, 3)), samples=3) result, _ = sequence.step(rng, 3, token_ids, torch.ones((3, 4))) - assert result.shape == (3, 2, 5) + assert result.shape == (1, 3, 2, 1) assert torch.equal( - result, + result.squeeze(0), torch.tensor( [ - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], - [[1, 2, 1, 1, 1], [3, 4, 1, 1, 1]], + [[1], [2]], + [[1], [2]], + [[1], [1]], ] ), ) -class TokenizerUpdateTokens: - pad_token_id = -1 - - -class ModelUpdateTokens: - tokenizer = TokenizerUpdateTokens() - device = "cpu" - - -def test_update_token_ids_all_unfinished(): - sequence = Sequence(ModelUpdateTokens()) - - previous_token_ids = torch.tensor([[1, 1], [1, 1]]) - is_finished = torch.tensor([False, False]) - token_ids_unfinished = torch.tensor([[1, 1, 1], [1, 1, 1]]) - - result = sequence.update_token_ids( - is_finished, previous_token_ids, token_ids_unfinished - ) - assert torch.equal(result, torch.tensor([[1, 1, 1], [1, 1, 1]])) - - -def test_update_token_ids_some_unfinished(): - "Makes sure that the pad token is appended to finished sequences." 
- sequence = Sequence(ModelUpdateTokens()) - - previous_token_ids = torch.tensor([[1, 1], [1, 1]]) - token_ids_unfinished = torch.tensor([[1, 1, 1]]) - is_finished = torch.tensor([True, False]) - result = sequence.update_token_ids( - is_finished, previous_token_ids, token_ids_unfinished - ) - assert torch.equal(result, torch.tensor([[1, 1, -1], [1, 1, 1]])) - - def test_call_single_prompt(): class FinishAfterTwo(Sequence): def __init__(self, model): @@ -285,25 +267,10 @@ def is_finished(self, token_ids): sequence = FinishAfterTwo(model) result = sequence("Test") - assert torch.equal(result, torch.tensor([0, 1])) + assert result == "Testa" def test_call_prompt_list(): - class Tokenizer: - def __init__(self, vocabulary: Dict[str, int]): - self.vocabulary = vocabulary - self.pad_token_id = -1 - - def __call__(self, prompts: List[str], **_): - return { - "input_ids": torch.tensor( - [[self.vocabulary[prompt]] for prompt in prompts] - ) - } - - def batch_decode(self, token_ids): - return token_ids - class FinishAfterThree(Sequence): def __init__(self, model): super().__init__(model) @@ -339,7 +306,7 @@ def is_finished(self, token_ids): sequence = FinishAfterThree(model) result = sequence(["Test1", "Test2", "Test3"]) - assert torch.equal(result, torch.tensor([[2, 3, -1], [2, 3, 4], [2, 3, -1]])) + assert result == ["ab", "abc", "ab"] def test_call_single_prompt_samples(): @@ -368,7 +335,7 @@ def is_finished(self, token_ids): ) sequence = FinishAfterTwo(model) result = sequence("Test", samples=3) - assert torch.equal(result, torch.tensor([[0, 1], [0, 1], [0, 1]])) + assert result == ["ab", "ab", "ab"] class FinishAfterOne(Sequence): def __init__(self, model): @@ -390,7 +357,7 @@ def is_finished(self, token_ids): ) sequence = FinishAfterOne(model) result = sequence("Test", samples=3) - assert torch.equal(result, torch.tensor([[0], [0], [0]])) + assert result == ["a", "a", "a"] def test_call_prompt_list_samples(): @@ -429,7 +396,14 @@ def is_finished(self, token_ids): sequence = FinishAfterThree(model) result = sequence(["Test1", "Test2", "Test3"], samples=3) - assert torch.equal( - result, - torch.tile(torch.tensor([[0, 1, -1], [0, 1, 2], [0, 1, -1]]), (3, 1, 1)), - ) + assert result == [ + "ab", + "abc", + "ab", + "ab", + "abc", + "ab", + "ab", + "abc", + "ab", + ] From a8429a3cac2a4f55d4d882f406e76486ba4628ad Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Tue, 3 Oct 2023 17:09:48 -0500 Subject: [PATCH 239/734] Enable KV caching Closes #186 --- outlines/models/transformers.py | 53 +++++++++++++++++++++------- outlines/text/generate/sequence.py | 29 +++++++++++---- tests/models/test_transformers.py | 9 ++--- tests/text/generate/test_sequence.py | 48 ++++++++++++++++--------- 4 files changed, 97 insertions(+), 42 deletions(-) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index af1d9a66..7c22f017 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -1,4 +1,3 @@ -import math from typing import TYPE_CHECKING, List, Optional, Tuple, Union import torch @@ -13,6 +12,9 @@ __all__ = ["transformers"] +KVCacheType = Tuple[Tuple[torch.DoubleTensor, torch.DoubleTensor], ...] + + def get_llama_tokenizer_types(): """Get all the Llama tokenizer types/classes that need work-arounds. 
@@ -67,15 +69,33 @@ def __init__( self.model = model self.tokenizer = tokenizer - def __call__( - self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor - ) -> torch.FloatTensor: - # `transformers` model accept `input_ids` of size at most equal to 2. We - # thus reshape the input array, call the model and reshape the output - # logits. - batch_shape = input_ids.shape[:-1] - num_tokens = input_ids.shape[-1] - input_ids = input_ids.reshape(math.prod(batch_shape), num_tokens) + def forward( + self, + input_ids: torch.LongTensor, + attention_mask: torch.LongTensor, + past_key_values: Optional[Tuple] = None, + ) -> Tuple[torch.FloatTensor, Optional[KVCacheType]]: + """Compute a forward pass through the transformer model. + + Parameters + ---------- + input_ids + The input token ids. Must be one or two dimensional. + attention_mask + The attention mask. Must be one or two dimensional. + past_key_values + A tuple of tuples containing the cached key and value tensors for each + attention head. + + Returns + ------- + The computed logits and the new cached key and value tensors. + + """ + assert 0 < input_ids.ndim < 3 + + if past_key_values: + input_ids = input_ids[..., -1].unsqueeze(-1) output = self.model( input_ids, @@ -83,12 +103,19 @@ def __call__( return_dict=True, output_attentions=False, output_hidden_states=False, + past_key_values=past_key_values, ) - next_token_logits = output.logits[:, -1, :] + next_token_logits = output.logits[..., -1, :] - next_token_logits = next_token_logits.reshape(batch_shape + (-1,)) + return next_token_logits, output.past_key_values - return next_token_logits + def __call__( + self, + input_ids: torch.LongTensor, + attention_mask: torch.LongTensor, + past_key_values: Optional[Tuple] = None, + ) -> torch.FloatTensor: + return self.forward(input_ids, attention_mask, past_key_values)[0] class TransformersTokenizer(Tokenizer): diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 60decfd8..bae937a6 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -1,13 +1,16 @@ import math -from typing import List, Optional, Tuple, Union +from typing import TYPE_CHECKING, List, Optional, Tuple, Union import torch +if TYPE_CHECKING: + from outlines.models.transformers import KVCacheType, Transformers + class Sequence: """Represents a sequence generation method.""" - def __init__(self, model, max_tokens: Optional[int] = None): + def __init__(self, model: "Transformers", max_tokens: Optional[int] = None): """Create a `Sequence` instance. Parameters @@ -48,7 +51,8 @@ def step( token_ids: torch.LongTensor, attention_mask: torch.LongTensor, samples: int = 1, - ) -> Tuple[torch.LongTensor, torch.FloatTensor]: + past_key_values: Optional["KVCacheType"] = None, + ) -> Tuple[torch.LongTensor, torch.FloatTensor, Optional["KVCacheType"]]: """Generate one or several tokens that complete the input sequence. The sampling step consists in using a model to generate next-token @@ -77,7 +81,9 @@ def step( probabilities. 
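To see how the new `forward` method threads the cache through incremental decoding, here is a minimal sketch. It is hypothetical glue code, not part of the patch: `model` stands for a `Transformers` instance as defined above, and greedy argmax selection is used purely to keep the loop short:

```python
import torch

token_ids = torch.tensor([[0, 1, 2]])  # (n_seqs, num_prompt_tokens)
attention_mask = torch.ones_like(token_ids)

past_key_values = None
for _ in range(5):
    # The first call feeds the whole prompt; on later calls `forward`
    # truncates the input to the last token and reuses the cached
    # keys/values for every earlier position.
    logits, past_key_values = model.forward(
        token_ids, attention_mask, past_key_values
    )
    next_token_ids = torch.argmax(logits, dim=-1, keepdim=True)
    token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1)
    attention_mask = torch.concatenate(
        [attention_mask, torch.ones_like(next_token_ids)], axis=-1
    )
```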
""" - probs = self.model(token_ids, attention_mask) + probs, past_key_values = self.model.forward( + token_ids, attention_mask, past_key_values + ) probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs) probs = torch.nn.functional.softmax(probs, dim=-1) @@ -86,7 +92,7 @@ def step( next_token_ids = vectorized_random_choice(rng, probs, samples).unsqueeze(-1) probs = torch.broadcast_to(probs, (samples,) + probs.shape) - return next_token_ids, probs + return next_token_ids, probs, past_key_values def expand_attention_mask( self, attention_mask: torch.LongTensor @@ -161,6 +167,7 @@ def __call__( attention_mask = attention_mask.reshape((batch_size, num_prompt_tokens)) is_finished = torch.zeros(batch_size, dtype=torch.bool, device=self.device) + unfinished_past_key_values = None while True: num_generated_tokens = token_ids.shape[-1] - num_prompt_tokens @@ -172,11 +179,12 @@ def __call__( # Draw samples only for the sequences that aren't finished unfinished_token_ids = token_ids[is_not_finished] unfinished_attention_mask = attention_mask[is_not_finished] - unfinished_next_token_ids, _ = self.step( + unfinished_next_token_ids, _, past_key_values = self.step( rng, num_prompt_tokens, unfinished_token_ids, unfinished_attention_mask, + past_key_values=unfinished_past_key_values, ) unfinished_next_token_ids = unfinished_next_token_ids.squeeze(0) @@ -191,10 +199,17 @@ def __call__( attention_mask = self.expand_attention_mask(attention_mask) - is_finished[is_not_finished] = self.is_finished( + local_is_finished = self.is_finished( token_ids[is_not_finished][:, num_prompt_tokens:] ).flatten() + is_finished[is_not_finished] = local_is_finished + + if past_key_values: + unfinished_past_key_values = tuple( + tuple(vv[~local_is_finished] for vv in v) for v in past_key_values + ) + result = self.model.tokenizer.decode(token_ids[:, num_prompt_tokens:]) result = self.postprocess_completions(result) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 02360c49..71389960 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -86,12 +86,9 @@ def test_model(): assert logits.ndim == 2 assert logits.shape[0] == 3 - input_ids = torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) - logits = model(input_ids, torch.ones_like(input_ids)) - assert logits.ndim == 3 - assert logits.shape[0] == 2 - assert logits.shape[1] == 2 - assert torch.equal(logits[0][0], logits[1][1]) + with pytest.raises(AssertionError): + input_ids = torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) + logits = model(input_ids, torch.ones_like(input_ids)) def test_tokenizer_eq_hash(): diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index f983b0c0..0844c002 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -16,7 +16,7 @@ def __init__(self, tokenizer, logits): self.iteration_idx = 0 self.device = "cpu" - def __call__(self, input_ids, *_): + def forward(self, input_ids, *_): import math batch_shape = input_ids.shape[:-1] @@ -26,7 +26,10 @@ def __call__(self, input_ids, *_): ) self.iteration_idx += 1 - return shaped_logits.reshape(batch_shape + vocab_shape) + return shaped_logits.reshape(batch_shape + vocab_shape), None + + def __call__(self, input_ids, *_): + return self.forward(input_ids)[0] class MockTokenizer(Tokenizer): @@ -110,7 +113,7 @@ def __init__(self, tokenizer, logits): self.logits = logits self.tokenizer = tokenizer - def __call__(self, input_ids, 
*_): + def forward(self, input_ids, *_): """Call the model. We first repeat the logits `num_sequences` times, and then @@ -122,7 +125,10 @@ def __call__(self, input_ids, *_): batch_shape = input_ids.shape[:-1] vocab_shape = (self.logits.shape[-1],) shaped_logits = torch.tile(self.logits, (math.prod(batch_shape), 1)) - return shaped_logits.reshape(batch_shape + vocab_shape) + return shaped_logits.reshape(batch_shape + vocab_shape), None + + def __call__(self, input_ids, *_): + return self.forward(input_ids)[0] def test_sequence_step(): @@ -135,7 +141,7 @@ def test_sequence_step(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) + token_ids, probs, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) assert torch.equal(token_ids, torch.tensor([[[1]]])) assert probs.shape == (1, 1, 4) @@ -150,7 +156,7 @@ def test_sequence_step_batch(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) + token_ids, probs, _ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) assert torch.equal(token_ids, torch.tensor([[[1], [2]]])) assert probs.shape == (1, 2, 4) @@ -164,7 +170,9 @@ def test_sequence_step_sample(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, probs = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) + token_ids, probs, _ = sequence.step( + rng, 2, input_ids, torch.ones((1, 2)), samples=3 + ) assert torch.equal(token_ids, torch.tensor([[[1]], [[2]], [[1]]])) assert probs.shape == (3, 1, 4) @@ -178,7 +186,9 @@ def test_sequence_step_sample_batch(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, probs = sequence.step(rng, 3, input_ids, torch.ones((2, 3)), samples=3) + token_ids, probs, _ = sequence.step( + rng, 3, input_ids, torch.ones((2, 3)), samples=3 + ) assert torch.equal( token_ids, torch.tensor( @@ -202,21 +212,27 @@ def test_sequence_step_loop(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2]]) - token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) - token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(0), torch.ones((1, 3))) + token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) + token_ids, probs, _ = sequence.step( + rng, 2, token_ids.squeeze(0), torch.ones((1, 3)) + ) assert torch.equal(token_ids, torch.tensor([[[2]]])) assert probs.shape == (1, 1, 4) input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) - token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(0), torch.ones((2, 3))) + token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) + token_ids, probs, _ = sequence.step( + rng, 2, token_ids.squeeze(0), torch.ones((2, 3)) + ) assert torch.equal(token_ids, torch.tensor([[[1], [2]]])) assert probs.shape == (1, 2, 4) # The number of samples becomes the batch size at the next iteration. 
input_ids = torch.tensor([[1, 2]]) - token_ids, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) - token_ids, probs = sequence.step(rng, 2, token_ids.squeeze(1), torch.ones((3, 3))) + token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) + token_ids, probs, _ = sequence.step( + rng, 2, token_ids.squeeze(1), torch.ones((3, 3)) + ) assert torch.equal(token_ids, torch.tensor([[[2], [1], [1]]])) assert probs.shape == (1, 3, 4) @@ -230,8 +246,8 @@ def test_sequence_step_loop_general(): sequence = Sequence(model) input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, _ = sequence.step(rng, 3, input_ids, torch.ones((1, 3)), samples=3) - result, _ = sequence.step(rng, 3, token_ids, torch.ones((3, 4))) + token_ids, *_ = sequence.step(rng, 3, input_ids, torch.ones((1, 3)), samples=3) + result, *_ = sequence.step(rng, 3, token_ids, torch.ones((3, 4))) assert result.shape == (1, 3, 2, 1) assert torch.equal( result.squeeze(0), From 13a0d6dee7bb6e17d47e959fe256365c98d75e58 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 12 Oct 2023 11:17:58 -0500 Subject: [PATCH 240/734] Allow configurable sampling steps --- outlines/text/generate/continuation.py | 26 +++++- outlines/text/generate/regex.py | 124 ++++++++++++++++++++++--- outlines/text/generate/sample.py | 111 ++++++++++++++++++++++ outlines/text/generate/sequence.py | 64 +++++-------- tests/text/generate/test_sequence.py | 34 +------ 5 files changed, 265 insertions(+), 94 deletions(-) create mode 100644 outlines/text/generate/sample.py diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index fa498143..8f70ccf9 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -1,9 +1,12 @@ -from typing import List, Optional, Union +from typing import TYPE_CHECKING, List, Optional, Union import torch from outlines.text.generate.sequence import Sequence +if TYPE_CHECKING: + from outlines.text.generate.sample import Sampler + class Continuation(Sequence): """Represents a completion generation model. @@ -18,9 +21,13 @@ class Continuation(Sequence): """ def __init__( - self, model, max_tokens: Optional[int] = None, stop: Union[str, List[str]] = [] + self, + model, + max_tokens: Optional[int] = None, + sampler: Optional["Sampler"] = None, + stop: Union[str, List[str]] = [], ): - super().__init__(model, max_tokens) + super().__init__(model, max_tokens, sampler) self.eos_token_id = torch.tensor( [self.model.tokenizer.eos_token_id], device=self.device ) @@ -89,7 +96,11 @@ def postprocess_completions(self, completions: List[str]) -> List[str]: def continuation( - model, max_tokens: Optional[int] = None, *, stop: Union[str, List[str]] = [] + model, + max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, + stop: Union[str, List[str]] = [], ): """Generate text sequences. @@ -99,9 +110,14 @@ def continuation( The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. stop A string or list of strings which, when generated, stops the generation for this sequence. 
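As a usage sketch for these keyword arguments (hypothetical prompt, with `model` assumed to come from `outlines.models.transformers`):

```python
import outlines.text.generate as generate

# Generation stops at the first newline or after 20 tokens, whichever
# comes first; the stop string is trimmed from the returned completion.
generator = generate.continuation(model, max_tokens=20, stop=["\n"])
answer = generator("Q: What is the capital of France?\nA:")
```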
""" - return Continuation(model, max_tokens, stop) + return Continuation(model, max_tokens, sampler, stop) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index ca75673a..7eedb2ab 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -1,6 +1,6 @@ import math from json import dumps -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union import interegular import torch @@ -10,17 +10,28 @@ from outlines.text.generate.continuation import Continuation from outlines.text.json_schema import build_regex_from_schema +if TYPE_CHECKING: + from outlines.text.generate.sample import Sampler + class Regex(Continuation): """Represents a regex-based generation model. `Regex` instances are constrained generation models that only generate - sequences that match an input regex. We assume that the sequence can be - terminated (but not necessarily) when the finite state machine corresponding - to the regex is in an accepting state. + sequences matching a given regex. >>> import outlines.text as text - >>> sequence = text.generate.regex(model, "(0|[1-9][0-9]+)")("Return an integer between 0 and 10") + >>> generator = text.generate.regex(model, "(0|[1-9][0-9]+)") + + Sequences can then be generated from a prompt as follows: + + >>> sequence_1 = generator("Return an integer between 0 and 10") + >>> sequence_2 = generator("Rate the movie "Hackers" on a scale from 0 to 10") + + .. note: + Reuse instances of these guided generators (e.g. `generator` from the + above example) whenever possible, because constructing them has more + overhead than generating token sequences from them. """ @@ -29,6 +40,7 @@ def __init__( model, regex_string: str, max_tokens: Optional[int] = None, + sampler: Optional["Sampler"] = None, allow_empty_tokens: bool = True, initial_state: Optional[int] = None, final_states: Optional[Set[int]] = None, @@ -39,10 +51,17 @@ def __init__( Parameters ---------- + model + The instance of the model used to generate next-token probabilities. regex_string The regex with which the token sampling process is guided/constrained. max_tokens The maximum number of tokens to be sampled. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. allow_empty_tokens Allow sampling of tokens corresponding to empty strings. states_to_token_maps @@ -52,7 +71,7 @@ def __init__( Pre-computed set of token ids for tokens that are empty strings. """ - super().__init__(model, max_tokens) + super().__init__(model, max_tokens, sampler) if ( states_to_token_maps is None @@ -201,10 +220,17 @@ def regex( model, regex_string: str, max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, allow_empty_tokens: bool = True, ): """Generate text sequences that match the input regex. + .. note: + Reuse instances of these guided generators whenever possible, + because constructing them has more overhead than generating + token sequences from them. See the docstring for `Regex`. + Parameters ---------- model @@ -213,46 +239,83 @@ def regex( The regular expression that generated expressions must match. max_tokens The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. 
allow_empty_tokens Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, regex_string, max_tokens, allow_empty_tokens) + return Regex(model, regex_string, max_tokens, sampler, allow_empty_tokens) -def integer(model, max_tokens: Optional[int] = None, allow_empty_tokens: bool = True): +def integer( + model, + max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, + allow_empty_tokens: bool = True, +): """Generate integers. The regex used to constrain the generation optionally matches plus or minus signs and forbids leading zeros (even if the `int` function in Python allows them). + .. note: + Reuse instances of these guided generators whenever possible, + because constructing them has more overhead than generating + token sequences from them. See the docstring for `Regex`. + Parameters ---------- model The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. allow_empty_tokens Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, r"[-+]?\d+", max_tokens, allow_empty_tokens) + return Regex(model, r"[-+]?\d+", max_tokens, sampler, allow_empty_tokens) -def float(model, max_tokens: Optional[int] = None, allow_empty_tokens: bool = True): +def float( + model, + max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, + allow_empty_tokens: bool = True, +): """Generate floating-point numbers. The regex used to constrain the generation optionally matches plus or minus signs, and forbids leading zeros (even if the `float` function in Python allows them). + .. note: + Reuse instances of these guided generators whenever possible, + because constructing them has more overhead than generating + token sequences from them. See the docstring for `Regex`. + Parameters ---------- model The language model to use to compute the next-token logits. max_tokens The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. allow_empty_tokens Allow sampling of tokens corresponding to empty strings. @@ -261,6 +324,7 @@ def float(model, max_tokens: Optional[int] = None, allow_empty_tokens: bool = Tr model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens, + sampler, allow_empty_tokens, ) @@ -269,21 +333,50 @@ def choice( model, choices: List[str], max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, allow_empty_tokens: bool = True, ): - """Choose between different sequences.""" + """Choose between different sequences. + + .. note: + Reuse instances of these guided generators whenever possible, + because constructing them has more overhead than generating + token sequences from them. See the docstring for `Regex`. + + Parameters + ---------- + model + The language model to use to compute the next-token logits. + max_tokens + The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. + allow_empty_tokens + Allow sampling of tokens corresponding to empty strings. 
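The reuse advice repeated in these docstrings matters because constructing a guided generator compiles a regex-derived index over the entire vocabulary, which dwarfs the per-call sampling cost. A sketch (again assuming a loaded `model`) that pays the construction cost only once:

```python
import outlines.text.generate as generate

# Build once: this is the expensive, index-compiling step.
generator = generate.choice(model, ["Positive", "Negative"])

# Call many times: each call only pays for sampling.
for review in ["Great movie.", "Terrible acting."]:
    print(generator(f"Review: {review}\nSentiment:"))
```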
+ """ regex_str = r"(" + r"|".join(choices) + r")" - return Regex(model, regex_str, max_tokens, allow_empty_tokens) + return Regex(model, regex_str, max_tokens, sampler, allow_empty_tokens) def json( model, schema: Union[str, BaseModel], max_tokens: Optional[int] = None, + *, + sampler: Optional["Sampler"] = None, allow_empty_tokens: bool = True, ): """Generate a text sequence that follows a JSON schema or Pydantic model. + .. note: + Reuse instances of these guided generators whenever possible, + because constructing them has more overhead than generating + token sequences from them. See the docstring for `Regex`. + Parameters --------- model @@ -292,6 +385,11 @@ def json( The JSON schema or Pydantic model that guides the generation. max_tokens The maximum number of tokens to generate. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. allow_empty_tokens Allow sampling of tokens corresponding to empty strings. @@ -301,4 +399,4 @@ def json( regex_str = build_regex_from_schema(schema) - return Regex(model, regex_str, max_tokens, allow_empty_tokens) + return Regex(model, regex_str, max_tokens, sampler, allow_empty_tokens) diff --git a/outlines/text/generate/sample.py b/outlines/text/generate/sample.py new file mode 100644 index 00000000..a91e1f09 --- /dev/null +++ b/outlines/text/generate/sample.py @@ -0,0 +1,111 @@ +from typing import Protocol + +import torch + + +class Sampler(Protocol): + def __call__( + self, logits: torch.DoubleTensor, samples: int, rng: torch.Generator + ) -> torch.DoubleTensor: + ... + + +def greedy(logits: torch.DoubleTensor, samples: int, *_) -> torch.DoubleTensor: + """Greedy Sampling algorithm. + + Greedy sampling consists in choosing the token with the largest + likelihood at every step. + + Parameters + ---------- + logits + A tensor of shape ``(n_seqs, vocab_size,)`` that represents the + probability distribution of the next token over the vocabulary. + samples + The number of sequences to produce. In this case, the top-`samples` + logit values are returned. + rng + A random number generator. + + Returns + ------- + The ids of the sampled tokens having shape ``(samples, n_seqs)``. + + """ + if samples == 1: + next_token_ids = torch.argmax(logits, dim=-1, keepdim=True).T + else: + next_token_ids = torch.topk( + logits, samples, dim=-1, largest=True, sorted=True + ).indices.T + + return next_token_ids + + +def multinomial( + logits: torch.DoubleTensor, samples: int, rng: torch.Generator +) -> torch.DoubleTensor: + """Multinomial sampling algorithm. + + Multinomial sampling consists in randomly sampling the next token assuming + its distribution is a Categorical distribution parametrized by the + next-token logits. + + Parameters + ---------- + logits + A tensor of shape ``(n_seqs, vocab_size,)`` that represents the + probability distribution of the next token over the vocabulary. + samples + The number of sequences to sample. + rng + A random number generator. + + Returns + ------- + The ids of the sampled tokens having shape ``(samples, n_seqs)``. 
+ + """ + probs = torch.nn.functional.softmax(logits, dim=-1) + # next_token_ids = torch.multinomial(probs, num_samples=samples, generator=rng) + next_token_ids = vectorized_random_choice(rng, probs, samples) + return next_token_ids + + +def vectorized_random_choice( + rng: torch.Generator, + p: torch.FloatTensor, + samples: int = 1, +): + """Vectorized implementation of `np.random.choice`. + + `np.random.choice` does not support arrays of probability. This implements + the equivalent of this function where the `p` argument can be a matrix. + + Note + ---- + `torch.searchsorted` may be more efficient, but it is not implemented for + every backend, for instance MPS. + + Parameters + ---------- + rng + Torch random number Generator instance + p + An array of probability of shape ``(num_probability_vectors, num_items)`` + that must sum to 1. + samples + The number of samples to take for each probability vector. + + Returns + ------- + An array of shape ``(num_samples, batch_size)`` + + """ + cumsum = torch.unsqueeze(p.cumsum(axis=-1), 0) + rand = torch.rand( + (samples,) + p.shape[:-1] + (1,), generator=rng, device=rng.device + ) + idx = (cumsum < rand).sum(axis=-1) + + return idx diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index bae937a6..acb97204 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -5,12 +5,18 @@ if TYPE_CHECKING: from outlines.models.transformers import KVCacheType, Transformers + from outlines.text.generate.sample import Sampler class Sequence: """Represents a sequence generation method.""" - def __init__(self, model: "Transformers", max_tokens: Optional[int] = None): + def __init__( + self, + model: "Transformers", + max_tokens: Optional[int] = None, + sampler: Optional["Sampler"] = None, + ): """Create a `Sequence` instance. Parameters @@ -20,6 +26,11 @@ def __init__(self, model: "Transformers", max_tokens: Optional[int] = None): max_tokens The maximum number of tokens that will be generated if no termination condition is met. + sampler + The function used to draw samples. Defaults to + `outlines.text.generate.sample.multinomial`. See + `outlines.text.generate.sample.Sampler` for the expected form of + such functions. """ self.model = model @@ -28,6 +39,10 @@ def __init__(self, model: "Transformers", max_tokens: Optional[int] = None): self.pad_token_id = torch.tensor( model.tokenizer.pad_token_id, device=model.device ) + if sampler is None: + from outlines.text.generate.sample import multinomial + + self.sampler = multinomial def create_proposal( self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor @@ -62,7 +77,7 @@ def step( Parameters ---------- rng - NumPy random number Generator instance. + Random number Generator instance. num_prompt_tokens The number of tokens in the prompt. token_ids @@ -85,11 +100,11 @@ def step( token_ids, attention_mask, past_key_values ) probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs) - probs = torch.nn.functional.softmax(probs, dim=-1) assert probs.shape[:-1] == token_ids.shape[:-1] - next_token_ids = vectorized_random_choice(rng, probs, samples).unsqueeze(-1) + next_token_ids = self.sampler(probs, samples, rng).unsqueeze(-1) + probs = torch.broadcast_to(probs, (samples,) + probs.shape) return next_token_ids, probs, past_key_values @@ -195,6 +210,8 @@ def __call__( ) next_token_ids[is_not_finished] = unfinished_next_token_ids + # TODO: Terminate if the sampled sequence is larger than the + # context size of the model? 
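Stepping back from the diff for a moment, the two built-in samplers defined in the new `sample` module behave quite differently; a self-contained sketch with made-up logits:

```python
import torch

from outlines.text.generate.sample import greedy, multinomial

rng = torch.Generator()
rng.manual_seed(0)

# A batch of two sequences over a four-token vocabulary.
logits = torch.tensor([[1.0, 2.0, 5.0, 0.0], [4.0, 0.0, 1.0, 3.0]])

# Deterministic: the argmax token of each row, shape (samples, n_seqs).
assert greedy(logits, 1).equal(torch.tensor([[2, 0]]))

# Stochastic: draws from softmax(logits), so values depend on the seed.
print(multinomial(logits, 1, rng))
```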
token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1) attention_mask = self.expand_attention_mask(attention_mask) @@ -217,42 +234,3 @@ def __call__( return result[0] return result - - -def vectorized_random_choice( - rng: torch.Generator, - p: torch.FloatTensor, - samples: int = 1, -): - """Vectorized implementation of `np.random.choice`. - - `np.random.choice` does not support arrays of probability. This implements - the equivalent of this function where the `p` argument can be a matrix. - - Note - ---- - `torch.searchsorted` may be more efficient, but it is not implemented for - every backend, for instance MPS. - - Parameters - ---------- - rng - Torch random number Generator instance - p - An array of probability of shape `(num_probability_vectors, num_items)` - that must sum to 1. - samples - The number of samples to take for each probability vector. - - Returns - ------- - An array of shape `(num_samples, batch_size)` - - """ - cumsum = torch.unsqueeze(p.cumsum(axis=-1), 0) - rand = torch.rand( - (samples,) + p.shape[:-1] + (1,), generator=rng, device=rng.device - ) - idx = (cumsum < rand).sum(axis=-1) - - return idx diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 0844c002..05980872 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -6,7 +6,7 @@ import torch from outlines.models.tokenizer import Tokenizer -from outlines.text.generate.sequence import Sequence, vectorized_random_choice +from outlines.text.generate.sequence import Sequence class MockModel: @@ -67,38 +67,6 @@ def __hash__(self): return id(self) -def test_vectorized_random_choice(): - rng = torch.Generator() - rng.manual_seed(0) - - probs = torch.tensor([[1, 0, 0, 0]]) - sample = vectorized_random_choice(rng, probs) - assert sample.shape == (1, 1) - assert torch.equal(sample, torch.zeros((1, 1))) - - probs = torch.tensor([[1, 0, 0, 0]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 1) - assert torch.equal(sample, torch.zeros((3, 1))) - - probs = torch.tile(torch.tensor([[1, 0, 0, 0]]), (2, 1)) - sample = vectorized_random_choice(rng, probs) - assert sample.shape == (1, 2) - assert torch.equal(sample, torch.zeros((1, 2))) - - probs = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 2) - assert torch.equal(sample, torch.tensor([[0, 1], [0, 1], [0, 1]])) - - probs = torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 2, 2) - assert torch.equal( - sample, torch.tensor([[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 3]]]) - ) - - def test_sequence_error(): with pytest.raises(NotImplementedError, match="must be implemented"): sequence = Sequence(MockModel(MockTokenizer(None), None)) From f6e33ddee63572d7571242298e579008886c12d3 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 12 Oct 2023 13:09:54 -0500 Subject: [PATCH 241/734] Fix tokens array dimensions in is_finished test The dimensions of the mock decoder output and the test input token ids did not match and were causing PyTorch to issue a warning/error in CI. 
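Before moving on, it is worth sketching what the sampler hook from the previous patch enables end to end. This is hypothetical usage, not part of any patch; note also that a custom sampler only actually takes effect once the missing `else` branch is added by a later patch in this series ("Fix custom sampler setup and add a test"):

```python
import outlines.models as models
import outlines.text.generate as generate
from outlines.text.generate.sample import greedy

# The tiny test model used elsewhere in this series; any causal LM
# accepted by `models.transformers` would do.
model = models.transformers("hf-internal-testing/tiny-random-GPTJForCausalLM")

# Swap the default multinomial sampler for deterministic greedy decoding.
generator = generate.continuation(model, max_tokens=10, sampler=greedy)
print(generator("2 + 2 ="))
```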
--- tests/text/generate/test_continuation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/text/generate/test_continuation.py b/tests/text/generate/test_continuation.py index 78a3e34a..944ae6df 100644 --- a/tests/text/generate/test_continuation.py +++ b/tests/text/generate/test_continuation.py @@ -53,7 +53,7 @@ def test_continuation_stop_is_finished(): model = continuation(model, stop=["\n"]) - token_ids = torch.tensor([[2, 3]]) + token_ids = torch.tensor([[2, 3], [2, 3]]) result = model.is_finished(token_ids) assert torch.equal(result, torch.tensor([True, False])) From fb465db7321769745de68a488d10894421a298ea Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 12 Oct 2023 14:52:51 -0500 Subject: [PATCH 242/734] Allow stop keyword in Regex --- outlines/text/generate/regex.py | 42 +++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 7eedb2ab..af87f9f5 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -40,7 +40,9 @@ def __init__( model, regex_string: str, max_tokens: Optional[int] = None, + *, sampler: Optional["Sampler"] = None, + stop: Union[str, List[str]] = [], allow_empty_tokens: bool = True, initial_state: Optional[int] = None, final_states: Optional[Set[int]] = None, @@ -62,6 +64,8 @@ def __init__( `outlines.text.generate.sample.multinomial`. See `outlines.text.generate.sample.Sampler` for the expected form of such functions. + stop + Optional stopping string(s). allow_empty_tokens Allow sampling of tokens corresponding to empty strings. states_to_token_maps @@ -71,7 +75,7 @@ def __init__( Pre-computed set of token ids for tokens that are empty strings. """ - super().__init__(model, max_tokens, sampler) + super().__init__(model, max_tokens, sampler, stop) if ( states_to_token_maps is None @@ -248,7 +252,13 @@ def regex( Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, regex_string, max_tokens, sampler, allow_empty_tokens) + return Regex( + model, + regex_string, + max_tokens, + sampler=sampler, + allow_empty_tokens=allow_empty_tokens, + ) def integer( @@ -284,7 +294,13 @@ def integer( Allow sampling of tokens corresponding to empty strings. """ - return Regex(model, r"[-+]?\d+", max_tokens, sampler, allow_empty_tokens) + return Regex( + model, + r"[-+]?\d+", + max_tokens, + sampler=sampler, + allow_empty_tokens=allow_empty_tokens, + ) def float( @@ -324,8 +340,8 @@ def float( model, r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", max_tokens, - sampler, - allow_empty_tokens, + sampler=sampler, + allow_empty_tokens=allow_empty_tokens, ) @@ -359,7 +375,13 @@ def choice( Allow sampling of tokens corresponding to empty strings. """ regex_str = r"(" + r"|".join(choices) + r")" - return Regex(model, regex_str, max_tokens, sampler, allow_empty_tokens) + return Regex( + model, + regex_str, + max_tokens, + sampler=sampler, + allow_empty_tokens=allow_empty_tokens, + ) def json( @@ -399,4 +421,10 @@ def json( regex_str = build_regex_from_schema(schema) - return Regex(model, regex_str, max_tokens, sampler, allow_empty_tokens) + return Regex( + model, + regex_str, + max_tokens, + sampler=sampler, + allow_empty_tokens=allow_empty_tokens, + ) From 70830032677ec252c1772244e2e03937314a3049 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 12 Oct 2023 14:53:27 -0500 Subject: [PATCH 243/734] Fix custom sampler setup and add a test --- outlines/text/generate/sequence.py | 2 ++ .../generate/test_integration_transfomers.py | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index acb97204..1630113a 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -43,6 +43,8 @@ def __init__( from outlines.text.generate.sample import multinomial self.sampler = multinomial + else: + self.sampler = sampler def create_proposal( self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index ef54dcbd..d54ca884 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -268,3 +268,33 @@ def test_transformers_reduced_vocabulary_caching(): vocab2 = reduced_vocabulary(tokenizer2) assert vocab2 is vocab + + +def test_custom_sampler(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + + model = models.transformers(model_name) + + seen = False + target_token_ids = model.tokenizer.encode(["c"])[0] + + def biased_sampler( + logits: torch.DoubleTensor, samples: int, *_ + ) -> torch.DoubleTensor: + nonlocal seen + + if not seen: + seen = True + return target_token_ids + else: + return torch.tensor([[model.tokenizer.eos_token_id]]) + + generator = generate.choice(model, ["a", "b", "c"], sampler=biased_sampler) + sequence = generator( + """What is 1+1? + a. 3 + b. 4 + c. 2""" + ) + + assert sequence == "c" From 80513fa075d9f4af12451f4bce1eda6eee55f196 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Wed, 4 Oct 2023 20:03:40 -0500 Subject: [PATCH 244/734] Reference the correct parser object in the example --- examples/parsing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/parsing.py b/examples/parsing.py index f1b71cba..dbee9ab9 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -60,7 +60,7 @@ def __call__( lex_state = self.parser_state.lexer.state lex_state.text = self.token_seq - parser.parse_from_state(self.parser_state, is_end=False) + self.parser.parse_from_state(self.parser_state, is_end=False) print(f'parsed:"{self.token_seq}"') @@ -80,7 +80,7 @@ def __call__( ls.text = self.token_seq + tokenizer.convert_tokens_to_string([test_token]) try: - parser.parse_from_state(ps, is_end=False) + self.parser.parse_from_state(ps, is_end=False) mask[0][token_id] = 0 except (EOFError, UnexpectedToken, UnexpectedCharacters, DedentError): pass From 41f46e05d495b01e17dfd612bc6760a6e1024e82 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Wed, 4 Oct 2023 20:05:40 -0500 Subject: [PATCH 245/734] Correctly check partial REDUCE states --- outlines/text/parsing.py | 47 +++++++++++++++++++++++++------------- tests/text/test_parsing.py | 3 +++ 2 files changed, 34 insertions(+), 16 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 77d79821..68dbe828 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -8,12 +8,7 @@ from interegular.patterns import Unsupported from lark import Lark, Token from lark.common import LexerConf, ParserConf -from lark.exceptions import ( - LexError, - UnexpectedCharacters, - UnexpectedInput, - UnexpectedToken, -) +from lark.exceptions import LexError, UnexpectedInput from lark.indenter import Indenter from lark.lexer import ( BasicLexer, @@ -21,6 +16,8 @@ LexerState, LexerThread, Scanner, + UnexpectedCharacters, + UnexpectedToken, _create_unless, ) from lark.parser_frontends import ( @@ -363,17 +360,36 @@ def __init__( def feed_token(self, token, is_end=False): if token.type == "partial": + # If none of the potential terminals can transition, we need to know now current_state = self.state_stack[-1] - current_transitions = self.parse_conf.states[current_state] current_lexer = get_contextual_lexer(self.lexer).lexers[current_state] - if not any( - terminal_info.terminal_name in current_transitions - or terminal_info.terminal_name in current_lexer.ignore_types - for terminal_info in token.value.terminals_and_info - ): - # If none of the terminals can transition, we should - # know sooner than later + # We have to feed the token and determine whether or not at least + # one terminal is consistent with the stack; otherwise, we'll miss + # invalid REDUCE cases. + # TODO: We should track separate parses conditional on possible + # token/symbol types, then we can coherently reuse the following + # results instead of recomputing it later. + can_transition = False + for terminal_info in token.value.terminals_and_info: + if terminal_info.terminal_name not in current_lexer.ignore_types: + test_token = Token.new_borrow_pos( + terminal_info.terminal_name, "", token + ) + + stack = copy(self.state_stack) + try: + self.feed_token_no_stack(test_token, is_end=is_end) + can_transition = True + break + except UnexpectedToken: + continue + finally: + self.state_stack = stack + else: + can_transition = True + + if not can_transition: expected = { s for s in self.parse_conf.states[current_state].keys() @@ -382,9 +398,8 @@ def feed_token(self, token, is_end=False): raise UnexpectedToken( token, expected, state=self, interactive_parser=None ) - return - if self.use_value_stack: + elif self.use_value_stack: super().feed_token(token, is_end=is_end) else: self.feed_token_no_stack(token, is_end=is_end) diff --git a/tests/text/test_parsing.py b/tests/text/test_parsing.py index 20b96e7d..8c7dff92 100644 --- a/tests/text/test_parsing.py +++ b/tests/text/test_parsing.py @@ -117,6 +117,9 @@ def test_partial_parsing(): with pytest.raises(UnexpectedToken): lp.parse("def \n") + with pytest.raises(UnexpectedToken): + lp.parse("def hot no") + lp = PartialLark.open_from_package( "tests", "partial_python.lark", From f84763226a606780b313a53e4194e75dc3b0a253 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 19 Oct 2023 16:26:47 -0500 Subject: [PATCH 246/734] Add missing sample tests --- tests/text/generate/test_sample.py | 72 ++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 tests/text/generate/test_sample.py diff --git a/tests/text/generate/test_sample.py b/tests/text/generate/test_sample.py new file mode 100644 index 00000000..884bb4a3 --- /dev/null +++ b/tests/text/generate/test_sample.py @@ -0,0 +1,72 @@ +import math + +import torch + +from outlines.text.generate.sample import greedy, multinomial, vectorized_random_choice + + +def test_greedy(): + logits = torch.tensor([[1.0, 2.0, 5.0]]) + next_token_ids = greedy(logits, samples=1) + assert next_token_ids.equal(torch.tensor([[2]])) + + next_token_ids = greedy(logits, samples=2) + assert next_token_ids.equal(torch.tensor([[2], [1]])) + + logits = torch.tensor([[10.0, 0.0, 3.0], [-math.inf, 2.0, 5.0]]) + next_token_ids = greedy(logits, samples=1) + assert next_token_ids.equal(torch.tensor([[0, 2]])) + + next_token_ids = greedy(logits, samples=2) + assert next_token_ids.equal(torch.tensor([[0, 2], [2, 1]])) + + +def test_multinomial(): + rng = torch.Generator() + rng.manual_seed(239) + + logits = torch.tensor([[1.0, 4.0, 5.0]]) + next_token_ids = multinomial(logits, 1, rng) + assert next_token_ids.equal(torch.tensor([[2]])) + + next_token_ids = multinomial(logits, 2, rng) + assert next_token_ids.equal(torch.tensor([[2], [1]])) + + logits = torch.tensor([[10.0, 0.0, 9.0], [-math.inf, 4.0, 5.0]]) + next_token_ids = multinomial(logits, 1, rng) + assert next_token_ids.equal(torch.tensor([[0, 2]])) + + next_token_ids = multinomial(logits, 2, rng) + assert next_token_ids.equal(torch.tensor([[0, 1], [2, 2]])) + + +def test_vectorized_random_choice(): + rng = torch.Generator() + rng.manual_seed(0) + + probs = torch.tensor([[1, 0, 0, 0]]) + sample = vectorized_random_choice(rng, probs) + assert sample.shape == (1, 1) + assert torch.equal(sample, torch.zeros((1, 1))) + + probs = torch.tensor([[1, 0, 0, 0]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 1) + assert torch.equal(sample, torch.zeros((3, 1))) + + probs = torch.tile(torch.tensor([[1, 0, 0, 0]]), (2, 1)) + sample = vectorized_random_choice(rng, probs) + assert sample.shape == (1, 2) + assert torch.equal(sample, torch.zeros((1, 2))) + + probs = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 2) + assert torch.equal(sample, torch.tensor([[0, 1], [0, 1], [0, 1]])) + + probs = torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) + sample = vectorized_random_choice(rng, probs, samples=3) + assert sample.shape == (3, 2, 2) + assert torch.equal( + sample, torch.tensor([[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 3]]]) + ) From 07c47dbf409fa7518affccf8886f5cb87782304b Mon Sep 17 00:00:00 2001 From: AL-377 <535338194@qq.com> Date: Tue, 10 Oct 2023 17:51:47 +0800 Subject: [PATCH 247/734] Support recursive arrays in JSON when an item is an array --- outlines/text/json_schema.py | 22 +++++++++--- tests/text/test_json_schema.py | 65 ++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 4 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index ddccde5c..b9d6a84c 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -125,6 +125,18 @@ def build_schedule_from_schema(schema: str): return reduced_schedule +def 
expand_item_json_schema(expanded_property: Dict, resolver: Callable[[str], Dict]): + """Recursively expand "$ref"s in "item"s.""" + if "items" not in expanded_property.keys(): + return + elif "$ref" in expanded_property["items"]: + expanded_property["items"] = expand_json_schema( + resolver(expanded_property["items"]["$ref"]), resolver + ) + else: + expand_item_json_schema(expanded_property["items"], resolver) + + def expand_json_schema( raw_schema: Dict, resolver: Callable[[str], Dict], @@ -166,12 +178,14 @@ def expand_json_schema( ) elif "type" in value and value["type"] == "array": # if item is a list expanded_properties[name] = value - if "$ref" in value["items"]: - expanded_properties[name]["items"] = expand_json_schema( - resolver(value["items"]["$ref"]), resolver - ) + + if "$ref" in value["items"] or ( + "type" in value["items"] and value["items"]["type"] == "array" + ): + expand_item_json_schema(expanded_properties[name], resolver) else: expanded_properties[name]["items"] = value["items"] + else: expanded_properties[name] = value diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 19c01c3e..239effb6 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -159,6 +159,71 @@ class Spam(BaseModel): ] +def test_pydantic_recursive_list_object(): + class ItemModel(BaseModel): + name: str + + class ArrayModel1(BaseModel): + item_model_lists: List[List[ItemModel]] + + class ArrayModel2(BaseModel): + nums: List[List[int]] + + class ArrayModel3(BaseModel): + array_model_lists: List[List[ArrayModel1]] + + schema = json.dumps(ArrayModel1.model_json_schema()) + schedule = build_schedule_from_schema(schema) + array_model_1_schema = { + "items": { + "items": { + "title": "ItemModel", + "type": "object", + "properties": {"name": {"title": "Name", "type": "string"}}, + }, + "type": "array", + }, + "title": "Item Model Lists", + "type": "array", + } + assert schedule == [ + '\\{[\\n ]*"item_model_lists"[\\n ]*:[\\n ]*', + array_model_1_schema, + "[\\n ]*\\}", + ] + + schema = json.dumps(ArrayModel2.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"nums"[\\n ]*:[\\n ]*', + { + "items": {"items": {"type": "integer"}, "type": "array"}, + "title": "Nums", + "type": "array", + }, + "[\\n ]*\\}", + ] + + schema = json.dumps(ArrayModel3.model_json_schema()) + schedule = build_schedule_from_schema(schema) + assert schedule == [ + '\\{[\\n ]*"array_model_lists"[\\n ]*:[\\n ]*', + { + "items": { + "items": { + "title": "ArrayModel1", + "type": "object", + "properties": {"item_model_lists": array_model_1_schema}, + }, + "type": "array", + }, + "title": "Array Model Lists", + "type": "array", + }, + "[\\n ]*\\}", + ] + + def test_pydantic_union(): """Schemas with Union types.""" From a51d60db0bba7a5fe3c706b8a4bd0c1fb0df0f29 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 19 Oct 2023 17:59:10 -0500 Subject: [PATCH 248/734] Refactor walk_fsm and introduce a pure Python implementation --- outlines/text/fsm.py | 60 +++++++++++++++------------ outlines/text/parsing.py | 2 +- tests/text/test_fsm.py | 88 +++++++++++++++++++++++++++++----------- 3 files changed, 100 insertions(+), 50 deletions(-) diff --git a/outlines/text/fsm.py b/outlines/text/fsm.py index d531a4e6..27b5af88 100644 --- a/outlines/text/fsm.py +++ b/outlines/text/fsm.py @@ -246,25 +246,18 @@ def _walk_fsm( start_state: int, full_match: bool = True, ) -> List[int]: - state = fsm_initial + state = start_state accepted_states: List[int] = numba.typed.List.empty_list(numba.int64) last_final_idx: int = numba.uint64(0) for i, symbol in enumerate(input_string): trans_key = alphabet_symbol_mapping.get(symbol, alphabet_anything_value) - if state == fsm_initial: - new_state = fsm_transitions.get((start_state, trans_key)) - else: - new_state = fsm_transitions.get((state, trans_key)) + new_state = fsm_transitions.get((state, trans_key)) if new_state is None: - if full_match: - if state in fsm_finals: - break - elif last_final_idx > 0: - accepted_states = accepted_states[:last_final_idx] - break + if not full_match and last_final_idx > 0: + return accepted_states[:last_final_idx] return numba.typed.List.empty_list(numba.int64) @@ -275,29 +268,44 @@ def _walk_fsm( accepted_states.append(_nonoptional(state)) - terminated = state in fsm_finals - if not terminated and state == fsm_initial: - return numba.typed.List.empty_list(numba.int64) - return accepted_states def walk_fsm( - fsm_info, + fsm: BetterFSM, input_string: str, start_state: int, full_match: bool = True, ) -> List[int]: - return _walk_fsm( - fsm_info.transitions, - fsm_info.alphabet_symbol_mapping, - fsm_info.alphabet_anything_value, - fsm_info.initial, - fsm_info.finals, - input_string, - start_state, - full_match=full_match, - ) + fsm_finals = fsm.finals + + state = start_state + accepted_states: List[int] = [] + last_final_idx: int = 0 + + alphabet_symbol_mapping = fsm.alphabet._symbol_mapping + alphabet_anything_value = fsm.alphabet.anything_value + fsm_transitions = fsm.flat_transition_map + + for i, symbol in enumerate(input_string): + trans_key = alphabet_symbol_mapping.get(symbol, alphabet_anything_value) + + new_state = fsm_transitions.get((state, trans_key)) + + if new_state is None: + if not full_match and last_final_idx > 0: + return accepted_states[:last_final_idx] + + return [] + + state = new_state + + if state in fsm_finals: + last_final_idx = i + 1 + + accepted_states.append(state) + + return accepted_states # TODO FIXME: Can't cache this due to https://github.com/numba/numba/issues/9177 diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 68dbe828..6ec9ef2d 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -573,7 +573,7 @@ def match(self, text, pos, last_fsm_state_seq: Optional[Tuple[int, ...]] = None) text_part = text[start_pos:] state_seq = walk_fsm( - self.fsm.fsm_info, + self.fsm, text_part, start_state, full_match=self.match_whole, diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py index 616c1359..b19562c4 100644 --- a/tests/text/test_fsm.py +++ b/tests/text/test_fsm.py @@ -4,6 +4,7 @@ from outlines.models.transformers import TransformersTokenizer from outlines.text.fsm import ( + _walk_fsm, create_fsm_index, create_fsm_index_end_to_end, create_fsm_index_tokenizer, @@ -15,20 +16,61 @@ ) -def test_walk_fsm(): +def walk_fsm_numba( + fsm, + 
input_string: str, + start_state: int, + full_match: bool = True, +): + return _walk_fsm( + fsm.fsm_info.transitions, + fsm.fsm_info.alphabet_symbol_mapping, + fsm.fsm_info.alphabet_anything_value, + fsm.fsm_info.initial, + fsm.fsm_info.finals, + input_string, + start_state, + full_match=full_match, + ) + + +@pytest.mark.parametrize( + "function", + [ + walk_fsm, + walk_fsm_numba, + ], +) +def test_walk_fsm(function): regex_pattern = interegular.parse_pattern("0|[1-9][2-9]*") regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + res = tuple(function(regex_fsm, "0", regex_fsm.initial, full_match=True)) + assert res == (1,) + + res = tuple(function(regex_fsm, "00", regex_fsm.initial, full_match=False)) + assert res == (1,) + + res = tuple(function(regex_fsm, "!", regex_fsm.initial, full_match=True)) + assert res == tuple() + + res = tuple(function(regex_fsm, "00", regex_fsm.initial, full_match=True)) + assert res == tuple() + # This should fail, because state `1` reads nothing - res = tuple(walk_fsm(regex_fsm.fsm_info, "0", numba.int64(1), full_match=True)) + res = tuple(function(regex_fsm, "0", 1, full_match=True)) assert res == tuple() pattern = interegular.parse_pattern(r"(?:[^\W\d]\w*|[\t \x0c]+)") fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) - res = tuple(walk_fsm(fsm.fsm_info, "x ", fsm.fsm_info.initial, full_match=False)) + res = tuple(function(fsm, "x ", fsm.initial, full_match=False)) assert res == (2,) + start_state = list(fsm.finals)[0] + res = tuple(function(fsm, "!", start_state, full_match=False)) + assert res == tuple() + def test_partial_match(): name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") @@ -62,11 +104,11 @@ def to_python(res): assert res == {(0, (1, 2))} res = to_python(find_partial_matches(def_fsm, "f")) assert res == {(0, (2, 3))} - res = to_python(find_partial_matches(def_fsm, "ef foo")) + res = to_python(find_partial_matches(def_fsm, "ef foo", full_match=False)) assert res == {(1, (1, 2, 3))} # This string has a `DEF` token in it, but should ultimately not lex one - res = to_python(find_partial_matches(def_fsm, "defb")) + res = to_python(find_partial_matches(def_fsm, "defb", full_match=False)) assert res == {(2, (0, 1, 2, 3))} # `NAME` can have multiple start states for this input @@ -198,14 +240,14 @@ def test_get_sub_fsms_from_seq(): assert fsm.accepts("+=") assert fsm.accepts("+") - state_seq = walk_fsm(fsm.fsm_info, "def", fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "def", fsm.initial) state_seq.insert(0, fsm.fsm_info.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(0, False, True), (2, True, True)] # Make sure the old-to-new state map is correct - def_state_seq = walk_fsm(def_fsm.fsm_info, "def", fsm.fsm_info.initial) + def_state_seq = walk_fsm(def_fsm, "def", fsm.initial) def_state_seq.insert(0, fsm.fsm_info.initial) def_old_to_new_states = fsms_to_trans_finals[0][2] @@ -214,14 +256,14 @@ def test_get_sub_fsms_from_seq(): for old_state, new_state in zip(def_state_seq, state_seq) ) - state_seq = walk_fsm(fsm.fsm_info, "ef", fsm.fsm_info.initial) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "ef", fsm.initial) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(2, True, True)] - name_state_seq = walk_fsm(name_fsm.fsm_info, "ef", fsm.fsm_info.initial) - name_state_seq.insert(0, fsm.fsm_info.initial) + name_state_seq = walk_fsm(name_fsm, "ef", fsm.initial) + name_state_seq.insert(0, 
fsm.initial) name_old_to_new_states = fsms_to_trans_finals[2][2] assert all( @@ -229,14 +271,14 @@ def test_get_sub_fsms_from_seq(): for old_state, new_state in zip(name_state_seq, state_seq) ) - state_seq = walk_fsm(fsm.fsm_info, "match", fsm.fsm_info.initial) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "match", fsm.initial) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(1, False, True), (2, True, True)] - match_state_seq = walk_fsm(match_fsm.fsm_info, "match", fsm.fsm_info.initial) - match_state_seq.insert(0, fsm.fsm_info.initial) + match_state_seq = walk_fsm(match_fsm, "match", fsm.initial) + match_state_seq.insert(0, fsm.initial) match_old_to_new_states = fsms_to_trans_finals[1][2] assert all( @@ -244,26 +286,26 @@ def test_get_sub_fsms_from_seq(): for old_state, new_state in zip(match_state_seq, state_seq) ) - state_seq = walk_fsm(fsm.fsm_info, "defa", fsm.fsm_info.initial) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "defa", fsm.initial) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(2, True, True)] - state_seq = walk_fsm(fsm.fsm_info, "de", fsm.fsm_info.initial) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "de", fsm.initial) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(0, True, False), (2, True, True)] - state_seq = walk_fsm(fsm.fsm_info, "+", fsm.fsm_info.initial, False) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "+", fsm.initial, False) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(3, True, False), (4, False, True)] - state_seq = walk_fsm(fsm.fsm_info, "+=", fsm.fsm_info.initial) - state_seq.insert(0, fsm.fsm_info.initial) + state_seq = walk_fsm(fsm, "+=", fsm.initial) + state_seq.insert(0, fsm.initial) res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(3, False, True)] From 3cf3f960cf3319b9be74335763a8978418482c49 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 19 Oct 2023 19:33:08 -0500 Subject: [PATCH 249/734] Disable eager Numba FSM info construction step in parser --- outlines/text/parsing.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/outlines/text/parsing.py b/outlines/text/parsing.py index 6ec9ef2d..fc34fff7 100644 --- a/outlines/text/parsing.py +++ b/outlines/text/parsing.py @@ -538,9 +538,6 @@ def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): self.fsm, self.fsms_to_trans_finals = fsm_union(fsms) - # Eagerly construct the `FSMInfo` object - _ = self.fsm.fsm_info - def get_terminals_info( self, fsm_state_seq ) -> Tuple[Tuple[PartialTerminalInfo, ...], Tuple[PartialTerminalInfo, ...]]: From 95a9d97c5101579fc41b0954e1beab2663027a9a Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Thu, 19 Oct 2023 20:18:39 -0500 Subject: [PATCH 250/734] Add another missing walk_fsm condition The full-match option handling was not correct for scanned/walked strings with valid transitions but not ending in a final state. 
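For illustration, here is a minimal sketch of the walk with the new
condition; `transitions` and `finals` below are stand-ins for the fields
used in `outlines/text/fsm.py`, not its actual API:

    def walk_fsm_sketch(transitions, finals, start_state, symbols, full_match=True):
        state = start_state
        accepted = []
        last_final_idx = 0
        for i, symbol in enumerate(symbols):
            state = transitions.get((state, symbol))
            if state is None:
                # Dead end: keep the longest prefix ending in a final state,
                # unless a full match was required.
                return accepted[:last_final_idx] if not full_match else []
            if state in finals:
                last_final_idx = i + 1
            accepted.append(state)
        # The condition this patch adds: every symbol was consumed, but the
        # walk did not end in a final state.
        if full_match and last_final_idx != len(symbols):
            return []
        return accepted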
--- outlines/text/fsm.py | 9 ++++++++- tests/text/test_fsm.py | 26 +++++++++++++++++++------- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/outlines/text/fsm.py b/outlines/text/fsm.py index 27b5af88..f78f33e4 100644 --- a/outlines/text/fsm.py +++ b/outlines/text/fsm.py @@ -268,6 +268,9 @@ def _walk_fsm( accepted_states.append(_nonoptional(state)) + if full_match and last_final_idx - 1 != i: + return numba.typed.List.empty_list(numba.int64) + return accepted_states @@ -305,6 +308,9 @@ def walk_fsm( accepted_states.append(state) + if full_match and last_final_idx - 1 != i: + return [] + return accepted_states @@ -376,7 +382,7 @@ def process_token_string( res = set() vocab_string_len = len(token) - for end_idx, state_seq in find_partial_matches(fsm_info, token): + for end_idx, state_seq in find_partial_matches(fsm_info, token, full_match=False): if end_idx is not None and end_idx < vocab_string_len - 1: continue @@ -603,6 +609,7 @@ def state_scan_tokens( fsm_finals, token, start_state, + False, ) if state_seq is not None and len(state_seq) < len(token): diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py index b19562c4..ce4a3647 100644 --- a/tests/text/test_fsm.py +++ b/tests/text/test_fsm.py @@ -61,6 +61,18 @@ def test_walk_fsm(function): res = tuple(function(regex_fsm, "0", 1, full_match=True)) assert res == tuple() + regex_pattern = interegular.parse_pattern("0|[1-9][2-9]+") + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + + res = tuple(function(regex_fsm, "1", regex_fsm.initial, full_match=True)) + assert res == tuple() + + res = tuple(function(regex_fsm, "1", regex_fsm.initial, full_match=False)) + assert res == (2,) + + res = tuple(function(regex_fsm, "12", regex_fsm.initial, full_match=True)) + assert res == (2, 3) + pattern = interegular.parse_pattern(r"(?:[^\W\d]\w*|[\t \x0c]+)") fsm, _ = make_deterministic_fsm(pattern.to_fsm().reduce()) @@ -90,19 +102,19 @@ def to_python(res): res = to_python(find_partial_matches(def_fsm, "def")) assert res == {(2, (0, 1, 2, 3))} - res = to_python(find_partial_matches(def_fsm, "de")) + res = to_python(find_partial_matches(def_fsm, "de", full_match=False)) assert res == {(1, (0, 1, 2))} - res = to_python(find_partial_matches(def_fsm, "d")) + res = to_python(find_partial_matches(def_fsm, "d", full_match=False)) assert res == {(0, (0, 1))} res = to_python(find_partial_matches(def_fsm, "")) assert res == set() res = to_python(find_partial_matches(def_fsm, "df")) assert res == set() - res = to_python(find_partial_matches(def_fsm, "ef")) + res = to_python(find_partial_matches(def_fsm, "ef", full_match=False)) assert res == {(1, (1, 2, 3))} - res = to_python(find_partial_matches(def_fsm, "e")) + res = to_python(find_partial_matches(def_fsm, "e", full_match=False)) assert res == {(0, (1, 2))} - res = to_python(find_partial_matches(def_fsm, "f")) + res = to_python(find_partial_matches(def_fsm, "f", full_match=False)) assert res == {(0, (2, 3))} res = to_python(find_partial_matches(def_fsm, "ef foo", full_match=False)) assert res == {(1, (1, 2, 3))} @@ -112,7 +124,7 @@ def to_python(res): assert res == {(2, (0, 1, 2, 3))} # `NAME` can have multiple start states for this input - res = to_python(find_partial_matches(name_fsm, "d")) + res = to_python(find_partial_matches(name_fsm, "d", full_match=False)) assert res == {(0, (0, 1)), (0, (1, 1))} # Not this case res = to_python(find_partial_matches(name_fsm, "1d")) @@ -133,7 +145,7 @@ def to_python(res): float_fsm = float_fsm.fsm_info - res = 
to_python(find_partial_matches(float_fsm, ".")) + res = to_python(find_partial_matches(float_fsm, ".", full_match=False)) assert res == {(0, (3, 5)), (0, (4, 5)), (0, (0, 2))} joins_fsm, _ = make_deterministic_fsm( From c3421aa3467088a15d0e7fcb858a2cd72999cf92 Mon Sep 17 00:00:00 2001 From: "Brandon T. Willard" Date: Sun, 29 Oct 2023 15:37:48 -0500 Subject: [PATCH 251/734] Make sure that indices are available on all relevant devices --- outlines/text/generate/sequence.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 1630113a..9c277200 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -226,7 +226,8 @@ def __call__( if past_key_values: unfinished_past_key_values = tuple( - tuple(vv[~local_is_finished] for vv in v) for v in past_key_values + tuple(vv[~local_is_finished.to(vv.device)] for vv in v) + for v in past_key_values ) result = self.model.tokenizer.decode(token_ids[:, num_prompt_tokens:]) From d2c61f2f382934991273f81d695a71446ebeddff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 26 Oct 2023 09:32:37 +0200 Subject: [PATCH 252/734] Remove vector storage and retrieval --- outlines/vectors/__init__.py | 62 --------------------------------- outlines/vectors/retrieval.py | 27 -------------- tests/vectors/test_retrieval.py | 24 ------------- tests/vectors/test_vectors.py | 21 ----------- 4 files changed, 134 deletions(-) delete mode 100644 outlines/vectors/__init__.py delete mode 100644 outlines/vectors/retrieval.py delete mode 100644 tests/vectors/test_retrieval.py delete mode 100644 tests/vectors/test_vectors.py diff --git a/outlines/vectors/__init__.py b/outlines/vectors/__init__.py deleted file mode 100644 index 2e208d19..00000000 --- a/outlines/vectors/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -from collections import deque -from typing import Callable, Dict, List, Tuple - -import numpy as np - -from outlines.vectors.retrieval import cosine_similarity - - -class VectorStore: - """Represents a vector store. - - Vector stores are used to store embeddings and, given a query, retrieve the - closest entries to this query. This class provides a layer of abstraction - on top of practical implementations and integrations. - - Attributes - ---------- - embedding_model - A function which returns an `numpy.ndarray` of floats when passed a string. - retrieval_fn - A function which returns the nearest vector to a given query vector in a list - of vectors. Defaults to cosine similarity. - storage - A list of tuples where text and the corresponding embeddings are stored. - - """ - - def __init__( - self, embedding_model: Callable, retrieval_fn: Callable = cosine_similarity - ): - self.embedding_model = embedding_model - self.retrieval_fn = retrieval_fn - self.storage: List[Tuple[np.ndarray, str]] = [] - - def query(self, query: str, k: int = 1) -> List[str]: - """Find the store entries that are closest to the query. - - Parameters - ---------- - query - A string for which we want to find the closest matches in the store. - k - The number of closest matches to return. - - """ - query_embedding = self.embedding_model(query) - top_k_indices = self.retrieval_fn( - [elem[0] for elem in self.storage], query_embedding, k - ) - return [self.storage[i][1] for i in top_k_indices] - - def insert(self, query: str) -> None: - """Insert the query and its embedding vector in the store. 
- - Parameters - ---------- - query - The string to insert in the store. - - """ - query_embedding = self.embedding_model(query) - self.storage.append((query_embedding, query)) diff --git a/outlines/vectors/retrieval.py b/outlines/vectors/retrieval.py deleted file mode 100644 index ac060b61..00000000 --- a/outlines/vectors/retrieval.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import List, Sequence - -import numpy as np -import scipy.spatial as spatial - - -def cosine_similarity( - vectors: Sequence[np.ndarray], query: np.ndarray, k: int = 1 -) -> List[np.ndarray]: - """Use cosine similarity to retrieve the top `k` closest vectors to the query. - - Be mindful that Scipy computes the cosine distance, defined as one minus the cosine - similarity. - - Parameters - ---------- - vectors - A sequence that contains the vectors to search from. - query - The vector whose nearest neighbour we want to find. - k - The number of closest matches to return. - - """ - similarities = [spatial.distance.cosine(v, query) for v in vectors] - top_k_indices = np.argsort(similarities)[:k] - return top_k_indices diff --git a/tests/vectors/test_retrieval.py b/tests/vectors/test_retrieval.py deleted file mode 100644 index 8ffc59e3..00000000 --- a/tests/vectors/test_retrieval.py +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_equal -from scipy.spatial.transform import Rotation as R - -from outlines.vectors.retrieval import cosine_similarity - - -def test_cosine_similarity(): - query = np.ones(3) - vectors = [ - R.from_rotvec([0, 0, np.pi / 3]).apply(query), - query, - R.from_rotvec([0, 0, np.pi / 4]).apply(query), - R.from_rotvec([0, 0, np.pi / 5]).apply(query), - R.from_rotvec([0, 0, np.pi / 6]).apply(query), - ] - - result_idx = cosine_similarity(vectors, query) - assert_array_equal(result_idx[0], 1) - - results_idx = cosine_similarity(vectors, query, k=3) - assert_array_equal(results_idx[0], 1) - assert_array_equal(results_idx[1], 4) - assert_array_equal(results_idx[2], 3) diff --git a/tests/vectors/test_vectors.py b/tests/vectors/test_vectors.py deleted file mode 100644 index 71eb9afa..00000000 --- a/tests/vectors/test_vectors.py +++ /dev/null @@ -1,21 +0,0 @@ -import numpy as np - -from outlines.vectors import VectorStore - - -def test_vector_store(): - def dummy_embedding_model(query: str): - """We compute a simplistic embedding by converting characters to an int.""" - return np.array([ord(c) for c in query]) - - store = VectorStore(dummy_embedding_model) - - store.insert("Test1") - store.insert("Test2") - assert len(store.storage) == 2 - - result = store.query("Test1") - assert result[0] == "Test1" - - result = store.query("Test2") - assert result[0] == "Test2" From 0ec8b86966e3ea6c92d6d491fc65a6fd783da68a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 26 Oct 2023 09:34:19 +0200 Subject: [PATCH 253/734] Remove embeddings models --- outlines/models/__init__.py | 4 ++-- outlines/models/embeddings.py | 4 ---- outlines/models/openai.py | 45 ----------------------------------- 3 files changed, 2 insertions(+), 51 deletions(-) delete mode 100644 outlines/models/embeddings.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index e26df934..9d546f04 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,8 +5,8 @@ codebase. """ -from . import embeddings, image_generation, text_completion +from . 
import image_generation, text_completion from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion -from .openai import OpenAICompletion, OpenAIEmbeddings, OpenAIImageGeneration +from .openai import OpenAICompletion, OpenAIImageGeneration from .transformers import transformers diff --git a/outlines/models/embeddings.py b/outlines/models/embeddings.py deleted file mode 100644 index 12e96613..00000000 --- a/outlines/models/embeddings.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Router for embedding models.""" -from .openai import OpenAIEmbeddings - -openai = OpenAIEmbeddings diff --git a/outlines/models/openai.py b/outlines/models/openai.py index aad5ca75..80f0a36d 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -21,7 +21,6 @@ __all__ = [ "OpenAICompletion", - "OpenAIEmbeddings", "OpenAIImageGeneration", ] @@ -168,33 +167,6 @@ async def generate_choice( return generate -def OpenAIEmbeddings(model_name: str): - """Create a function that will call OpenAI's embeddings endpoint. - - You should have the `openai` package installed. Available models are listed - in the `OpenAI documentation `_. - - Parameters - ---------- - model_name: str - The model name as listed in the OpenAI documentation. - - Returns - ------- - A function that will call OpenAI's embedding API with the given parameters when - passed a prompt. - - """ - - @functools.partial(outlines.vectorize, signature="()->(s)") - async def generate(query: str) -> np.ndarray: - api_response = await call_embeddings_api(model_name, query) - response = api_response["data"][0]["embedding"] - return np.array(response) - - return generate - - def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): """Create a function that will call OpenAI's image generation endpoint. @@ -388,23 +360,6 @@ async def call_chat_completion_api( return response -@retry(**retry_config) -@error_handler -@cache -async def call_embeddings_api( - model: str, - input: str, -): - import openai - - response = await openai.Embedding.acreate( - model=model, - input=input, - ) - - return response - - @retry(**retry_config) @error_handler @cache From ae04c3d412d15a911e908c05fac6685f6b7a5847 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 26 Oct 2023 09:37:17 +0200 Subject: [PATCH 254/734] Remove `diffusers` integration --- environment.yml | 1 - outlines/models/__init__.py | 1 - outlines/models/hf_diffusers.py | 60 ----------------------------- outlines/models/image_generation.py | 2 - pyproject.toml | 3 -- tests/models/test_hf_diffusers.py | 19 --------- 6 files changed, 86 deletions(-) delete mode 100644 outlines/models/hf_diffusers.py delete mode 100644 tests/models/test_hf_diffusers.py diff --git a/environment.yml b/environment.yml index 8f7eae92..b0b3b62c 100644 --- a/environment.yml +++ b/environment.yml @@ -14,7 +14,6 @@ dependencies: - pillow - pydantic - scipy - - diffusers - pytest - pre-commit - transformers diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 9d546f04..2532f0ce 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -6,7 +6,6 @@ """ from . 
import image_generation, text_completion -from .hf_diffusers import HuggingFaceDiffuser from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion, OpenAIImageGeneration from .transformers import transformers diff --git a/outlines/models/hf_diffusers.py b/outlines/models/hf_diffusers.py deleted file mode 100644 index e7ebb851..00000000 --- a/outlines/models/hf_diffusers.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Integration with Hugging Face's `diffusers` library.""" -import functools -from typing import List, Union - -import numpy as np -from PIL.Image import Image as PILImage - -import outlines - - -def HuggingFaceDiffuser(model_name: str) -> PILImage: - """Create a function that will call a stable diffusion pipeline. - - Parameters - ---------- - model_name: str - The name of the model as listed on Hugging Face's models page. - - """ - - def call(prompt: Union[str, List[str]], samples: int = 1) -> str: - if isinstance(prompt, str): - prompt = [prompt] - - results = call_stable_diffusion_pipeline(model_name, prompt, samples) - - return results - - return call - - -@functools.partial(outlines.vectorize, signature="(),(m),()->(m,s)") -def call_stable_diffusion_pipeline( - model_name: str, prompt: List[str], samples: int -) -> PILImage: - """Build and call the Stable Diffusion pipeline. - - We convert the returned image - """ - import torch - from diffusers import StableDiffusionPipeline - - # Pipelines don't accept NumPy arrays - prompt = list(prompt) - - pipe = StableDiffusionPipeline.from_pretrained(model_name) - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - elif torch.backends.mps.is_available(): - pipe = pipe.to("mps") - - images = pipe(prompt, num_images_per_prompt=samples).images - if not isinstance(images, list): - images = [images] - - array = np.empty((samples,), dtype="object") - for idx, image in enumerate(images): - array[idx] = image - - return np.atleast_2d(array) diff --git a/outlines/models/image_generation.py b/outlines/models/image_generation.py index ff26d21b..08c7e762 100644 --- a/outlines/models/image_generation.py +++ b/outlines/models/image_generation.py @@ -1,6 +1,4 @@ """Router for image generation models.""" -from .hf_diffusers import HuggingFaceDiffuser from .openai import OpenAIImageGeneration -hf = HuggingFaceDiffuser openai = OpenAIImageGeneration diff --git a/pyproject.toml b/pyproject.toml index 39ab8207..c769baf1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,6 @@ dynamic = ["version"] [project.optional-dependencies] test = [ - "diffusers", "pre-commit", "pytest", "pytest-cov", @@ -76,7 +75,6 @@ filterwarnings = [ "error", "ignore::numba.core.errors.NumbaPendingDeprecationWarning", "ignore::FutureWarning:transformers.*", - "ignore::FutureWarning:diffusers.*", "ignore::UserWarning:torch.cuda.*" ] @@ -85,7 +83,6 @@ exclude=["examples"] [[tool.mypy.overrides]] module = [ - "diffusers", "jinja2", "joblib.*", "openai", diff --git a/tests/models/test_hf_diffusers.py b/tests/models/test_hf_diffusers.py deleted file mode 100644 index 450d3c12..00000000 --- a/tests/models/test_hf_diffusers.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -from PIL.Image import Image as PILImage - -from outlines.models.hf_diffusers import HuggingFaceDiffuser - -MODEL = "hf-internal-testing/tiny-stable-diffusion-torch" - - -def test_stable_diffusion(): - model = HuggingFaceDiffuser(MODEL) - - image = model("test") - assert isinstance(image, PILImage) - - images = model("test", samples=3) - assert isinstance(images, np.ndarray) - 
assert len(images) == 3 - for img in images: - assert isinstance(image, PILImage) From d42c5defe11fe9dc91daabe8bfb59650c275ae19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 31 Oct 2023 11:27:58 +0100 Subject: [PATCH 255/734] Remove OpenAI DallE integration --- outlines/models/__init__.py | 4 +- outlines/models/image_generation.py | 4 -- outlines/models/openai.py | 60 ----------------------------- outlines/models/routers.py | 46 ---------------------- 4 files changed, 2 insertions(+), 112 deletions(-) delete mode 100644 outlines/models/image_generation.py delete mode 100644 outlines/models/routers.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 2532f0ce..75846b15 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,7 +5,7 @@ codebase. """ -from . import image_generation, text_completion +from . import text_completion from .hf_transformers import HuggingFaceCompletion -from .openai import OpenAICompletion, OpenAIImageGeneration +from .openai import OpenAICompletion from .transformers import transformers diff --git a/outlines/models/image_generation.py b/outlines/models/image_generation.py deleted file mode 100644 index 08c7e762..00000000 --- a/outlines/models/image_generation.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Router for image generation models.""" -from .openai import OpenAIImageGeneration - -openai = OpenAIImageGeneration diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 80f0a36d..6726256e 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,14 +1,10 @@ """Integration with OpenAI's API.""" -import base64 import functools import os import warnings -from io import BytesIO from typing import Callable, Dict, List, Optional, Union import numpy as np -from PIL import Image -from PIL.Image import Image as PILImage from tenacity import ( retry, retry_if_exception_type, @@ -21,7 +17,6 @@ __all__ = [ "OpenAICompletion", - "OpenAIImageGeneration", ] @@ -167,48 +162,6 @@ async def generate_choice( return generate -def OpenAIImageGeneration(model_name: str = "", size: str = "512x512"): - """Create a function that will call OpenAI's image generation endpoint. - - You should have the `openai` package installed. Available models are listed - in the `OpenAI documentation `_. - - Parameters - ---------- - model_name: str - The model name as listed in the OpenAI documentation. - size: str - The size of the image to generate. One of `256x256`, `512x512` or - `1024x1024`. - - Returns - ------- - A function that will call OpenAI's image API with the given parameters when - passed a prompt. 
- - """ - - def generate(prompt: str, samples: int = 1): - return generate_base(prompt, samples) - - @functools.partial(outlines.vectorize, signature="(),()->(s)") - async def generate_base(prompt: str, samples: int) -> PILImage: - api_response = await call_image_generation_api(prompt, size, samples) - - images = [] - for i in range(samples): - response = api_response["data"][i]["b64_json"] - images.append(Image.open(BytesIO(base64.b64decode(response)))) - - array = np.empty((samples,), dtype="object") - for idx, image in enumerate(images): - array[idx] = image - - return np.atleast_2d(array) - - return generate - - def create_int_mask(encoder): """Create an exclusive mask for digit tokens.""" warnings.warn( @@ -358,16 +311,3 @@ async def call_chat_completion_api( ) return response - - -@retry(**retry_config) -@error_handler -@cache -async def call_image_generation_api(prompt: str, size: str, samples: int): - import openai - - response = await openai.Image.acreate( - prompt=prompt, size=size, n=int(samples), response_format="b64_json" - ) - - return response diff --git a/outlines/models/routers.py b/outlines/models/routers.py deleted file mode 100644 index cbb34e99..00000000 --- a/outlines/models/routers.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Route model names to their corresponding implementation.""" -import functools -from typing import Callable, Dict, Tuple - -import outlines.models as models - - -def image_generation(model_path: str) -> Callable: - """Return the model and model name corresponding to the model path. - - Parameters - ---------- - model_path - A string of the form "model_provider/model_name" - - Returns - ------- - The model builder with bound model name. - - """ - - registry: Dict[str, Callable] = { - "hf": models.HuggingFaceDiffuser, - "openai": models.OpenAIImageGeneration, - } - - provider, model_name = parse_model_path(model_path) - - try: - model = registry[provider] - except KeyError: - raise ValueError(f"The model provider {provider} is not available.") - - return functools.partial(model, model_name) - - -def parse_model_path(model_path: str) -> Tuple[str, str]: - """Parse a model path in the form 'provider/model_name'""" - - if "/" not in model_path: - raise ValueError("Model names must be in the form 'provider_name/model_name'") - - provider_name = model_path.split("/")[0] - model_name = model_path[len(provider_name) + 1 :] - - return provider_name, model_name From f5ec1afcfd211e159592ac2a8388cae084610ae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 31 Oct 2023 13:38:55 +0100 Subject: [PATCH 256/734] Remove old `transformers` integration --- outlines/models/__init__.py | 1 - outlines/models/hf_transformers.py | 438 --------------------------- outlines/models/text_completion.py | 2 - tests/models/test_hf_transformers.py | 104 ------- 4 files changed, 545 deletions(-) delete mode 100644 outlines/models/hf_transformers.py delete mode 100644 tests/models/test_hf_transformers.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 75846b15..086e33b5 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -6,6 +6,5 @@ """ from . 
import text_completion -from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion from .transformers import transformers diff --git a/outlines/models/hf_transformers.py b/outlines/models/hf_transformers.py deleted file mode 100644 index 8cadc8ab..00000000 --- a/outlines/models/hf_transformers.py +++ /dev/null @@ -1,438 +0,0 @@ -"""Integration with Hugging Face's `transformers` library.""" -import functools -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np - -import outlines -from outlines.text.masks import create_float_mask, create_int_mask - -if TYPE_CHECKING: - import torch - from transformers import PreTrainedTokenizerBase - - -def HuggingFaceCompletion( - model_name: str, - max_tokens: Optional[int] = None, - temperature: Optional[float] = None, -) -> Callable: - """Create a function that will call the `generate` method of a `transformers` model. - - You should have the `torch` and `transformers` packages installed. First - execution may take a while since the pre-trained weights will be downloaded. - Available models are listed on `Hugging Face's model page `_. - - Note - ---- - - To my knowledge `transformers` does not simply allow to stop the generation - after a given sequence has been generated. We will need to implement this - manually for this integration to have the same features as `OpenAICompletion`. - - Parameters - ---------- - model_name: str - The name of the model as listed on Hugging Face's models page. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - Returns - ------- - A function that will generate tokens from the model when passed a prompt. - - """ - if max_tokens is None: - max_tokens = 216 - - if temperature is None: - temperature = 1.0 - - def call( - prompt: Union[str, List[str]], - *, - samples: int = 1, - stop_at: List[Optional[str]] = [], - is_in: List[Optional[str]] = [], - type: Optional[str] = None, - ) -> str: - if isinstance(prompt, str): - prompt = [prompt] - - return call_model_generate_method( - model_name, - prompt, - max_tokens, - temperature, - samples, - stop_at, - is_in, - type, - ) - - return call - - -@functools.partial(outlines.vectorize, signature="(),(m),(),(),(),(i),(j),()->(m,s)") -def call_model_generate_method( - model_name: str, - prompt: str, - max_tokens: int, - temperature: float, - samples: int, - stop_at: List[Optional[str]], - is_in: np.ndarray, - type: str, -) -> str: - import torch - from transformers import AutoModelForCausalLM, AutoTokenizer - - # `generate` does not accept NumPy arrays - prompt = list(prompt) - - tokenizer = AutoTokenizer.from_pretrained(model_name, padding_size="left") - model = AutoModelForCausalLM.from_pretrained(model_name) - - tokenizer.pad_token = tokenizer.eos_token - prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True) - - logit_processors: Optional[List[Callable]] = None - stopping_criteria: Optional[List[Callable]] = None - postprocessing: Callable = lambda x: x - if type is not None: - if samples > 1: - raise NotImplementedError( - "It is currently not possible to control the generation of several samples with the `transformers` integration" - ) - if is_in.size > 0: - raise ValueError( - "You cannot both restrict to a set of choices with `is_in` and to a type with `type`" - ) - logit_processor, stopping_criterion, postprocessing = create_type_constraint( - type, tokenizer, prompt_tokens["input_ids"] - ) - logit_processors = 
[logit_processor] - stopping_criteria = [stopping_criterion] - elif is_in.size > 0: - if samples > 1: - raise NotImplementedError( - "It is currently not possible to control the generation of several samples with the `transformers` integration" - ) - if stop_at.size > 0: - raise ValueError( - "You cannot both restrict to a set of choices with `is_in` and set a stopping criterion" - ) - logit_processor, stopping_criterion, postprocessing = create_choice_constraint( - is_in, tokenizer, prompt_tokens["input_ids"] - ) - logit_processors = [logit_processor] - stopping_criteria = [stopping_criterion] - elif stop_at.size > 0: - if samples > 1: - raise NotImplementedError( - "It is currently not possible to control the generation of several samples with the `transformers` integration" - ) - logit_processor, stopping_criterion, postprocessing = create_stop_constraint( - stop_at, tokenizer, prompt_tokens["input_ids"] - ) - logit_processors = [logit_processor] - stopping_criteria = [stopping_criterion] - - if torch.cuda.is_available(): - model = model.to("cuda") - prompt_tokens = prompt_tokens.to("cuda") - elif torch.backends.mps.is_available(): - model = model.to("mps") - prompt_tokens = prompt_tokens.to("mps") - - returned_tokens = model.generate( - **prompt_tokens, - do_sample=True, - temperature=temperature, - max_new_tokens=max_tokens, - pad_token_id=tokenizer.eos_token_id, - num_return_sequences=int(samples), - logits_processor=logit_processors, - stopping_criteria=stopping_criteria, - ) - new_tokens = returned_tokens[:, prompt_tokens["input_ids"].shape[1] :] - if len(prompt) == 1: - new_tokens = new_tokens.squeeze() - - if new_tokens.ndim < 2: - results = tokenizer.decode(new_tokens, skip_special_tokens=True) - results = np.array([postprocessing(results)]) - else: - results = tokenizer.batch_decode(new_tokens, skip_special_tokens=True) - results = [postprocessing(result) for result in results] - results = np.array(results) - - if len(prompt) == 1: - results = np.expand_dims(results, 0) - else: - results = np.expand_dims(results, 1) - - # If we pass a batch of prompts to the model and ask for - # several samples we get a list of results that we need - # to reshape to the right dimensions. - if len(prompt) > 1 and samples > 1: - results = np.reshape(results, (-1, samples)) - - return results - - -def create_stop_constraint( - stop_at: List[str], - tokenizer: "PreTrainedTokenizerBase", - prompt_tokens: "torch.Tensor", -) -> Tuple[Callable, Callable, Callable]: - """Create a constraint that stops generation after a sequence has been found. - - Parameters - ---------- - stop_at - The list of sequences which, once generated, the generation is stopped. - tokenizer - The tokenizer that corresponds to the model used for generation. - prompt_tokens - An array that contains the tokenized prompt. - - """ - import torch - - num_prompt_tokens = prompt_tokens.shape[-1] - - def stopping_criterion(input_ids: torch.Tensor, _) -> bool: - """Choose whether to stop the generation after this step. - - We check whether either of the stopping sequences is present in the - current generation. If either one is found we stop the generation. - - """ - decoded_input = tokenizer.decode( - input_ids[0, num_prompt_tokens:], skip_special_tokens=True - ) - for stopping_sequence in stop_at: - if stopping_sequence in decoded_input: - return True - - return False - - def postprocess(generated_sequence: str) -> str: - """Postprocess the generated text. 
- - We need to remove the stopping sequence that triggered the end of - the generation at the end. - - """ - for stopping_sequence in stop_at: - idx = generated_sequence.find(stopping_sequence) - if idx != -1: - return generated_sequence[:idx] - - return generated_sequence - - return lambda _, x: x, stopping_criterion, postprocess - - -def create_choice_constraint( - choices: List[str], - tokenizer: "PreTrainedTokenizerBase", - prompt_tokens: "torch.Tensor", -) -> Tuple[Callable, Callable, Callable]: - """Create a constraint that forces the generation to be among a list of choices. - - Parameters - ---------- - choices - The list of sequences to which the generated sequences must belong. - tokenizer - The tokenizer that corresponds to the model used for generation. - prompt_tokens - An array that contains the tokenized prompt. - - """ - import torch - - num_prompt_tokens = prompt_tokens.shape[-1] - tokenized_choices = [tokenizer.encode(word) for word in choices] - - def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: - """Pre-process the model's output logits before generating the next token. - - At each step we forbid the tokens that do not steer the generation in the - direction of being either of the choices. - - """ - output = input_ids[0, num_prompt_tokens:] - decoded_output = tokenizer.decode(output, skip_special_tokens=True) - - mask = torch.zeros(len(tokenizer), dtype=torch.bool) - for choice, tokens in zip(choices, tokenized_choices): - if not choice.startswith(decoded_output): - continue - else: - mask[tokens[len(output)]] = True - - expanded_mask = mask.expand_as(scores) - scores[~expanded_mask] = -float("inf") - - return scores - - def stopping_criterion(input_ids: torch.Tensor, _) -> bool: - """Choose whether to stop the generation after this step. - - We stop generation when either of the choices has been found. - - TODO: We can stop the generation once we have excluded all possibilities - but one, and the full sequence can be recovered during post-processing. - - """ - decoded_input = tokenizer.decode( - input_ids[0, num_prompt_tokens:], skip_special_tokens=True - ) - - is_present_in_output = [] - for choice in choices: - if choice == decoded_input: - return True - elif choice.startswith(decoded_input): - is_present_in_output.append(1) - else: - is_present_in_output.append(0) - - # If we have eliminated all possibilities but one, return - if sum(is_present_in_output) == 1: - return True - - return False - - def postprocess(output_sequence: str) -> str: - for choice in choices: - if choice.startswith(output_sequence): - return choice - - return output_sequence - - return logit_processor, stopping_criterion, postprocess - - -def create_int_constraint( - tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" -) -> Tuple[Callable, Callable, Callable]: - """Create a constraints that forces the generated sequence to be an integer. - - Parameters - ---------- - tokenizer - The tokenizer that corresponds to the model used for generation. - prompt_tokens - An array that contains the tokenized prompt. - - """ - import torch - - num_prompt_tokens = prompt_tokens.shape[-1] - mask = create_int_mask(tokenizer.get_vocab()) - - def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: - """Pre-process the model's output logits before generating the next token. - - At each step we forbid the tokens that do not correspond to a digit. We forbid - EOS tokens until at least one digit has been generated. 
- - # TODO: Do we need to allow " ", "\n", "\r" and other delimiters? - - """ - if input_ids.shape[1] > num_prompt_tokens + 1: - mask[tokenizer.eos_token_id] = True - expanded_mask = mask.expand_as(scores) - scores[~expanded_mask] = -float("inf") - return scores - - return logit_processor, lambda *_: False, lambda x: x - - -def create_float_constraint( - tokenizer: "PreTrainedTokenizerBase", - prompt_tokens: "torch.Tensor", - decimals: int = 3, -) -> Tuple[Callable, Callable, Callable]: - """Create a constraints that forces the generated sequence to be an floating point number. - - Parameters - ---------- - tokenizer - The tokenizer that corresponds to the model used for generation. - prompt_tokens - An array that contains the tokenized prompt. - - """ - import torch - - num_prompt_tokens = prompt_tokens.shape[-1] - mask = create_float_mask(tokenizer.get_vocab()) - - def logit_processor(input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: - """Pre-process the model's output logits before generating the next token. - - At each step we forbid the tokens that do not correspond to a digit. We forbid - EOS tokens until at least one digit has been generated. - - # TODO: Do we need to allow " ", "\n", "\r" and other delimiters? - - """ - if input_ids.shape[1] > num_prompt_tokens + 1: - mask[tokenizer.eos_token_id] = True - expanded_mask = mask.expand_as(scores) - scores[~expanded_mask] = -float("inf") - return scores - - def stopping_criterion(input_ids: torch.Tensor, _) -> bool: - """Choose whether to stop the generation after this step. - - We stop generation if the sequence contains more than one period, or - if the desired number of decimals has been generated. - - """ - decoded_input = tokenizer.decode( - input_ids[0, num_prompt_tokens:], skip_special_tokens=True - ) - if decoded_input.count(".") > 1: - return True - - if ( - decoded_input.count(".") == 1 - and len(decoded_input.strip().split(".")[1]) > decimals - ): - return True - - return False - - def postprocessing(output: str) -> str: - """Postprocess the generated text. - - We need to remove the trailing period, present if the generation - was stopped because a second period was found. 
- - """ - return output.rstrip(".") - - return logit_processor, stopping_criterion, postprocessing - - -type_to_mask: Dict[str, Callable] = { - "float": create_float_constraint, - "int": create_int_constraint, -} - - -def create_type_constraint( - type: str, tokenizer: "PreTrainedTokenizerBase", prompt_tokens: "torch.Tensor" -) -> Tuple[Callable, Callable, Callable]: - if type not in ["int", "float"]: - raise NotImplementedError(f"Cannot restrict the generation to type {type}") - - return type_to_mask[type](tokenizer, prompt_tokens) diff --git a/outlines/models/text_completion.py b/outlines/models/text_completion.py index a2d6baad..49155b29 100644 --- a/outlines/models/text_completion.py +++ b/outlines/models/text_completion.py @@ -1,6 +1,4 @@ """Router for text completion models.""" -from .hf_transformers import HuggingFaceCompletion from .openai import OpenAICompletion -hf = HuggingFaceCompletion openai = OpenAICompletion diff --git a/tests/models/test_hf_transformers.py b/tests/models/test_hf_transformers.py deleted file mode 100644 index f7323c1f..00000000 --- a/tests/models/test_hf_transformers.py +++ /dev/null @@ -1,104 +0,0 @@ -import numpy as np -import pytest - -from outlines.models.hf_transformers import HuggingFaceCompletion - -TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" - - -def test_samples(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - - answer = model("test", samples=1) - assert isinstance(answer, str) - - answer = model("test") - assert isinstance(answer, str) - - answers = model("test", samples=3) - assert isinstance(answers, np.ndarray) - assert len(answers) == 3 - - -def test_prompt_array(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - prompts = [["Hello", "Bonjour"], ["Ciao", "Hallo"]] - answers = model(prompts) - assert isinstance(answers, np.ndarray) - assert answers.shape == (2, 2) - - answers = model(prompts, samples=5) - assert isinstance(answers, np.ndarray) - assert answers.shape == (2, 2, 5) - - -def test_type_int(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - answer = model("test", type="int") - int(answer) - - answers = model(["test", "other_test"], type="int") - for answer in answers: - int(answer) - - -def test_type_float(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - answer = model("test", type="float") - float(answer) - - answers = model(["test", "other_test"], type="float") - for answer in answers: - float(answer) - - -def test_incompatible_constraints(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - - with pytest.raises(ValueError): - model("test", type="float", is_in=["test"]) - - -def test_choices(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=50) - - choices = ["a", "and a long sequence", "with\n line break"] - answer = model("test", is_in=choices) - assert answer in choices - - answers = model(["test", "other_test"], is_in=choices) - for answer in answers: - assert answer in choices - - -def test_stop(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=1000) - - stop = [" ", "\n"] - answer = model("test", stop_at=stop) - for seq in stop: - assert seq not in answer - - answers = model(["test", "other_test"], stop_at=stop) - for seq in stop: - for answer in answers: - assert seq not in answer - - -@pytest.mark.xfail -def test_type_multiple_samples(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - answer = model("test", type="int", samples=2) - int(answer) - - -@pytest.mark.xfail -def test_is_in_multiple_samples(): - model 
= HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - model("test", is_in=["a", "b"], samples=2) - - -@pytest.mark.xfail -def test_stop_at_multiple_samples(): - model = HuggingFaceCompletion(TEST_MODEL, max_tokens=10) - model("test", stop_at=[" "], samples=2) From 42d7fdc1775c7b24f30fdb024a1c97a868b39ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 2 Nov 2023 08:48:49 +0100 Subject: [PATCH 257/734] Add bug report issue template --- .github/ISSUE_TEMPLATE/bug_report.yml | 80 +++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..e675a6a3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,80 @@ +# Issue template inspired by NumPy's excellent template: +# https://github.com/numpy/numpy/edit/main/.github/ISSUE_TEMPLATE/bug-report.yml +name: 🐞 Bug report +description: Create a bug report to help us reproduce and fix it. +title: "" +labels: ["bug"] + +body: + - type: markdown + attributes: + value: >- + Thank you for taking the time to file a bug report. First, carefully read + the following before everything else: + + - Does your issue only arise in a library that uses Outlines? If so, + submit your issue to this library's issue tracker. + - Did you check the issue tracker for open and closed issues that may be + related to your bug? + + - type: textarea + attributes: + label: "Describe the issue as clearly as possible:" + validations: + required: true + + - type: textarea + attributes: + label: "Steps/code to reproduce the bug:" + description: > + A short code example that reproduces the problem/missing feature. It + should be self-contained, i.e., can be copy-pasted into the Python + interpreter or run as-is via `python myproblem.py`. + placeholder: | + import outlines + + << your code here >> + render: python + validations: + required: true + + - type: textarea + attributes: + label: "Expected result:" + description: > + Please describe what you expect the above example to output. + placeholder: | + << the expected result here >> + render: shell + validations: + required: true + + - type: textarea + attributes: + label: "Error message:" + description: > + Please include the full error message, if any. + placeholder: | + << Full traceback starting from `Traceback: ...` >> + render: shell + + - type: textarea + attributes: + label: "Outlines/Python version information:" + description: Please run the following code and paste the output here. + placeholder: | + import outlines; print("Outlines", outlines.__version__) + import sys; print("Python", sys.version) + render: python + validations: + required: true + + - type: textarea + attributes: + label: "Context for the issue:" + description: | + Please explain how this issue affects your work or why it should be prioritized. 
+      placeholder: |
+        << your explanation here >>
+  validations:
+    required: false

From b2ec3b432d2a2c21aa71c9ccc33741d67bbec5cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 2 Nov 2023 08:50:26 +0100
Subject: [PATCH 258/734] Redirect questions to discussions

---
 .github/ISSUE_TEMPLATE/config.yml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml

diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..a396c1e7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,4 @@
+contact_links:
+  - name: 🤔 Questions & Help
+    url: https://github.com/outlines-dev/outlines/discussions/new
+    about: "If you have a question about how to use Outlines, please start a discussion."

From b992fcdecc1a63d8c0d1b73c45fa2d72555b4ca6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 2 Nov 2023 08:53:12 +0100
Subject: [PATCH 259/734] Add enhancement issue template

---
 .github/ISSUE_TEMPLATE/improvement.yml | 34 ++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/improvement.yml

diff --git a/.github/ISSUE_TEMPLATE/improvement.yml b/.github/ISSUE_TEMPLATE/improvement.yml
new file mode 100644
index 00000000..64212eed
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/improvement.yml
@@ -0,0 +1,34 @@
+name: ✨ Improvement
+description: Propose an improvement to Outlines.
+title: ""
+labels: "enhancement"
+
+body:
+  - type: markdown
+    attributes:
+      value: >-
+        Before suggesting an improvement, please make sure this hasn't already been suggested by searching through the past issues and the PR tracker.
+
+        ## Current behavior
+
+  - type: textarea
+    attributes:
+      label: "What behavior of the library made you think about the improvement?"
+      placeholder: |
+        Sample code with the current behavior
+
+  - type: markdown
+    attributes:
+      value: >-
+        ## Desired behavior
+
+  - type: textarea
+    attributes:
+      label: "How would you like it to behave?"
+      placeholder: |
+        Sample code with the desired behavior
+
+  - type: markdown
+    attributes:
+      value: >-
+        **Be aware that your proposal may be challenged.** Outlines has strong design principles that we are committed to sticking to. If there is an alternative way to achieve what you would like, we will let you know.

From a82351d13742ed199b2340ed691516c99d2069d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 2 Nov 2023 08:55:46 +0100
Subject: [PATCH 260/734] Add feature request issue template

---
 .github/ISSUE_TEMPLATE/feature_request.md | 27 +++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md

diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..702a41bd
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,27 @@
+---
+name: 🚀 New feature
+about: Request a new feature.
+title: ''
+labels: ''
+assignees: ''
+---
+
+Before suggesting a new feature, please make sure this hasn't already been suggested by searching through the past issues and the PR tracker.
+
+### Presentation of the new feature
+
+Assume that we know nothing about this feature. Please give us as much information as possible so we can judge it fairly. That includes (but is not limited to):
+- Academic article
+- Blog posts
+- Implementations
+- Personal experience with the feature
+
+### Where does it fit in Outlines?
+ +Please explain to us why you are suggesting this feature for integration in Outlines. + +### Are you willing to open a PR? + +Tell us whether you are willing to add the feature yourself, and if so if you can share a design plan. **You may be challenged.** + +Thanks for contributing! From f7fd0c382af02cf2bd711351e555e40f6ecf21f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 2 Nov 2023 10:17:42 +0100 Subject: [PATCH 261/734] Add Pull Request template --- .../pull_request_template.md | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE/pull_request_template.md diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md new file mode 100644 index 00000000..bc181466 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md @@ -0,0 +1,23 @@ +# 🚧 Thank you for opening a PR! + +A few important guidelines and requirements before we can merge your PR: + +- [ ] We should be able to understand what the PR does from its title only; +- [ ] There is a high-level description of the changes; +- [ ] *If I add a new feature*, there is an [issue][issues] discussing it already; +- [ ] There are links to *all* the relevant issues, discussions and PRs; +- [ ] The branch is rebased on the latest `main` commit; +- [ ] **Commit messages** follow these [guidelines][git-guidelines]; +- [ ] One commit per logical change; +- [ ] The code respects the current **naming conventions**; +- [ ] Docstrings follow the [numpy style guide][docstring-guidelines]; +- [ ] `pre-commit` is installed and configured on your machine, and you ran it before opening the PR; +- [ ] There are tests covering the changes; +- [ ] The documentation is up-to-date; + +Consider opening a **Draft PR** if your work is still in progress but you would +like some feedback from other contributors. + +[issues]: https://github.com/outlines-dev/outlines/issues +[git-guidelines]: https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[docstring-guidelines]: https://numpydoc.readthedocs.io/en/latest/format.html From 3667e04b57feed66faa82fa64eee84421c6eb9e0 Mon Sep 17 00:00:00 2001 From: Matheus Westhelle <23220394+mwesthelle@users.noreply.github.com> Date: Mon, 6 Nov 2023 11:21:54 -0300 Subject: [PATCH 262/734] fix math error in sampling example --- examples/sampling.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/sampling.ipynb b/examples/sampling.ipynb index 0743bb9d..bcbcca1e 100644 --- a/examples/sampling.ipynb +++ b/examples/sampling.ipynb @@ -135,7 +135,7 @@ "id": "1a895b6d-d4d4-40f9-9156-24ba7e21cc08", "metadata": {}, "source": [ - "The correct answer to this question is 35. Let us now count the different answers, and take a look at their distribution. Let us first define a few utility functions:" + "The correct answer to this question is 67. Let us now count the different answers, and take a look at their distribution. Let us first define a few utility functions:" ] }, { From a64aa76bd131b27487d29bc411aed1716479514b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 8 Nov 2023 21:11:08 +0100 Subject: [PATCH 263/734] Remove the `function` interface The `function` interface is not a very useful layer of abstraction, and we don't want to have to maintain it. This commit removes it from the codebase. 
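Callers now compose the three steps explicitly instead. A minimal sketch,
mirroring the updated `examples/math_generate_code.py` in the diff below:

    prompt = answer_with_code_prompt(question, examples)                 # render the prompt
    answer = models.text_completion.openai("text-davinci-003")(prompt)   # call the language model
    result = execute_code(answer)                                        # validate/post-process the output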
--- examples/babyagi.py | 20 +++++------ examples/math_generate_code.py | 10 ++---- outlines/text/__init__.py | 1 - outlines/text/functions.py | 63 ---------------------------------- tests/text/test_function.py | 51 --------------------------- 5 files changed, 11 insertions(+), 134 deletions(-) delete mode 100644 outlines/text/functions.py delete mode 100644 tests/text/test_function.py diff --git a/examples/babyagi.py b/examples/babyagi.py index e3213125..d21630da 100644 --- a/examples/babyagi.py +++ b/examples/babyagi.py @@ -28,9 +28,6 @@ def perform_task_ppt(objective: str, task: str): """ -perform_task = text.function(model, perform_task_ppt) - - ##################### # Create a new task # ##################### @@ -67,9 +64,6 @@ def create_tasks_fmt(result: str) -> List[str]: return task_list -create_tasks = text.function(model, create_tasks_ppt, create_tasks_fmt) - - ######################## # Prioritize new tasks # ######################## @@ -104,9 +98,6 @@ def prioritize_tasks_fmt(result: str): return task_list -prioritize_tasks = text.function(model, prioritize_tasks_ppt, prioritize_tasks_fmt) - - objective = "Becoming rich while doing nothing." first_task = { "task_id": 1, @@ -134,18 +125,23 @@ def one_cycle(objective: str, task_list, next_task_id: int): """ task = task_list.popleft() - result = perform_task(objective, task) - new_tasks = create_tasks( + + prompt = perform_task_ppt(objective, task) + result = model(prompt) + + prompt = create_tasks_ppt( objective, first_task["task_name"], result, [first_task["task_name"]] ) + new_tasks = model(prompt) for task in new_tasks: next_task_id += 1 task_list.append({"task_id": next_task_id, "task_name": task}) - prioritized_tasks = prioritize_tasks( + prompt = prioritize_tasks_ppt( objective, [task["task_name"] for task in task_list], next_task_id ) + prioritized_tasks = model(prompt) return task, result, prioritized_tasks, next_task_id diff --git a/examples/math_generate_code.py b/examples/math_generate_code.py index 06921158..b2b25a94 100644 --- a/examples/math_generate_code.py +++ b/examples/math_generate_code.py @@ -34,11 +34,7 @@ def execute_code(code): return result -answer_with_code = text.function( - models.text_completion.openai("text-davinci-003"), - answer_with_code_prompt, - execute_code, -) - -result = answer_with_code(question, examples) +prompt = answer_with_code_prompt(question, examples) +answer = models.text_completion.openai("text-davinci-003")(prompt) +result = execute_code(answer) print(f"It takes Carla {result:.0f} minutes to download the file.") diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index 8870c7a1..b1ae976c 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,3 +1,2 @@ -from .functions import function from .generate import continuation from .prompts import prompt, render diff --git a/outlines/text/functions.py b/outlines/text/functions.py deleted file mode 100644 index c3c6bfa5..00000000 --- a/outlines/text/functions.py +++ /dev/null @@ -1,63 +0,0 @@ -import functools -from dataclasses import dataclass -from typing import Callable, Optional, Union - -from pydantic import BaseModel - -FunctionType = type(lambda x: None) -BaseModelType = type(BaseModel) - - -@dataclass -class function: - """Represents a function that uses a language model to generate its output. - - When called, the `function` instance passes the arguments to the prompt - function, the rendered prompt is passed to the language model, and its - result to an (optional) validation function. 
- - Attributes - ---------- - model - A function that takes a string and returns a string that contains the - model's return value. - prompt - A prompt-generating function. - validator - A function that takes the output of the language model, parses it and - returns it in a normalized format. - - """ - - model: Callable - prompt: Callable - validator: Optional[Union[Callable, BaseModel]] = None - - def __call__(self, *args, **kwargs): - rendered_prompt = self.prompt(*args, **kwargs) - result = self.model(rendered_prompt) - validated_result = validate(self.validator, result) - return validated_result - - -@functools.singledispatch -def validate(validator, result): - if validator is not None: - raise NotImplementedError( - f"Cannot validate the input with validator of type {type(validator)}" - ) - else: - return result - - -@validate.register(BaseModelType) -def validate_pydantic(validator, result): - if hasattr(validator, "model_validate_json"): - return validator.model_validate_json(result) - else: # pragma: no cover - return validator.parse_raw(result) - - -@validate.register(FunctionType) -def validate_function(validator, result): - return validator(result) diff --git a/tests/text/test_function.py b/tests/text/test_function.py deleted file mode 100644 index ac473b61..00000000 --- a/tests/text/test_function.py +++ /dev/null @@ -1,51 +0,0 @@ -import json - -from pydantic import BaseModel - -import outlines.text as text - - -def test_function_no_validator(): - def passthrough_model(prompt: str): - return prompt - - @text.prompt - def prompt(query: str): - "{{query}}" - - fn = text.function(passthrough_model, prompt) - assert fn("Hello") == "Hello" - - -def test_function_fn_validator(): - def constant_model(_): - return "[1, 2, 3]" - - @text.prompt - def prompt(query: str): - "{{query}}" - - def validator(result): - return json.loads(result) - - fn = text.function(constant_model, prompt, validator) - assert fn("Hello") == [1, 2, 3] - - -def test_function_pydantic_validator(): - class Response(BaseModel): - thought: str - command: str - - def constant_model(_): - return '{"thought": "test thought", "command": "resume"}' - - @text.prompt - def prompt(query: str): - "{{query}}" - - fn = text.function(constant_model, prompt, Response) - result = fn("Hello") - assert isinstance(result, Response) - assert result.thought == "test thought" - assert result.command == "resume" From 5f82da4c10fad592e6a961917cba533f7b7fc15c Mon Sep 17 00:00:00 2001 From: HerrIvan <129194928+HerrIvan@users.noreply.github.com> Date: Sun, 12 Nov 2023 17:01:36 +0100 Subject: [PATCH 264/734] Fix `babyagi.py` and `meta_prompting.py` example scripts Fixes example scripts. - Adds calls to *_fmt functions in `babyagi.py`, s.t. the script can work properly without triggering an Exception. - Removes one stop criterion and increases max_tokens in a few calls, s.t. the answers by the GPTs are not truncated. 
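To make the fix concrete, here is a hedged sketch of the restored call chain: the model's raw completion must be run through the matching `*_fmt` helper before it is used as a task list. The parser below is one plausible implementation for illustration only; the real `create_tasks_fmt` lives in `examples/babyagi.py` and may differ in detail.

```python
from typing import List


def create_tasks_fmt(result: str) -> List[str]:
    # parse a numbered list returned by the model into a list of task names
    task_list = []
    for line in result.split("\n"):
        parts = line.strip().split(".", 1)
        if len(parts) == 2:
            task_list.append(parts[1].strip())
    return task_list


raw = "1. Research index funds\n2. Automate monthly savings"
new_tasks = create_tasks_fmt(raw)
assert new_tasks == ["Research index funds", "Automate monthly savings"]
```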
--- examples/babyagi.py | 4 ++++ examples/meta_prompting.py | 16 +++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/examples/babyagi.py b/examples/babyagi.py index d21630da..a745c93e 100644 --- a/examples/babyagi.py +++ b/examples/babyagi.py @@ -134,6 +134,8 @@ def one_cycle(objective: str, task_list, next_task_id: int): ) new_tasks = model(prompt) + new_tasks = create_tasks_fmt(new_tasks) + for task in new_tasks: next_task_id += 1 task_list.append({"task_id": next_task_id, "task_name": task}) @@ -143,6 +145,8 @@ def one_cycle(objective: str, task_list, next_task_id: int): ) prioritized_tasks = model(prompt) + prioritized_tasks = prioritize_tasks_fmt(prioritized_tasks) + return task, result, prioritized_tasks, next_task_id diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index dcfdad42..b41843db 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -22,7 +22,7 @@ def solve(question): Let's solve this problem by splitting it into steps. """ - complete = models.text_completion.openai(model_name) + complete = models.text_completion.openai(model_name, max_tokens=500) prompt = solve(question) answer = complete(prompt) @@ -43,12 +43,12 @@ def determine_goal(question): def solve(memory): """{{memory}}. Let's begin.""" - complete = models.text_completion.openai(model_name) + complete = models.text_completion.openai(model_name, max_tokens=500) prompt = determine_goal(question) answer = complete(prompt, stop_at=["."]) prompt = solve(prompt + answer) - answer = complete(prompt, stop_at=["."]) + answer = complete(prompt) completed = prompt + answer return completed @@ -76,14 +76,14 @@ def find_expert(question): @text.prompt def get_answer(question, expert, memory): """ - {{memory}} + {{memory}}". I am ready to ask my question. "{{expert}}" I say, {{question}} """ complete_expert = models.text_completion.openai(model_name) - complete_answer = models.text_completion.openai(model_name) + complete_answer = models.text_completion.openai(model_name, max_tokens=500) prompt = find_expert(question) expert = complete_expert(prompt, stop_at=['"']) @@ -111,7 +111,7 @@ def get_answer(expert, memory): """ model_expert = models.text_completion.openai(model_name) - model_answer = models.text_completion.openai(model_name) + model_answer = models.text_completion.openai(model_name, max_tokens=500) prompt = find_expert(question) expert = model_expert(prompt, stop_at=["\n", "."]) @@ -157,7 +157,9 @@ def run_example(model_fn, question, model_name): meaning_q = "What is the meaning of life?" run_example(split_into_steps, math_q, args.model) - run_example(split_into_steps, sat_q, args.model) + run_example( + split_into_steps, sat_q, args.model + ) # gpt>3.5 usually gets this one right run_example(fill_in_the_blanks, sat_q, args.model) run_example(ask_an_expert, alignment_q, args.model) run_example(ask_an_expert_simple, meaning_q, args.model) From 4b653d52a4bcaa79b8df362b28cb0db487ef6a2a Mon Sep 17 00:00:00 2001 From: Hamza SAYAH Date: Sun, 12 Nov 2023 20:33:54 +0100 Subject: [PATCH 265/734] Fix Documentation Inconsistency in Joke Class Example (#356) # Fix Documentation Inconsistency in Joke Class Example This pull request addresses an inconsistency found in the `Joke` class example within the README file of the `outlines` repository. The changes ensure that the inline documentation correctly reflects the actual output format. ## Changes Made - Updated the `Joke` class definition to use `Field` descriptors for `joke` and `explanation` attributes. 
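To see why the `Field` descriptors matter, note that their descriptions are carried into the model's JSON schema, which is exactly what a schema-extracting prompt filter reads. A quick check (assuming pydantic v2):

```python
from pydantic import BaseModel, Field


class Joke(BaseModel):
    joke: str = Field(description="The joke")
    explanation: str = Field(
        description="The explanation of why the joke is funny"
    )


schema = Joke.model_json_schema()
# without the Field descriptors, these "description" keys would be absent
print(schema["properties"]["joke"]["description"])
# The joke
print(schema["properties"]["explanation"]["description"])
# The explanation of why the joke is funny
```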
## Issue Identified The previous version of the README included an example where the `Joke` class did not use `Field` descriptors, leading to a discrepancy between the class definition and the output format described in the comments. This could potentially cause confusion for users trying to understand the expected behavior of the `joke_ppt` function. ## Resolution By introducing `Field` descriptors in the `Joke` class, the documentation now accurately reflects the structure and output format of the `joke_ppt` function, enhancing clarity and correctness. I believe these updates will make the documentation more accurate and helpful for users. Your feedback and further suggestions are always welcome. Thank you for considering this pull request. --- README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0f698a40..2eb722e4 100644 --- a/README.md +++ b/README.md @@ -345,13 +345,15 @@ description passed to the prompt we define a custom Jinja filter that can extract the expected response's schema: ``` python -from pydantic import BaseModel +from pydantic import BaseModel, Field import outlines.text as text class Joke(BaseModel): - joke: str - explanation: str + joke: str = Field(description="The joke") + explanation: str = Field( + description="The explanation of why the joke is funny" + ) @text.prompt @@ -364,6 +366,7 @@ def joke_ppt(response_model): joke_ppt(Joke) + # Tell a joke and explain why the joke is funny. # # RESPONSE FORMAT: From 0fe5d052dbaf20479fbc6bdf5c172f2b790feb79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 12 Nov 2023 20:51:33 +0100 Subject: [PATCH 266/734] Use MkDocs for the documentation (#331) --- .github/workflows/build_documentation.yml | 15 +- .github/workflows/publish_documentation.yml | 38 ++- README.md | 29 ++- docs/Makefile | 20 -- docs/api/continuation.md | 1 + docs/api/fsm.md | 1 + docs/api/index.md | 1 + docs/api/json_schema.md | 1 + docs/api/models.md | 1 + docs/api/parsing.md | 1 + docs/api/prompts.md | 1 + docs/api/regex.md | 1 + docs/api/sample.md | 1 + .../_static => assets/images}/logo.png | Bin docs/assets/images/normal_computing.jpg | Bin 0 -> 64918 bytes docs/cookbook/index.md | 1 + docs/examples/dating_profiles.md | 230 ++++++++++++++++++ docs/examples/index.md | 3 + docs/get_started.md | 102 ++++++++ docs/index.md | 4 + docs/make.bat | 35 --- docs/overrides/home.html | 202 +++++++++++++++ docs/overrides/index.html | 11 + docs/overrides/main.html | 22 ++ docs/reference/choices.md | 14 ++ docs/reference/index.md | 15 ++ docs/reference/json.md | 1 + .../prompting.rst => reference/prompting.md} | 105 ++++---- docs/reference/regex.md | 1 + docs/reference/text_generation.md | 1 + docs/reference/types.md | 13 + docs/source/conf.py | 42 ---- docs/source/index.rst | 111 --------- docs/source/installation.rst | 53 ---- docs/source/overview.rst | 28 --- docs/source/reference/batching.rst | 22 -- .../reference/controlled_generation.rst | 76 ------ docs/source/reference/multimodel.rst | 69 ------ docs/stylesheets/extra.css | 12 + mkdocs.yml | 114 +++++++++ requirements-doc.txt | 8 +- 41 files changed, 857 insertions(+), 549 deletions(-) delete mode 100644 docs/Makefile create mode 100644 docs/api/continuation.md create mode 100644 docs/api/fsm.md create mode 100644 docs/api/index.md create mode 100644 docs/api/json_schema.md create mode 100644 docs/api/models.md create mode 100644 docs/api/parsing.md create mode 100644 docs/api/prompts.md create mode 100644 docs/api/regex.md create 
mode 100644 docs/api/sample.md rename docs/{source/_static => assets/images}/logo.png (100%) create mode 100644 docs/assets/images/normal_computing.jpg create mode 100644 docs/cookbook/index.md create mode 100644 docs/examples/dating_profiles.md create mode 100644 docs/examples/index.md create mode 100644 docs/get_started.md create mode 100644 docs/index.md delete mode 100644 docs/make.bat create mode 100644 docs/overrides/home.html create mode 100644 docs/overrides/index.html create mode 100644 docs/overrides/main.html create mode 100644 docs/reference/choices.md create mode 100644 docs/reference/index.md create mode 100644 docs/reference/json.md rename docs/{source/reference/prompting.rst => reference/prompting.md} (65%) create mode 100644 docs/reference/regex.md create mode 100644 docs/reference/text_generation.md create mode 100644 docs/reference/types.md delete mode 100644 docs/source/conf.py delete mode 100644 docs/source/index.rst delete mode 100644 docs/source/installation.rst delete mode 100644 docs/source/overview.rst delete mode 100644 docs/source/reference/batching.rst delete mode 100644 docs/source/reference/controlled_generation.rst delete mode 100644 docs/source/reference/multimodel.rst create mode 100644 docs/stylesheets/extra.css create mode 100644 mkdocs.yml diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml index 50de5b2d..7902c171 100644 --- a/.github/workflows/build_documentation.yml +++ b/.github/workflows/build_documentation.yml @@ -9,17 +9,14 @@ jobs: name: Build runs-on: ubuntu-latest steps: - - name: Checkout the branch - uses: actions/checkout@v2.3.1 - with: - persist-credentials: false - - - name: Set up Python 3.10 - uses: actions/setup-python@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 with: python-version: "3.10" - - name: Build the documentation with Sphinx + - name: Build the documentation + env: + GOOGLE_ANALYTICS_KEY: ${{ secrets.GOOGLE_ANALYTICS_KEY }} run: | pip install -r requirements-doc.txt - sphinx-build -b html docs/source docs/build/html + mkdocs build diff --git a/.github/workflows/publish_documentation.yml b/.github/workflows/publish_documentation.yml index 6542ac70..4679121b 100644 --- a/.github/workflows/publish_documentation.yml +++ b/.github/workflows/publish_documentation.yml @@ -2,33 +2,29 @@ name: Publish the documentation on: push: - branches: [main] + branches: + - main permissions: contents: write jobs: - publish: - name: Publish + deploy: runs-on: ubuntu-latest steps: - - name: Checkout the branch - uses: actions/checkout@v2.3.1 - - - name: Set up Python 3.10 - uses: actions/setup-python@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 with: - python-version: "3.10" - - - name: Build the documentation with Sphinx - run: | - pip install -r requirements-doc.txt - sphinx-build -b html docs/source docs/build/html - - - name: Publish the documentation - uses: JamesIves/github-pages-deploy-action@3.6.2 + python-version: 3.x + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v3 with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - BRANCH: gh-pages - FOLDER: docs/build/html - CLEAN: true + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + - run: pip install -r requirements-doc.txt + - name: Build documentation + env: + GOOGLE_ANALYTICS_KEY: ${{ secrets.GOOGLE_ANALYTICS_KEY }} + run: mkdocs gh-deploy --force diff --git a/README.md b/README.md index 2eb722e4..3a8081ed 100644 --- 
a/README.md +++ b/README.md @@ -1,10 +1,14 @@
-Outlines Logo - # Outlines 〰️ -Fast and reliable neural text generation. +Outlines Logo + +[![Pypi][pypi-badge]][pypi] +[![Contributors][contributors-badge]][contributors] +[![Twitter][twitter-badge]][twitter] + +*Generate text that machines understand.* [Install](#installation) • [Guided generation](#guided-generation) • @@ -14,6 +18,7 @@ Fast and reliable neural text generation.
+ **Outlines** 〰 is a library for neural text generation. You can think of it as a more flexible replacement for the `generate` method in the [transformers](https://github.com/huggingface/transformers) library. @@ -45,16 +50,7 @@ via the next-token logits. It can be used with API-based models as well. - [x] 💾 Caching of generations - [x] 🤗 Integration with Hugging Face's `transformers` models -Outlines 〰 has new releases and features coming every week! Make sure to ⭐ star and 👀 watch this repository to stay up to date. - -## Stay tuned for - -- Context-Free Grammar guided generation ([#178](https://github.com/outlines-dev/outlines/pull/178)); -- Prompt-token alignment so you don't have to think about tokenization details ([#201](https://github.com/outlines-dev/outlines/pull/201)) -- An infilling DSL ([#182](https://github.com/outlines-dev/outlines/issues/182)) - -You can follow [@NormalComputing](https://twitter.com/NormalComputing), [@remilouf](https://twitter.com/remilouf) or [@BrandonTWillard](https://twitter.com/BrandonTWillard) for regular updates! - +Outlines 〰 has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][twitter] to stay up to date! ## Installation @@ -425,3 +421,10 @@ Do not hesitate to open a draft PR before your contribution is ready, especially ## License Outlines is open-source and licensed under the [Apache License 2.0](LICENSE). + +[contributors]: https://github.com/outlines-dev/outlines/graphs/contributors +[contributors-badge]: https://img.shields.io/github/contributors/outlines-dev/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 +[twitter]: https://twitter.com/dottxtai +[twitter-badge]: https://img.shields.io/twitter/follow/dottxtai?style=social +[pypi]: https://pypi.org/project/outlines/ +[pypi-badge]: https://img.shields.io/pypi/v/outlines?color=ECEFF4&logo=python&logoColor=white&style=flat-square diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d0c3cbf1..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/api/continuation.md b/docs/api/continuation.md new file mode 100644 index 00000000..e8790a2a --- /dev/null +++ b/docs/api/continuation.md @@ -0,0 +1 @@ +::: outlines.text.generate.continuation diff --git a/docs/api/fsm.md b/docs/api/fsm.md new file mode 100644 index 00000000..7c8f543a --- /dev/null +++ b/docs/api/fsm.md @@ -0,0 +1 @@ +::: outlines.text.fsm diff --git a/docs/api/index.md b/docs/api/index.md new file mode 100644 index 00000000..b0d5c88f --- /dev/null +++ b/docs/api/index.md @@ -0,0 +1 @@ +# API Reference diff --git a/docs/api/json_schema.md b/docs/api/json_schema.md new file mode 100644 index 00000000..78272c9a --- /dev/null +++ b/docs/api/json_schema.md @@ -0,0 +1 @@ +::: outlines.text.json_schema diff --git a/docs/api/models.md b/docs/api/models.md new file mode 100644 index 00000000..124e27fe --- /dev/null +++ b/docs/api/models.md @@ -0,0 +1 @@ +::: outlines.models.transformers diff --git a/docs/api/parsing.md b/docs/api/parsing.md new file mode 100644 index 00000000..75efa846 --- /dev/null +++ b/docs/api/parsing.md @@ -0,0 +1 @@ +::: outlines.text.parsing diff --git a/docs/api/prompts.md b/docs/api/prompts.md new file mode 100644 index 00000000..f9899400 --- /dev/null +++ b/docs/api/prompts.md @@ -0,0 +1 @@ +::: outlines.text.prompts diff --git a/docs/api/regex.md b/docs/api/regex.md new file mode 100644 index 00000000..cdfb9abd --- /dev/null +++ b/docs/api/regex.md @@ -0,0 +1 @@ +::: outlines.text.generate.regex diff --git a/docs/api/sample.md b/docs/api/sample.md new file mode 100644 index 00000000..1f962ea1 --- /dev/null +++ b/docs/api/sample.md @@ -0,0 +1 @@ +::: outlines.text.generate.sample diff --git a/docs/source/_static/logo.png b/docs/assets/images/logo.png similarity index 100% rename from docs/source/_static/logo.png rename to docs/assets/images/logo.png diff --git a/docs/assets/images/normal_computing.jpg b/docs/assets/images/normal_computing.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a71483a68ebea1b02a8b3b167fa0efe57b498f54 GIT binary patch literal 64918 [64918 bytes of base85-encoded binary image data omitted] literal 0 HcmV?d00001 diff --git a/docs/cookbook/index.md b/docs/cookbook/index.md new file mode 100644 index 00000000..741ecbe7 --- /dev/null +++ b/docs/cookbook/index.md @@ -0,0 +1 @@ +# Cookbook diff --git a/docs/examples/dating_profiles.md b/docs/examples/dating_profiles.md new file mode 100644 index 00000000..f6bf3335 --- /dev/null +++ b/docs/examples/dating_profiles.md @@ -0,0 +1,230 @@ +# Generate a dating profile from a description + +In this example we will see how we can use Outlines to generate synthetic data for a dating application. This example was originally contributed by [Vibhor Kumar](https://github.com/veezbo).
+ +```python +from dataclasses import dataclass +from enum import Enum + +import torch +import transformers +from pydantic import BaseModel, conlist, constr + +import outlines.models as models +import outlines.text as text +``` + +## Defining the profile with Pydantic + +Here a dating profile will consist of a biography, a job, a list of interests and two question-answer pairs. The questions are written in advance by the team, and the users are asked to provide an answer: + +```python +class QuestionChoice(str, Enum): + A = "The key to my heart is" + B = "The first item on my bucket list is" + C = "Perks of dating me" + D = "Message me if you also love" + E = "People would describe me as" + F = "I can beat you in a game of" + +@dataclass +class QuestionAnswer: + question: QuestionChoice + answer: str +``` + +Users need to provide a short biography, with a minimum of 10 and a maximum of 300 characters. The application also limits job descriptions to 50 characters. In addition to the question-answer pairs, the user is required to provide a list of between 1 and 5 interests: + +```python +class DatingProfile(BaseModel): + bio: constr(min_length=10, max_length=300) + job: constr(max_length=50) + interests: conlist(str, min_length=1, max_length=5) # type: ignore + qna1: QuestionAnswer + qna2: QuestionAnswer +``` + +## Prompt template and examples + +We will ask the model to generate profiles from a high-level description: + +```python +@dataclass +class Example: + description: str + profile: DatingProfile +``` + +We will use Outlines' prompt templating abilities to generate the prompt for us. This helps clearly separate the general prompting logic from what is specific to an example. + +```python + +@text.prompt +def dating_profile_prompt(description: str, examples: list[Example]): + """ + You are a world-renowned matchmaker who understands the modern dating + market. Your job is to generate dating app profiles for male clients + interested in women based on a provided description. The profiles should be + authentic, show off their strengths, and maximize their likelihood of + getting matches on dating apps. Here are some examples of past clients that + you have successfully created profiles for: + + {% for example in examples %} + Description: + {{ example.description }} + Profile: + {{ example.profile }} + {% endfor %} + + Here is the new client who you need to create a profile for: + Description: {{ description }} + Profile: + """ +``` + +We will provide the model with several few-shot examples: + +```python +samples: list[Example] = [ + Example( + description="I'm an author and former professional soccer player living in Seattle who publishes popular fiction books. A typical day for me starts by hanging out with my cat, drinking a coffee, and reading as much as I can in a few hours. Then, I'll prepare a quick smoothie before starting to write for a few hours, take a break with soccer or running a few miles, and finally meet friends for dinner at a new, hip restaurant in the evening. Sometimes we go axe-throwing afterwards, or play poker, or watch a comedy show, or visit a dive bar. On my vacations, I travel extensively to countries in South America, Europe, and Asia, with the goal of visiting them all!", + profile=DatingProfile( + bio="Adventurer, dreamer, author, and soccer enthusiast. Life’s too short to waste time so I make the most of each day by exploring new places and playing with my friends on the pitch.
What’s your favorite way to get out and have fun?", + job="Famous Soccer Player -> Famous Author", + interests=["Soccer", "Travel", "Friends", "Books", "Fluffy Animals"], + qna1=QuestionAnswer( + question=QuestionChoice.B, answer="swim in all seven oceans!" + ), + qna2=QuestionAnswer( + question=QuestionChoice.E, + answer="fun-loving, adventurous, and a little bit crazy", + ), + ), + ), + Example( + description="I run my company and build houses for a living. I'm a big fan of the outdoors and love to go hiking, camping, and fishing. I don't like video games, but do like to watch movies. My love language is home-cooked food, and I'm looking for someone who isn't afraid to get their hands dirty.", + profile=DatingProfile( + bio="If you're looking for a Montana man who loves to get outdoors and hunt, and who's in-tune with his masculinity then I'm your guy!", + job="House Construction Manager / Entrepreneur", + interests=["Hunting", "Hiking", "The outdoors", "Home-cooked food"], + qna1=QuestionAnswer(question=QuestionChoice.A, answer="food made at home"), + qna2=QuestionAnswer( + question=QuestionChoice.C, + answer="having a man in your life who can fix anything", + ), + ), + ), + Example( + description="I run my own Youtube channel with 10M subscribers. I love working with kids, and my audience skews pretty young too. In my free time, I play Fortnite and Roblox. I'm looking for someone who is also a gamer and likes to have fun. I'm learning Japanese in my free time as well as how to cook.", + profile=DatingProfile( + bio="Easy on the eyes (find me on Youtube!) and great with kids. What more do you need?", + job="Youtuber 10M+ subscribers", + interests=["Kids", "Gaming", "Japanese"], + qna1=QuestionAnswer(question=QuestionChoice.D, answer="anime and gaming!"), + qna2=QuestionAnswer(question=QuestionChoice.F, answer="Fortnite, gg ez"), + ), + ), +] +``` + +## Load the model + +We will use Mosaic's MPT-7B model (requires 13GB of GPU memory) which can fit on a single GPU with a reasonable context window. We initialize it with Outlines: + +```python +config = transformers.AutoConfig.from_pretrained( + "mosaicml/mpt-7b-8k-instruct", trust_remote_code=True +) +config.init_device = "meta" +model = models.transformers( + model_name="mosaicml/mpt-7b-8k-instruct", + device="cuda", + model_kwargs={ + "config": config, + "trust_remote_code": True, + "torch_dtype": torch.bfloat16, + "device_map": {"": 0}, + }, +) +``` + +## JSON-guided generation of profiles + +We will now generate a dating profile from a textual description of oneself: + +``` python +new_description = """I'm a laid-back lawyer who spends a lot of his free-time +gaming. I work in a corporate office, but ended up here after the start-up I +cofounded got acquired, so still play ping pong with my cool coworkers every +day. I have a bar at home where I make cocktails, which is great for +entertaining friends. I secretly like to wear suits and get a new one tailored +every few months. I also like weddings because I get to wear those suits, and +it's a good excuse for a date. I watch the latest series because I'm paying, +with my hard-earned money, for every streaming service.""" + +prompt = dating_profile_prompt(new_description, samples) +profile = text.generate.json(model, DatingProfile)(prompt) +parsed_profile = DatingProfile.model_validate_json(profile) +``` + +## Results + +Here are a couple of results: + +``` +{ + "bio": """I'm an ambitious lawyer with a casual and fashionable style. 
## Results

Here are a couple of results:

```
{
    "bio": """I'm an ambitious lawyer with a casual and fashionable style. I love
    games and sports, but my true passion is preparing refreshing cocktails at
    home and dressing to the nines at weddings. I'm currently looking for a woman
    to show a good time to and get a kiss on the opulent suit I just had made.
    Send resume to this inbox.""",
    "job": "Lawyer",
    "interests":
    [
        "Stylish guys",
        "Gaming",
        "Ping pong",
        "Cocktails",
        "Weddings"
    ],
    "qna1":
    {
        "question": "The first item on my bucket list is",
        "answer": "be married and have a family."
    },
    "qna2":
    {
        "question": "People would describe me as",
        "answer": "charming, stylish, and funny."
    }
}
```

```
{
    "bio": """I’m a sexy lawyer with time on my hands. I love to game and
    play ping pong, but the real reason you should swipe to the right
    is because I look great in a suit. Who doesn’t love a man in a
    suit? Just saying. Send me a message if you think it’s time to take
    your dating life to the next level.""",
    "job": "Lawyer",
    "interests":
    [
        "Gaming",
        "Ping Pong",
        "Tailored Suits",
        "Weddings",
        "Streaming Services"
    ],
    "qna1":
    {
        "question": "The first item on my bucket list is",
        "answer": "simulate space but stay alive for as long as possible"
    },
    "qna2":
    {
        "question": "People would describe me as",
        "answer": "easy-going, a little nerdy but with a mature essence"
    }
}
```

diff --git a/docs/examples/index.md b/docs/examples/index.md
new file mode 100644
index 00000000..cf792f75
--- /dev/null
+++ b/docs/examples/index.md
@@ -0,0 +1,3 @@
# Examples

- [Dating Profile](dating_profiles.md): Build dating profiles from descriptions using prompt templating and JSON-guided generation.

diff --git a/docs/get_started.md b/docs/get_started.md
new file mode 100644
index 00000000..f2447ea5
--- /dev/null
+++ b/docs/get_started.md
@@ -0,0 +1,102 @@
---
title: Get Started
---
![Outlines logo](assets/images/logo.png){ width="300" }
#

## :sparkles: Features

- :material-keyboard: Prompting utilities
- :material-regex: Regex-guided generation
- :material-code-json: JSON-guided generation
- :material-dice-multiple-outline: Multiple sequence sampling methods
- :material-open-source-initiative: Integration with several open source libraries

## :floppy_disk: Install

```bash
pip install outlines
```

??? info "Using OpenAI and Transformers"

    Outlines :wavy_dash: does not install the `openai` or `transformers` libraries by default. You will have to install these libraries manually.

## :eyes: Sneak Peek

=== "Code"

    ```python
    from enum import Enum
    from pydantic import BaseModel, constr

    import outlines.models as models
    import outlines.text.generate as generate


    class Weapon(str, Enum):
        sword = "sword"
        axe = "axe"
        mace = "mace"
        spear = "spear"
        bow = "bow"
        crossbow = "crossbow"


    class Armor(str, Enum):
        leather = "leather"
        chainmail = "chainmail"
        plate = "plate"


    class Character(BaseModel):
        name: constr(max_length=20)
        age: int
        armor: Armor
        weapon: Weapon
        strength: int


    model = models.transformers("gpt2")
    generator = generate.json(model, Character)
    sequence = generator("Create a character description for a role playing game in JSON")

    print(sequence)
    ```

=== "Output"

    ```json
    {
        "name": "Anonymous Tokens",
        "age": 7,
        "armor": "plate",
        "weapon": "mace",
        "strength": 4171
    }
    ```
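Regex-guided generation follows the same pattern as the JSON example above. A minimal sketch reusing the same `model`; the prompt and the pattern here are illustrative, not taken from the library's documentation:

```python
# Constrain the completion to an IP-address-shaped string.
ip_generator = generate.regex(
    model, r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)"
)
ip = ip_generator("What is one of the IP addresses of Google's DNS servers? ")
```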
## Acknowledgements

![Normal Computing logo](assets/images/normal_computing.jpg){ width="150" }
+ +Outlines was originally developed at [@NormalComputing](https://twitter.com/NormalComputing) by [@remilouf](https://twitter.com/remilouf) and [@BrandonTWillard](https://twitter.com/BrandonTWillard). diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..6fb100a4 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,4 @@ +--- +title: Outlines +template: home.html +--- diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index 747ffb7b..00000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/overrides/home.html b/docs/overrides/home.html new file mode 100644 index 00000000..4e32efee --- /dev/null +++ b/docs/overrides/home.html @@ -0,0 +1,202 @@ +{% extends "main.html" %} +{% block tabs %} +{{ super() }} + + + +
<!-- Hero section (markup lost to extraction): an "Outlines" heading, the tagline
     "Use Large Language Models and symbolic methods to generate text machines
     understand.", and "Get started" / "Go to GitHub" buttons. -->
{% endblock %}
{% block content %}{% endblock %}
{% block footer %}{% endblock %}

diff --git a/docs/overrides/index.html b/docs/overrides/index.html
new file mode 100644
index 00000000..74a4987f
--- /dev/null
+++ b/docs/overrides/index.html
@@ -0,0 +1,11 @@
{% extends "base.html" %}

{% block announce %}
  For updates follow @remilouf on
  Twitter
{% endblock %}

diff --git a/docs/overrides/main.html b/docs/overrides/main.html
new file mode 100644
index 00000000..b4183d71
--- /dev/null
+++ b/docs/overrides/main.html
@@ -0,0 +1,22 @@
{% extends "base.html" %}

{% block announce %}
  For updates follow @dottxtai on
  Twitter
  and
  {% include ".icons/fontawesome/solid/star.svg" %}
  the repo on
  {% include ".icons/fontawesome/brands/github.svg" %}
  Github
{% endblock %}

diff --git a/docs/reference/choices.md b/docs/reference/choices.md
new file mode 100644
index 00000000..e7a66e38
--- /dev/null
+++ b/docs/reference/choices.md
@@ -0,0 +1,14 @@
# Multiple choices

In some cases we know the output is to be chosen between different options. We can restrict the completion’s output to these choices using the `is_in` keyword argument:

```python
import outlines.models as models

complete = models.text_completion.openai("text-davinci-002")
answer = complete(
    "Pick the odd word out: skirt, dress, pen, jacket",
    is_in=["skirt", "dress", "pen", "jacket"]
)
```

diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 00000000..24a6303d
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,15 @@
# Reference

## Constrained generation

While LLM capabilities are increasingly impressive, we can make their output more reliable by steering the generation. Outlines thus offers mechanisms to specify high-level constraints on text completions by generative language models.

### Stopping sequence

By default, language models stop generating tokens after an EOS token has been generated, or after a set maximum number of tokens. Their output can be verbose, and for practical purposes it is often necessary to stop the generation after a given sequence has been found instead. You can use the `stop_at` keyword argument when calling the model with a prompt:

```python
import outlines.models as models

complete = models.text_completion.openai("text-davinci-002")
expert = complete("Name an expert in quantum gravity.", stop_at=["\n", "."])
```

diff --git a/docs/reference/json.md b/docs/reference/json.md
new file mode 100644
index 00000000..02ed5a24
--- /dev/null
+++ b/docs/reference/json.md
@@ -0,0 +1 @@
# JSON
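JSON-guided generation constrains the model so that its output parses into a given Pydantic model. A minimal sketch of the interface, mirroring the Get Started example; the `Point` model and the prompt below are illustrative placeholders:

```python
from pydantic import BaseModel

import outlines.models as models
import outlines.text.generate as generate


class Point(BaseModel):
    x: int
    y: int


model = models.transformers("gpt2")
generator = generate.json(model, Point)
sequence = generator("Give me a point in JSON format: ")
# e.g. '{ "x": 1, "y": 2 }'
```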
diff --git a/docs/source/reference/prompting.rst b/docs/reference/prompting.md
similarity index 65%
rename from docs/source/reference/prompting.rst
rename to docs/reference/prompting.md
index 73fbca88..2746bfe6 100644
--- a/docs/source/reference/prompting.rst
+++ b/docs/reference/prompting.md
@@ -1,71 +1,76 @@
-Prompting
-=========
+# Prompting techniques

-Outlines provides a powerful domain-specific language to write and manage prompts, via what we call *prompt functions*. Prompt functions are Python functions that contain a template for the prompt in their docstring, and their arguments correspond to the variables used in the prompt. When called, a prompt function returns the template rendered with the values of the arguments:
+## Prompt templating

-.. code::
+Outlines provides a powerful domain-specific language to write and manage prompts, via what we call *prompt functions*. Prompt functions are Python functions that contain a template for the prompt in their docstring, and their arguments correspond to the variables used in the prompt. When called, a prompt function returns the template rendered with the values of the arguments:

-    import outlines.text as text
+```python
+import outlines.text as text

-    @text.prompt
-    def greetings(name, question):
-        """Hello, {{ name }}!
-        {{ question }}
-        """
+@text.prompt
+def greetings(name, question):
+    """Hello, {{ name }}!
+    {{ question }}
+    """

-    prompt = greetings("user", "How are you?")
-    # Hello, user!
-    # How are you?
+prompt = greetings("user", "How are you?")
+# Hello, user!
+# How are you?
+```

+Outlines uses the [Jinja templating engine](https://jinja.palletsprojects.com/en/3.1.x/) to render prompts, which makes it easy to compose complex prompts. No need for extra abstractions to write a prompt with few-shot examples, Jinja can handle that:
-Outlines uses the `Jinja templating engine `_ to render prompts, which allows to easily compose complex prompts. No need for extra abstractions to write a prompt with few-shot examples, Jinja can handle that:

-.. code::
+=== "Code"

-    import outlines.text as text
+    ```python
+    import outlines.text as text

-    @text.prompt
-    def few_shots(instructions, examples, question):
-        """"{{ instructions }}
+    @text.prompt
+    def few_shots(instructions, examples, question):
+        """{{ instructions }}

-        {% for examples in examples %}
-        Q: {{ example.question }}
-        A: {{ example.answer }}
-        {% endfor %}
-        Q: {{ question }}
-        """
+        {% for example in examples %}
+        Q: {{ example.question }}
+        A: {{ example.answer }}
+        {% endfor %}
+        Q: {{ question }}
+        """

    prompt = few_shots(instructions, examples, question)
+    ```

+=== "Output"

    Something

+Please refer to the [Jinja documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/) for more information about the syntax of the templating language. The Jinja syntax is powerful, and we recommend you take some time to read their documentation if building your prompts requires complex logic involving, for instance, loops and conditionals.
-Please refer to the `Jinja documentation `_ for more information about the syntax of the templating language. The Jinja syntax is powerful, and we recommend you take some time to read their documentation if building your prompts requires complex logic involving for instance loops and conditionals.
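Conditionals work the same way as loops. A small illustrative sketch, not taken from the library's documentation, assuming the same `@text.prompt` decorator:

```python
import outlines.text as text

@text.prompt
def qa_prompt(question, context):
    """{% if context %}Context: {{ context }}
    {% endif %}Q: {{ question }}
    A:
    """

# With a falsy `context` the conditional line is skipped entirely.
prompt = qa_prompt("What is the capital of France?", context="")
# Q: What is the capital of France?
# A:
```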
-Calling tools
-~~~~~~~~~~~~~
+## Tools

Several projects (e.g. `Toolformer `_, `ViperGPT `_, `AutoGPT `_, etc.) have shown that we can "teach" language models to use external functions by describing what these functions do in the prompt. In these projects the same information is often repeated twice: the function implementation, name, docstring, or arguments are copy-pasted in the prompt. This is cumbersome and error-prone; you can directly pull this information from within an Outlines prompt function:

-.. code::
+=== "Code"

-    import outlines.text as text
+    ```python
+    import outlines.text as text

-    def my_tool(arg1: str, arg2: int):
-        """Tool description.

-        The rest of the docstring
-        """
-        pass
+    def my_tool(arg1: str, arg2: int):
+        """Tool description.

+        The rest of the docstring
+        """
+        pass

-    @text.prompt
-    def tool_prompt(question, tool):
-        """{{ question }}
+    @text.prompt
+    def tool_prompt(question, tool):
+        """{{ question }}

-        COMMANDS
-        1. {{ tool | name }}: {{ tool | description }}, args: {{ tool | args }}
+        COMMANDS
+        1. {{ tool | name }}: {{ tool | description }}, args: {{ tool | args }}

-        {{ tool | source }}
-        """
+        {{ tool | source }}
+        """

    tool_prompt("Can you do something?", my_tool)
    # Can you do something?
@@ -79,15 +84,19 @@ Several projects (e.g.`Toolformer `_, `ViperGP
    # The rest of the docstring
    # """
    # pass
+    ```

+=== "Output"

    Something

-Specify a response format
-~~~~~~~~~~~~~~~~~~~~~~~~~
+## JSON response format

To build reliable chains with language models we often need to instruct them about the format in which we would like them to return their response. Again, the information is often repeated twice: once when creating the parsing function and once when writing the desired schema in the prompt. You can directly pull the JSON schema of a Pydantic model, or pretty-print a dictionary, from within an Outlines prompt function:

-.. code::
+=== "Code"

+    ```python
    from pydantic import BaseModel, Field

    import outlines.text as text
@@ -105,10 +114,11 @@ To build reliable chains with language models we often need to instruct them the
    #    "field1": "an int",
    #    "field2": ""
    # }
+    ```

+=== "Output"
-
-.. code::
-
+    ```python
    response = {
        "field1": "",
        "field2": "a string"
    }
@@ -119,3 +129,4 @@ To build reliable chains with language models we often need to instruct them the
    #    "field1": "",
    #    "field2": "a string"
    # }
+    ```

diff --git a/docs/reference/regex.md b/docs/reference/regex.md
new file mode 100644
index 00000000..1bdb8afe
--- /dev/null
+++ b/docs/reference/regex.md
@@ -0,0 +1 @@
# Regular expressions

diff --git a/docs/reference/text_generation.md b/docs/reference/text_generation.md
new file mode 100644
index 00000000..a4075eae
--- /dev/null
+++ b/docs/reference/text_generation.md
@@ -0,0 +1 @@
# Generate text

diff --git a/docs/reference/types.md b/docs/reference/types.md
new file mode 100644
index 00000000..c6da9a00
--- /dev/null
+++ b/docs/reference/types.md
@@ -0,0 +1,13 @@
# Type constraints

We can ask completions to be restricted to valid integers or floating-point numbers using the `type` keyword argument, with the "int" or "float" value respectively:

```python
import outlines.models as models

complete = models.text_completion.openai("text-davinci-002")
answer = complete(
    "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?",
    type="int"
)
```

diff --git a/docs/source/conf.py b/docs/source/conf.py
deleted file mode 100644
index 51175d19..00000000
--- a/docs/source/conf.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "Outlines" -copyright = "2023, Normal Computing, Outlines Developers" -author = "Remi Louf" -release = "0.1" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = ["sphinx.builders.linkcheck", "sphinx_design"] - -templates_path = ["_templates"] - -source_suffix = {".rst": "restructuredtext"} - -pygments_style = "nord-darker" - - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -html_theme = "sphinx_book_theme" -html_static_path = ["_static"] -html_title = "" -html_logo = "_static/logo.png" -html_options = { - "icon_links": [ - { - "name": "GitHub", - "url": "https://github.com/outlines-dev/outlines", # required - "icon": "fa-brands fa-square-github", - "type": "fontawesome", - }, - ] -} diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 6b995942..00000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,111 +0,0 @@ -.. Outlines documentation master file, created by - sphinx-quickstart on Thu May 4 11:16:27 2023. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - - -👋 Welcome to Outlines -====================== - -**Outlines** is a Python library to write reliable programs for interactions with generative models: language models, diffusers, multimodal models, classifiers, etc. It provides a Domain Specific Language (DSL) to make prompting easier, constrained text generation and is natively concurrent. It integrates well with the rest of the Python ecosystem: tools, vector stores, etc. - -*Outlines aims to be the library frameworks are made with. It is more like NumPy than LangChain.* - -.. grid:: 2 - - .. grid-item-card:: 💻 Install Outlines - :link: https://pypi.org/project/outlines - :text-align: center - :width: 75% - :margin: 4 4 auto auto - - .. code:: - - pip install outlines - - .. grid-item-card:: 🚀 Normal Computing - :link: https://normalcomputing.ai - :text-align: center - :width: 75% - :margin: 4 4 auto auto - - Outlines is built with ❤️ by `Normal Computing `_ - - -👀 Sneak Peek -------------- - -A toy implementation of an agent (similar to BabyAGI or AutoGPT) with Outlines: - -.. code:: python - - import outlines.text as text - import outlines.models as models - - from my_tools import google_search, execute_code - from my_response_models import command_response - - - @text.prompt - def agent_prompt(objective, goals, tools, response_model): - """You are an AI with the following objective: {{ objective }} - - Keep the following goals in mind: - {% for goal in goals %} - {{ loop.counter }}. 
{{ goal }} - {% endfor %} - - COMMANDS - {% for tool in tools %} - - {{ tool | name }}, {{ tool | description }}, {{ tool | signature }} - {% endfor %} - - OUTPUT FORMAT - {{ response_model | schema }} - """ - - - def agent(objective, goals, tools): - complete = models.text_completion.hf("sshleifer/tiny-gpt2") - prompt = agent_prompt(objective, goals, tools , command_response) - answer = complete(prompt) - command = command_response(answer) - - return command - - - agent( - "Write a library called Outlines", - ["Easy prompting", "Multimodal, multimodel", "Constrained text generation"], - [google_search, execute_code], - ) - -📜 Features ------------ - Simple and powerful prompting primitives based on the Jinja templating engine. - Integration with OpenAI and Hugging Face models - -- A powerful domain-specific language to write and render prompts; -- Interleave completions with loops, conditionals, and custom Python functions; -- OpenAI integration: language models, embeddings and Dall-E; -- Hugging Face integration: ``transformers`` and ``diffusers``; -- Caching; -- Sampling multiple sequences; -- Controlled generation, including multiple choice, type constraints and dynamic stopping. - -.. toctree:: - :maxdepth: 1 - :hidden: - - installation - overview - -.. toctree:: - :maxdepth: 1 - :caption: Outlines - :hidden: - - reference/prompting - reference/controlled_generation - reference/multimodel - reference/batching diff --git a/docs/source/installation.rst b/docs/source/installation.rst deleted file mode 100644 index 6d18cce1..00000000 --- a/docs/source/installation.rst +++ /dev/null @@ -1,53 +0,0 @@ -✨ Installation -=============== - -The latest version of outlines is available on PyPi: - -.. code:: bash - - pip install outlines - -Outlines comes with a minimal set of dependencies that are necessary to run the library's code. Integrations will require you to install dependencies manually. - - -OpenAI ------- - -To use OpenAI models you first have to run: - -.. code:: bash - - pip install openai tiktoken - -.. important:: - - You also need to set your API credentials by defining the ``OPENAI_API_KEY`` environment variable. - - -Hugging Face ------------ - -To use the integrations with Hugging Face's `transformers `_ and `diffusers `_ libraries you first need to run: - -.. code:: - - pip install torch transformers diffusers - - -.. attention:: - - Hugging Face models are run locally. Outlines uses the `PyTorch `_ versions of the models. Please refer to the `PyTorch documentation `_ for questions related to **GPU support**. - -The integration is fairly basic for now, and if you have specific performance needs please `open an issue `_ - -Other integrations ------------------- - -Outlines is designed to be fully compatible with other libraries, which you will need to install separately. You can use any library with Outlines but , whenever possible, we recommend to use libraries with async support for better performance. Examples of possible integrations are: - -- `Llama index `_ for vector stores and document querying; -- `discord.py `_ for Discord integration; -- `Slack SDK `_ for Slack integration; -- `aiofiles `_ for asynchronous file operations; -- `httpx `_ or `aiohttp `_ for asynchronous HTTP requests; -- `asyncpg `_ and `aiosqlite `_ for async PostgreSQL and SQLite interfaces. 
diff --git a/docs/source/overview.rst b/docs/source/overview.rst deleted file mode 100644 index d2d0416b..00000000 --- a/docs/source/overview.rst +++ /dev/null @@ -1,28 +0,0 @@ -🌎 Hello world -============== - -Here is a simple Outlines program that highlights some of its key features: - -.. code:: - - import outlines.text as text - import outlines.models as models - - - @text.prompt - def where_from(expression): - "What's the origin of '{{ expression }}'?" - - - complete = models.text_completion.openai("text-davinci-003") - - hello_world = where_from("Hello world") - foobar = where_from("Foo Bar") - answer = complete([hello_world, foobar], samples=3, stop_at=["."]) - - -- **Prompt management**. You can use functions with the ``@outlines.text.prompt`` decorator. "Prompt functions" use the `Jinja templating language `_ to render the prompt written in the docstring. We also added a few filters to help with common worflows, like building agents. Of course, for simple prompts, you can also use Python strings directly. -- **Generative model integration**. You can use text completion models from OpenAI and Hugging Face, but models are not limited to text. -- **Controlled generation**. The ``stop_at`` keyword arguments allows to define when the generation should be stopped. Outlines includes more options to control the generation; these happen on a token basis, saving time and costs. -- **Sampling**. Outlines exclusively generates sequences using sampling. You can generate many samples with one call. -- **Batching**. Models can take a list of prompt as input and generate completions in parallel. diff --git a/docs/source/reference/batching.rst b/docs/source/reference/batching.rst deleted file mode 100644 index beb330f8..00000000 --- a/docs/source/reference/batching.rst +++ /dev/null @@ -1,22 +0,0 @@ -Sampling -======== - -Outlines is sampling-first, and is built to generate several samples from the same prompt: - -.. code:: - - import outlines.models as models - - sample = models.text_generation.openai("text-davinci-003") - answers = sample( - "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?", - samples=10 - ) - -This will enable probabilistic applications down the line, stay tuned for more updates. In the meantime you can take a look at the `self-consistency example `_. - - -Batching --------- - -Outlines will soon allow you to vectorize model calls. diff --git a/docs/source/reference/controlled_generation.rst b/docs/source/reference/controlled_generation.rst deleted file mode 100644 index 703b9069..00000000 --- a/docs/source/reference/controlled_generation.rst +++ /dev/null @@ -1,76 +0,0 @@ -Controlled Generation -===================== - -While LLM capabilities are increasingly impressive, we can make their output more reliable by *steering* the generation. Outlines thus offers mechanisms to specify high level constraints on text completions by generative language models. - - -Stopping sequence ------------------ - -By default, language models stop generating tokens after and `` token was generated, or after a set maximum number of tokens. Their output can be verbose, and for practical purposes it is often necessary to stop the generation after a given sequence has been found instead. You can use the `stop_at` keyword argument when calling the model with a prompt: - -.. code:: - - import outlines.models as models - - complete = models.text_completion.openai("text-davinci-002") - expert = complete("Name an expert in quantum gravity.", stop_at=["\n", "."]) - - -.. 
warning:: - - The OpenAI API does not allow more than 4 stopping sequences. - - -Choice between different options --------------------------------- - -In some cases we know the output is to be chosen between different options. We can restrict the completion's output to these choices using the `is_in` keyword argument: - - -.. code:: - - - import outlines.models as models - - complete = models.text_completion.openai("text-davinci-002") - answer = complete( - "Pick the odd word out: skirt, dress, pen, jacket", - is_in=["skirt", "dress", "pen", "jacket"] - ) - - -Type constraints ----------------- - -We can ask completions to be restricted to `int`s or `float`s using the `type` keyword argument, respectively with the "int" or "float" value: - - -.. code:: - - - import outlines.models as models - - complete = models.text_completion.openai("text-davinci-002") - answer = complete( - "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?", - type="int" - ) - - -.. warning:: - - This feature is very limited for OpenAI models, due to restrictions on OpenAI's API. - - -The future of constrained generation ------------------------------------- - -We believe constrained hold a lot of promises when it comes to build reliable systems that use language models. In future releases of Outlines, you will be able to: - -- Exclude sequences with a `not_in` keyword agument; -- Constrain the output to be valid JSON; -- Constrain the output to be a valid array; -- Constrain the output to be valid Python code; - -We also believe that `alternative steering methods `_ can be useful and plan on expanding Outline's prompt DSL and generation methods in this direction. diff --git a/docs/source/reference/multimodel.rst b/docs/source/reference/multimodel.rst deleted file mode 100644 index c44ef53c..00000000 --- a/docs/source/reference/multimodel.rst +++ /dev/null @@ -1,69 +0,0 @@ -Multimodal, Multimodels -======================= - -Outlines interfaces with multiple model providers, so models can be easily swapped. It is built so that different models can be chained together, with different modalities. - -OpenAI ------- - -Outlines connects to OpenAI's text completion, and chat completion. Note however that Outlines does not provide a chat interface, and uses the chat completion API for text completion. Both are accessible via the `models.text_completion.openai` module, by passing the name of the model. You can currently specify `max_tokens` and `temperature` when initializing the model: - -.. code:: - - import outlines.models as models - - complete = models.text_completion.openai("gpt4", max_tokens=128, temperature=0.7) - - -It is also possible to use DALL-E to generate images: - -.. code:: - - import outlines.models as models - - generate = models.image_generation.openai("dall-e") - - -Hugging Face ------------ - -Outlines can call models from Hugging Face's `transformers` and `diffusers` libraries. The models are then run locally. - -.. code:: - - import outlines.models as models - - complete = models.text_completion.hf("sshleifer/tiny-gpt2") - generate = models.image_generation.hf("runwayml/stable-diffusion-v1-5") - - -.. note:: - - Outlines call the PyTorch version of models by default. The generation process also runs with defaults, please `open an issue `_ if you have more specific needs. - - -Bring Your Own Model --------------------- - -Outlines models are currently simple functions that return a text or an image given a prompt, you can thus easily use any model. 
We will soon provide a more comprehensive integration that handles controlled generation for any model.

-If you think the model you are using could be useful to others, `open an issue `_ 😊
-
-Coming soon
------------
-
-We plan on integrating more model providers, for instance:
-
-- Anthropic
-- Llamacpp
-- GPT4All
-
-We currently favor the integration of *Open Source models* since they give more freedom for guided generation. We will also extend the range of models to allow building more complex chains, including:
-
-- Image captioning;
-- Classification;
-- Image segmentation;
-- Speech-to-text;
-- Image question answering;
-- etc.

diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
new file mode 100644
index 00000000..7e98a8cc
--- /dev/null
+++ b/docs/stylesheets/extra.css
@@ -0,0 +1,12 @@
:root > * {
  --md-code-bg-color: #2E3440;
  --md-default-bg-color: black;
}

.language-python.highlight > * {
  border-radius: 1rem;
}

code > * {
  border-radius: 1rem;
}

diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 00000000..37fa7ec3
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,114 @@
# Site information
site_name: Outlines
site_author: The Outlines developers
site_description: >-
  Generate text that machines understand using Large Language Models and
  symbolic methods


# Repository
repo_name: outlines-dev/outlines
repo_url: https://github.com/outlines-dev/outlines

# Copyright
copyright: Copyright © 2023- The Outlines Developers

# Configuration
theme:
  name: material
  custom_dir: docs/overrides
  palette:
    - scheme: slate
      primary: black
  logo: assets/images/logo.png
  favicon: assets/images/logo.png
  icon:
    repo: fontawesome/brands/github
  features:
    - content.code.copy
    - navigation.sticky
    - navigation.tabs
    - header.autohide
    - announce.dismiss
  font:
    text: Roboto
    code: Source Code Pro

# Additional configuration
extra:
  social:
    - icon: fontawesome/brands/github
      link: https://github.com/outlines-dev
    - icon: fontawesome/brands/twitter
      link: https://twitter.com/remilouf
  generator: false
  analytics:
    provider: google
    property: !ENV GOOGLE_ANALYTICS_KEY

# Extensions
markdown_extensions:
  - admonition
  - def_list
  - attr_list
  - md_in_html
  - pymdownx.highlight:
      anchor_linenums: true
      line_spans: __span
      pygments_lang_class: true
      noclasses: True
      pygments_style: nord
  - pymdownx.superfences
  - pymdownx.tabbed:
      alternate_style: true
  - pymdownx.inlinehilite
  - pymdownx.details
  - pymdownx.emoji:
      emoji_index: !!python/name:material.extensions.emoji.twemoji
      emoji_generator: !!python/name:material.extensions.emoji.to_svg


extra_css:
  - stylesheets/extra.css

plugins:
  - mkdocstrings:
      default_handler: python
      handlers:
        python:
          options:
            show_submodules: true
  - search
  - section-index
  - social:
      cards_layout_options:
        color: "#173a58"

nav:
  - Home: index.md
  - Get Started:
    - get_started.md
  - Cookbook:
    - cookbook/index.md
  - Examples:
    - examples/index.md
    - Dating Profile: examples/dating_profiles.md
  - Reference:
    - reference/index.md
    - Prompting: reference/prompting.md
    - Generate text: reference/text_generation.md
    - Guided generation:
      - reference/choices.md
      - reference/types.md
      - reference/regex.md
      - reference/json.md
  - API:
    - api/index.md
    - api/models.md
    - api/prompts.md
    - api/json_schema.md
    - api/fsm.md
    - api/parsing.md
    - api/regex.md
    -
api/sample.md + - api/continuation.md diff --git a/requirements-doc.txt b/requirements-doc.txt index ae0fede4..ec55b01f 100644 --- a/requirements-doc.txt +++ b/requirements-doc.txt @@ -1,3 +1,5 @@ -sphinx -sphinx-book-theme -sphinx-design +mkdocs +mkdocs-material +mkdocs-material[imaging] +mkdocs-section-index +mkdocstrings[python] From 1745ae4589ca973db896cb41fab74d50aaf44563 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 6 Nov 2023 18:34:31 +0100 Subject: [PATCH 267/734] Improve support for JSON schema We use the `referencing` library to dereference fields in the JSON Schema, which simplifies the codebase a lot and prevents reference errors. We also support combination of `minLength` and `maxLength` as well as the `pattern` keyword. --- environment.yml | 1 + outlines/text/json_schema.py | 337 ++++++--------------- pyproject.toml | 2 + tests/text/test_json_schema.py | 527 +++++++++------------------------ 4 files changed, 221 insertions(+), 646 deletions(-) diff --git a/environment.yml b/environment.yml index b0b3b62c..14629af3 100644 --- a/environment.yml +++ b/environment.yml @@ -16,6 +16,7 @@ dependencies: - scipy - pytest - pre-commit + - referencing - transformers - pip - pip: diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index b9d6a84c..fe157807 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -1,7 +1,9 @@ -import itertools import json import re -from typing import Callable, Dict + +from referencing import Registry, Resource +from referencing._core import Resolver +from referencing.jsonschema import DRAFT202012 STRING_INNER = r'(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)' STRING = f'"{STRING_INNER}*"' @@ -33,276 +35,109 @@ def build_regex_from_schema(schema: str): A string that contains a regular expression that matches any JSON object that follows the schema. - """ - schedule = build_schedule_from_schema(schema) - - regex = "" - for step in schedule: - regex += match_step_to_regex(step) - - return regex - - -def _ref_resolver(schema: Dict) -> Callable[[str], Dict]: - cache: Dict[str, Dict] = dict() - - if "$id" in schema: - cache[schema["$id"]] = schema - - if "$defs" in schema: - for definition, annotation in schema["$defs"].items(): - cache[f"#/$defs/{definition}"] = annotation - - if "$id" in annotation: - cache[annotation["$id"]] = annotation - - def resolver(reference: str) -> Dict: - """Resolve a $ref reference in the context of the top-level schema.""" - - if reference in cache: - return cache[reference] - - path = reference.split("/") - - # Navigate through the top-level schema based on the path - subschema = schema - - if path[0] != "#": - raise ValueError(f"Unable to resolve reference: {reference}") - - for step in path[1:]: - if step in subschema: - subschema = subschema[step] - else: - raise ValueError(f"Unable to resolve reference: {reference}") - - cache[reference] = subschema - return subschema - - return resolver - - -def build_schedule_from_schema(schema: str): - """Turn a JSON schema into a regex that matches any JSON object that follows - this schema. - - JSON Schema is a declarative language that allows to annotate JSON documents - with types and descriptions. These schemas can be generated from any Python - datastructure that has type annotation: namedtuples, dataclasses, Pydantic - models. And by ensuring that the generation respects the schema we ensure - that the output can be parsed into these objects. 
- This function parses the provided schema and builds a generation schedule which - mixes deterministic generation (fixed strings), and sampling with constraints. - - Parameters - ---------- - schema - A string that represents a JSON Schema. - - Returns - ------- - A generation schedule. A list of strings that represent the JSON - schema's structure and regular expression that define the structure of - the fields. - - References - ---------- - .. [0] JSON Schema. https://json-schema.org/ - """ schema = json.loads(schema) - schema = expand_json_schema(schema, resolver=_ref_resolver(schema)) - schedule = build_schedule_from_instance(schema) - - # Concatenate adjacent strings - reduced_schedule = [ - x - for cls, grp in itertools.groupby(schedule, type) - for x in (("".join(grp),) if cls is str else grp) - ] - - return reduced_schedule - - -def expand_item_json_schema(expanded_property: Dict, resolver: Callable[[str], Dict]): - """Recursively expand "$ref"s in "item"s.""" - if "items" not in expanded_property.keys(): - return - elif "$ref" in expanded_property["items"]: - expanded_property["items"] = expand_json_schema( - resolver(expanded_property["items"]["$ref"]), resolver - ) - else: - expand_item_json_schema(expanded_property["items"], resolver) - - -def expand_json_schema( - raw_schema: Dict, - resolver: Callable[[str], Dict], -): - """Replace references by their value in the JSON Schema. - - This recursively follows the references to other schemas in case - of nested models. Other schemas that may exist at a higher level - within the overall schema may be referenced via the `$ref` keyword - according to the JSON Schema specification. - - - Parameters - --------- - raw_schema - The raw JSON schema as a Python dictionary, possibly with definitions - and references. - resolver - A function that takes a reference and returns the corresponding schema - or subschema from the currently scoped top-level schema. - - Returns - ------- - A dictionary that represents the flattened equivalent of the input - JSON schema. - - """ - expanded_properties = {} - - if "properties" in raw_schema: - if "$id" in raw_schema: - # see https://json-schema.org/understanding-json-schema/structuring#bundling - resolver = _ref_resolver(raw_schema) - - for name, value in raw_schema["properties"].items(): - if "$ref" in value: # if item is a single element - expanded_properties[name] = expand_json_schema( - resolver(value["$ref"]), resolver - ) - elif "type" in value and value["type"] == "array": # if item is a list - expanded_properties[name] = value - - if "$ref" in value["items"] or ( - "type" in value["items"] and value["items"]["type"] == "array" - ): - expand_item_json_schema(expanded_properties[name], resolver) - else: - expanded_properties[name]["items"] = value["items"] - - else: - expanded_properties[name] = value - - return { - **({"title": raw_schema["title"]} if "title" in raw_schema else {}), - "type": raw_schema["type"], - "properties": expanded_properties, - } - - else: - return raw_schema + # Build reference resolver + schema = Resource(contents=schema, specification=DRAFT202012) + uri = schema.id() if schema.id() is not None else "" + registry = Registry().with_resource(uri=uri, resource=schema) + resolver = registry.resolver() + content = schema.contents + regex = to_regex(resolver, content) + return regex -def build_schedule_from_instance(instance: Dict): - """Build a generation schedule from a instance. 
- This recursively follows the references to other instances. +def to_regex(resolver: Resolver, instance: dict): + whitespace = r"[\n ]*" - Parameters - ---------- - instance - An instance, can be the JSON schema itself. - indent - The current indentation level - - Returns - ------- - A generation schedule for the instance, a list of strings that represent - the structure of the JSON schema and dictionaries that contain the - instance definition. - - """ - schedule = [] if "properties" in instance: - schedule.append(r"\{") - schedule += build_schedule_from_instance(instance["properties"]) - schedule.append(r"\}") - else: - for i, (name, annotation) in enumerate(instance.items()): - whitespace = r"[\n ]*" - schedule.append(f'{whitespace}"{name}"{whitespace}:{whitespace}') + regex = "" + regex += r"\{" + for i, (name, value) in enumerate(instance["properties"].items()): + regex += f'{whitespace}"{name}"{whitespace}:{whitespace}' + regex += to_regex(resolver, value) - if "anyOf" in annotation: - schedule.append(annotation) - elif annotation["type"] == "object": - schedule += build_schedule_from_instance(annotation) - else: - schedule.append(annotation) + # No comma after the last key-value pair in JSON + if i < len(instance["properties"]) - 1: + regex += f"{whitespace}," - # We cannot add commas after the last key-value pair in JSON - if i == len(instance) - 1: - schedule.append(whitespace) - else: - schedule.append(f"{whitespace},") + regex += f"{whitespace}" + r"\}" - return schedule + return regex + elif "oneOf" in instance: + print(instance) -def match_step_to_regex(step): - """Translate an element of a JSON schema to a regex that defines its content. + elif "allOf" in instance: + print(instance) - Parameters - ---------- - step: - A string that represents the schema's structure, or a dictionary - that represents a field in the schema. + elif "anyOf" in instance: + subregexes = [to_regex(resolver, t) for t in instance["anyOf"]] + return rf"({'|'.join(subregexes)})" - Returns - ------- - A string that represents a regular expression that defines the value of the - schedule's step. 
-
-    """
-    if isinstance(step, str):
-        return step
-
-    if isinstance(step, dict):
-        keys = set(step.keys())
-
-        if all(key in keys for key in ("enum", "type")) and step["type"] == "string":
-            choices = [f'"{re.escape(choice)}"' for choice in step["enum"]]
+    elif "enum" in instance:
+        if instance["type"] == "string":
+            choices = [f'"{re.escape(choice)}"' for choice in instance["enum"]]
            return f"({'|'.join(choices)})"
-
-        elif "enum" in keys:
-            choices = [re.escape(str(choice)) for choice in step["enum"]]
+        else:
+            choices = [re.escape(str(choice)) for choice in instance["enum"]]
            return f"({'|'.join(choices)})"

-        elif all(key in keys for key in ("type", "items")) and step["type"] == "array":
-            item_regexes = match_step_to_regex(step["items"])
-            return rf"\[({item_regexes})(,({item_regexes}))*\]"
+    elif "$ref" in instance:
+        path = f"{instance['$ref']}"
+        instance = resolver.lookup(path).contents
+        return to_regex(resolver, instance)

-        elif "type" in keys and step["type"] == "object":
-            steps = build_schedule_from_schema(json.dumps(step))
-            regex_str = ""
-            for step in steps:
-                regex_str += match_step_to_regex(step)
-            return regex_str
+    elif "type" in instance:
+        type = instance["type"]
+
+        if type == "string":
+            if "maxLength" in instance or "minLength" in instance:
+                max_length = instance.get("maxLength", "")
+                min_length = instance.get("minLength", "")
+                # Only compare the bounds when both are present; raising here
+                # (instead of swallowing the error) surfaces invalid schemas.
+                if max_length != "" and min_length != "":
+                    if int(max_length) < int(min_length):
+                        raise ValueError(
+                            "maxLength must be greater than or equal to minLength"
+                        )
+                return f'"{STRING_INNER}{{{min_length},{max_length}}}"'
+            elif "pattern" in instance:
+                pattern = instance["pattern"]
+                if pattern[0] == "^" and pattern[-1] == "$":
+                    return rf'(^"{pattern[1:-1]}"$)'
+                else:
+                    return rf'("{pattern}")'
+            else:
+                return type_to_regex["string"]

-        elif (
-            all(key in keys for key in ("type", "maxLength"))
-            and step["type"] == "string"
-        ):
-            max_length = step["maxLength"]
-            return f'"{STRING_INNER}{{,{max_length}}}"'
+        elif type == "number":
+            return type_to_regex["number"]

-        elif (
-            all(key in keys for key in ("type", "minLength"))
-            and step["type"] == "string"
-        ):
-            min_length = step["minLength"]
-            return f'"{STRING_INNER}{{{min_length},}}"'
+        elif type == "integer":
+            return type_to_regex["integer"]

-        elif "type" in keys:
-            return type_to_regex[step["type"]]
+        elif type == "array":
+            items_regex = to_regex(resolver, instance["items"])
+            return rf"\[({items_regex})(,({items_regex}))*\]"

-        elif "anyOf" in keys:
-            regexes = [match_step_to_regex(choice) for choice in step["anyOf"]]
-            return rf"({'|'.join(regexes)})"
+        elif type == "boolean":
+            return type_to_regex["boolean"]

+        elif type == "null":
+            return type_to_regex["null"]

-    raise NotImplementedError
+        # elif isinstance(type, list):
+        #     if "object" in type:
+        #         expanded = to_regex(resolver, instance)
+        #         return ""
+        #     return ""

+    raise NotImplementedError(
+        f"""Could not translate the instance {instance} to a
+    regular expression. Make sure it is valid against the JSON Schema specification.
If + it is, please open an issue on the Outlines repository""" + ) diff --git a/pyproject.toml b/pyproject.toml index c769baf1..6ab6cf61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dependencies = [ "torch", "numba", "joblib", + "referencing", ] dynamic = ["version"] @@ -92,6 +93,7 @@ module = [ "PIL.Image", "pydantic", "pytest", + "referencing.*", "scipy.*", "tenacity.*", "tiktoken.*", diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 239effb6..0069ca84 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -1,7 +1,5 @@ import json import re -from enum import Enum -from typing import List, Optional, Union import pytest from pydantic import BaseModel, constr @@ -13,12 +11,12 @@ NUMBER, STRING, STRING_INNER, - build_schedule_from_schema, - match_step_to_regex, + build_regex_from_schema, + to_regex, ) -def test_pydantic_basic(): +def test_from_pydantic(): class User(BaseModel): user_id: int name: str @@ -28,382 +26,31 @@ class User(BaseModel): is_true: bool schema = json.dumps(User.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - '[\\n ]*,[\\n ]*"maxlength_name"[\\n ]*:[\\n ]*', - {"title": "Maxlength Name", "type": "string", "maxLength": 10}, - '[\\n ]*,[\\n ]*"minlength_name"[\\n ]*:[\\n ]*', - {"title": "Minlength Name", "type": "string", "minLength": 10}, - '[\\n ]*,[\\n ]*"value"[\\n ]*:[\\n ]*', - {"title": "Value", "type": "number"}, - '[\\n ]*,[\\n ]*"is_true"[\\n ]*:[\\n ]*', - {"title": "Is True", "type": "boolean"}, - "[\\n ]*\\}", - ] - - -def test_pydantic_optional(): - class Foo(BaseModel): - bar: Optional[str] - - schema = json.dumps(Foo.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"bar"[\\n ]*:[\\n ]*', - {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bar"}, - "[\\n ]*\\}", - ] - - -def test_pydantic_array(): - class User(BaseModel): - user_id: int - value: List[float] - - schema = json.dumps(User.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"value"[\\n ]*:[\\n ]*', - {"title": "Value", "type": "array", "items": {"type": "number"}}, - "[\\n ]*\\}", - ] - - -def test_pydantic_enum(): - class Name(str, Enum): - john = "John" - marc = "Marc" - michel = "Michel" - - class User(BaseModel): - user_id: int - name: Name - - schema = json.dumps(User.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - { - "title": "Name", - "enum": ["John", "Marc", "Michel"], - "type": "string", - }, - "[\\n ]*\\}", - ] - - -def test_pydantic_nested(): - """Arbitrarily nested schema.""" - - class Fizz(BaseModel): - buzz: str - - class Foo(BaseModel): - count: int - size: Fizz - - class Bar(BaseModel): - apple: str - banana: str - - class Spam(BaseModel): - foo: Foo - bars: Bar - - # We need to a recursive function to parse nested schemas - schema = json.dumps(Spam.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*\\{[\\n ]*"count"[\\n ]*:[\\n ]*', - {"title": 
"Count", "type": "integer"}, - '[\\n ]*,[\\n ]*"size"[\\n ]*:[\\n ]*\\{[\\n ]*"buzz"[\\n ]*:[\\n ]*', - {"title": "Buzz", "type": "string"}, - '[\\n ]*\\}[\\n ]*\\}[\\n ]*,[\\n ]*"bars"[\\n ]*:[\\n ]*\\{[\\n ]*"apple"[\\n ]*:[\\n ]*', - {"title": "Apple", "type": "string"}, - '[\\n ]*,[\\n ]*"banana"[\\n ]*:[\\n ]*', - {"title": "Banana", "type": "string"}, - "[\\n ]*\\}[\\n ]*\\}", - ] - - -def test_pydantic_list_object(): - class Foo(BaseModel): - count: int - - class Spam(BaseModel): - foo: List[Foo] - - # We need to a recursive function to parse nested schemas - schema = json.dumps(Spam.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*', - { - "items": { - "title": "Foo", - "type": "object", - "properties": {"count": {"title": "Count", "type": "integer"}}, - }, - "title": "Foo", - "type": "array", - }, - "[\\n ]*\\}", - ] - - -def test_pydantic_recursive_list_object(): - class ItemModel(BaseModel): - name: str - - class ArrayModel1(BaseModel): - item_model_lists: List[List[ItemModel]] - - class ArrayModel2(BaseModel): - nums: List[List[int]] - - class ArrayModel3(BaseModel): - array_model_lists: List[List[ArrayModel1]] - - schema = json.dumps(ArrayModel1.model_json_schema()) - schedule = build_schedule_from_schema(schema) - array_model_1_schema = { - "items": { - "items": { - "title": "ItemModel", - "type": "object", - "properties": {"name": {"title": "Name", "type": "string"}}, - }, - "type": "array", - }, - "title": "Item Model Lists", - "type": "array", - } - assert schedule == [ - '\\{[\\n ]*"item_model_lists"[\\n ]*:[\\n ]*', - array_model_1_schema, - "[\\n ]*\\}", - ] - - schema = json.dumps(ArrayModel2.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"nums"[\\n ]*:[\\n ]*', - { - "items": {"items": {"type": "integer"}, "type": "array"}, - "title": "Nums", - "type": "array", - }, - "[\\n ]*\\}", - ] - - schema = json.dumps(ArrayModel3.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"array_model_lists"[\\n ]*:[\\n ]*', - { - "items": { - "items": { - "title": "ArrayModel1", - "type": "object", - "properties": {"item_model_lists": array_model_1_schema}, - }, - "type": "array", - }, - "title": "Array Model Lists", - "type": "array", - }, - "[\\n ]*\\}", - ] - - -def test_pydantic_union(): - """Schemas with Union types.""" - - class Spam(BaseModel): - foo: int - bar: Union[float, str] - - schema = json.dumps(Spam.model_json_schema()) - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"foo"[\\n ]*:[\\n ]*', - {"title": "Foo", "type": "integer"}, - '[\\n ]*,[\\n ]*"bar"[\\n ]*:[\\n ]*', - {"title": "Bar", "anyOf": [{"type": "number"}, {"type": "string"}]}, - "[\\n ]*\\}", - ] - - -def test_json_schema(): - schema = '{"title": "User", "type": "object", "properties": {"user_id": {"title": "User Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["user_id", "name"]}' - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - "[\\n ]*\\}", - ] - - -def test_json_schema_no_titles(): - schema = '{"type": "object", "properties": {"user_id": {"type": "integer"}, "name": {"type": "string"}}, "required": ["user_id", "name"]}' - schedule = 
build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - {"type": "string"}, - "[\\n ]*\\}", - ] - - -def test_json_schema_with_property_ref(): - schema = """{ - "title": "User", - "type": "object", - "properties": { - "user_id": {"title": "User Id", "type": "integer"}, - "name": {"title": "Name", "type": "string"}, - "a": {"$ref": "#/properties/name"}, - "b": {"$ref": "#/properties/name"}, - "c": {"$ref": "#/properties/name"} - }, - "required": ["user_id", "name"]} - """ - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - '[\\n ]*,[\\n ]*"a"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - '[\\n ]*,[\\n ]*"b"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - '[\\n ]*,[\\n ]*"c"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - "[\\n ]*\\}", - ] - - -def test_json_schema_with_def_ref(): - schema = """{ - "title": "User", - "type": "object", - "$defs": { - "name": {"title": "Name2", "type": "string"} - }, - "properties": { - "user_id": {"title": "User Id", "type": "integer"}, - "name": {"title": "Name", "type": "string"}, - "name2": {"$ref": "#/$defs/name"} - }, - "required": ["user_id", "name"]} - """ - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"user_id"[\\n ]*:[\\n ]*', - {"title": "User Id", "type": "integer"}, - '[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*', - {"title": "Name", "type": "string"}, - '[\\n ]*,[\\n ]*"name2"[\\n ]*:[\\n ]*', - {"title": "Name2", "type": "string"}, - "[\\n ]*\\}", - ] - - -def test_json_schema_with_bundled_ref(): - schema = """{ - "$id": "https://example.com/schemas/customer", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Customer", - "type": "object", - "properties": { - "first_name": { "type": "string" }, - "last_name": { "type": "string" }, - "shipping_address": { "$ref": "/schemas/address" }, - "billing_address": { "$ref": "/schemas/address" } - }, - "required": ["first_name", "last_name", "shipping_address", "billing_address"], - "$defs": { - "address": { - "title": "Address", - "$id": "/schemas/address", - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "street_address": { "type": "string" }, - "city": { "type": "string" }, - "state": { "$ref": "#/definitions/state" } - }, - "required": ["street_address", "city", "state"], - "definitions": { - "state": { "type": "object", "title": "State", "properties": { "name": { "type": "string" } }, "required": ["name"] } - } - } - } - }""" - schedule = build_schedule_from_schema(schema) - assert schedule == [ - '\\{[\\n ]*"first_name"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n ]*"last_name"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n ]*"shipping_address"[\\n ]*:[\\n ]*\\{[\\n ]*"street_address"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n ]*"city"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n ]*"state"[\\n ]*:[\\n ]*\\{[\\n ]*"name"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*\\}[\\n ]*\\}[\\n ]*,[\\n ]*"billing_address"[\\n ]*:[\\n ]*\\{[\\n ]*"street_address"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n ]*"city"[\\n ]*:[\\n ]*', - {"type": "string"}, - '[\\n ]*,[\\n 
]*"state"[\\n ]*:[\\n ]*\\{[\\n ]*"name"[\\n ]*:[\\n ]*', - {"type": "string"}, - "[\\n ]*\\}[\\n ]*\\}[\\n ]*\\}", - ] - - -class MockTokenizer: - pad_token_id = 0 - eos_token_id = 0 - - -class MockModel: - tokenizer = MockTokenizer() - device = "cpu" + schedule = build_regex_from_schema(schema) + assert isinstance(schedule, str) @pytest.mark.parametrize( "pattern,does_match", [ - ("0", True), - ("1", True), - ("-1", False), - ("01", False), - ("1.3", False), - ("t", False), + ({"integer": "0"}, True), + ({"integer": "1"}, True), + ({"integer": "-1"}, False), + ({"integer": "01"}, False), + ({"integer": "1.3"}, False), + ({"integer": "t"}, False), ], ) def test_match_integer(pattern, does_match): step = {"title": "Foo", "type": "integer"} - regex = match_step_to_regex(step) + regex = to_regex(None, step) assert regex == INTEGER - match = re.fullmatch(regex, pattern) + value = pattern["integer"] + match = re.fullmatch(regex, value) if does_match: - assert match[0] == pattern - assert match.span() == (0, len(pattern)) + assert match[0] == value + assert match.span() == (0, len(value)) else: assert match is None @@ -411,47 +58,64 @@ def test_match_integer(pattern, does_match): @pytest.mark.parametrize( "pattern,does_match", [ - ("1", True), - ("0", True), - ("01", False), - (".3", False), - ("1.3", True), - ("-1.3", True), - ("1.3e9", False), - ("1.3e+9", True), + ({"number": "1"}, True), + ({"number": "0"}, True), + ({"number": "01"}, False), + ({"number": ".3"}, False), + ({"number": "1.3"}, True), + ({"number": "-1.3"}, True), + ({"number": "1.3e9"}, False), + ({"number": "1.3e+9"}, True), ], ) def test_match_number(pattern, does_match): step = {"title": "Foo", "type": "number"} - regex = match_step_to_regex(step) + regex = to_regex(None, step) assert regex == NUMBER - match = re.fullmatch(regex, pattern) + value = pattern["number"] + match = re.fullmatch(regex, value) if does_match: - assert match[0] == pattern - assert match.span() == (0, len(pattern)) + assert match[0] == value + assert match.span() == (0, len(value)) else: assert match is None @pytest.mark.parametrize( - "step,regex,examples", + "schema,regex,examples", [ + # String ( {"title": "Foo", "type": "string"}, STRING, [("unquotedstring", False), ('"quoted_string"', True)], ), + # String with maximum length ( {"title": "Foo", "type": "string", "maxLength": 3}, f'"{STRING_INNER}{{,3}}"', [('"ab"', True), ('"a""', False), ('"abcd"', False)], ), + # String with minimum length ( {"title": "Foo", "type": "string", "minLength": 3}, f'"{STRING_INNER}{{3,}}"', [('"ab"', False), ('"abcd"', True), ('"abc""', False)], ), + # String with both minimum and maximum length + ( + {"title": "Foo", "type": "string", "minLength": 3, "maxLength": 5}, + f'"{STRING_INNER}{{3,5}}"', + [('"ab"', False), ('"abcd"', True), ('"abcdef""', False)], + ), + # String defined by a regular expression + ( + {"title": "Foo", "type": "string", "pattern": r"^[a-z]$"}, + r'(^"[a-z]"$)', + [('"a"', True), ('"1"', False)], + ), + # Boolean ( {"title": "Foo", "type": "boolean"}, BOOLEAN, @@ -462,6 +126,7 @@ def test_match_number(pattern, does_match): ("0", False), ], ), + # Null ( {"title": "Foo", "type": "null"}, NULL, @@ -471,31 +136,25 @@ def test_match_number(pattern, does_match): ("0", False), ], ), - ( - {"title": "Foo", "anyOf": [{"type": "string"}, {"type": "number"}]}, - f"({STRING}|{NUMBER})", - [ - ('"string"', True), - ('"st"ring"', False), - ("1000", True), - ("true", False), - ], - ), + # Enum string ( {"title": "Foo", "enum": ["Marc", "Jean"], "type": 
"string"}, '("Marc"|"Jean")', [('"Marc"', True), ('"Jean"', True), ('"John"', False)], ), + # Make sure strings are escaped ( {"title": "Foo", "enum": [".*", r"\s*"], "type": "string"}, r'("\.\*"|"\\s\*")', [('".*"', True), (r'"\s*"', True), (r'"\.\*"', False)], ), + # Enum integer ( {"title": "Foo", "enum": [0, 1], "type": "integer"}, "(0|1)", [("0", True), ("1", True), ("a", False)], ), + # integer ( { "title": "Foo", @@ -505,11 +164,13 @@ def test_match_number(pattern, does_match): '\\{[\\n ]*"count"[\\n ]*:[\\n ]*(0|[1-9][0-9]*)[\\n ]*\\}', [('{\n "count": 100\n}', True)], ), + # array ( {"title": "Foo", "type": "array", "items": {"type": "number"}}, rf"\[({NUMBER})(,({NUMBER}))*\]", [("[1e+9,1.3]", True)], ), + # anyOf ( { "title": "Foo", @@ -519,6 +180,7 @@ def test_match_number(pattern, does_match): r"\[(((true|false)|null))(,(((true|false)|null)))*\]", [("[true,null,false]", True)], ), + # Nested schema ( { "title": "Bar", @@ -534,11 +196,86 @@ def test_match_number(pattern, does_match): f'\\{{[\\n ]*"fuzz"[\\n ]*:[\\n ]*\\{{[\\n ]*"spam"[\\n ]*:[\\n ]*{INTEGER}[\\n ]*\\}}[\\n ]*\\}}', [('{\n "fuzz": {\n "spam": 100\n }\n}', True)], ), + # Schema with a reference + ( + { + "title": "User", + "type": "object", + "properties": { + "user_id": {"title": "User Id", "type": "integer"}, + "name": {"title": "Name", "type": "string"}, + "a": {"$ref": "#/properties/name"}, + }, + "required": ["user_id", "name"], + }, + f'\\{{[\\n ]*"user_id"[\\n ]*:[\\n ]*{INTEGER}[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*{STRING}[\\n ]*,[\\n ]*"a"[\\n ]*:[\\n ]*{STRING}[\\n ]*\\}}', + [('{"user_id": 100, "name": "John", "a": "Marc"}', True)], + ), + ( + { + "title": "User", + "type": "object", + "$defs": {"name": {"title": "Name2", "type": "string"}}, + "properties": { + "user_id": {"title": "User Id", "type": "integer"}, + "name": {"title": "Name", "type": "string"}, + "name2": {"$ref": "#/$defs/name"}, + }, + "required": ["user_id", "name"], + }, + f'\\{{[\\n ]*"user_id"[\\n ]*:[\\n ]*{INTEGER}[\\n ]*,[\\n ]*"name"[\\n ]*:[\\n ]*{STRING}[\\n ]*,[\\n ]*"name2"[\\n ]*:[\\n ]*{STRING}[\\n ]*\\}}', + [('{"user_id": 100, "name": "John", "name2": "Marc"}', True)], + ), + ( + { + "$id": "customer", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Customer", + "type": "object", + "properties": { + "name": {"type": "string"}, + "last_name": {"type": "string"}, + "address": {"$ref": "customer#/$defs/address"}, + }, + "required": [ + "first_name", + "last_name", + "shipping_address", + "billing_address", + ], + "$defs": { + "address": { + "title": "Address", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "city": {"type": "string"}, + }, + "required": ["street_address", "city", "state"], + "definitions": { + "state": { + "type": "object", + "title": "State", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + } + }, + } + }, + }, + f'\\{{[\\n ]*"name"[\\n ]*:[\\n ]*{STRING}[\\n ]*,[\\n ]*"last_name"[\\n ]*:[\\n ]*{STRING}[\\n ]*,[\\n ]*"address"[\\n ]*:[\\n ]*\\{{[\\n ]*"city"[\\n ]*:[\\n ]*{STRING}[\\n ]*\\}}[\\n ]*\\}}', + [ + ( + '{"name": "John", "last_name": "Doe", "address": {"city": "Paris"}}', + True, + ) + ], + ), ], ) -def test_match(step, regex, examples): - test_regex = match_step_to_regex(step) - +def test_match(schema, regex, examples): + schema = json.dumps(schema) + test_regex = build_regex_from_schema(schema) assert test_regex == regex for string, does_match in examples: 
From 9d7cf431966f41b3c31ad11b18cad13a55726648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 8 Nov 2023 14:34:31 +0100 Subject: [PATCH 268/734] Support `oneOf`, `anyOf` and `allOf` --- outlines/text/json_schema.py | 57 +++++++++++++++++++++++++++------- tests/text/test_json_schema.py | 25 ++++++++++++--- 2 files changed, 67 insertions(+), 15 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index fe157807..da7f4f37 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -1,3 +1,4 @@ +import itertools as it import json import re @@ -50,6 +51,25 @@ def build_regex_from_schema(schema: str): def to_regex(resolver: Resolver, instance: dict): + """Translate a JSON Schema instance into a regex that validates the schema. + + Note + ---- + Many features of JSON schema are missing: + - Support the fact that fields in an object are optional by default + - Handle `required` keyword + - Handle `additionalProperties` keyword + - Handle types defined as a list + - Handle constraints on numbers + - Handle special patterns: `date`, `uri`, etc. + + Parameters + ---------- + resolver + An object that resolves references to other instances within a schema + instance + The instance to translate + """ whitespace = r"[\n ]*" if "properties" in instance: @@ -67,16 +87,33 @@ def to_regex(resolver: Resolver, instance: dict): return regex - elif "oneOf" in instance: - print(instance) - + # To validate against allOf, the given data must be valid against all of the + # given subschemas. elif "allOf" in instance: - print(instance) + subregexes = [to_regex(resolver, t) for t in instance["allOf"]] + subregexes_str = [f"{subregex}" for subregex in subregexes] + return rf"({''.join(subregexes_str)})" + # To validate against `anyOf`, the given data must be valid against + # any (one or more) of the given subschemas. elif "anyOf" in instance: subregexes = [to_regex(resolver, t) for t in instance["anyOf"]] + combinations = [ + "(" + "".join(c) + ")" + for r in range(1, len(subregexes) + 1) + for c in it.permutations(subregexes, r) + ] + + return rf"({'|'.join(combinations)})" + + # To validate against oneOf, the given data must be valid against exactly + # one of the given subschemas. + elif "oneOf" in instance: + subregexes = [to_regex(resolver, t) for t in instance["oneOf"]] return rf"({'|'.join(subregexes)})" + # The enum keyword is used to restrict a value to a fixed set of values. It + # must be an array with at least one element, where each element is unique. elif "enum" in instance: if instance["type"] == "string": choices = [f'"{re.escape(choice)}"' for choice in instance["enum"]] @@ -90,9 +127,13 @@ def to_regex(resolver: Resolver, instance: dict): instance = resolver.lookup(path).contents return to_regex(resolver, instance) + # The type keyword may either be a string or an array: + # - If it's a string, it is the name of one of the basic types. + # - If it is an array, it must be an array of strings, where each string is + # the name of one of the basic types, and each element is unique. In this + # case, the JSON snippet is valid if it matches any of the given types. 
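+    # For illustration: with two subschemas A = string and B = integer, the
+    # combinators above expand to allOf -> (AB), oneOf -> (A|B), and
+    # anyOf -> (A|B|AB|BA), i.e. every permutation of every non-empty subset.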
elif "type" in instance: type = instance["type"] - if type == "string": if "maxLength" in instance or "minLength" in instance: max_length = instance.get("maxLength", "") @@ -130,12 +171,6 @@ def to_regex(resolver: Resolver, instance: dict): elif type == "null": return type_to_regex["null"] - # elif isinstance(type, list): - # if "object" in type: - # expanded = to_regex(resolver, instance) - # return "" - # return "" - raise NotImplementedError( f"""Could not translate the instance {instance} to a regular expression. Make sure it is valid to the JSON Schema specification. If diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 0069ca84..f3f2bc82 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -170,15 +170,32 @@ def test_match_number(pattern, does_match): rf"\[({NUMBER})(,({NUMBER}))*\]", [("[1e+9,1.3]", True)], ), + # oneOf + ( + { + "title": "Foo", + "oneOf": [{"type": "string"}, {"type": "number"}], + }, + rf"({STRING}|{NUMBER})", + [("12.3", True), ('"a"', True), ('1.3"a"', False)], + ), # anyOf ( { "title": "Foo", - "type": "array", - "items": {"anyOf": [{"type": "boolean"}, {"type": "null"}]}, + "anyOf": [{"type": "string"}, {"type": "integer"}], + }, + rf'(("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*")|((0|[1-9][0-9]*))|("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"(0|[1-9][0-9]*))|((0|[1-9][0-9]*)"(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"))', + [("12", True), ('"a"', True), ('1"a"', True)], + ), + # allOf + ( + { + "title": "Foo", + "allOf": [{"type": "string"}, {"type": "integer"}], }, - r"\[(((true|false)|null))(,(((true|false)|null)))*\]", - [("[true,null,false]", True)], + rf"({STRING}{INTEGER})", + [('"a"1', True), ('"a"', False), ('"1"', False)], ), # Nested schema ( From a8598372bca95417714bc6d6451c3ca99831c75c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 8 Nov 2023 17:23:01 +0100 Subject: [PATCH 269/734] Check the JSON Schema before translating it --- environment.yml | 1 + outlines/text/json_schema.py | 2 ++ pyproject.toml | 2 ++ 3 files changed, 5 insertions(+) diff --git a/environment.yml b/environment.yml index 14629af3..56c58c5e 100644 --- a/environment.yml +++ b/environment.yml @@ -17,6 +17,7 @@ dependencies: - pytest - pre-commit - referencing + - jsonschema - transformers - pip - pip: diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index da7f4f37..bac3e930 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -2,6 +2,7 @@ import json import re +from jsonschema.protocols import Validator from referencing import Registry, Resource from referencing._core import Resolver from referencing.jsonschema import DRAFT202012 @@ -37,6 +38,7 @@ def build_regex_from_schema(schema: str): follows the schema. 
""" + Validator.check_schema(schema) schema = json.loads(schema) # Build reference resolver diff --git a/pyproject.toml b/pyproject.toml index 6ab6cf61..7fc1dceb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ dependencies = [ "numba", "joblib", "referencing", + "jsonschema", ] dynamic = ["version"] @@ -86,6 +87,7 @@ exclude=["examples"] module = [ "jinja2", "joblib.*", + "jsonschema.*", "openai", "numpy.*", "perscache.*", From 0856b7a418838da189b422359f6869e8d7002043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 8 Nov 2023 17:27:31 +0100 Subject: [PATCH 270/734] Support array of types and arrays without specified types --- outlines/text/json_schema.py | 28 ++++++++++++++++++++++++---- tests/text/test_json_schema.py | 2 +- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index bac3e930..0c597b84 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -48,8 +48,7 @@ def build_regex_from_schema(schema: str): resolver = registry.resolver() content = schema.contents - regex = to_regex(resolver, content) - return regex + return to_regex(resolver, content) def to_regex(resolver: Resolver, instance: dict): @@ -164,8 +163,22 @@ def to_regex(resolver: Resolver, instance: dict): return type_to_regex["integer"] elif type == "array": - items_regex = to_regex(resolver, instance["items"]) - return rf"\[({items_regex})(,({items_regex}))*\]" + if "items" in instance: + items_regex = to_regex(resolver, instance["items"]) + return rf"\[({items_regex})(,({items_regex}))*\]" + else: + # Here we need to make the choice to exclude generating list of objects + # if the specification of the object is not give, even though a JSON + # object that contains an object here would be valid under the specification. + types = [ + {"type": "boolean"}, + {"type": "null"}, + {"type": "number"}, + {"type": "integer"}, + {"type": "string"}, + ] + regexes = [to_regex(resolver, t) for t in types] + return rf"\[({'|'.join(regexes)})(,({'|'.join(regexes)}))*\]" elif type == "boolean": return type_to_regex["boolean"] @@ -173,6 +186,13 @@ def to_regex(resolver: Resolver, instance: dict): elif type == "null": return type_to_regex["null"] + elif isinstance(type, list): + # Here we need to make the choice to exclude generating an object + # if the specification of the object is not give, even though a JSON + # object that contains an object here would be valid under the specification. + regexes = [to_regex(resolver, {"type": t}) for t in type if t != "object"] + return rf"({'|'.join(regexes)})" + raise NotImplementedError( f"""Could not translate the instance {instance} to a regular expression. Make sure it is valid to the JSON Schema specification. 
If diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index f3f2bc82..05255dc6 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -185,7 +185,7 @@ def test_match_number(pattern, does_match): "title": "Foo", "anyOf": [{"type": "string"}, {"type": "integer"}], }, - rf'(("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*")|((0|[1-9][0-9]*))|("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"(0|[1-9][0-9]*))|((0|[1-9][0-9]*)"(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"))', + r'(("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*")|((0|[1-9][0-9]*))|("(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"(0|[1-9][0-9]*))|((0|[1-9][0-9]*)"(?:[^"\\\x00-\x1f\x7f-\x9f]|\\.)*"))', [("12", True), ('"a"', True), ('1"a"', True)], ), # allOf From 650311dc3031d95923780b73abc8ebae64f58dd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 8 Nov 2023 21:03:15 +0100 Subject: [PATCH 271/734] Support enums with different types --- outlines/text/json_schema.py | 37 +++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 0c597b84..61422753 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -63,6 +63,9 @@ def to_regex(resolver: Resolver, instance: dict): - Handle types defined as a list - Handle constraints on numbers - Handle special patterns: `date`, `uri`, etc. + - Handle optional fields (not in `required`) + + This does not support recursive definitions. Parameters ---------- @@ -116,12 +119,14 @@ def to_regex(resolver: Resolver, instance: dict): # The enum keyword is used to restrict a value to a fixed set of values. It # must be an array with at least one element, where each element is unique. elif "enum" in instance: - if instance["type"] == "string": - choices = [f'"{re.escape(choice)}"' for choice in instance["enum"]] - return f"({'|'.join(choices)})" - else: - choices = [re.escape(str(choice)) for choice in instance["enum"]] - return f"({'|'.join(choices)})" + choices = [] + for choice in instance["enum"]: + if type(choice) in [int, float, bool, None]: + choices.append(re.escape(str(choice))) + elif type(choice) == str: + choices.append(f'"{re.escape(choice)}"') + + return f"({'|'.join(choices)})" elif "$ref" in instance: path = f"{instance['$ref']}" @@ -134,8 +139,8 @@ def to_regex(resolver: Resolver, instance: dict): # the name of one of the basic types, and each element is unique. In this # case, the JSON snippet is valid if it matches any of the given types. 
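    # For illustration: with the enum branch above, {"enum": ["Marc", 0]}
    # yields ("Marc"|0), quoting only the string member.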
elif "type" in instance: - type = instance["type"] - if type == "string": + instance_type = instance["type"] + if instance_type == "string": if "maxLength" in instance or "minLength" in instance: max_length = instance.get("maxLength", "") min_length = instance.get("minLength", "") @@ -156,13 +161,13 @@ def to_regex(resolver: Resolver, instance: dict): else: return type_to_regex["string"] - elif type == "number": + elif instance_type == "number": return type_to_regex["number"] - elif type == "integer": + elif instance_type == "integer": return type_to_regex["integer"] - elif type == "array": + elif instance_type == "array": if "items" in instance: items_regex = to_regex(resolver, instance["items"]) return rf"\[({items_regex})(,({items_regex}))*\]" @@ -180,17 +185,19 @@ def to_regex(resolver: Resolver, instance: dict): regexes = [to_regex(resolver, t) for t in types] return rf"\[({'|'.join(regexes)})(,({'|'.join(regexes)}))*\]" - elif type == "boolean": + elif instance_type == "boolean": return type_to_regex["boolean"] - elif type == "null": + elif instance_type == "null": return type_to_regex["null"] - elif isinstance(type, list): + elif isinstance(instance_type, list): # Here we need to make the choice to exclude generating an object # if the specification of the object is not give, even though a JSON # object that contains an object here would be valid under the specification. - regexes = [to_regex(resolver, {"type": t}) for t in type if t != "object"] + regexes = [ + to_regex(resolver, {"type": t}) for t in instance_type if t != "object" + ] return rf"({'|'.join(regexes)})" raise NotImplementedError( From e73d7fda70738046d12cf61be4af29d877aac8eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 10 Nov 2023 15:19:56 +0100 Subject: [PATCH 272/734] Support fixed-length arrays --- outlines/text/json_schema.py | 23 ++++++++++++++++------- tests/text/test_json_schema.py | 24 ++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 61422753..f2fc351e 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -142,16 +142,16 @@ def to_regex(resolver: Resolver, instance: dict): instance_type = instance["type"] if instance_type == "string": if "maxLength" in instance or "minLength" in instance: - max_length = instance.get("maxLength", "") - min_length = instance.get("minLength", "") + max_items = instance.get("maxLength", "") + min_items = instance.get("minLength", "") try: - if int(max_length) < int(min_length): + if int(max_items) < int(min_items): raise ValueError( "maxLength must be greater than or equal to minLength" ) except ValueError: pass - return f'"{STRING_INNER}{{{min_length},{max_length}}}"' + return f'"{STRING_INNER}{{{min_items},{max_items}}}"' elif "pattern" in instance: pattern = instance["pattern"] if pattern[0] == "^" and pattern[-1] == "$": @@ -168,12 +168,19 @@ def to_regex(resolver: Resolver, instance: dict): return type_to_regex["integer"] elif instance_type == "array": + min_items = instance.get("minItems", "0") + max_items = instance.get("maxItems", "") + if min_items == max_items: + num_repeats = "{" + str(int(min_items) - 1) + "}" + else: + num_repeats = "*" + if "items" in instance: items_regex = to_regex(resolver, instance["items"]) - return rf"\[({items_regex})(,({items_regex}))*\]" + return rf"\[({items_regex})(,({items_regex})){num_repeats}\]" else: # Here we need to make the choice to exclude generating list of objects - # if 
the specification of the object is not give, even though a JSON
+                # if the specification of the object is not given, even though a JSON
                 # object that contains an object here would be valid under the specification.
                 types = [
                     {"type": "boolean"},
                     {"type": "null"},
                     {"type": "number"},
                     {"type": "integer"},
                     {"type": "string"},
                 ]
                 regexes = [to_regex(resolver, t) for t in types]
-                return rf"\[({'|'.join(regexes)})(,({'|'.join(regexes)}))*\]"
+                return (
+                    rf"\[({'|'.join(regexes)})(,({'|'.join(regexes)})){num_repeats}\]"
+                )

         elif instance_type == "boolean":
             return type_to_regex["boolean"]
diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py
index 05255dc6..b84ac39f 100644
--- a/tests/text/test_json_schema.py
+++ b/tests/text/test_json_schema.py
@@ -170,6 +170,30 @@ def test_match_number(pattern, does_match):
             rf"\[({NUMBER})(,({NUMBER}))*\]",
             [("[1e+9,1.3]", True)],
         ),
+        # array with a set length of 1
+        (
+            {
+                "title": "Foo",
+                "type": "array",
+                "items": {"type": "integer"},
+                "minItems": 1,
+                "maxItems": 1,
+            },
+            rf"\[({INTEGER})(,({INTEGER})){{0}}\]",
+            [("[1]", True), ("[1,2]", False), ('["a"]', False), ("[]", False)],
+        ),
+        # array with a set length greater than 1
+        (
+            {
+                "title": "Foo",
+                "type": "array",
+                "items": {"type": "integer"},
+                "minItems": 3,
+                "maxItems": 3,
+            },
+            rf"\[({INTEGER})(,({INTEGER})){{2}}\]",
+            [("[1]", False), ("[]", False), ("[1,2,3]", True), ("[1,2,3,4]", False)],
+        ),
         # oneOf
         (

From 95c5294b49d9bd8815fd595ba39858af6ec81d9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Sun, 12 Nov 2023 21:46:28 +0100
Subject: [PATCH 273/734] Remove approximate guided generation methods for OpenAI APIs

The `type` guided-generation method for the OpenAI API is not correct
since we cannot bias more than 300 tokens at every step. It is better to
remove these features than misguide users.
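A rough sketch of the problem, relying on the same private `tiktoken` internals the removed helpers used (so treat it as an illustration rather than a supported API):

```python
import tiktoken

# Count the tokens whose text consists of digits only, as the removed
# create_int_mask helper did.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
digit_token_ids = [
    token_id
    for _, token_id in enc._mergeable_ranks.items()
    if all(c.isdigit() for c in enc.decode([token_id]))
]

# Well over the 300 logit_bias entries the API accepts, so any mask could
# only ever cover a fraction of the relevant tokens.
print(len(digit_token_ids))
```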
--- outlines/models/openai.py | 82 ++++----------------------------------- outlines/text/masks.py | 73 ---------------------------------- tests/text/test_masks.py | 71 --------------------------------- 3 files changed, 7 insertions(+), 219 deletions(-) delete mode 100644 outlines/text/masks.py delete mode 100644 tests/text/test_masks.py diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 6726256e..b3ab4a8e 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,7 +1,6 @@ """Integration with OpenAI's API.""" import functools import os -import warnings from typing import Callable, Dict, List, Optional, Union import numpy as np @@ -64,31 +63,20 @@ def generate( *, samples=1, stop_at: Union[List[Optional[str]], str] = [], - is_in=None, - type=None, + is_in: Optional[List[str]] = None, ): - import tiktoken - - if isinstance(stop_at, str): - stop_at = [stop_at] - - mask = {} - if type is not None: - encoder = tiktoken.encoding_for_model(model_name) - mask = create_type_mask(type, encoder) - if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") - elif is_in is not None and len(mask) > 0: - raise TypeError("You cannot set `is_in` and `mask` at the same time.") elif is_in is not None: return generate_choice(prompt, is_in, samples) else: - return generate_base(prompt, stop_at, samples, mask) + if isinstance(stop_at, str): + stop_at = [stop_at] + return generate_base(prompt, stop_at, samples) - @functools.partial(outlines.vectorize, signature="(),(m),(),()->(s)") + @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") async def generate_base( - prompt: str, stop_at: List[Optional[str]], samples: int, mask: Dict[int, int] + prompt: str, stop_at: List[Optional[str]], samples: int ) -> str: responses = await call_api( model_name, @@ -96,7 +84,7 @@ async def generate_base( max_tokens, temperature, stop_at, - mask, + {}, samples, ) @@ -162,62 +150,6 @@ async def generate_choice( return generate -def create_int_mask(encoder): - """Create an exclusive mask for digit tokens.""" - warnings.warn( - "The OpenAI API only allows for limited type control; results may not be accurate", - UserWarning, - ) - - int_token_ids = [] - - tokens = encoder._mergeable_ranks - for token, token_id in tokens.items(): - if all([c.isdigit() for c in encoder.decode([token_id])]): - int_token_ids.append(token_id) - - # TODO: This is a hack because OpenAI's API does not - # allow more than 300 entries for `logit_bias` - special_tokens = encoder._special_tokens - mask = {special_tokens["<|endoftext|>"]: 100} - mask.update({int_token_ids[i]: 100 for i in range(300 - len(special_tokens))}) - - return mask - - -def create_float_mask(encoder): - """Create an exclusive mask for digit tokens.""" - warnings.warn( - "The OpenAI API only allows for limited type control; results may not be accurate", - UserWarning, - ) - - int_token_ids = [] - - tokens = encoder._mergeable_ranks - for token, token_id in tokens.items(): - if all([c.isdigit() or c == "." 
for c in encoder.decode([token_id])]): - int_token_ids.append(token_id) - - # TODO: This is a hack because OpenAI's API does not - # allow more than 300 entries for `logit_bias` - special_tokens = encoder._special_tokens - mask = {special_tokens["<|endoftext|>"]: 100} - mask.update({int_token_ids[i]: 100 for i in range(300 - len(special_tokens))}) - - return mask - - -type_to_mask = { - "float": create_float_mask, - "int": create_int_mask, -} - - -def create_type_mask(type: str, encoder): - return type_to_mask[type](encoder) - - def error_handler(api_call_fn: Callable) -> Callable: """Handle OpenAI API errors and missing API key.""" diff --git a/outlines/text/masks.py b/outlines/text/masks.py deleted file mode 100644 index 8dc7b3d3..00000000 --- a/outlines/text/masks.py +++ /dev/null @@ -1,73 +0,0 @@ -import re -from typing import Dict, Iterable - -import torch - -__all__ = [ - "create_char_set_mask", - "create_float_mask", - "create_int_mask", - "create_mask_from_regex", -] - - -def create_mask_from_regex(vocabulary: Dict[str, int], regex: str) -> torch.BoolTensor: - """Create a token mask from a regex. - - Parameters - ---------- - vocabulary - A dictionary that contains a tokenizer's vocabulary as a map - between tokens and their ids. - regex - The regex that tokens need to respect. - - """ - program = re.compile(regex) - - mask = torch.zeros(len(vocabulary), dtype=torch.bool) - for token, token_id in vocabulary.items(): - if program.match(token) is not None: - mask[token_id] = True - - return mask - - -def create_int_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: - """Create a mask to generate signed integers.""" - mask = create_mask_from_regex(vocabulary, r"^[-+]?\d+$") - - return mask - - -def create_float_mask(vocabulary: Dict[str, int]) -> torch.BoolTensor: - """Create a mask to generate signed floating point numbers.""" - mask = create_mask_from_regex(vocabulary, r"^[-+]?([0-9]+(\.[0-9]*)?|\.[0-9]+)$") - - return mask - - -def create_char_set_mask( - vocabulary: Dict[str, int], char_set: Iterable[str] -) -> torch.BoolTensor: - """Create a mask to only generate characters in a given set. - - Parameters - ---------- - vocabulary - A dictionary that contains a tokenizer's vocabulary as a map - between tokens and their ids. - char_set - An iterable that contains the valid single characters. - - """ - for char in char_set: - if len(char) != 1: - raise ValueError( - "The `char_set` argument of `char_set_mask` can only contain single characters." 
- ) - - char_set = re.escape("".join(char_set)) - regex = "^[" + char_set + "]+$" - mask = create_mask_from_regex(vocabulary, regex) - return mask diff --git a/tests/text/test_masks.py b/tests/text/test_masks.py deleted file mode 100644 index 36e3eff7..00000000 --- a/tests/text/test_masks.py +++ /dev/null @@ -1,71 +0,0 @@ -import random - -import pytest -import torch - -from outlines.text.masks import create_char_set_mask, create_float_mask, create_int_mask - - -def test_int_mask(): - vocabulary = {"1": 0, "12": 1, "12a": 2, "a1": 3, "1.3": 4} - - mask = create_int_mask(vocabulary) - assert torch.equal(mask, torch.tensor([True, True, False, False, False])) - - -def test_float_mask(): - vocabulary = { - "1": 0, - "12": 1, - "12a": 2, - "a1": 3, - "1.3": 4, - "1.": 5, - "0.": 6, - "1.2.3": 7, - ".": 8, - ".0": 9, - } - - mask = create_float_mask(vocabulary) - assert torch.equal( - mask, - torch.tensor([True, True, False, False, True, True, True, False, False, True]), - ) - - -def test_char_set_mask(): - vocabulary = {} - with pytest.raises(ValueError, match="single characters"): - create_char_set_mask(vocabulary, ["ab"]) - - vocabulary = {"a": 0, "ab": 1, "abc": 2, "1": 3, "1_a": 4} - mask = create_char_set_mask(vocabulary, ["a", "b", "1", "_"]) - assert torch.equal(mask, torch.tensor([True, True, False, True, True])) - - vocabulary = { - "\\": 0, - "$": 1, - ".": 2, - "|": 3, - "?": 4, - "*": 5, - "(": 6, - ")": 7, - "[": 8, - "]": 9, - "{": 10, - "}": 11, - } - - char_set = ["\\", "$", ".", "|", "?", "*", "(", ")", "[", "]", "{", "}"] - random.shuffle(char_set) - - mask = create_char_set_mask(vocabulary, char_set) - assert torch.equal(mask, torch.ones(12, dtype=torch.bool)) - - mask = create_char_set_mask(vocabulary, ["a"]) - assert torch.equal(mask, torch.zeros(12, dtype=torch.bool)) - - mask = create_char_set_mask(vocabulary, ["\n", "\r", "\t"]) - assert torch.equal(mask, torch.zeros(12, dtype=torch.bool)) From b17b20060cf277ed7d34ec563907b3a03b1860ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 13 Nov 2023 10:02:54 +0100 Subject: [PATCH 274/734] Unify interface of OpenAI and `transformers` integrations --- README.md | 1 + examples/babyagi.py | 2 +- examples/math_generate_code.py | 2 +- examples/meta_prompting.py | 24 ++- examples/pick_odd_one_out.py | 7 +- examples/react.py | 14 +- examples/self_consistency.py | 2 +- outlines/models/__init__.py | 5 +- outlines/models/openai.py | 240 ++++++++++++++--------------- outlines/models/text_completion.py | 4 - 10 files changed, 147 insertions(+), 154 deletions(-) delete mode 100644 outlines/models/text_completion.py diff --git a/README.md b/README.md index 3a8081ed..76b8f3e3 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ via the next-token logits. It can be used with API-based models as well. - [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions - [x] 💾 Caching of generations - [x] 🤗 Integration with Hugging Face's `transformers` models +- [x] 🔒 Integration with OpenAI's API Outlines 〰 has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][twitter] to stay up to date! 
diff --git a/examples/babyagi.py b/examples/babyagi.py index a745c93e..fd4b8b3d 100644 --- a/examples/babyagi.py +++ b/examples/babyagi.py @@ -10,7 +10,7 @@ import outlines.models as models import outlines.text as text -model = models.text_completion.openai("gpt-3.5-turbo") +model = models.openai("gpt-3.5-turbo") ################# diff --git a/examples/math_generate_code.py b/examples/math_generate_code.py index b2b25a94..507a76ec 100644 --- a/examples/math_generate_code.py +++ b/examples/math_generate_code.py @@ -35,6 +35,6 @@ def execute_code(code): prompt = answer_with_code_prompt(question, examples) -answer = models.text_completion.openai("text-davinci-003")(prompt) +answer = models.openai("text-davinci-003")(prompt) result = execute_code(answer) print(f"It takes Carla {result:.0f} minutes to download the file.") diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index b41843db..eb10b9bd 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -22,10 +22,10 @@ def solve(question): Let's solve this problem by splitting it into steps. """ - complete = models.text_completion.openai(model_name, max_tokens=500) + model = models.openai(model_name) prompt = solve(question) - answer = complete(prompt) + answer = model(prompt, 500) completed = prompt + answer return completed @@ -43,12 +43,12 @@ def determine_goal(question): def solve(memory): """{{memory}}. Let's begin.""" - complete = models.text_completion.openai(model_name, max_tokens=500) + model = models.openai(model_name) prompt = determine_goal(question) - answer = complete(prompt, stop_at=["."]) + answer = model(prompt, stop_at=["."]) prompt = solve(prompt + answer) - answer = complete(prompt) + answer = model(prompt, max_tokens=500) completed = prompt + answer return completed @@ -82,13 +82,12 @@ def get_answer(question, expert, memory): {{question}} """ - complete_expert = models.text_completion.openai(model_name) - complete_answer = models.text_completion.openai(model_name, max_tokens=500) + model = models.openai(model_name) prompt = find_expert(question) - expert = complete_expert(prompt, stop_at=['"']) + expert = model(prompt, stop_at=['"']) prompt = get_answer(question, expert, prompt + expert) - answer = complete_answer(prompt) + answer = model(prompt, max_tokens=500) completed = prompt + answer return completed @@ -110,13 +109,12 @@ def get_answer(expert, memory): For instance, {{expert}} would answer """ - model_expert = models.text_completion.openai(model_name) - model_answer = models.text_completion.openai(model_name, max_tokens=500) + model = models.openai(model_name) prompt = find_expert(question) - expert = model_expert(prompt, stop_at=["\n", "."]) + expert = model(prompt, stop_at=["\n", "."]) prompt = get_answer(expert, prompt + expert) - answer = model_answer(prompt) + answer = model(prompt, max_tokens=500) completed = prompt + answer return completed diff --git a/examples/pick_odd_one_out.py b/examples/pick_odd_one_out.py index d973c11c..28612503 100644 --- a/examples/pick_odd_one_out.py +++ b/examples/pick_odd_one_out.py @@ -29,13 +29,12 @@ def build_ooo_prompt(options): """ -reasoning_model = models.text_completion.openai("text-davinci-003") -result_model = models.text_completion.openai("text-davinci-003") +model = models.openai("text-davinci-003") options = ["sea", "mountains", "plains", "sock"] prompt = build_ooo_prompt(options) -reasoning = reasoning_model(prompt, stop_at=["Pick the odd word", "So the odd one"]) +reasoning = model(prompt, stop_at=["Pick the odd word", "So the 
odd one"]) prompt += reasoning -result = result_model(prompt) +result = model(prompt) prompt += result print(prompt) diff --git a/examples/react.py b/examples/react.py index 76c17042..c3964cfa 100644 --- a/examples/react.py +++ b/examples/react.py @@ -45,22 +45,22 @@ def search_wikipedia(query: str): prompt = build_reAct_prompt("Where is Apple Computers headquarted? ") -complete = models.text_completion.openai( - "gpt-3.5-turbo", max_tokens=128, temperature=1.0 -) +complete = models.openai("gpt-3.5-turbo", temperature=1.0) for i in range(1, 10): - mode = complete(prompt, is_in=["Tho", "Act"]) + mode = complete(prompt, is_in=["Tho", "Act"], max_tokens=128) prompt = add_mode(i, mode, "", prompt) if mode == "Tho": - thought = complete(prompt, stop_at="\n") + thought = complete(prompt, stop_at="\n", max_tokens=128) prompt += f"{thought}" elif mode == "Act": - action = complete(prompt, is_in=["Search", "Finish"]) + action = complete(prompt, is_in=["Search", "Finish"], max_tokens=128) prompt += f"{action} '" - subject = complete(prompt, stop_at=["'"]) # Apple Computers headquartered + subject = complete( + prompt, stop_at=["'"], max_tokens=128 + ) # Apple Computers headquartered subject = " ".join(subject.split()[:2]) prompt += f"{subject}'" diff --git a/examples/self_consistency.py b/examples/self_consistency.py index 1a3a3bb7..6aded6e6 100644 --- a/examples/self_consistency.py +++ b/examples/self_consistency.py @@ -55,7 +55,7 @@ def few_shots(question, examples): """ -model = models.text_completion.openai("text-davinci-003", max_tokens=128) +model = models.openai("text-davinci-003") prompt = few_shots(question, examples) answers = model(prompt, samples=100) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 086e33b5..81497fbd 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,6 +5,5 @@ codebase. """ -from . import text_completion -from .openai import OpenAICompletion -from .transformers import transformers +from .openai import openai, OpenAIAPI +from .transformers import transformers, Transformers diff --git a/outlines/models/openai.py b/outlines/models/openai.py index b3ab4a8e..57ce857d 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -14,52 +14,112 @@ import outlines from outlines.caching import cache -__all__ = [ - "OpenAICompletion", -] - - -def OpenAICompletion( - model_name: str, - max_tokens: Optional[int] = 216, - temperature: Optional[float] = 1.0, -) -> Callable: - """Create a function that will call the OpenAI completion API. - - You should have the `openai` package installed. Available models are listed - in the `OpenAI documentation `_. - - Parameters - ---------- - model_name: str - The name of the model as listed in the OpenAI documentation. - max_tokens - The maximum number of tokens to generate. - temperature - Value used to module the next token probabilities. - - Returns - ------- - A function that will call OpenAI's completion API with the given parameters - when passed a prompt. - - """ - - if "text-" in model_name: - call_api = call_completion_api - format_prompt = lambda x: x - extract_choice = lambda x: x["text"] - elif "gpt-" in model_name: - call_api = call_chat_completion_api - format_prompt = lambda x: [{"role": "user", "content": x}] - extract_choice = lambda x: x["message"]["content"] - else: - raise NameError( - f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." 
- ) +__all__ = ["OpenAIAPI", "openai"] + + +class OpenAIAPI: + def __init__(self, model_name: str, temperature: float = 1.0): + if "text-" in model_name: + call_api = call_completion_api + format_prompt = lambda x: x + extract_choice = lambda x: x["text"] + elif "gpt-" in model_name: + call_api = call_chat_completion_api + format_prompt = lambda x: [{"role": "user", "content": x}] + extract_choice = lambda x: x["message"]["content"] + else: + raise NameError( + f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." + ) + + @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") + async def generate_base( + prompt: str, max_tokens: int, stop_at: List[Optional[str]], samples: int + ) -> str: + responses = await call_api( + model_name, + format_prompt(prompt), + int(max_tokens), + temperature, + stop_at, + {}, + samples, + ) + + if samples == 1: + results = np.array([extract_choice(responses["choices"][0])]) + else: + results = np.array( + [extract_choice(responses["choices"][i]) for i in range(samples)] + ) + + return results + + @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") + async def generate_choice( + prompt: str, max_tokens: int, is_in: List[str], samples: int + ) -> Union[List[str], str]: + """Generate a sequence that must be one of many options. + + .. warning:: + + This function will call the API once for every token generated. + + We tokenize every choice, iterate over the token lists, create a mask + with the current tokens and generate one token. We progressively + eliminate the choices that don't start with the currently decoded + sequence. - def generate( + """ + try: + import tiktoken + except ImportError: + raise ImportError( + "The `tiktoken` library needs to be installed in order to choose `outlines.models.openai` with `is_in`" + ) + + tokenizer = tiktoken.encoding_for_model(model_name) + encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] + + decoded_samples = [] + for _ in range(samples): + decoded: List[str] = [] + for i in range(max([len(word) for word in encoded])): + mask = {} + for word, tokenized_word in zip(is_in, encoded): + if not word.startswith("".join(decoded)): + continue + try: + mask[tokenized_word[i]] = 100 + except IndexError: + pass + + if len(mask) == 0: + break + + response = await call_api( + model_name, + format_prompt(prompt), + 1, + temperature, + [], + mask, + samples, + ) + decoded.append(extract_choice(response["choices"][0])) + prompt = prompt + "".join(decoded) + + decoded_samples.append("".join(decoded)) + + return np.array(decoded_samples) + + self.generate_base = generate_base + self.generate_choice = generate_choice + + def __call__( + self, prompt: str, + max_tokens: int = 500, *, samples=1, stop_at: Union[List[Optional[str]], str] = [], @@ -68,86 +128,16 @@ def generate( if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return generate_choice(prompt, is_in, samples) + return self.generate_choice(prompt, max_tokens, is_in, samples) else: if isinstance(stop_at, str): stop_at = [stop_at] - return generate_base(prompt, stop_at, samples) - - @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") - async def generate_base( - prompt: str, stop_at: List[Optional[str]], samples: int - ) -> str: - responses = await call_api( - model_name, - format_prompt(prompt), - max_tokens, - temperature, - stop_at, - {}, - samples, - ) - - 
if samples == 1: - results = np.array([extract_choice(responses["choices"][0])]) - else: - results = np.array( - [extract_choice(responses["choices"][i]) for i in range(samples)] - ) + return self.generate_base(prompt, max_tokens, stop_at, samples) - return results - - @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") - async def generate_choice( - prompt: str, is_in: List[str], samples: int - ) -> Union[List[str], str]: - """Generate a sequence that must be one of many options. - - We tokenize every choice, iterate over the token lists, create a mask - with the current tokens and generate one token. We progressively - eliminate the choices that don't start with the currently decoded - sequence. - - """ - import tiktoken - - assert is_in is not None - tokenizer = tiktoken.encoding_for_model(model_name) - encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] - - decoded_samples = [] - for _ in range(samples): - decoded: List[str] = [] - for i in range(max([len(word) for word in encoded])): - mask = {} - for word, tokenized_word in zip(is_in, encoded): - if not word.startswith("".join(decoded)): - continue - try: - mask[tokenized_word[i]] = 100 - except IndexError: - pass - - if len(mask) == 0: - break - - response = await call_api( - model_name, - format_prompt(prompt), - 1, - temperature, - [], - mask, - samples, - ) - decoded.append(extract_choice(response["choices"][0])) - prompt = prompt + "".join(decoded) - - decoded_samples.append("".join(decoded)) + pass - return np.array(decoded_samples) - return generate +openai = OpenAIAPI def error_handler(api_call_fn: Callable) -> Callable: @@ -204,7 +194,12 @@ async def call_completion_api( logit_bias: Dict[str, int], num_samples: int, ): - import openai + try: + import openai + except ImportError: + raise ImportError( + "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." + ) response = await openai.Completion.acreate( engine=model, @@ -230,7 +225,12 @@ async def call_chat_completion_api( logit_bias: Dict[str, int], num_samples: int, ): - import openai + try: + import openai + except ImportError: + raise ImportError( + "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." + ) response = await openai.ChatCompletion.acreate( model=model, diff --git a/outlines/models/text_completion.py b/outlines/models/text_completion.py deleted file mode 100644 index 49155b29..00000000 --- a/outlines/models/text_completion.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Router for text completion models.""" -from .openai import OpenAICompletion - -openai = OpenAICompletion From 7e167a3661c8ba470ea1271b27dc2dfe7dbd20c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 13 Nov 2023 10:03:50 +0100 Subject: [PATCH 275/734] Raise exception when trying to do guided generation with OpenAI API --- outlines/models/__init__.py | 4 ++-- outlines/text/generate/sequence.py | 5 +++++ tests/text/generate/test_sequence.py | 7 +++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 81497fbd..e0bc748f 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,5 +5,5 @@ codebase. 
""" -from .openai import openai, OpenAIAPI -from .transformers import transformers, Transformers +from .openai import OpenAIAPI, openai +from .transformers import Transformers, transformers diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 9c277200..8550c2e8 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -3,6 +3,8 @@ import torch +from outlines.models import OpenAIAPI + if TYPE_CHECKING: from outlines.models.transformers import KVCacheType, Transformers from outlines.text.generate.sample import Sampler @@ -33,6 +35,9 @@ def __init__( such functions. """ + if isinstance(model, OpenAIAPI): + raise TypeError("Cannot use guided generation with the OpenAI API.") + self.model = model self.device = model.device self.max_tokens = max_tokens diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 05980872..20c5ad01 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -5,10 +5,17 @@ import pytest import torch +from outlines import models from outlines.models.tokenizer import Tokenizer from outlines.text.generate.sequence import Sequence +def test_openai_error(): + model = models.openai("text-davinci-003") + with pytest.raises(TypeError): + Sequence(model) + + class MockModel: def __init__(self, tokenizer, logits): self.tokenizer = tokenizer From a257414e76a73d3146670be552705f1cf2892af7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 13 Nov 2023 11:25:33 +0100 Subject: [PATCH 276/734] Add documentation for the OpenAI integration --- docs/api/models.md | 2 + docs/reference/openai_text_generation.md | 167 +++++++++++++++++++++++ docs/reference/text_generation.md | 1 - mkdocs.yml | 4 +- 4 files changed, 172 insertions(+), 2 deletions(-) create mode 100644 docs/reference/openai_text_generation.md delete mode 100644 docs/reference/text_generation.md diff --git a/docs/api/models.md b/docs/api/models.md index 124e27fe..27ad297f 100644 --- a/docs/api/models.md +++ b/docs/api/models.md @@ -1 +1,3 @@ ::: outlines.models.transformers + +::: outlines.models.openai diff --git a/docs/reference/openai_text_generation.md b/docs/reference/openai_text_generation.md new file mode 100644 index 00000000..5845545f --- /dev/null +++ b/docs/reference/openai_text_generation.md @@ -0,0 +1,167 @@ +# Generate text with the OpenAI API + +Outlines is focused on 🔓 models, but includes an OpenAI integration nevertheless. You can instantiate a model very simply by calling the [outlines.models.openai][] function, with either a chat or non chat model: + +```python +from outlines import models + +model = models.openai("text-davinci-003") +model = models.openai("gpt4") + +print(type(model)) +# OpenAIAPI +``` + +!!! note + + It is currently not possible to pass a system message to the model. If that is something you need, please [open an Issue](https://github.com/outlines-dev/outlines/issues) or, better, [submit a Pull Request](https://github.com/outlines-dev/outlines/pulls). + +The OpenAI integration supports the following features: + +- The ability to stop the generation when a specified sequence is found [🔗](#stop-when-a-sequence-is-found) +- The ability to choose between different choices [🔗](#multiple-choices) +- Vectorization, i.e. 
the ability to pass an array of prompts and execute all requests concurrently [🔗](#vectorized-calls)
+
+## Stop when a sequence is found
+
+The OpenAI API tends to be chatty and it can be useful to stop the generation once a given sequence has been found, instead of paying for the extra tokens and needing to post-process the output. For instance, if you only want to generate a single sentence:
+
+```python
+from outlines import models
+
+model = models.openai("text-davinci-003")
+response = model("Write a sentence", stop_at=['.'])
+```
+
+## Multiple choices
+
+It can be difficult to deal with a classification problem with the OpenAI API. However well you prompt the model, chances are you are going to have to post-process the output anyway. Sometimes the model will even make up choices. Outlines allows you to *guarantee* that the output of the model will be within a set of choices you specify:
+
+```python
+from outlines import models
+
+prompt = """
+Review: The OpenAI API is very limited. It does not allow me to do guided generation properly.
+Question: What is the overall sentiment of this review?
+Answer:
+"""
+
+model = models.openai("text-davinci-003")
+response = model(prompt, is_in=['Positive', 'Negative'])
+```
+
+## Vectorized calls
+
+A unique feature of Outlines is that calls to the OpenAI API are *vectorized* (in the [NumPy sense](https://numpy.org/doc/stable/reference/generated/numpy.vectorize.html) of the word). In plain English this means that you can pass an array of prompts of arbitrary shape to an OpenAI model and it will return an array of answers. All calls are executed concurrently, which means this takes roughly the same time as calling the model with a single prompt:
+
+```python
+from outlines import models
+from outlines import text
+
+@text.prompt
+def template(input_numbers):
+    """Use these numbers and basic arithmetic to get 24 as a result:
+
+    Input: {{ input_numbers }}
+    Steps: """
+
+prompts = [
+    template([1, 2, 3]),
+    template([5, 9, 7]),
+    template([10, 12])
+]
+
+model = models.openai("text-davinci-003")
+results = model(prompts)
+print(results.shape)
+# (3,)
+
+print(type(results))
+# <class 'numpy.ndarray'>
+
+print(results)
+# [
+# "\n1. 1 + 2 x 3 = 7\n2. 7 + 3 x 4 = 19\n3. 19 + 5 = 24",
+# "\n1. Add the three numbers together: 5 + 9 + 7 = 21\n2. Subtract 21 from 24: 24 - 21 = 3\n3. Multiply the remaining number by itself: 3 x 3 = 9\n4.
Add the number with the multiplication result: 21 + 9 = 24", +# "'Use these numbers and basic arithmetic to get 24 as a result:\n\nInput: [10, 12]\nSteps:\n\n1. Add the two numbers together: 10 + 12 = 22 \n2. Subtract one of the numbers: 22 - 10 = 12 \n3. Multiply the two numbers together: 12 x 12 = 144 \n4. Divide the first number by the result: 144 / 10 = 14.4 \n5. Add the initial two numbers together again: 14.4 + 12 = 26.4 \n6. Subtract 2: 26.4 - 2 = 24", +# ] +``` + +You can also ask for several samples for a single prompt: + +```python +from outlines import models +from outlines import text + + +@text.prompt +def template(input_numbers): + """Use these numbers and basic arithmetic to get 24 as a result: + + Input: {{ input_numbers }} + Steps:""" + + +model = models.openai("text-davinci-003") +results = model(template([1, 2, 3]), samples=3, stop_at=["\n2"]) +print(results.shape) +# (3,) + +print(results) +# [ +# ' \n1. Subtract 1 from 3', +# '\n1. Add the three numbers: 1 + 2 + 3 = 6', +# ' (1 + 3) x (2 + 2) = 24' +# ] +``` + +Or ask for several samples for an array of prompts. In this case *the last dimension is the sample dimension*: + +```python +from outlines import models +from outlines import text + + +@text.prompt +def template(input_numbers): + """Use these numbers and basic arithmetic to get 24 as a result: + + Input: {{ input_numbers }} + Steps:""" + + +prompts = [template([1, 2, 3]), template([5, 9, 7]), template([10, 12])] + +model = models.openai("text-davinci-003") +results = model(prompts, samples=2, stop_at=["\n2"]) +print(results.shape) +# (3, 2) + +print(results) +# [ +# ['\n1. Add the numbers: 1 + 2 + 3 = 6', ' (3 * 2) - 1 = 5\n 5 * 4 = 20\n 20 + 4 = 24'], +# ['\n\n1. (5 + 9) x 7 = 56', '\n1. 5 x 9 = 45'], +# [' \n1. Add the two numbers together: 10 + 12 = 22', '\n1. Add 10 + 12'] +# ] +``` + +You may find this useful, e.g., to implement [Tree of Thoughts](https://arxiv.org/abs/2305.10601). + +!!! note + + Outlines provides an `@outlines.vectorize` decorator that you can use on any `async` python function. This can be useful for instance when you call a remote API within your workflow. 
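+
+    For instance, a minimal sketch (the `echo` coroutine below is a hypothetical stand-in for a real remote call):
+
+    ```python
+    import outlines
+
+
+    @outlines.vectorize
+    async def echo(prompt):
+        return prompt
+
+
+    results = echo(["a", "b", "c"])  # the three calls are executed concurrently
+    ```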
diff --git a/docs/reference/text_generation.md b/docs/reference/text_generation.md deleted file mode 100644 index a4075eae..00000000 --- a/docs/reference/text_generation.md +++ /dev/null @@ -1 +0,0 @@ -# Generate text diff --git a/mkdocs.yml b/mkdocs.yml index 37fa7ec3..33835f8b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -96,7 +96,9 @@ nav: - Reference: - reference/index.md - Prompting: reference/prompting.md - - Generate text: reference/text_generation.md + - Generate text: + - OpenAI: reference/openai_text_generation.md + - Guided generation: - reference/choices.md - reference/types.md From 587f9ac139651ff2e4af860fbd6e155f38eca675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 13 Nov 2023 11:49:02 +0100 Subject: [PATCH 277/734] Fix the problem with event loop in notebooks --- outlines/base.py | 4 ++++ pyproject.toml | 2 ++ 2 files changed, 6 insertions(+) diff --git a/outlines/base.py b/outlines/base.py index 6287a460..2dada90f 100644 --- a/outlines/base.py +++ b/outlines/base.py @@ -3,6 +3,7 @@ import inspect from typing import Callable, Optional +import nest_asyncio import numpy as np from numpy.lib.function_base import ( _calculate_shapes, @@ -11,6 +12,9 @@ _update_dim_sizes, ) +# Allow nested loops, useful to run in notebooks +nest_asyncio.apply() + class vectorize: """Returns an object that acts like a function but takes arrays as an input. diff --git a/pyproject.toml b/pyproject.toml index 7fc1dceb..1661cbec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ dependencies = [ "interegular", "jinja2", "lark", + "nest_asyncio", "numpy", "pillow", "perscache", @@ -89,6 +90,7 @@ module = [ "joblib.*", "jsonschema.*", "openai", + "nest_asyncio", "numpy.*", "perscache.*", "PIL", From 757390cf2b18867e24e0ea343bb6bcd7766da3b2 Mon Sep 17 00:00:00 2001 From: Bas van Ooyen Date: Mon, 13 Nov 2023 14:54:03 +0100 Subject: [PATCH 278/734] added optional openai api key parameter. Will use the environment variable when no key is supplied --- outlines/models/openai.py | 59 +++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 57ce857d..6979a6d8 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -18,7 +18,14 @@ class OpenAIAPI: - def __init__(self, model_name: str, temperature: float = 1.0): + def __init__( + self, + model_name: str, + api_key: Optional[str] = os.getenv("OPENAI_API_KEY"), + temperature: float = 1.0, + ): + self.api_key = api_key + if "text-" in model_name: call_api = call_completion_api format_prompt = lambda x: x @@ -32,9 +39,13 @@ def __init__(self, model_name: str, temperature: float = 1.0): f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." 
) - @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") + @functools.partial(outlines.vectorize, signature="(),(),(m),(),()->(s)") async def generate_base( - prompt: str, max_tokens: int, stop_at: List[Optional[str]], samples: int + prompt: str, + max_tokens: int, + stop_at: List[Optional[str]], + samples: int, + api_key: str, ) -> str: responses = await call_api( model_name, @@ -44,6 +55,7 @@ async def generate_base( stop_at, {}, samples, + api_key, ) if samples == 1: @@ -55,20 +67,20 @@ async def generate_base( return results - @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") + @functools.partial(outlines.vectorize, signature="(),(),(m),(),()->(s)") async def generate_choice( - prompt: str, max_tokens: int, is_in: List[str], samples: int + prompt: str, max_tokens: int, is_in: List[str], samples: int, api_key: str ) -> Union[List[str], str]: """Generate a sequence that must be one of many options. + ` + .. warning:: - .. warning:: + This function will call the API once for every token generated. - This function will call the API once for every token generated. - - We tokenize every choice, iterate over the token lists, create a mask - with the current tokens and generate one token. We progressively - eliminate the choices that don't start with the currently decoded - sequence. + We tokenize every choice, iterate over the token lists, create a mask + with the current tokens and generate one token. We progressively + eliminate the choices that don't start with the currently decoded + sequence. """ try: @@ -105,6 +117,7 @@ async def generate_choice( [], mask, samples, + api_key, ) decoded.append(extract_choice(response["choices"][0])) prompt = prompt + "".join(decoded) @@ -128,13 +141,15 @@ def __call__( if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return self.generate_choice(prompt, max_tokens, is_in, samples) + return self.generate_choice( + prompt, max_tokens, is_in, samples, self.api_key + ) else: if isinstance(stop_at, str): stop_at = [stop_at] - return self.generate_base(prompt, max_tokens, stop_at, samples) - - pass + return self.generate_base( + prompt, max_tokens, stop_at, samples, self.api_key + ) openai = OpenAIAPI @@ -146,14 +161,6 @@ def error_handler(api_call_fn: Callable) -> Callable: def call(*args, **kwargs): import openai - try: - os.environ["OPENAI_API_KEY"] - except KeyError: - raise KeyError( - "Could not find the `OPENAI_API_KEY` environment variable, which is necessary to call " - "OpenAI's APIs. Please make sure it is set before re-running your model." 
- ) - try: return api_call_fn(*args, **kwargs) except ( @@ -193,6 +200,7 @@ async def call_completion_api( stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, + api_key: str, ): try: import openai @@ -209,6 +217,7 @@ async def call_completion_api( stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, n=int(num_samples), + api_key=api_key, ) return response @@ -224,6 +233,7 @@ async def call_chat_completion_api( stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, + api_key: str, ): try: import openai @@ -240,6 +250,7 @@ async def call_chat_completion_api( stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, n=int(num_samples), + api_key=api_key, ) return response From 4266fac90007dd09bc6da211f5848dc2e532f3bf Mon Sep 17 00:00:00 2001 From: Bas van Ooyen Date: Mon, 13 Nov 2023 17:32:52 +0100 Subject: [PATCH 279/734] formatting --- outlines/models/openai.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 6979a6d8..0172777a 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -72,15 +72,15 @@ async def generate_choice( prompt: str, max_tokens: int, is_in: List[str], samples: int, api_key: str ) -> Union[List[str], str]: """Generate a sequence that must be one of many options. - ` - .. warning:: - This function will call the API once for every token generated. + .. warning:: - We tokenize every choice, iterate over the token lists, create a mask - with the current tokens and generate one token. We progressively - eliminate the choices that don't start with the currently decoded - sequence. + This function will call the API once for every token generated. + + We tokenize every choice, iterate over the token lists, create a mask + with the current tokens and generate one token. We progressively + eliminate the choices that don't start with the currently decoded + sequence. 
""" try: From f029d2616d99967118130d034910d62cb5cd5614 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 12 Nov 2023 08:46:59 +0100 Subject: [PATCH 280/734] Build a JSON schema from a function's signature --- outlines/text/generate/regex.py | 28 ++++++---- outlines/text/json_schema.py | 53 +++++++++++++++++-- .../generate/test_integration_transfomers.py | 16 ++++++ tests/text/test_fsm.py | 4 +- tests/text/test_json_schema.py | 28 ++++++++-- 5 files changed, 109 insertions(+), 20 deletions(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index af87f9f5..1860b83b 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -1,6 +1,6 @@ +import json as pyjson import math -from json import dumps -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Tuple, Union import interegular import torch @@ -8,7 +8,7 @@ from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm from outlines.text.generate.continuation import Continuation -from outlines.text.json_schema import build_regex_from_schema +from outlines.text.json_schema import build_regex_from_object if TYPE_CHECKING: from outlines.text.generate.sample import Sampler @@ -386,7 +386,7 @@ def choice( def json( model, - schema: Union[str, BaseModel], + schema_object: Union[str, BaseModel, Callable], max_tokens: Optional[int] = None, *, sampler: Optional["Sampler"] = None, @@ -397,14 +397,15 @@ def json( .. note: Reuse instances of these guided generators whenever possible, because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. + token sequences from them. See the docstring for `Regex`. Parameters --------- model The language model to use to compute the next-token logits. schema - The JSON schema or Pydantic model that guides the generation. + The JSON schema, Pydantic model or function (signature) that guides the + generation. max_tokens The maximum number of tokens to generate. sampler @@ -416,10 +417,17 @@ def json( Allow sampling of tokens corresponding to empty strings. 
""" - if isinstance(schema, type(BaseModel)): - schema = dumps(schema.model_json_schema()) - - regex_str = build_regex_from_schema(schema) + if isinstance(schema_object, type(BaseModel)): + schema = pyjson.dumps(schema_object.model_json_schema()) + format_fn = lambda x: schema_object.model_validate(pyjson.loads(x)) + elif callable(schema_object): + schema = pyjson.dumps(get_schema_from_signature(schema_object)) + # TODO: Convert string fields to their respective types + format_fn = lambda x: pyjson.loads(x) + else: + format_fn = lambda x: x + + regex_str = build_regex_from_object(schema) return Regex( model, diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index f2fc351e..4044d225 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -1,8 +1,11 @@ +import inspect import itertools as it import json import re +from typing import Callable, Union from jsonschema.protocols import Validator +from pydantic import BaseModel, create_model from referencing import Registry, Resource from referencing._core import Resolver from referencing.jsonschema import DRAFT202012 @@ -23,23 +26,43 @@ } -def build_regex_from_schema(schema: str): +def build_regex_from_object(object: Union[str, Callable, BaseModel]): """Turn a JSON schema into a regex that matches any JSON object that follows this schema. + JSON Schema is a declarative language that allows to annotate JSON documents + with types and descriptions. These schemas can be generated from any Python + datastructure that has type annotation: namedtuples, dataclasses, Pydantic + models. And by ensuring that the generation respects the schema we ensure + that the output can be parsed into these objects. + This function parses the provided schema and builds a generation schedule which + mixes deterministic generation (fixed strings), and sampling with constraints. + Parameters ---------- schema - A string that contains the JSON schema. + A string that represents a JSON Schema. Returns ------- - A string that contains a regular expression that matches any JSON object that - follows the schema. + A generation schedule. A list of strings that represent the JSON + schema's structure and regular expression that define the structure of + the fields. + + References + ---------- + .. [0] JSON Schema. https://json-schema.org/ """ + + if isinstance(object, type(BaseModel)): + schema = object.model_json_schema() + elif callable(object): + schema = get_schema_from_signature(object) + else: + schema = json.loads(object) + Validator.check_schema(schema) - schema = json.loads(schema) # Build reference resolver schema = Resource(contents=schema, specification=DRAFT202012) @@ -214,3 +237,23 @@ def to_regex(resolver: Resolver, instance: dict): regular expression. Make sure it is valid to the JSON Schema specification. If it is, please open an issue on the Outlines repository""" ) + + +def get_schema_from_signature(fn: Callable) -> str: + """Turn a function signature into a JSON schema. + + Every JSON object valid to the output JSON Schema can be passed + to `fn` using the ** unpacking syntax. + + """ + signature = inspect.signature(fn) + arguments = {} + for name, arg in signature.parameters.items(): + if arg.annotation == inspect._empty: + raise ValueError("Each argument must have a type annotation") + else: + arguments[name] = (arg.annotation, ...) 
+ + model = create_model("Arguments", **arguments) + + return model.model_json_schema() diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index d54ca884..149ea6c8 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -237,6 +237,22 @@ class Spam(BaseModel): ) +def test_transformers_json_function(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name) + prompt = "Output arguments for the function" + + def function(foo: int, bar: List[int]): + return foo + sum(bar) + + rng = torch.Generator() + rng.manual_seed(4) + + sequence = generate.json(model, function, max_tokens=100)(prompt, rng=rng) + assert isinstance(sequence, dict) + assert isinstance(function(**sequence), int) + + def test_transformers_logits_vocab_size(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py index ce4a3647..f10f0f81 100644 --- a/tests/text/test_fsm.py +++ b/tests/text/test_fsm.py @@ -429,7 +429,7 @@ def test_json_index_performance(): from pydantic import BaseModel, constr import outlines.models as models - from outlines.text.generate.regex import Regex, build_regex_from_schema + from outlines.text.generate.regex import Regex, build_regex_from_object class Weapon(str, Enum): sword = "sword" @@ -457,7 +457,7 @@ class Character(BaseModel): json_schema = json.dumps(Character.model_json_schema()) def build_regex(): - regex_str = build_regex_from_schema(json_schema) + regex_str = build_regex_from_object(json_schema) Regex(model, regex_str, 100) profiler = LineProfiler(create_fsm_index_end_to_end) diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index b84ac39f..38039298 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -1,5 +1,6 @@ import json import re +from typing import List import pytest from pydantic import BaseModel, constr @@ -11,11 +12,32 @@ NUMBER, STRING, STRING_INNER, - build_regex_from_schema, + build_regex_from_object, + get_schema_from_signature, to_regex, ) +def test_function_basic(): + def test_function(foo: str, bar: List[int]): + ... + + result = get_schema_from_signature(test_function) + assert result["type"] == "object" + assert list(result["properties"].keys()) == ["foo", "bar"] + assert result["properties"]["foo"]["type"] == "string" + assert result["properties"]["bar"]["type"] == "array" + assert result["properties"]["bar"]["items"]["type"] == "integer" + + +def test_function_no_type(): + def test_function(foo, bar: List[int]): + ... 
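+    # `foo` has no type annotation, so `get_schema_from_signature` is expected to raise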
+ + with pytest.raises(ValueError): + get_schema_from_signature(test_function) + + def test_from_pydantic(): class User(BaseModel): user_id: int @@ -26,7 +48,7 @@ class User(BaseModel): is_true: bool schema = json.dumps(User.model_json_schema()) - schedule = build_regex_from_schema(schema) + schedule = build_regex_from_object(schema) assert isinstance(schedule, str) @@ -316,7 +338,7 @@ def test_match_number(pattern, does_match): ) def test_match(schema, regex, examples): schema = json.dumps(schema) - test_regex = build_regex_from_schema(schema) + test_regex = build_regex_from_object(schema) assert test_regex == regex for string, does_match in examples: From 2f73c5797e4b3168a9a45d32f5b9b7edd5d62af1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 12 Nov 2023 20:31:10 +0100 Subject: [PATCH 281/734] Return an instance of the Pydantic model with the generated data --- examples/dating_profile.py | 5 +- outlines/text/generate/regex.py | 14 +++-- .../generate/test_integration_transfomers.py | 55 +++++++++---------- 3 files changed, 38 insertions(+), 36 deletions(-) diff --git a/examples/dating_profile.py b/examples/dating_profile.py index 485dfa7d..228f1399 100644 --- a/examples/dating_profile.py +++ b/examples/dating_profile.py @@ -121,12 +121,9 @@ def dating_profile_prompt(description: str, examples: list[Example]): new_description = "I'm a laid-back lawyer who spends a lot of his free-time gaming. I work in a corporate office, but ended up here after the start-up I cofounded got acquired, so still play ping pong with my cool coworkers every day. I have a bar at home where I make cocktails, which is great for entertaining friends. I secretly like to wear suits and get a new one tailored every few months. I also like weddings because I get to wear those suits, and it's a good excuse for a date. I watch the latest series because I'm paying, with my hard-earned money, for every streaming service." prompt = dating_profile_prompt(description=new_description, examples=samples) -profile = text.generate.json(model, DatingProfile)(prompt) +profile = text.generate.json(model, DatingProfile)(prompt) # type: ignore print(profile) -parsed_profile = DatingProfile.model_validate_json(profile) -print(parsed_profile) - # Sample generated profiles """ { diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 1860b83b..24eabffc 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -8,7 +8,7 @@ from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm from outlines.text.generate.continuation import Continuation -from outlines.text.json_schema import build_regex_from_object +from outlines.text.json_schema import build_regex_from_object, get_schema_from_signature if TYPE_CHECKING: from outlines.text.generate.sample import Sampler @@ -48,6 +48,7 @@ def __init__( final_states: Optional[Set[int]] = None, states_to_token_maps: Optional[Dict[int, Dict[int, int]]] = None, empty_token_ids: Optional[Set[int]] = None, + format_fn: Callable[[str], Union[BaseModel, dict, str]] = lambda x: x, ): """ @@ -73,6 +74,8 @@ def __init__( corresponding FSM end states. empty_token_ids Pre-computed set of token ids for tokens that are empty strings. + format_fn + The function to apply to the generated JSON. 
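+            Defaults to the identity function, so plain regex-guided generation returns the generated string unchanged.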
""" super().__init__(model, max_tokens, sampler, stop) @@ -113,6 +116,7 @@ def __init__( self.mask_cache: Dict[Tuple[int, int], torch.LongTensor] = {} self.regex_string = regex_string self.allow_empty_tokens = allow_empty_tokens + self.format_fn = format_fn def create_proposal( self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor @@ -215,9 +219,10 @@ def _get_mask_for_state( return mask - def postprocess_completions(self, completions: List[str]) -> List[str]: + def postprocess_completions(self, completions: List[str]): self.last_fsm_states.clear() - return super().postprocess_completions(completions) + results: List[str] = super().postprocess_completions(completions) + return [self.format_fn(result) for result in results] def regex( @@ -391,7 +396,7 @@ def json( *, sampler: Optional["Sampler"] = None, allow_empty_tokens: bool = True, -): +) -> Union[dict, BaseModel]: """Generate a text sequence that follows a JSON schema or Pydantic model. .. note: @@ -435,4 +440,5 @@ def json( max_tokens, sampler=sampler, allow_empty_tokens=allow_empty_tokens, + format_fn=format_fn, ) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 149ea6c8..18d5e9eb 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -1,4 +1,3 @@ -import json import re from enum import Enum from typing import List, Union @@ -75,7 +74,7 @@ def test_transformers_integration_integer(): rng.manual_seed(0) model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" - model = models.transformers(model_name, device="cpu") + model = models.transformers(model_name) prompt = "Write a short sentence" sequence = generate.integer(model, max_tokens=10)(prompt, rng=rng) @@ -88,7 +87,7 @@ def test_transformers_integration_integer_array(): rng.manual_seed(0) model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" - model = models.transformers(model_name, device="cpu") + model = models.transformers(model_name) prompts = ["Give me a number", "And another one"] sequence = generate.integer(model, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) @@ -102,7 +101,7 @@ def test_transformers_integration_float(): rng.manual_seed(0) model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" - model = models.transformers(model_name, device="cpu") + model = models.transformers(model_name) prompt = "Write a short sentence" sequence = generate.float(model, max_tokens=10)(prompt, rng=rng) @@ -143,13 +142,13 @@ class Spam(BaseModel): rng = torch.Generator() rng.manual_seed(0) # make sure that `bar` is not an int - sequence = generate.json(model, Spam, max_tokens=1000)(prompt, rng=rng) - parsed = json.loads(sequence) - assert isinstance(parsed["foo"], int) - assert isinstance(parsed["bar"], int) - assert isinstance(parsed["spam"], str) - assert isinstance(parsed["fuzz"], bool) - assert len(parsed["spam"]) == 10 + result = generate.json(model, Spam, max_tokens=1000)(prompt, rng=rng) + assert isinstance(result, BaseModel) + assert isinstance(result.foo, int) + assert isinstance(result.bar, float) + assert isinstance(result.spam, str) + assert isinstance(result.fuzz, bool) + assert len(result.spam) == 10 def test_transformers_json_str_enum(): @@ -169,10 +168,10 @@ class User(BaseModel): user_id: int name: Name - sequence = generate.json(model, User)(prompt, rng=rng) - parsed = json.loads(sequence) - assert isinstance(parsed["user_id"], int) - assert parsed["name"] in ["John", 
"Marc", "Michel"] + result = generate.json(model, User)(prompt, rng=rng) + assert isinstance(result, BaseModel) + assert isinstance(result.user_id, int) + assert result.name in ["John", "Marc", "Michel"] def test_transformers_json_int_enum(): @@ -190,10 +189,10 @@ class Id(int, Enum): class User(BaseModel): user_id: Id - sequence = generate.json(model, User)(prompt, rng=rng) - parsed = json.loads(sequence) - assert isinstance(parsed["user_id"], int) - assert parsed["user_id"] in [1, 2] + result = generate.json(model, User)(prompt, rng=rng) + assert isinstance(result, BaseModel) + assert isinstance(result.user_id, int) + assert result.user_id in [1, 2] def test_transformers_json_array(): @@ -208,11 +207,11 @@ class User(BaseModel): rng = torch.Generator() rng.manual_seed(0) - sequence = generate.json(model, User)(prompt, rng=rng) - parsed = json.loads(sequence) - assert isinstance(parsed["user_id"], int) - assert isinstance(parsed["value"], list) - for value in parsed["value"]: + result = generate.json(model, User)(prompt, rng=rng) + assert isinstance(result, BaseModel) + assert isinstance(result.user_id, int) + assert isinstance(result.value, list) + for value in result.value: assert isinstance(value, float) or isinstance(value, int) @@ -229,11 +228,11 @@ class Spam(BaseModel): rng.manual_seed(4) sequence = generate.json(model, Spam, max_tokens=100)(prompt, rng=rng) - parsed = json.loads(sequence) + assert isinstance(sequence, BaseModel) assert ( - isinstance(parsed["bar"], int) - or isinstance(parsed["bar"], float) - or isinstance(parsed["bar"], str) + isinstance(sequence.bar, int) + or isinstance(sequence.bar, float) + or isinstance(sequence.bar, str) ) From 96773e1ec9264e360fc56360188cad9743487900 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 13 Nov 2023 21:31:18 +0100 Subject: [PATCH 282/734] Document the JSON-guided generation --- docs/reference/json.md | 56 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/docs/reference/json.md b/docs/reference/json.md index 02ed5a24..5285aa24 100644 --- a/docs/reference/json.md +++ b/docs/reference/json.md @@ -1 +1,55 @@ -# JSON +# Make the LLM follow a JSON Schema + +Outlines can make any open source model return a JSON object that follows a structure that is specified by the user. This is useful whenever we want the output of the model to be processed by code downstream: code does not understand natural language but rather the structured language it has been programmed to understand. + +There are mostly two reasons why someone would want to get an output formatted as JSON from a LLM: + +1. Parse the answer (e.g. with Pydantic), store it somewhere, return it to a user, etc. +2. Call a function with the result + +Outlines has you covered in both cases! Indeed, to define the structure of the JSON you want the model to follow you can either provide a Pydantic model, or a function. No need to duplicate code! + +## Using Pydantic + +Outlines can infer the structure of the output from a Pydantic model. 
The result is an instance of the model that contains the values returned by the LLM: + +```python +from pydantic import BaseModel + +from outlines import models +from outlines import text + + +class User(BaseModel): + name: str + last_name: str + id: int + + +model = models.transformers("mistralai/Mistral-7B") +generator = text.generate.json(model, User) +result = generator("Create a user profile with the fields name, last_name and id") +print(result) +# User(name="John", last_name="Doe", id=11) +``` + +## From a function's signature + +Outlines can infer the structure of the output from the signature of a function. The result is a dictionary, and can be passed directly to the function using the usual dictionary expansion syntax `**`: + +```python +from outlines import models +from outlines import text + +def concat(a: int, b: int): + return a + b + +model = models.transformers("mistralai/Mistral-7B") +generator = text.generate.json(model, add) +result = generator("Return two integers named a and b respectively. a is odd and b even.") + +print(add(**result)) +# 3 +``` + +A great advantage of passing functions directly to specify the structure is that the structure of the LLM will change with the function's definition. No need to change the code at several places! From 45947045a2a6e751131a70791ddbdf7e70ac7ab6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Sun, 12 Nov 2023 22:08:43 +0100 Subject: [PATCH 283/734] Implement `text.generate.format` constrained generation --- README.md | 21 +++++++ outlines/text/generate/__init__.py | 2 +- outlines/text/generate/regex.py | 51 +++------------- outlines/text/types.py | 28 +++++++++ .../generate/test_integration_transfomers.py | 59 ++++++++++++++++++- tests/text/generate/test_regex.py | 39 +++--------- tests/text/test_types.py | 29 +++++++++ 7 files changed, 149 insertions(+), 80 deletions(-) create mode 100644 outlines/text/types.py create mode 100644 tests/text/test_types.py diff --git a/README.md b/README.md index 76b8f3e3..fab0097c 100644 --- a/README.md +++ b/README.md @@ -254,6 +254,27 @@ print(parsed) The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/outlines-dev/outlines/issues/215), but everything else should work. +### Open functions + +Outlines can infer the structure of the output from the signature of a function. The result is a dictionary, and can be passed directly to the function using the usual dictionary expansion syntax `**`: + +```python +from outlines import models +from outlines import text + +def concat(a: int, b: int): + return a + b + +model = models.transformers("mistralai/Mistral-7B") +generator = text.generate.json(model, add) +result = generator("Return two integers named a and b respectively. a is odd and b even.") + +print(add(**result)) +# 3 +``` + +A great advantage of passing functions directly to specify the structure is that the structure of the LLM will change with the function's definition. No need to change the code at several places! 
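For reference, a quick way to see the schema this signature inference produces is to call the helper introduced in this series directly. A minimal sketch, noting that the exact titles and key ordering in the printed schema may vary with the pydantic version:

```python
# Illustrative sketch: inspect the JSON Schema derived from a function signature.
import json

from outlines.text.json_schema import get_schema_from_signature


def add(a: int, b: int):
    return a + b


print(json.dumps(get_schema_from_signature(add), indent=2))
# Indicatively:
# {
#   "properties": {"a": {"title": "A", "type": "integer"},
#                  "b": {"title": "B", "type": "integer"}},
#   "required": ["a", "b"],
#   "title": "Arguments",
#   "type": "object"
# }
```

Any JSON object matching this schema can be splatted back into `add` with `add(**result)`.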
+ ## Prompting Writing prompts by concatenating strings in pure Python quickly becomes diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py index 359b7f0b..9895d5c1 100644 --- a/outlines/text/generate/__init__.py +++ b/outlines/text/generate/__init__.py @@ -1,2 +1,2 @@ from .continuation import continuation -from .regex import choice, float, integer, json, regex +from .regex import choice, format, json, regex diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 24eabffc..71271467 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -9,6 +9,7 @@ from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm from outlines.text.generate.continuation import Continuation from outlines.text.json_schema import build_regex_from_object, get_schema_from_signature +from outlines.text.types import python_types_to_regex if TYPE_CHECKING: from outlines.text.generate.sample import Sampler @@ -266,8 +267,9 @@ def regex( ) -def integer( +def format( model, + python_type, max_tokens: Optional[int] = None, *, sampler: Optional["Sampler"] = None, @@ -288,6 +290,8 @@ def integer( ---------- model The language model to use to compute the next-token logits. + python_type + The format in which the output is expected, defined as a Python type. max_tokens The maximum number of tokens to generate. sampler @@ -299,51 +303,10 @@ def integer( Allow sampling of tokens corresponding to empty strings. """ + regex_str = python_types_to_regex(python_type) return Regex( model, - r"[-+]?\d+", - max_tokens, - sampler=sampler, - allow_empty_tokens=allow_empty_tokens, - ) - - -def float( - model, - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - allow_empty_tokens: bool = True, -): - """Generate floating-point numbers. - - The regex used to constrain the generation optionally matches plus or minus - signs, and forbids leading zeros (even if the `float` function in Python - allows them). - - .. note: - Reuse instances of these guided generators whenever possible, - because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. - - Parameters - ---------- - model - The language model to use to compute the next-token logits. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - allow_empty_tokens - Allow sampling of tokens corresponding to empty strings. - - """ - return Regex( - model, - r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))", + regex_str, max_tokens, sampler=sampler, allow_empty_tokens=allow_empty_tokens, diff --git a/outlines/text/types.py b/outlines/text/types.py new file mode 100644 index 00000000..d33b213d --- /dev/null +++ b/outlines/text/types.py @@ -0,0 +1,28 @@ +import datetime +from typing import Any + +INTEGER = r"[+-]?(0|[1-9][0-9]*)" +BOOLEAN = "(True|False)" +FLOAT = rf"{INTEGER}(\.[0-9]+)?([eE][+-][0-9]+)?" 
+DATE = r"(\d{4})-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])" +TIME = r"([0-1][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])" +DATETIME = rf"({DATE})(\s)({TIME})" + + +def python_types_to_regex(python_type: Any) -> str: + if python_type == float: + return FLOAT + elif python_type == int: + return INTEGER + elif python_type == bool: + return BOOLEAN + elif python_type == datetime.date: + return DATE + elif python_type == datetime.time: + return TIME + elif python_type == datetime.datetime: + return DATETIME + else: + raise NotImplementedError( + f"The Python type {python_type} is not supported. Please open an issue." + ) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 18d5e9eb..9e5c28f2 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -1,3 +1,4 @@ +import datetime import re from enum import Enum from typing import List, Union @@ -76,7 +77,7 @@ def test_transformers_integration_integer(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name) prompt = "Write a short sentence" - sequence = generate.integer(model, max_tokens=10)(prompt, rng=rng) + sequence = generate.format(model, int, max_tokens=10)(prompt, rng=rng) assert sequence[0] != 0 int(sequence) @@ -89,7 +90,7 @@ def test_transformers_integration_integer_array(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name) prompts = ["Give me a number", "And another one"] - sequence = generate.integer(model, max_tokens=10)(prompts, rng=rng) + sequence = generate.format(model, int, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert len(sequence) == 2 int(sequence[0]) @@ -103,12 +104,64 @@ def test_transformers_integration_float(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name) prompt = "Write a short sentence" - sequence = generate.float(model, max_tokens=10)(prompt, rng=rng) + sequence = generate.format(model, float, max_tokens=10)(prompt, rng=rng) assert sequence[0] != 0 float(sequence) +def test_transformers_integration_bool(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name) + prompt = "Is this True or False?" + sequence = generate.format(model, bool, max_tokens=10)(prompt, rng=rng) + + assert sequence[0] != 0 + bool(sequence) + + +def test_transformers_integration_date(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name) + prompt = "What day is it today?" + sequence = generate.format(model, datetime.date, max_tokens=10)(prompt, rng=rng) + + assert sequence[0] != 0 + datetime.datetime.strptime(sequence, "%Y-%m-%d") + + +def test_transformers_integration_time(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name) + prompt = "What time is it?"
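+    # datetime.time maps to the HH:MM:SS pattern, so strptime("%H:%M:%S") below can parse the output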
+ sequence = generate.format(model, datetime.time, max_tokens=10)(prompt, rng=rng) + + assert sequence[0] != 0 + datetime.datetime.strptime(sequence, "%H:%M:%S") + + +def test_transformers_integration_datetime(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name) + prompt = "What time is it?" + sequence = generate.format(model, datetime.datetime, max_tokens=20)(prompt, rng=rng) + + assert sequence[0] != 0 + datetime.datetime.strptime(sequence, "%Y-%m-%d %H:%M:%S") + + def test_transformers_integration_choice(): rng = torch.Generator() rng.manual_seed(0) diff --git a/tests/text/generate/test_regex.py b/tests/text/generate/test_regex.py index a32a8b85..642e383d 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/text/generate/test_regex.py @@ -116,7 +116,7 @@ def test_regex_no_valid_transition(): ) def test_integer_proposal(input_ids, proposal): model = Model() - generator = generate.integer(model) + generator = generate.format(model, int) logits = torch.ones(len(model.tokenizer.vocabulary)) result = generator.create_proposal(torch.tensor(input_ids), logits) @@ -155,45 +155,20 @@ def test_choice_proposal(): ) -@pytest.mark.parametrize( - "input_ids, proposal", - [ - ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf]]), - ([[3]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), - ], -) -def test_float_proposal(input_ids, proposal): - model = Model() - generator = generate.float(model) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor(input_ids), logits) - assert torch.equal( - result, - torch.tensor(proposal), - ) - - @pytest.mark.parametrize( "input_ids, proposal, with_empty", [ - ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf, 1]], True), - ( - [[]], - [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf, -math.inf]], - False, - ), - ([[3]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf, 1]], True), + ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf]], False), ( [[3]], - [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf, -math.inf]], - False, + [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]], + True, ), ], ) -def test_empty_strings(input_ids, proposal, with_empty): - model = ModelWithEmpty() - generator = generate.float(model, allow_empty_tokens=with_empty) +def test_float_proposal(input_ids, proposal, with_empty): + model = Model() + generator = generate.format(model, float, allow_empty_tokens=with_empty) logits = torch.ones(len(model.tokenizer.vocabulary)) result = generator.create_proposal(torch.tensor(input_ids), logits) diff --git a/tests/text/test_types.py b/tests/text/test_types.py new file mode 100644 index 00000000..d70d5bd7 --- /dev/null +++ b/tests/text/test_types.py @@ -0,0 +1,29 @@ +import datetime + +import pytest + +from outlines.text.types import ( + BOOLEAN, + DATE, + DATETIME, + FLOAT, + INTEGER, + TIME, + python_types_to_regex, +) + + +@pytest.mark.parametrize( + "python_type,regex", + [ + (int, INTEGER), + (float, FLOAT), + (bool, BOOLEAN), + (datetime.date, DATE), + (datetime.time, TIME), + (datetime.datetime, DATETIME), + ], +) +def test_python_types(python_type, regex): + test_regex = python_types_to_regex(python_type) + assert regex == test_regex From facac7169e1df659424cbdb876aa5d72340de20c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 14 Nov 2023 23:31:48 +0100 Subject: [PATCH 
284/734] Fix spelling mistake --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index fab0097c..c8ff49ec 100644 --- a/README.md +++ b/README.md @@ -262,7 +262,7 @@ Outlines can infer the structure of the output from the signature of a function. from outlines import models from outlines import text -def concat(a: int, b: int): +def add(a: int, b: int): return a + b model = models.transformers("mistralai/Mistral-7B") From 1051e8c967d53bb14072234734e941382981f58f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 15 Nov 2023 13:21:40 +0100 Subject: [PATCH 285/734] Remove top-level imports of `transformers` and `datasets` --- outlines/models/transformers.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 7c22f017..85f2c2f9 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -1,8 +1,6 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union import torch -from datasets.fingerprint import Hasher -from transformers.file_utils import SPIECE_UNDERLINE from outlines.models.tokenizer import Tokenizer @@ -157,6 +155,8 @@ def decode(self, token_ids: torch.LongTensor) -> List[str]: return text def convert_token_to_string(self, token: str) -> str: + from transformers.file_utils import SPIECE_UNDERLINE + string = self.tokenizer.convert_tokens_to_string([token]) if self.is_llama: @@ -172,6 +172,8 @@ def __eq__(self, other): return NotImplemented def __hash__(self): + from datasets.fingerprint import Hasher + return hash(Hasher.hash(self.tokenizer)) From 44f79d040fc3307b46c56e0944a25fe02d45f8e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 15 Nov 2023 15:58:22 +0100 Subject: [PATCH 286/734] Fix a typo in the documentation --- docs/reference/json.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/json.md b/docs/reference/json.md index 5285aa24..04d238e5 100644 --- a/docs/reference/json.md +++ b/docs/reference/json.md @@ -41,7 +41,7 @@ Outlines can infer the structure of the output from the signature of a function. 
from outlines import models from outlines import text -def concat(a: int, b: int): +def add(a: int, b: int): return a + b model = models.transformers("mistralai/Mistral-7B") From b60bb7af956904d6742bd6c51ad7420c36f2e683 Mon Sep 17 00:00:00 2001 From: Robin Picard Date: Wed, 15 Nov 2023 10:56:28 +0100 Subject: [PATCH 287/734] Modify the openai model to conform to the new openai sdk v1.0.0 --- outlines/models/openai.py | 124 ++++++++++++--------------- tests/text/generate/test_sequence.py | 8 +- 2 files changed, 62 insertions(+), 70 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 0172777a..d355f4df 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,21 +1,18 @@ """Integration with OpenAI's API.""" import functools import os -from typing import Callable, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union import numpy as np -from tenacity import ( - retry, - retry_if_exception_type, - stop_after_attempt, - wait_random_exponential, -) import outlines from outlines.caching import cache __all__ = ["OpenAIAPI", "openai"] +if TYPE_CHECKING: + from openai import AsyncOpenAI + class OpenAIAPI: def __init__( @@ -23,15 +20,38 @@ def __init__( model_name: str, api_key: Optional[str] = os.getenv("OPENAI_API_KEY"), temperature: float = 1.0, + max_retries: int = 6, ): - self.api_key = api_key + try: + import openai + except ImportError: + raise ImportError( + "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." + ) + + try: + self.client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) + except openai.OpenAIError as e: + raise e + + @error_handler + @cache + async def cached_call_completion_api(*args, **kwargs): + response = await call_completion_api(self.client, *args, **kwargs) + return response + + @error_handler + @cache + async def cached_call_chat_completion_api(*args, **kwargs): + response = await call_chat_completion_api(self.client, *args, **kwargs) + return response if "text-" in model_name: - call_api = call_completion_api + call_api = cached_call_completion_api format_prompt = lambda x: x extract_choice = lambda x: x["text"] elif "gpt-" in model_name: - call_api = call_chat_completion_api + call_api = cached_call_chat_completion_api format_prompt = lambda x: [{"role": "user", "content": x}] extract_choice = lambda x: x["message"]["content"] else: @@ -45,7 +65,7 @@ async def generate_base( max_tokens: int, stop_at: List[Optional[str]], samples: int, - api_key: str, + client: openai.AsyncOpenAI, ) -> str: responses = await call_api( model_name, @@ -55,7 +75,6 @@ async def generate_base( stop_at, {}, samples, - api_key, ) if samples == 1: @@ -69,7 +88,11 @@ async def generate_base( @functools.partial(outlines.vectorize, signature="(),(),(m),(),()->(s)") async def generate_choice( - prompt: str, max_tokens: int, is_in: List[str], samples: int, api_key: str + prompt: str, + max_tokens: int, + is_in: List[str], + samples: int, + client: openai.AsyncOpenAI, ) -> Union[List[str], str]: """Generate a sequence that must be one of many options. 
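For context, the v1 SDK surface this refactor wraps looks roughly like the sketch below; the model name and prompt are placeholders, and retries and error handling are omitted:

```python
import asyncio

from openai import AsyncOpenAI


async def main():
    # Reads OPENAI_API_KEY from the environment when no key is passed explicitly
    client = AsyncOpenAI()
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
        max_tokens=10,
        n=1,
    )
    # `model_dump()` turns the pydantic response object into the plain dict
    # that the integration works with
    print(response.model_dump()["choices"][0]["message"]["content"])


asyncio.run(main())
```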
@@ -117,7 +140,6 @@ async def generate_choice( [], mask, samples, - api_key, ) decoded.append(extract_choice(response["choices"][0])) prompt = prompt + "".join(decoded) @@ -141,15 +163,11 @@ def __call__( if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return self.generate_choice( - prompt, max_tokens, is_in, samples, self.api_key - ) + return self.generate_choice(prompt, max_tokens, is_in, samples, self.client) else: if isinstance(stop_at, str): stop_at = [stop_at] - return self.generate_base( - prompt, max_tokens, stop_at, samples, self.api_key - ) + return self.generate_base(prompt, max_tokens, stop_at, samples, self.client) openai = OpenAIAPI @@ -164,35 +182,26 @@ def call(*args, **kwargs): try: return api_call_fn(*args, **kwargs) except ( - openai.error.RateLimitError, - openai.error.Timeout, - openai.error.TryAgain, - openai.error.APIConnectionError, - openai.error.ServiceUnavailableError, + openai.APITimeoutError, + openai.InternalServerError, + openai.RateLimitError, ) as e: raise OSError(f"Could not connect to the OpenAI API: {e}") except ( - openai.error.AuthenticationError, - openai.error.PermissionError, - openai.error.InvalidRequestError, - openai.error.InvalidAPIType, + openai.AuthenticationError, + openai.BadRequestError, + openai.ConflictError, + openai.PermissionDeniedError, + openai.NotFoundError, + openai.UnprocessableEntityError, ) as e: raise e return call -retry_config = { - "wait": wait_random_exponential(min=1, max=30), - "stop": stop_after_attempt(6), - "retry": retry_if_exception_type(OSError), -} - - -@retry(**retry_config) -@error_handler -@cache async def call_completion_api( + client: "AsyncOpenAI", model: str, prompt: str, max_tokens: int, @@ -200,32 +209,21 @@ async def call_completion_api( stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, - api_key: str, -): - try: - import openai - except ImportError: - raise ImportError( - "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." - ) - - response = await openai.Completion.acreate( - engine=model, +) -> dict: + response = await client.completions.create( + model=model, prompt=prompt, temperature=temperature, max_tokens=max_tokens, stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, n=int(num_samples), - api_key=api_key, ) - return response + return response.model_dump() -@retry(**retry_config) -@error_handler -@cache async def call_chat_completion_api( + client: "AsyncOpenAI", model: str, messages: List[Dict[str, str]], max_tokens: int, @@ -233,16 +231,8 @@ async def call_chat_completion_api( stop_sequences: List[str], logit_bias: Dict[str, int], num_samples: int, - api_key: str, -): - try: - import openai - except ImportError: - raise ImportError( - "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." 
- ) - - response = await openai.ChatCompletion.acreate( +) -> dict: + response = await client.chat.completions.create( model=model, messages=messages, max_tokens=max_tokens, @@ -250,7 +240,5 @@ async def call_chat_completion_api( stop=list(stop_sequences) if len(stop_sequences) > 0 else None, logit_bias=logit_bias, n=int(num_samples), - api_key=api_key, ) - - return response + return response.model_dump() diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index 20c5ad01..f4fd52c0 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -5,13 +5,17 @@ import pytest import torch -from outlines import models +from outlines.models import OpenAIAPI from outlines.models.tokenizer import Tokenizer from outlines.text.generate.sequence import Sequence def test_openai_error(): - model = models.openai("text-davinci-003") + class Mock(OpenAIAPI): + def __init__(self): + pass + + model = Mock() with pytest.raises(TypeError): Sequence(model) From 07bd334ca8bb5d722beb079dc8386a1b36699f70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 15 Nov 2023 21:05:24 +0100 Subject: [PATCH 288/734] Ignore warning from `transformers` using deprecated Pydantic syntax --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 1661cbec..bcab189c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,6 +77,7 @@ testpaths = ["tests"] filterwarnings = [ "error", "ignore::numba.core.errors.NumbaPendingDeprecationWarning", + "ignore::pydantic.warnings.PydanticDeprecatedSince20", "ignore::FutureWarning:transformers.*", "ignore::UserWarning:torch.cuda.*" ] From 4d7ae1cf3d8cd5735ddd517763c1f78b8e04de44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 16 Nov 2023 08:00:54 +0100 Subject: [PATCH 289/734] Add "was this page helpful?" widget --- mkdocs.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/mkdocs.yml b/mkdocs.yml index 33835f8b..4cab4067 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -45,6 +45,20 @@ extra: analytics: provider: google property: !ENV GOOGLE_ANALYTICS_KEY + feedback: + title: Was this page helpful? + ratings: + - icon: material/thumb-up-outline + name: This page was helpful + data: 1 + note: >- + Thanks for your feedback! + - icon: material/thumb-down-outline + name: This page could be improved + data: 0 + note: >- + Thanks for your feedback! Help us improve this page by + using our feedback form. 
# Extensions markdown_extensions: From f1f5c07d41047b50c3530156ac41e0f82cb2046b Mon Sep 17 00:00:00 2001 From: Ivan Herreros Date: Thu, 16 Nov 2023 15:37:23 +0100 Subject: [PATCH 290/734] Remove unused arguments in OpenAI generate functions --- outlines/models/openai.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index d355f4df..e845b940 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -30,20 +30,20 @@ def __init__( ) try: - self.client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) + client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) except openai.OpenAIError as e: raise e @error_handler @cache async def cached_call_completion_api(*args, **kwargs): - response = await call_completion_api(self.client, *args, **kwargs) + response = await call_completion_api(client, *args, **kwargs) return response @error_handler @cache async def cached_call_chat_completion_api(*args, **kwargs): - response = await call_chat_completion_api(self.client, *args, **kwargs) + response = await call_chat_completion_api(client, *args, **kwargs) return response if "text-" in model_name: @@ -59,13 +59,12 @@ async def cached_call_chat_completion_api(*args, **kwargs): f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." ) - @functools.partial(outlines.vectorize, signature="(),(),(m),(),()->(s)") + @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") async def generate_base( prompt: str, max_tokens: int, stop_at: List[Optional[str]], samples: int, - client: openai.AsyncOpenAI, ) -> str: responses = await call_api( model_name, @@ -86,13 +85,11 @@ async def generate_base( return results - @functools.partial(outlines.vectorize, signature="(),(),(m),(),()->(s)") + @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") async def generate_choice( prompt: str, - max_tokens: int, is_in: List[str], samples: int, - client: openai.AsyncOpenAI, ) -> Union[List[str], str]: """Generate a sequence that must be one of many options. @@ -139,9 +136,10 @@ async def generate_choice( temperature, [], mask, - samples, + 1, ) decoded.append(extract_choice(response["choices"][0])) + prompt = prompt + "".join(decoded) decoded_samples.append("".join(decoded)) @@ -163,11 +161,11 @@ def __call__( if is_in is not None and stop_at: raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") elif is_in is not None: - return self.generate_choice(prompt, max_tokens, is_in, samples, self.client) + return self.generate_choice(prompt, is_in, samples) else: if isinstance(stop_at, str): stop_at = [stop_at] - return self.generate_base(prompt, max_tokens, stop_at, samples, self.client) + return self.generate_base(prompt, max_tokens, stop_at, samples) openai = OpenAIAPI From 76cfc611dfc66d7d03cf466718bb440dd146e0f2 Mon Sep 17 00:00:00 2001 From: Ivan Herreros Date: Tue, 14 Nov 2023 12:30:52 +0100 Subject: [PATCH 291/734] Improve multiple-choice selection for the OpenAI API The current approach is greedy, in the sense that it generates a single token at each step, asking the API to only generate valid next tokens. This means having to pay for the prompt tokens for every token generated. This commit takes a more optimistic approach. It starts with allowing all tokens present in the sequences, and limiting the length of the generation to the number of tokens in the longest sequence.
If the completion is not satisfactory, it then takes one greedy step before switching back to the optimistic mode. On average this new approach consumes fewer tokens than the current one. --- outlines/models/openai.py | 116 +++++++++++++++++++++++++++++++------- 1 file changed, 97 insertions(+), 19 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index e845b940..4d5e3553 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,7 +1,9 @@ """Integration with OpenAI's API.""" import functools import os -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union +from collections import deque +from itertools import zip_longest +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Tuple, Union import numpy as np @@ -85,6 +87,39 @@ async def generate_base( return results + def longest_common_prefix(tokens1: List[int], tokens2: List[int]) -> List[int]: + i = 0 + while i < len(tokens1) and i < len(tokens2) and tokens1[i] == tokens2[i]: + i += 1 + return tokens1[:i] + + def get_choices_with_longest_common_prefix( + response: List[int], is_in: List[List[int]] + ) -> Tuple[List[int], List[List[int]]]: + max_len_prefix = 0 + is_in_left = [] + prefix = [] + for i in range(len(is_in)): + len_prefix = len(longest_common_prefix(response, is_in[i])) + + if len_prefix > max_len_prefix: + max_len_prefix = len_prefix + is_in_left = [is_in[i][len_prefix:]] + prefix = is_in[i][:len_prefix] + + elif len_prefix == max_len_prefix: + is_in_left.append(is_in[i][len_prefix:]) + + return prefix, is_in_left + + def build_optimistic_mask(transposed: deque[Set]) -> Dict: + # build the biggest mask possible, adding tokens left to right + to_mask: Set[int] = set() + while len(transposed) > 0 and len(to_mask | transposed[0]) <= 300: + to_mask = to_mask | transposed.popleft() + + return {token: 100 for token in to_mask} + @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") async def generate_choice( prompt: str, @@ -95,12 +130,11 @@ async def generate_choice( """Generate a sequence that must be one of many options. .. warning:: - This function will call the API once for every token generated. + In the worst case, this function may call the API as many times as there are tokens in the response. - We tokenize every choice, iterate over the token lists, create a mask - with the current tokens and generate one token. We progressively - eliminate the choices that don't start with the currently decoded - sequence. + With the optimistic approach, we activate all tokens that could form all answers. If the solution returned + does not match any of the answers, we then call the API again only with the tokens that can be accepted as + the next token. On average, this approach returns a solution while consuming fewer API calls.
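+ + For example, with choices ["Paris", "London"], the first call masks the tokens of both words at once and allows up to the longest choice's token count; only if the sampled text matches neither choice do we fall back to a single greedy, one-token step.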
""" try: @@ -111,20 +145,33 @@ async def generate_choice( ) tokenizer = tiktoken.encoding_for_model(model_name) - encoded: List[List[int]] = [tokenizer.encode(word) for word in is_in] decoded_samples = [] for _ in range(samples): + is_in_left = is_in.copy() decoded: List[str] = [] - for i in range(max([len(word) for word in encoded])): - mask = {} - for word, tokenized_word in zip(is_in, encoded): - if not word.startswith("".join(decoded)): - continue - try: - mask[tokenized_word[i]] = 100 - except IndexError: - pass + + greedy = False # we try to generate the full response at each iteration + + while len(is_in_left) > 0: + encoded: List[List[int]] = [ + tokenizer.encode(word) for word in is_in_left + ] + + max_tokens_left = max([len(tokens) for tokens in encoded]) + transposed: deque[Set] = deque( + [ + {item for item in subset if item is not None} + for subset in zip_longest(*encoded) + ] + ) + + if not greedy: + mask = build_optimistic_mask(transposed) + else: + mask = {} + for token in transposed.popleft(): # build greedy mask + mask[token] = 100 if len(mask) == 0: break @@ -132,15 +179,46 @@ async def generate_choice( response = await call_api( model_name, format_prompt(prompt), - 1, + max_tokens_left if not greedy else 1, temperature, [], mask, 1, ) - decoded.append(extract_choice(response["choices"][0])) - prompt = prompt + "".join(decoded) + current_resp = extract_choice(response["choices"][0]) + + if current_resp in is_in_left: + decoded.append(current_resp) + break + else: + # map response to tokens + tokenized_resp = tokenizer.encode(current_resp) + ( + tokenized_resp, + encoded, + ) = get_choices_with_longest_common_prefix( + tokenized_resp, encoded + ) + + if len(tokenized_resp) == 0: + greedy = True # next iteration will be "greedy" + continue + else: + decoded.append("".join(tokenizer.decode(tokenized_resp))) + + # map back to words + is_in_left = [ + "".join(tokenizer.decode(tokens)) for tokens in encoded + ] + + if len(is_in_left) == 1: # only one choice left + decoded.append(is_in_left[0]) + break + + greedy = False # after each success, stay with (or switch to) "optimistic" approach + + prompt = prompt + "".join(decoded) decoded_samples.append("".join(decoded)) From a2c6e2b1a1b4c15b49774e8593c6d88f935b7b0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 21 Nov 2023 11:15:59 +0100 Subject: [PATCH 292/734] Add hiring ad on Outlines --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index c8ff49ec..2c7a5df8 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,10 @@ functions and calls to other libraries. **Outlines** 〰 is *compatible with all models*. It only interfaces with models via the next-token logits. It can be used with API-based models as well. 
+ + ## Features - [x] 🖍️Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/) From e2262fe576413adce68056cf1bc151501e891e02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 20 Nov 2023 08:08:47 +0100 Subject: [PATCH 293/734] Refactor the OpenAI integration --- examples/math_generate_code.py | 2 +- examples/pick_odd_one_out.py | 2 +- examples/react.py | 8 +- examples/self_consistency.py | 2 +- outlines/caching.py | 7 +- outlines/models/__init__.py | 2 +- outlines/models/openai.py | 563 ++++++++++++++++----------- outlines/text/generate/sequence.py | 4 +- tests/models/test_openai.py | 50 +++ tests/test_cache.py | 2 +- tests/text/generate/test_sequence.py | 4 +- 11 files changed, 395 insertions(+), 251 deletions(-) create mode 100644 tests/models/test_openai.py diff --git a/examples/math_generate_code.py b/examples/math_generate_code.py index 507a76ec..df141818 100644 --- a/examples/math_generate_code.py +++ b/examples/math_generate_code.py @@ -35,6 +35,6 @@ def execute_code(code): prompt = answer_with_code_prompt(question, examples) -answer = models.openai("text-davinci-003")(prompt) +answer = models.openai("gpt-4")(prompt) result = execute_code(answer) print(f"It takes Carla {result:.0f} minutes to download the file.") diff --git a/examples/pick_odd_one_out.py b/examples/pick_odd_one_out.py index 28612503..676c7e56 100644 --- a/examples/pick_odd_one_out.py +++ b/examples/pick_odd_one_out.py @@ -29,7 +29,7 @@ def build_ooo_prompt(options): """ -model = models.openai("text-davinci-003") +model = models.openai("gpt-3.5-turbo") options = ["sea", "mountains", "plains", "sock"] prompt = build_ooo_prompt(options) diff --git a/examples/react.py b/examples/react.py index c3964cfa..2a4a5262 100644 --- a/examples/react.py +++ b/examples/react.py @@ -45,17 +45,19 @@ def search_wikipedia(query: str): prompt = build_reAct_prompt("Where is Apple Computers headquarted? 
") -complete = models.openai("gpt-3.5-turbo", temperature=1.0) +complete = models.openai("gpt-3.5-turbo") for i in range(1, 10): - mode = complete(prompt, is_in=["Tho", "Act"], max_tokens=128) + mode = complete.generate_choice(prompt, choices=["Tho", "Act"], max_tokens=128) prompt = add_mode(i, mode, "", prompt) if mode == "Tho": thought = complete(prompt, stop_at="\n", max_tokens=128) prompt += f"{thought}" elif mode == "Act": - action = complete(prompt, is_in=["Search", "Finish"], max_tokens=128) + action = complete.generate_choice( + prompt, choices=["Search", "Finish"], max_tokens=128 + ) prompt += f"{action} '" subject = complete( diff --git a/examples/self_consistency.py b/examples/self_consistency.py index 6aded6e6..396c1a45 100644 --- a/examples/self_consistency.py +++ b/examples/self_consistency.py @@ -55,7 +55,7 @@ def few_shots(question, examples): """ -model = models.openai("text-davinci-003") +model = models.openai("gpt-3.5-turbo") prompt = few_shots(question, examples) answers = model(prompt, samples=100) diff --git a/outlines/caching.py b/outlines/caching.py index ecaa950c..28c6ff7f 100644 --- a/outlines/caching.py +++ b/outlines/caching.py @@ -10,8 +10,11 @@ memory = Cache(serializer=JSONSerializer(), storage=LocalFileStorage(cache_dir)) -def cache(fn: Callable): - return memory.cache()(fn) +def cache(ignore: Optional[str] = None): + def cache_fn(fn: Callable): + return memory.cache(ignore=ignore)(fn) + + return cache_fn def get_cache(): diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index e0bc748f..d0b344a5 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,5 +5,5 @@ codebase. """ -from .openai import OpenAIAPI, openai +from .openai import OpenAI, openai from .transformers import Transformers, transformers diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 4d5e3553..778b278d 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -1,29 +1,102 @@ """Integration with OpenAI's API.""" import functools import os -from collections import deque +import textwrap +from dataclasses import asdict, dataclass, field, replace from itertools import zip_longest from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Tuple, Union import numpy as np import outlines -from outlines.caching import cache -__all__ = ["OpenAIAPI", "openai"] +__all__ = ["OpenAI", "openai"] if TYPE_CHECKING: from openai import AsyncOpenAI -class OpenAIAPI: +@dataclass(frozen=True) +class OpenAIConfig: + """Represents the parameters of the OpenAI API. + + The information was last fetched on 2023/11/20. We document below the + properties that are specific to the OpenAI API. Not all these properties are + supported by Outlines. + + Properties + ---------- + model_name + The name of the model. Available models can be found on OpenAI's website. + frequence_penalty + Number between 2.0 and -2.0. Positive values penalize new tokens based on + their existing frequency in the text, + logit_bias + Modifies the likelihood of specified tokens to appear in the completion. + Number between -100 (forbid) and +100 (only allows). + n + The number of completions to return for each prompt. + presence_penalty + Similar to frequency penalty. + response_format + Specifies the format the model must output. `{"type": "json_object"}` + enables JSON mode. + seed + Two completions with the same `seed` value should return the same + completion. This is however not guaranteed. 
+ stop + Up to 4 words where the API will stop the completion. + temperature + Number between 0 and 2. Higher values make the output more random, while + lower values make it more deterministic. + top_p + Number between 0 and 1. Parameter for nucleus sampling. + user + A unique identifier for the end-user. + + """ + + model: str + frequency_penalty: float = 0 + logit_bias: Dict[int, int] = field(default_factory=dict) + max_tokens: Optional[int] = None + n: int = 1 + presence_penalty: float = 0 + response_format: Optional[Dict[str, str]] = None + seed: Optional[int] = None + stop: Optional[Union[str, List[str]]] = None + temperature: Optional[float] = None + top_p: int = 1 + user: str = field(default_factory=str) + + +class OpenAI: + """An object that represents the OpenAI API.""" + def __init__( self, model_name: str, - api_key: Optional[str] = os.getenv("OPENAI_API_KEY"), - temperature: float = 1.0, + api_key: Optional[str] = None, max_retries: int = 6, + config: Optional[OpenAIConfig] = None, ): + """Create an `OpenAI` instance. + + Parameters + ---------- + model_name + Model to use, as defined in OpenAI's documentation + api_key + Secret key to use with the OpenAI API. One can also set the + `OPENAI_API_KEY` environment variable, or the value of + `openai.api_key`. + max_retries + The maximum number of retries when calls to the API fail. + config + An instance of `OpenAIConfig`. Can be useful to specify some + parameters that cannot be set by calling this class' methods. + + """ try: import openai except ImportError: @@ -31,222 +104,282 @@ def __init__( "The `openai` library needs to be installed in order to use Outlines' OpenAI integration." ) - try: - client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) - except openai.OpenAIError as e: - raise e + if api_key is None: + if os.getenv("OPENAI_API_KEY") is not None: + api_key = os.getenv("OPENAI_API_KEY") + elif openai.api_key is not None: + api_key = openai.api_key + else: + raise ValueError( + "You must specify an API key to use the OpenAI API integration." + ) - @error_handler - @cache - async def cached_call_completion_api(*args, **kwargs): - response = await call_completion_api(client, *args, **kwargs) - return response - - @error_handler - @cache - async def cached_call_chat_completion_api(*args, **kwargs): - response = await call_chat_completion_api(client, *args, **kwargs) - return response - - if "text-" in model_name: - call_api = cached_call_completion_api - format_prompt = lambda x: x - extract_choice = lambda x: x["text"] - elif "gpt-" in model_name: - call_api = cached_call_chat_completion_api - format_prompt = lambda x: [{"role": "user", "content": x}] - extract_choice = lambda x: x["message"]["content"] + if config is not None: + self.config = replace(config, model=model_name) # type: ignore else: - raise NameError( - f"The model {model_name} requested is not available. Only the completion and chat completion models are available for OpenAI." 
- ) + self.config = OpenAIConfig(model=model_name) - @functools.partial(outlines.vectorize, signature="(),(),(m),()->(s)") - async def generate_base( - prompt: str, - max_tokens: int, - stop_at: List[Optional[str]], - samples: int, - ) -> str: - responses = await call_api( - model_name, - format_prompt(prompt), - int(max_tokens), - temperature, - stop_at, - {}, - samples, + self.client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) + + def __call__( + self, + prompt: Union[str, List[str]], + max_tokens: Optional[int] = None, + *, + temperature: float = 1.0, + samples: int = 1, + stop_at: Optional[Union[List[str], str]] = None, + ) -> np.ndarray: + """Call the OpenAI API to generate text. + + Parameters + ---------- + prompt + A string or list of strings that will be used to prompt the model + max_tokens + The maximum number of tokens to generate + temperature + The value of the temperature used to sample tokens + samples + The number of completions to generate for each prompt + stop_at + Up to 4 words where the API will stop the completion. + + """ + config = replace(self.config, max_tokens=max_tokens, n=samples, stop=stop_at) # type: ignore + + if "text-" in self.config.model: + raise NotImplementedError( + textwrap.dedent( + "Most models that support the legacy completion endpoints will be " + "deprecated on January 2024. Use Chat models instead.\n" + "The list of chat models is available at https://platform.openai.com/docs/guides/text-generation." + ) ) + if "gpt-" in self.config.model: + return generate_chat(prompt, self.client, config) + + """ + def generate_choice_greedy(transposed, max_tokens_left): + mask = {token: 100 for token in transposed.popleft()} + config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) + response = generate_chat(prompt, config) + prefix, _ = find_common_prefix(response, choices_left) + return prefix + + + def generate_choice_optimistic(transposed, max_tokens_left): + mask = build_optimistic_mask(transposed) + config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) + response = generate_chat(prompt, config) + return response + + while len(choices_left) > 0: + if greedy == True: + prefix = generate_choice_greedy() + choices_left = find_choices_left(prefix, choices_left) + if len(choices_left) == 1: + return choices_left[0] + else: + decoded.append(prefix) + greedy = False + else: + remainder = generate_choice_optimistic() + if remainder in choices_left: # Not exactly true + return remainder + else: + prefix, _ = find_common_prefix(remainder, choices_left) + decoded.append(prefix) + greedy = True + """ + + def generate_choice( + self, prompt: str, choices: List[str], max_tokens: Optional[int] = None + ) -> str: + """Call the OpenAI API to generate one of several choices. 
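(For readers following the refactor, the new entry points are used as below. This is a hedged sketch, not part of the patch: it assumes `openai` and `tiktoken` are installed and a valid `OPENAI_API_KEY` is set, and the prompts are illustrative.)

```python
from outlines import models

model = models.openai("gpt-3.5-turbo")

# Free-form completion; `samples` maps onto the API's `n` parameter.
answers = model("Name a colour of the rainbow.", max_tokens=16, samples=2)

# Constrained choice between fixed strings, via the method defined below.
mode = model.generate_choice("Answer yes or no: is water wet?", choices=["yes", "no"])
```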
+ + Parameters + ---------- + prompt + A string or list of strings that will be used to prompt the model + choices + The list of strings between which we ask the model to choose + max_tokens + The maximum number of tokens to generate + + """ + try: + import tiktoken + except ImportError: + raise ImportError( + "The `tiktoken` library needs to be installed in order to choose `outlines.models.openai` with `is_in`" + ) + + config = replace(self.config, max_tokens=max_tokens) - if samples == 1: - results = np.array([extract_choice(responses["choices"][0])]) + tokenizer = tiktoken.encoding_for_model(self.config.model) + + greedy = False + decoded: List[str] = [] + encoded_choices_left: List[List[int]] = [ + tokenizer.encode(word) for word in choices + ] + + while len(encoded_choices_left) > 0: + max_tokens_left = max([len(tokens) for tokens in encoded_choices_left]) + transposed_choices_left: List[Set] = [ + {item for item in subset if item is not None} + for subset in zip_longest(*encoded_choices_left) + ] + + if not greedy: + mask = build_optimistic_mask(transposed_choices_left) else: - results = np.array( - [extract_choice(responses["choices"][i]) for i in range(samples)] - ) + mask = {} + for token in transposed_choices_left[0]: # build greedy mask + mask[token] = 100 + + if len(mask) == 0: + break + + config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) + response = generate_chat(prompt, self.client, config) + encoded_response = tokenizer.encode(response) - return results - - def longest_common_prefix(tokens1: List[int], tokens2: List[int]) -> List[int]: - i = 0 - while i < len(tokens1) and i < len(tokens2) and tokens1[i] == tokens2[i]: - i += 1 - return tokens1[:i] - - def get_choices_with_longest_common_prefix( - response: List[int], is_in: List[List[int]] - ) -> Tuple[List[int], List[List[int]]]: - max_len_prefix = 0 - is_in_left = [] - prefix = [] - for i in range(len(is_in)): - len_prefix = len(longest_common_prefix(response, is_in[i])) - - if len_prefix > max_len_prefix: - max_len_prefix = len_prefix - is_in_left = [is_in[i][len_prefix:]] - prefix = is_in[i][:len_prefix] - - elif len_prefix == max_len_prefix: - is_in_left.append(is_in[i][len_prefix:]) - - return prefix, is_in_left - - def build_optimistic_mask(transposed: deque[Set]) -> Dict: - # build the biggest mask possible, adding tokens left to right - to_mask: Set[int] = set() - while len(transposed) > 0 and len(to_mask | transposed[0]) <= 300: - to_mask = to_mask | transposed.popleft() - - return {token: 100 for token in to_mask} - - @functools.partial(outlines.vectorize, signature="(),(m),()->(s)") - async def generate_choice( - prompt: str, - is_in: List[str], - samples: int, - ) -> Union[List[str], str]: - """Generate a sequence that must be one of many options. - - .. warning:: - - Worst case, this function may call the API as many times as tokens are in the response. - - With the optimistic approach, we activate all tokens that could form all answers. If the solution returned - does not match any of the answers, we the call the API again only with the tokens that can be accepted as - next-token. In average, this approach returns a solution consuming less calls to the API. 
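(The "optimistic" `logit_bias` strategy described in the removed docstring above, and reimplemented below, is easiest to see on a concrete example. The token ids here are made up for illustration; the loop mirrors what `build_optimistic_mask`, defined later in this patch, does.)

```python
from itertools import zip_longest

# Suppose "yes" encodes to [5297] and "absolutely not" to [346, 18, 407].
encoded_choices = [[5297], [346, 18, 407]]

# Column-wise view: the set of tokens that may appear at each position.
transposed = [
    {tok for tok in column if tok is not None}
    for column in zip_longest(*encoded_choices)
]  # -> [{5297, 346}, {18}, {407}]

# The optimistic mask biases every token of every remaining choice to
# +100, so a single API call can potentially emit a complete answer.
mask: dict = {}
for column in transposed:
    mask.update({tok: 100 for tok in column})

assert mask == {5297: 100, 346: 100, 18: 100, 407: 100}
```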
- - """ - try: - import tiktoken - except ImportError: - raise ImportError( - "The `tiktoken` library needs to be installed in order to choose `outlines.models.openai` with `is_in`" + if encoded_response in encoded_choices_left: + decoded.append(response) + break + else: + ( + encoded_response, + encoded_choices_left, + ) = find_response_choices_intersection( + encoded_response, encoded_choices_left ) - tokenizer = tiktoken.encoding_for_model(model_name) + if len(encoded_response) == 0: + greedy = True # next iteration will be "greedy" + continue + else: + decoded.append("".join(tokenizer.decode(encoded_response))) - decoded_samples = [] - for _ in range(samples): - is_in_left = is_in.copy() - decoded: List[str] = [] + if len(encoded_choices_left) == 1: # only one choice left + choice_left = tokenizer.decode(encoded_choices_left[0]) + decoded.append(choice_left) + break - greedy = False # we try to generate the full response at each iteration + greedy = False # after each success, stay with (or switch to) "optimistic" approach - while len(is_in_left) > 0: - encoded: List[List[int]] = [ - tokenizer.encode(word) for word in is_in_left - ] + prompt = prompt + "".join(decoded) - max_tokens_left = max([len(tokens) for tokens in encoded]) - transposed: deque[Set] = deque( - [ - {item for item in subset if item is not None} - for subset in zip_longest(*encoded) - ] - ) + choice = "".join(decoded) - if not greedy: - mask = build_optimistic_mask(transposed) - else: - mask = {} - for token in transposed.popleft(): # build greedy mask - mask[token] = 100 + return choice - if len(mask) == 0: - break + def generate_json(self): + """Call the OpenAI API to generate a JSON object.""" + raise NotImplementedError - response = await call_api( - model_name, - format_prompt(prompt), - max_tokens_left if not greedy else 1, - temperature, - [], - mask, - 1, - ) + def __str__(self): + return self.__class__.__name__ + " API" - current_resp = extract_choice(response["choices"][0]) + def __repr__(self): + return str(self.config) - if current_resp in is_in_left: - decoded.append(current_resp) - break - else: - # map response to tokens - tokenized_resp = tokenizer.encode(current_resp) - ( - tokenized_resp, - encoded, - ) = get_choices_with_longest_common_prefix( - tokenized_resp, encoded - ) - if len(tokenized_resp) == 0: - greedy = True # next iteration will be "greedy" - continue - else: - decoded.append("".join(tokenizer.decode(tokenized_resp))) +@functools.partial(outlines.vectorize, signature="(),(),()->(s)") +async def generate_chat( + prompt: str, client: "AsyncOpenAI", config: OpenAIConfig +) -> np.ndarray: + responses = await client.chat.completions.create( + messages=[{"role": "user", "content": prompt}], **asdict(config) # type: ignore + ) - # map back to words - is_in_left = [ - "".join(tokenizer.decode(tokens)) for tokens in encoded - ] + if config.n == 1: + results = np.array([responses.choices[0].message.content]) + else: + results = np.array( + [responses.choices[i].message.content for i in range(config.n)] + ) - if len(is_in_left) == 1: # only one choice left - decoded.append(is_in_left[0]) - break + return results - greedy = False # after each success, stay with (or switch to) "optimistic" approach - prompt = prompt + "".join(decoded) +openai = OpenAI - decoded_samples.append("".join(decoded)) - return np.array(decoded_samples) +def find_longest_intersection(response: List[int], choice: List[int]) -> List[int]: + """Find the longest intersection between the response and the choice.""" + for i, 
(token_r, token_c) in enumerate(zip_longest(response, choice)): + if token_r != token_c: + return response[:i] - self.generate_base = generate_base - self.generate_choice = generate_choice + return response - def __call__( - self, - prompt: str, - max_tokens: int = 500, - *, - samples=1, - stop_at: Union[List[Optional[str]], str] = [], - is_in: Optional[List[str]] = None, - ): - if is_in is not None and stop_at: - raise TypeError("You cannot set `is_in` and `stop_at` at the same time.") - elif is_in is not None: - return self.generate_choice(prompt, is_in, samples) - else: - if isinstance(stop_at, str): - stop_at = [stop_at] - return self.generate_base(prompt, max_tokens, stop_at, samples) +def find_response_choices_intersection( + response: List[int], choices: List[List[int]] +) -> Tuple[List[int], List[List[int]]]: + """Find the longest intersection between the response and the different + choices. + + Say the response is of the form `[1, 2, 3, 4, 5]` and we have the choices + `[[1, 2], [1, 2, 3], [6, 7, 8]` then the function will return `[1, 2]` as the + intersection, and `[1, 2, 3]` as the choice that is left. + + Parameters + ---------- + response + The model's response + choices + The remaining possible choices + + Returns + ------- + A tuple that contains the longest intersection between the response and the + different choices, and the choices which start with this intersection. -openai = OpenAIAPI + """ + max_len_prefix = 0 + choices_left = [] + longest_prefix = [] + for i, choice in enumerate(choices): + # Find the longest intersection between the response and the choice. + prefix = find_longest_intersection(response, choice) + + if len(prefix) > max_len_prefix: + max_len_prefix = len(prefix) + choices_left = [choice[len(prefix) :]] + longest_prefix = prefix + + elif len(prefix) == max_len_prefix: + choices_left.append(choice[len(prefix) :]) + + return longest_prefix, choices_left + + +def build_optimistic_mask( + transposed: List[Set[int]], max_mask_size: int = 300 +) -> Dict[int, int]: + """We build the largest mask possible. + + Tokens are added from left to right, so if the encoded choices are e.g. + `[[1,2], [3,4]]`, `1` and `3` will be added before `2` and `4`. + + Parameters + ---------- + transposed + A list of lists that contain the nth token of each choice. 
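(To make the pruning step concrete: once this patch is applied, the two pure helpers behave as follows on the same token sequences exercised by the new test suite further down.)

```python
from outlines.models.openai import (
    find_longest_intersection,
    find_response_choices_intersection,
)

# The response shares the prefix [1, 2] with the first choice only.
prefix, left = find_response_choices_intersection(
    response=[1, 2, 3, 4], choices=[[1, 2], [7, 8]]
)
assert prefix == [1, 2] and left == [[]]  # first choice fully consumed

# No overlap at all: nothing is consumed, both choices stay in play.
prefix, left = find_response_choices_intersection(
    response=[1, 2, 3, 4], choices=[[5, 6], [7, 8]]
)
assert prefix == [] and left == [[5, 6], [7, 8]]

# The pairwise primitive underneath stops at the first mismatch.
assert find_longest_intersection([1, 2, 3], [1, 2, 9]) == [1, 2]
```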
+ + """ + mask: Dict[int, int] = {} + for tokens in transposed: + for token in tokens: + if len(mask) == max_mask_size: + return mask + mask[token] = 100 + + return mask def error_handler(api_call_fn: Callable) -> Callable: @@ -274,47 +407,3 @@ def call(*args, **kwargs): raise e return call - - -async def call_completion_api( - client: "AsyncOpenAI", - model: str, - prompt: str, - max_tokens: int, - temperature: float, - stop_sequences: List[str], - logit_bias: Dict[str, int], - num_samples: int, -) -> dict: - response = await client.completions.create( - model=model, - prompt=prompt, - temperature=temperature, - max_tokens=max_tokens, - stop=list(stop_sequences) if len(stop_sequences) > 0 else None, - logit_bias=logit_bias, - n=int(num_samples), - ) - return response.model_dump() - - -async def call_chat_completion_api( - client: "AsyncOpenAI", - model: str, - messages: List[Dict[str, str]], - max_tokens: int, - temperature: float, - stop_sequences: List[str], - logit_bias: Dict[str, int], - num_samples: int, -) -> dict: - response = await client.chat.completions.create( - model=model, - messages=messages, - max_tokens=max_tokens, - temperature=temperature, - stop=list(stop_sequences) if len(stop_sequences) > 0 else None, - logit_bias=logit_bias, - n=int(num_samples), - ) - return response.model_dump() diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 8550c2e8..53857958 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -3,7 +3,7 @@ import torch -from outlines.models import OpenAIAPI +from outlines.models import OpenAI if TYPE_CHECKING: from outlines.models.transformers import KVCacheType, Transformers @@ -35,7 +35,7 @@ def __init__( such functions. """ - if isinstance(model, OpenAIAPI): + if isinstance(model, OpenAI): raise TypeError("Cannot use guided generation with the OpenAI API.") self.model = model diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py new file mode 100644 index 00000000..c2e885eb --- /dev/null +++ b/tests/models/test_openai.py @@ -0,0 +1,50 @@ +import pytest + +from outlines.models.openai import ( + build_optimistic_mask, + find_longest_intersection, + find_response_choices_intersection, +) + + +@pytest.mark.parametrize( + "response,choice,expected_intersection,expected_choices_left", + ( + ([1, 2, 3, 4], [[5, 6]], [], [[5, 6]]), + ([1, 2, 3, 4], [[5, 6], [7, 8]], [], [[5, 6], [7, 8]]), + ([1, 2, 3, 4], [[1, 2], [7, 8]], [1, 2], [[]]), + ([1, 2], [[1, 2, 3, 4], [1, 2]], [1, 2], [[3, 4], []]), + ([1, 2, 3], [[1, 2, 3, 4], [1, 2]], [1, 2, 3], [[4]]), + ), +) +def test_find_response_choices_intersection( + response, choice, expected_intersection, expected_choices_left +): + intersection, choices_left = find_response_choices_intersection(response, choice) + assert intersection == expected_intersection + assert choices_left == expected_choices_left + + +@pytest.mark.parametrize( + "response,choice,expected_prefix", + ( + ([1, 2, 3], [1, 2, 3, 4], [1, 2, 3]), + ([1, 2, 3], [1, 2, 3], [1, 2, 3]), + ([4, 5], [1, 2, 3], []), + ), +) +def test_find_longest_common_prefix(response, choice, expected_prefix): + prefix = find_longest_intersection(response, choice) + assert prefix == expected_prefix + + +@pytest.mark.parametrize( + "transposed,mask_size,expected_mask", + ( + ([{1, 2}, {3, 4}], 3, {1: 100, 2: 100, 3: 100}), + ([{1, 2}, {3, 4}], 4, {1: 100, 2: 100, 3: 100, 4: 100}), + ), +) +def test_build_optimistic_mask(transposed, mask_size, expected_mask): + mask = 
build_optimistic_mask(transposed, mask_size) + assert mask == expected_mask diff --git a/tests/test_cache.py b/tests/test_cache.py index cc91eb60..7cc5ede6 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -37,7 +37,7 @@ def test_cache(refresh_environment): memory = outlines.get_cache() assert memory.storage.location == Path(tempdir) - yield outlines.caching.cache + yield outlines.caching.cache() memory.storage.clear() diff --git a/tests/text/generate/test_sequence.py b/tests/text/generate/test_sequence.py index f4fd52c0..e5ede8c5 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/text/generate/test_sequence.py @@ -5,13 +5,13 @@ import pytest import torch -from outlines.models import OpenAIAPI +from outlines.models import OpenAI from outlines.models.tokenizer import Tokenizer from outlines.text.generate.sequence import Sequence def test_openai_error(): - class Mock(OpenAIAPI): + class Mock(OpenAI): def __init__(self): pass From 25ba231d35bc45f0f41ef6f2d9fcb7d8442feb82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 20 Nov 2023 15:25:20 +0100 Subject: [PATCH 294/734] Ignore function arguments when caching --- outlines/caching.py | 2 +- outlines/models/openai.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/outlines/caching.py b/outlines/caching.py index 28c6ff7f..60dca6f3 100644 --- a/outlines/caching.py +++ b/outlines/caching.py @@ -1,5 +1,5 @@ import os -from typing import Callable +from typing import Callable, Optional from perscache import Cache, NoCache from perscache.serializers import JSONSerializer diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 778b278d..f8f06a7d 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -9,6 +9,7 @@ import numpy as np import outlines +from outlines.caching import cache __all__ = ["OpenAI", "openai"] @@ -287,6 +288,7 @@ def __repr__(self): return str(self.config) +@cache(ignore="client") @functools.partial(outlines.vectorize, signature="(),(),()->(s)") async def generate_chat( prompt: str, client: "AsyncOpenAI", config: OpenAIConfig From b736708159846ac501564672b910b1ffa35c8bb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 21 Nov 2023 11:21:53 +0100 Subject: [PATCH 295/734] Remove unnecessary comment --- outlines/models/openai.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index f8f06a7d..7d7ac61c 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -160,40 +160,6 @@ def __call__( if "gpt-" in self.config.model: return generate_chat(prompt, self.client, config) - """ - def generate_choice_greedy(transposed, max_tokens_left): - mask = {token: 100 for token in transposed.popleft()} - config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) - response = generate_chat(prompt, config) - prefix, _ = find_common_prefix(response, choices_left) - return prefix - - - def generate_choice_optimistic(transposed, max_tokens_left): - mask = build_optimistic_mask(transposed) - config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) - response = generate_chat(prompt, config) - return response - - while len(choices_left) > 0: - if greedy == True: - prefix = generate_choice_greedy() - choices_left = find_choices_left(prefix, choices_left) - if len(choices_left) == 1: - return choices_left[0] - else: - decoded.append(prefix) - greedy = False - else: - remainder = generate_choice_optimistic() - if 
remainder in choices_left: # Not exactly true - return remainder - else: - prefix, _ = find_common_prefix(remainder, choices_left) - decoded.append(prefix) - greedy = True - """ - def generate_choice( self, prompt: str, choices: List[str], max_tokens: Optional[int] = None ) -> str: From a91637203abd5fa66f42ca3eed78ad90ff11c4a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 21 Nov 2023 19:23:04 +0100 Subject: [PATCH 296/734] Mention in README `datasets` requirement --- README.md | 2 +- docs/get_started.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2c7a5df8..fdfd6f6e 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ pip install outlines The dependencies needed to use models are not installed by default. You will need to run: - `pip install openai` to be able to use OpenAI [models](https://platform.openai.com/docs/api-reference). -- `pip install transformers` to be able to use Hugging Face `transformers` [models](https://huggingface.co/models?pipeline_tag=text-generation). +- `pip install transformers datasets` to be able to use Hugging Face `transformers` [models](https://huggingface.co/models?pipeline_tag=text-generation). ## Guided generation diff --git a/docs/get_started.md b/docs/get_started.md index f2447ea5..c127f1de 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -36,7 +36,7 @@ pip install outlines ??? info "Using OpenAI and Transformers" - Outlines :wavy_dash: does not install the `openai` or `transformers` libraries by default. You will have to install these libraries manually. + Outlines :wavy_dash: does not install the `openai` or `transformers` libraries by default. You will have to install these libraries manually. To use `transformers` models you will also need to install the `datasets` library. 
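(The caching changes above turn `cache` into a decorator factory so that unhashable arguments, such as the API client, can be excluded from the cache key. Here is a minimal sketch of that pattern, independent of `perscache`, with illustrative names; calls are keyword-only to keep the sketch short.)

```python
import functools
from typing import Any, Callable, Dict, Optional, Tuple


def cache(ignore: Optional[str] = None) -> Callable:
    """Decorator factory: memoize on all keyword arguments except `ignore`."""
    store: Dict[Tuple, Any] = {}

    def cache_fn(fn: Callable) -> Callable:
        @functools.wraps(fn)
        def wrapper(**kwargs):
            # Drop the ignored argument (e.g. a client object) from the key.
            key = tuple(sorted((k, v) for k, v in kwargs.items() if k != ignore))
            if key not in store:
                store[key] = fn(**kwargs)
            return store[key]

        return wrapper

    return cache_fn


@cache(ignore="client")
def generate(prompt: str, client: object = None) -> str:
    return prompt.upper()


assert generate(prompt="hi", client=object()) == "HI"
assert generate(prompt="hi", client=object()) == "HI"  # hit, despite a new client
```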
## :eyes: Sneak Peek From 7bbb64fcd5e67c94d92ec5e7f7b7b65d63edaeb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 9 Nov 2023 18:39:01 +0100 Subject: [PATCH 297/734] Rename the `Transformers` class --- outlines/models/__init__.py | 2 +- outlines/models/transformers.py | 8 ++++---- outlines/text/generate/sequence.py | 5 +++-- tests/models/test_transformers.py | 19 ++++++++++--------- .../generate/test_integration_transfomers.py | 6 +++--- tests/text/test_fsm.py | 6 +++--- 6 files changed, 24 insertions(+), 22 deletions(-) diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index d0b344a5..6ab40f54 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -6,4 +6,4 @@ """ from .openai import OpenAI, openai -from .transformers import Transformers, transformers +from .transformers import Transformer, transformers diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 85f2c2f9..240acbba 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -55,7 +55,7 @@ class CodeLlamaTokenizerFast: # type: ignore ) -class Transformers: +class Transformer: """Represents a `transformers` model.""" def __init__( @@ -116,7 +116,7 @@ def __call__( return self.forward(input_ids, attention_mask, past_key_values)[0] -class TransformersTokenizer(Tokenizer): +class TransformerTokenizer(Tokenizer): """Represents a tokenizer for models in the `transformers` library.""" def __init__(self, model_name: str, **kwargs): @@ -215,6 +215,6 @@ def transformers( model_kwargs["device_map"] = device model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs) - tokenizer = TransformersTokenizer(model_name, **tokenizer_kwargs) + tokenizer = TransformerTokenizer(model_name, **tokenizer_kwargs) - return Transformers(model, tokenizer) + return Transformer(model, tokenizer) diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index 53857958..e66cafa0 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -6,7 +6,7 @@ from outlines.models import OpenAI if TYPE_CHECKING: - from outlines.models.transformers import KVCacheType, Transformers + from outlines.models.transformers import KVCacheType, Transformer from outlines.text.generate.sample import Sampler @@ -15,7 +15,7 @@ class Sequence: def __init__( self, - model: "Transformers", + model: "Transformer", max_tokens: Optional[int] = None, sampler: Optional["Sampler"] = None, ): @@ -41,6 +41,7 @@ def __init__( self.model = model self.device = model.device self.max_tokens = max_tokens + self.pad_token_id = torch.tensor( model.tokenizer.pad_token_id, device=model.device ) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 71389960..9fb86b85 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -2,13 +2,14 @@ import torch from transformers.models.gpt2 import GPT2TokenizerFast -from outlines.models.transformers import TransformersTokenizer, transformers +from outlines.models.autogptq import autogptq +from outlines.models.transformers import TransformerTokenizer, transformers TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" def test_tokenizer(): - tokenizer = TransformersTokenizer(TEST_MODEL) + tokenizer = TransformerTokenizer(TEST_MODEL) assert tokenizer.eos_token_id == 0 assert tokenizer.pad_token_id == 0 assert isinstance(tokenizer.tokenizer, GPT2TokenizerFast) @@ -37,7 +38,7 @@ def test_tokenizer(): 
isinstance(text[0], str) isinstance(text[1], str) - tokenizer = TransformersTokenizer( + tokenizer = TransformerTokenizer( TEST_MODEL, additional_special_tokens=["", ""] ) assert "" in tokenizer.special_tokens @@ -45,7 +46,7 @@ def test_tokenizer(): def test_llama_tokenizer(): - tokenizer = TransformersTokenizer("hf-internal-testing/llama-tokenizer") + tokenizer = TransformerTokenizer("hf-internal-testing/llama-tokenizer") # Broken assert tokenizer.tokenizer.convert_tokens_to_string(["▁baz"]) == "baz" @@ -63,15 +64,15 @@ def test_model(): transformers(TEST_MODEL, device="non_existent") model = transformers(TEST_MODEL, device="cpu") - assert isinstance(model.tokenizer, TransformersTokenizer) + assert isinstance(model.tokenizer, TransformerTokenizer) assert model.device.type == "cpu" model = transformers(TEST_MODEL, model_kwargs={"device_map": "cpu"}) - assert isinstance(model.tokenizer, TransformersTokenizer) + assert isinstance(model.tokenizer, TransformerTokenizer) assert model.device.type == "cpu" model = transformers(TEST_MODEL, device="cpu", model_kwargs={"device_map": "cuda"}) - assert isinstance(model.tokenizer, TransformersTokenizer) + assert isinstance(model.tokenizer, TransformerTokenizer) assert model.device.type == "cpu" input_ids = torch.tensor([[0, 1, 2]]) @@ -92,7 +93,7 @@ def test_model(): def test_tokenizer_eq_hash(): - tokenizer = TransformersTokenizer("gpt2") - tokenizer2 = TransformersTokenizer("gpt2") + tokenizer = TransformerTokenizer("gpt2") + tokenizer2 = TransformerTokenizer("gpt2") assert tokenizer == tokenizer2 assert hash(tokenizer) == hash(tokenizer2) diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/text/generate/test_integration_transfomers.py index 9e5c28f2..04bd0d46 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/text/generate/test_integration_transfomers.py @@ -9,7 +9,7 @@ import outlines.models as models import outlines.text.generate as generate -from outlines.models.transformers import TransformersTokenizer +from outlines.models.transformers import TransformerTokenizer from outlines.text.fsm import reduced_vocabulary @@ -326,8 +326,8 @@ def test_transformers_logits_vocab_size(): def test_transformers_reduced_vocabulary_caching(): - tokenizer = TransformersTokenizer("gpt2") - tokenizer2 = TransformersTokenizer("gpt2") + tokenizer = TransformerTokenizer("gpt2") + tokenizer2 = TransformerTokenizer("gpt2") # TODO: We might actually want only one copy of a given tokenizer. 
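(The equality and hash assertions in these tests are what let caching work across separately constructed tokenizers: two `TransformerTokenizer("gpt2")` instances must act as a single cache key. A short sketch of the behaviour being pinned down; it requires `transformers` and downloads the small `gpt2` tokenizer on first run.)

```python
from outlines.models.transformers import TransformerTokenizer

a = TransformerTokenizer("gpt2")
b = TransformerTokenizer("gpt2")

# Distinct objects, but interchangeable as dictionary/cache/set keys.
assert a is not b
assert a == b and hash(a) == hash(b)
assert len({a, b}) == 1
```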
assert tokenizer is not tokenizer2 diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py index f10f0f81..10c18eec 100644 --- a/tests/text/test_fsm.py +++ b/tests/text/test_fsm.py @@ -2,7 +2,7 @@ import numba import pytest -from outlines.models.transformers import TransformersTokenizer +from outlines.models.transformers import TransformerTokenizer from outlines.text.fsm import ( _walk_fsm, create_fsm_index, @@ -380,7 +380,7 @@ def test_create_fsm_index_tokenizer(): num_fsm_states = len(regex_fsm.states) assert num_fsm_states == 220 - tokenizer = TransformersTokenizer("gpt2") + tokenizer = TransformerTokenizer("gpt2") states_to_token_subsets, empty_token_ids = create_fsm_index_tokenizer( regex_fsm, tokenizer @@ -403,7 +403,7 @@ def test_regex_index_performance(): num_fsm_states = len(regex_fsm.states) assert num_fsm_states == 220 - tokenizer = TransformersTokenizer("gpt2") + tokenizer = TransformerTokenizer("gpt2") # Pre-compile Numba functions res, _ = create_fsm_index_tokenizer(regex_fsm, tokenizer) From 8804595abda1930aaa311fa961568d99999fcff1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 9 Nov 2023 18:39:13 +0100 Subject: [PATCH 298/734] Add AutoGPTQ integration --- outlines/models/__init__.py | 2 ++ outlines/models/gptq.py | 25 +++++++++++++++++++++++++ tests/models/test_transformers.py | 1 - 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 outlines/models/gptq.py diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py index 6ab40f54..857c3261 100644 --- a/outlines/models/__init__.py +++ b/outlines/models/__init__.py @@ -5,5 +5,7 @@ codebase. """ +from .awq import awq +from .gptq import gptq from .openai import OpenAI, openai from .transformers import Transformer, transformers diff --git a/outlines/models/gptq.py b/outlines/models/gptq.py new file mode 100644 index 00000000..67c00127 --- /dev/null +++ b/outlines/models/gptq.py @@ -0,0 +1,25 @@ +from typing import Optional + +from .transformers import Transformer, TransformerTokenizer + + +def gptq( + model_name: str, + device: Optional[str] = None, + model_kwargs: dict = {}, + tokenizer_kwargs: dict = {}, +): + try: + from auto_gptq import AutoGPTQForCausalLM + except ImportError: + raise ImportError( + "The `auto_gptq` library needs to be installed in order to use `AutoGPTQ` models." 
+ ) + + if device is not None: + model_kwargs["device_map"] = device + + model = AutoGPTQForCausalLM.from_quantized(model_name, **model_kwargs) + tokenizer = TransformerTokenizer(model_name, **tokenizer_kwargs) + + return Transformer(model, tokenizer) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 9fb86b85..f0b9d681 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -2,7 +2,6 @@ import torch from transformers.models.gpt2 import GPT2TokenizerFast -from outlines.models.autogptq import autogptq from outlines.models.transformers import TransformerTokenizer, transformers TEST_MODEL = "hf-internal-testing/tiny-random-GPTJForCausalLM" From a117d9d861f894d939896404c96da336e22f72bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 9 Nov 2023 18:47:51 +0100 Subject: [PATCH 299/734] Add AutoAWQ integration --- outlines/models/awq.py | 45 ++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 2 ++ 2 files changed, 47 insertions(+) create mode 100644 outlines/models/awq.py diff --git a/outlines/models/awq.py b/outlines/models/awq.py new file mode 100644 index 00000000..3a241808 --- /dev/null +++ b/outlines/models/awq.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Optional + +from .transformers import Transformer, TransformerTokenizer + +if TYPE_CHECKING: + from transformers import PreTrainedModel, PreTrainedTokenizer + + +class AWQModel(Transformer): + """Represents a `transformers` model.""" + + def __init__( + self, + model: "PreTrainedModel", + tokenizer: "PreTrainedTokenizer", + ): + self.device = model.model.device + self.model = model + self.tokenizer = tokenizer + + +def awq( + model_name: str, + fuse_layers: bool = True, + device: Optional[str] = None, + model_kwargs: dict = {}, + tokenizer_kwargs: dict = {}, +): + try: + from awq import AutoAWQForCausalLM + except ImportError: + raise ImportError( + "The `autoawq` and `transformers` library needs to be installed in order to use `AutoAWQ` models." + ) + + model_kwargs["fuse_layers"] = fuse_layers + model_kwargs["safetensors"] = True + + if device is not None: + model_kwargs["device_map"] = device + + model = AutoAWQForCausalLM.from_quantized(model_name, **model_kwargs) + tokenizer = TransformerTokenizer(model_name, trust_remote_code=True) + + return AWQModel(model, tokenizer) diff --git a/pyproject.toml b/pyproject.toml index bcab189c..87493cb0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,6 +87,8 @@ exclude=["examples"] [[tool.mypy.overrides]] module = [ + "awq.*", + "auto_gptq.*", "jinja2", "joblib.*", "jsonschema.*", From 5cd10531cfc9d998ac255954ee04d5fec5a07f28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 24 Nov 2023 20:30:46 +0100 Subject: [PATCH 300/734] Update README.md --- README.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fdfd6f6e..65f0b5e9 100644 --- a/README.md +++ b/README.md @@ -52,8 +52,13 @@ via the next-token logits. It can be used with API-based models as well. 
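(With the two integrations above, quantized checkpoints load through the same `Transformer` interface as plain `transformers` models. A hedged usage sketch follows; the checkpoint names are placeholders rather than tested models, and `auto-gptq` / `autoawq` must be installed.)

```python
from outlines import models

# AutoGPTQ-quantized checkpoint; `device` is forwarded as `device_map`.
gptq_model = models.gptq("TheBloke/some-model-GPTQ", device="cuda")

# AutoAWQ-quantized checkpoint; layer fusion is on by default.
awq_model = models.awq("TheBloke/some-model-AWQ", device="cuda")
```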
- [x] 🔥 Fast [JSON generation](#efficient-json-generation-following-a-pydantic-model) following a JSON schema or a Pydantic model - [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions - [x] 💾 Caching of generations -- [x] 🤗 Integration with Hugging Face's `transformers` models -- [x] 🔒 Integration with OpenAI's API + +## Available models + +- Transformers +- AutoGPTQ +- AutoAWQ +- OpenAI API Outlines 〰 has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][twitter] to stay up to date! From 027b29b45a752e360fdc038fe3fa0db0648fa0e0 Mon Sep 17 00:00:00 2001 From: Matthew Date: Sun, 26 Nov 2023 12:59:43 -0500 Subject: [PATCH 301/734] Update `meta_prompting` prompt there appears to be, at least in ChatGPT-3.5 a meta-cognitive 'think step-by-step' approach in every response. I couldn't get def split_into_steps to output the correct answer with anything but the prompt in its current form. --- examples/meta_prompting.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index eb10b9bd..80519167 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -19,13 +19,18 @@ def split_into_steps(question, model_name: str): @text.prompt def solve(question): """{{question}} - Let's solve this problem by splitting it into steps. + Rephrase : : as a true or false statement, identify an Object, relationship and subject """ model = models.openai(model_name) prompt = solve(question) answer = model(prompt, 500) + prompt += ( + answer + + "\n what is the only option that displays the same type of relationship as : :?" + ) + answer = model(prompt, 500) completed = prompt + answer return completed @@ -131,7 +136,7 @@ def run_example(model_fn, question, model_name): parser.add_argument( "--model", type=str, - default="gpt-3.5-turbo", + default="gpt-3.5-turbo-1106", help="The Large Language Model to use to run the examples.", ) args = parser.parse_args() @@ -139,10 +144,6 @@ def run_example(model_fn, question, model_name): math_q = "f(x) = x*x. What is f(f(3))?" sat_q = """ -Directions: In the following question, a related pair of words or phrases \ -is followed by five pairs of words or phrases. Choose the pair that best \ -expresses a relationship similar to that in the original pair. 
\ - BRAGGART :: MODESTY A) FLEDGLING : EXPERIENCE B) EMBEZZLER : GREED @@ -156,7 +157,7 @@ def run_example(model_fn, question, model_name): run_example(split_into_steps, math_q, args.model) run_example( - split_into_steps, sat_q, args.model + split_into_steps, sat_q.lower(), args.model ) # gpt>3.5 usually gets this one right run_example(fill_in_the_blanks, sat_q, args.model) run_example(ask_an_expert, alignment_q, args.model) From b50d6bf7294edf80211e4f73284a30ff598a7165 Mon Sep 17 00:00:00 2001 From: amrrs <1littlecoder@gmail.com> Date: Tue, 28 Nov 2023 11:55:19 +0530 Subject: [PATCH 302/734] Correct `auto-gpt` name in exception it was a bit confusing to use `auto_gptq` while the library installation from pypi refers to `auto-gptq` https://github.com/PanQiWei/AutoGPTQ The change should be helpful if the `ImportError` is raised --- outlines/models/gptq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outlines/models/gptq.py b/outlines/models/gptq.py index 67c00127..1cf922be 100644 --- a/outlines/models/gptq.py +++ b/outlines/models/gptq.py @@ -13,7 +13,7 @@ def gptq( from auto_gptq import AutoGPTQForCausalLM except ImportError: raise ImportError( - "The `auto_gptq` library needs to be installed in order to use `AutoGPTQ` models." + "The `auto-gptq` library needs to be installed in order to use `AutoGPTQ` models." ) if device is not None: From 77378d6a91935642f255a2f0d71eb9487ee25484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 24 Nov 2023 18:54:06 +0100 Subject: [PATCH 303/734] Add Chain of Density prompting example --- docs/examples/chain_of_density.md | 127 ++++++++++++++++++++++ docs/examples/dating_profiles.md | 2 +- docs/examples/images/chain_of_density.png | Bin 0 -> 515603 bytes docs/examples/index.md | 1 + mkdocs.yml | 3 +- 5 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 docs/examples/chain_of_density.md create mode 100644 docs/examples/images/chain_of_density.png diff --git a/docs/examples/chain_of_density.md b/docs/examples/chain_of_density.md new file mode 100644 index 00000000..3eb5f4a1 --- /dev/null +++ b/docs/examples/chain_of_density.md @@ -0,0 +1,127 @@ +# Summarize documents using Chain of Density prompting + +A good summary should be informative, concise and clear. While large language models are generally good at summarizing documents, their summaries tend to be long and contain redundant information; their information density tends to be on the lower end. This is where [chain of Density](https://arxiv.org/abs/2309.04269), a new prompting technique, comes in. In this example we will show how one can implement chain of density with a few lines of code using Outlines, leveraging both Outline's prompt templating and its guided generation capabilities. 
+ +The article we will try to summarize is the first three paragraphs of the [Alan Turing page on Wikipedia](https://en.wikipedia.org/wiki/Alan_Turing): + +```python +article = """ +Alan Mathison Turing OBE FRS (/ˈtjʊərɪŋ/; 23 June 1912 – 7 June 1954) was an English mathematician, computer scientist, logician, cryptanalyst, philosopher and theoretical biologist.[5] Turing was highly influential in the development of theoretical computer science, providing a formalisation of the concepts of algorithm and computation with the Turing machine, which can be considered a model of a general-purpose computer.[6][7][8] He is widely considered to be the father of theoretical computer science and artificial intelligence.[9] + +Born in Maida Vale, London, Turing was raised in southern England. He graduated at King's College, Cambridge, with a degree in mathematics. Whilst he was a fellow at Cambridge, he published a proof demonstrating that some purely mathematical yes–no questions can never be answered by computation. He defined a Turing machine and proved that the halting problem for Turing machines is undecidable. In 1938, he obtained his PhD from the Department of Mathematics at Princeton University. During the Second World War, Turing worked for the Government Code and Cypher School at Bletchley Park, Britain's codebreaking centre that produced Ultra intelligence. For a time he led Hut 8, the section that was responsible for German naval cryptanalysis. Here, he devised a number of techniques for speeding the breaking of German ciphers, including improvements to the pre-war Polish bomba method, an electromechanical machine that could find settings for the Enigma machine. Turing played a crucial role in cracking intercepted coded messages that enabled the Allies to defeat the Axis powers in many crucial engagements, including the Battle of the Atlantic.[10][11] + +After the war, Turing worked at the National Physical Laboratory, where he designed the Automatic Computing Engine, one of the first designs for a stored-program computer. In 1948, Turing joined Max Newman's Computing Machine Laboratory at the Victoria University of Manchester, where he helped develop the Manchester computers[12] and became interested in mathematical biology. He wrote a paper on the chemical basis of morphogenesis[1] and predicted oscillating chemical reactions such as the Belousov–Zhabotinsky reaction, first observed in the 1960s. Despite these accomplishments, Turing was never fully recognised in Britain during his lifetime because much of his work was covered by the Official Secrets Act.[13] +""" +``` + +## How Chain Of Density works + +Chain Of Density starts with asking the model to generate a first long and non-specific summary. Then it asks the model to generate 4 extra summaries by proceeding in the following way: + +1. Identify 1-3 entities missing in the previous summary; +2. Add all entities marked as missing in the previous step, while not dropping entities; +3. Make the summary more concise; + +The prompt also asks the model to return a list of JSON objects that contain the missing entities and the new summary. 
This is where guided generation will come in handy :) The paper provides the prompt and an example: + +![Figure 2 in the paper](./images/chain_of_density.png) + +We can now implement the prompt provided in the paper: + +```python +from outlines import text + +@text.prompt +def chain_of_density(article): + """Article: {{ article }} + + You will generate increasingly concise, entity-dense summaries of the above Article. + + Repeat the following 2 steps 5 times. + + Step 1. Identify 1-3 informative Entities ("; " delimited) from the Article which are missing from the previously generated summary. + Step 2. Write a new, denser summary of identical length which covers every entity and detail from the previous summary plus the Missing Entities. + + A Missing Entity is: + - Relevant: to the main story. + - Specific: descriptive yet concise (5 words or fewer). + - Novel: not in the previous summary. + - Faithful: present in the Article. + - Anywhere: located anywhere in the Article. + + Guidelines: + - The first summary should be long (4-5 sentences, ~80 words) yet highly non-specific, containing little information beyond the entities marked as missing. Use overly verbose language and fillers (e.g., "this article discusses") to reach ~80 words. + - Make every word count: rewrite the previous summary to improve flow and make space for additional entities. + - Make space with fusion, compression, and removal of uninformative phrases like "the article discusses". + - The summaries should become highly dense and concise yet self-contained, e.g., easily understood without the Article. + - Missing entities can appear anywhere in the new summary. + - Never drop entities from the previous summary. If space cannot be made, add fewer new entities. + + Remember, use the exact same number of words for each summary. + + Answer in JSON. The JSON should be a a dictionary with key "summaries" that contains a list (length 5) of dictionaries whose keys are "Missing_Entities" and "Denser_Summary". + """ +``` + +??? Note + + Note that we modified the prompt slightly so it returns a JSON object that contains the summaries, instead of a list of summaries. + + +## Outlines implementation + +We will use Outline's JSON-guided generation to ensure that the model's output is consistent with the format specified in the prompt. We start with defining the JSON objects that the model is asked to return using Pydantic. One JSON object that contains a list of `Summary` objects that contain the missing entities and new summary: + +```python +from pydantic import BaseModel, conlist + +class Summary(BaseModel): + missing_entities: str + denser_summary: str + +class Summaries(BaseModel): + summaries: conlist(Summary, max_length=5, min_length=5) +``` + +We now generate the prompt by passing the article we want to summarize to the template. We load a quantized version of Mistral-7B using the AutoAWQ library, and then use JSON-guided generation to generate the summaries: + +```python +from outlines import models + +model = models.awq("TheBloke/Mistral-7B-OpenOrca-AWQ") + +prompt = chain_of_density(article) +result = text.generate.json(model, Summaries)(prompt) +``` + +We can now check the results: + +```python +print(result.model_dump()) +# {'summaries': [ +# { +# 'missing_entities': 'English mathematician, cryptanalyst, philosopher', +# 'denser_summary': 'Alan Mathison Turing was an English mathematician, cryptanalyst, philosopher.' 
+# }, +# { +# 'missing_entities': '', +# 'denser_summary': "Alan Mathison Turing was an English mathematician who was a crucial figure in WW2's Bletchley Park codebreaking centre and designed one of the first computers." +# }, +# { +# 'missing_entities': 'cryptanalyst, studied, biology, father', +# 'denser_summary': 'Alan Mathison Turing was an English cryptanalyst, studied theoretical computer science, and contributed to mathematical biology.' +# }, +# { +# 'missing_entities': 'biology, morphogenesis, chemical', +# 'denser_summary': 'Alan Mathison Turing was an English cryptanalyst, studied theoretical computer science, and predicted chemical reactions in morphogenesis. +# '}, +# { +# 'missing_entities': '', +# 'denser_summary': 'Alan Mathison Turing was an English cryptanalyst, developed computer science, and made strides in mathematical biology research.' +# } +# ]} +``` + +Not bad, considering we used a smallish model to generate the summary! Chain of Density seems to be a very effective prompting technique to generate dense summaries, even with small quantized models. Its implementation in Outlines is also very short. + +Note that this is the first article I tried and it worked out of the box. Try it out on other articles, and please share the results on Twitter, or by opening [a new discussion](https://github.com/outlines-dev/outlines/discussions/categories/show-and-tell) on the Outlines repository! diff --git a/docs/examples/dating_profiles.md b/docs/examples/dating_profiles.md index f6bf3335..3b365431 100644 --- a/docs/examples/dating_profiles.md +++ b/docs/examples/dating_profiles.md @@ -1,4 +1,4 @@ -# Generate a dating profile from a description +# Generate a synthetic dating profile from a description In this example we will see how we can use Outlines to generate synthetic data for a dating application. This example was originally contributed by [Vibhor Kumar](https://github.com/veezbo). 
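(One detail the Chain of Density example above leans on is that the Pydantic schema itself enforces exactly five summaries. A small sketch of that validation behaviour in isolation; no model call is needed, only `pydantic` v2.)

```python
from pydantic import BaseModel, ValidationError, conlist


class Summary(BaseModel):
    missing_entities: str
    denser_summary: str


class Summaries(BaseModel):
    summaries: conlist(Summary, min_length=5, max_length=5)


item = {"missing_entities": "", "denser_summary": "Turing was a mathematician."}

Summaries(summaries=[item] * 5)  # exactly five entries: validates

try:
    Summaries(summaries=[item] * 3)  # too few entries: rejected
except ValidationError as err:
    print(err.error_count(), "validation error(s)")
```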
diff --git a/docs/examples/images/chain_of_density.png b/docs/examples/images/chain_of_density.png
new file mode 100644
index 0000000000000000000000000000000000000000..61e01f40459e26f082fc90ea2964dcc4bd2d70c6
GIT binary patch
literal 515603
[binary PNG data elided: docs/examples/images/chain_of_density.png (515,603 bytes), the "Figure 2 in the paper" image referenced by docs/examples/chain_of_density.md]
zAJr~#_?ozcyqxQ5>}t|f7Yz8)F3R`@=C+QQuM%umMVh^n!nT|X)tMK7>n=s~C?GDM z*`W`6d34qlIxwiIE(L$Iy6(7h-(xW->A&%~8P%r8gnBT?=+qWRzSP%&^JaKzVQ)Wx z6-D$;adY6tX)!6a`Ft|+h+F{f0>^T)q%?PK2W~46`{-$PZEI=w6j1b5N-OLw=k&f! zwpnVIGcjfQp>i}>`ZqLaM5)?xy3K9c*%&-zJ>0xm^Z{}HKCZlj`P}p+uN&s8j_K~N zBMmb+L&<1IS`=Q_AjUkTKVZ3E#JSquvvER4+#J-XGV^j0Hx!ip?A|sVA%ePZ!jfnm z#Mf9oi=aS>2@H~|JStip2oDbI%^y~0vScXl94rYr^PXZmL(o0gKQH-p=p0yE_@yGtPc0;gh>bu9DO^52N#l}o%(HTGPF9-G#mIi3)`a$Y2ulJJ~ zI1qSyn0;g#SC+Z9t$2_tB(|wQ$qhZkkhVFSX=1)p&|!-AzdA}{cDId6Z)(T6zQ(=f z@mQyrZy%kGj*0GK?JJegQ!wzub{ErQZP^Ygj?t*B4-CtHZ{qfDAl#kzv7}IHtoLL2 z-*7~4sX)IbKLd`>l$gC8q`A4QL6XyjbG&cd1_V#!%Tw^QB%{;|F(r2=zmy)7;D=x# zCE_TClGgW`+YFB@zmo7JuMmz$;rh6o_hZ~pIU7mfcE4p!Kms(;vwmQE{54C!@?=XP@3hSCrS~#JgGU+3J zE2&gDwtfEfyowwS4w~ekYv$Z@0+ai(3nTR{3112q+{fY78(v6SwS1 zJ1VNHvGqqo!onytz0?L|&IVT3))sa(B013|d@yg%#pEA?aS2i|S z38Eo(04nkH;HBSnt*iYoLIy16L{@ORul5 zUpBK3LPuCwSXNZviV@YrZMTalYACO*{j@!j2^>;yudmM!7r4mDJIBX=-H$JD<06AH z89=%a0u_~&wHq8yhSLS$P>C9VhRDo}HgQ0!(b-NyLIPjphsV21+jqVF*F%|q>g%H- zYTVIKa;fP_8L2ozXfSsi@2?d*B*iLVb{=T)bKFi8_1DrPqi|KkUXdgq(zgZZF z3Evdw6=5D-n(3O)tgn#L@yIjF0g63A&+ltOENra1Cgi-lb!O)8GiEV4bDWdB<14|vCN*H-rXIO8qQCpUu3O%@S8n*#iRfT8$~tI+CIHl zo%w^m&uJXkV7wqcA^`!dq`RBBbFW!Z0_u~hx-t$#5B+|pdcqRG}t)C!w2`})s&*{ zu*E=#_S)+c8qsS)j8qwUdGU0atn8U&vm!GtwHim2f&%q7%s%|tb94_{_sRE%Gj>2XAs~=lQU|hjjw%oPyTMH6N8kVitIwu$WhV`Idv#D#sD85 zw~n2I)dL;}{)H?t#sf%pB6GnBZ7tg+ATN($T_GtdNujzIM;nueFkQD)9i(RXhuvl zvI@u(N`dzki;jw(m>M&_Gi+z0p`;8B5dR`RWVURbojpS?>{m@#Os4!kUFHog;TYv)rTS>q5O}U!TTWuxe)+LxY3F#l# z4_5}$aweB#X3x~ess|BE2x@9RARe)SA})XW@Mv3qb|Fmdv%n>LkU^0ds{FfxpMm)W zB{?#AX4t@hYiHL4e|yK>*zEO)q0#4WuS0HkK91)HJt!}@H%|s@W0QFhSX=Gqt@L3J zm^VpSnTxvaF0N`wUSTJPp&3s#Z!`*`-m{KWzR z%K;Upn1!O2Nr%4)$-q~(9!A>P{lb1Wp{fpRx$*_OB$sV_!{Ikcu%V7INr#++bfL+# zXs_FDA`Ifzn@o zt7GH_=9n$q40+R_QU{ zzBw;fV6@Z?uZdCKOFT$bif_eW78{!xMXIhYim3|Bu9HCY=9^@j<7A>{EUX{{pBEfsT*>1;{H*jEv5KIaLQzIrOh$>h&oNs2)v3o1ZS=xL$0zyJ{f_o5G?gOy^kjrV%D+>$ffJ4dc?$Uhpb$%@{V+Q(!&W;WQ zUw(7*0}vns(IqfZ0VM(SXUt4Yw{mQCd3kx=-QDf&?R9mWz??cK2gsb*TwLxpm&}@d zy4H~KKPoaS{R?0OXZW7J@o@`#qupASEY(r;(zSgZKGJ&H6Y+QP;d%tuI^W+=2CiTW|h)$B} zGKUM-;zFu7N9L#a-|oKqJQfB9iVwPIzJB2Pt$pN?#zW-GkIQ&2k2)tJo$}O7fLI$b z)ckZj_Q_ZP?y3tKa3DpilxC3ic^sQ9bh+^8XnAxKTyXE}?Q?tZo~;*zPk%}kM3zt^ zc*K2}3J618o?NJHe9r92i0AXD&Yn~NGWmIr^ZlZ&Z0$`33}hMp+%ZESX&CzT4J_RZ zqGteJsX5}gb(y^Lk=L$cJ(;G)3LpmoUL*i2fg{HH2Ew0kY8YM=w_D~%6H;J=fo$#bSk32b2 zcr?BFN%7GS`y7|6BJgg8??K6eot^#uASB%bCtb#I_x$BZGtAx9qaSzJ((6tmwA&&4 zkDxFc)%EmnP`PkS3M05yudJx}WVfT{LhOCCb30HEjC997TLVj||Mm!2uSzKBy=Hs| zM)Jwy>qk7Frk7eA#j*-AalCdvwwK1X8cj@0=jvs^Fe^75eOTPq-U!kDWNXIlG@!?1 zI21SAv~jZ{KtbZu^o})gp{KK;vZx_q$gm=A(;#O}7`5hpQJ}9Tu=T5%N`>oKjOO~1 zGOxP;oJZj7eJ&POEVt_^KQra@Y;;g!utQK&OvaCfZAWVA>LD~g_=lPg*>6?_Fh=vP zgVgrOLx~AbdHIHS$*Q8u2%t+(Z|5u9@_I9sVo3L4f|tx!W;Z*kdJQfVG`knJi`MI{zDQ?UAXS(<=3YM$>;ER#WeG6p`7e=@Pq2jE+qd9scX>t+MQA zEiJ8*MnvC#Z#{3$C@(LF{7YKe=(b27&nF@(S~gMD^j272NojHM?kD&gxBBkw=}Y=H zzTBcBiSya2#IZy3#K+uQr~k~xs(KE@n66HW=TPcUn6$+1=6uzJVpB$`{JYepMXrDE zPiI{TxUeoiF#7Lpz^8ktfKaSd^JUkf@c+%|+r_3w5vJe~lK+|E{P0zL|1ZBam%f?W z5&6i({CiK25F<7`DROeRO#t>HO6LQVR?_siV{=5TOfBNV#iMBs{R|C}7L~-2Jo>7m zr*ABkDQ9zby5#FubJcpsdF#xHl5GAgE$uh^2X;YbO<+H-otgs4sH<;1bcS`A-fz}? 
z`rnHjR=%lcOdu+uHz1Ut=19V?IjYfjdMDs*mFfRvwQO{zYM)#bOP}Pj8*Heq!>lWe z6dfHsGBQa`PxWBMnDEW}u2i!&%c~FQ-p?8wB{4IB!iLZOJ5I6`sOp>`!ee(BF*A?* zBk!U@zzLawmi8@%;UjarSoU*IG#{|lwH__vU}NnWnIi8l<=${<)jKIGE7#v`y|dQe z+T?LrL57&ACk7@uvJ2n&iXpfWpS0bD$WUS?`%X1k0oPkW@-^8!S>Z1^Y?g{cEmuN`&jptHi|Y{~ zTGcXMPF5{fXB;>#@+hbC_8ma|bFX%ATL4EGJj6{jbT-PuWOX}^V0)XpHa|T4+kT8S zUFP~sIx3-g4OrLH_Ddx8zd}MGV@x1aXZ>U6U}0vqes&`Ods$IYb+gvO2z@`;WY+uI z+`qq1&X|CpD^gHE86;inwDtMx_im6~vAI73jVP44pY~UF-v*8VWHmUIGjTvi^W?rF zx3CYDr}}?y6Ak^UMu*uThkRBo;fk0CpH~m`LWxy(AdcK=F4peRVRCr1GMZV{rW+Ul z^S~)c`B@7nU=`5~dF)e_1^}VknO6Fn~#7X!J-dk z)JOsPA|+|cnhXnx4&+?z&hSuiMDwSg@6KpqW|BIe6Oxyb@R>|6tA5P6XVz~6HrvkU z_$W}&<^Ud>b9-y1$71ahpnbYNHu_5+@;T2gWbc&}o4Eh)7;;Z)j{Z2^=uZR9|8WbGSKL?}Q)JeZ{oItRUL)XAbM@Vhikg1Y%1O^z z*Z@033dXZCxCp#P%dMFjb8=XLZLm}>mz~w{}=LM}9#@gW;W} z#X=Gp&{*mb%`}@{hkKzC@wwd0T7v0gszi^i-7-5mpI%YyScu&NI7(0@S`LYBk_WG* zh%f&SbrTN$f5nQJa|($(h$Yqauc#o}ngyr3{S9eNO+dbKq_7E&g#k zx-E7Awf78W=l0=uT|9!VX>R^fzh{becUf67zLmvs&dxsfN-HZf-knG36ovrUM#5XB z9Ciqy5un48Br#hkFw`8z@?MfY_oTw;9L5a>`xQ(JSa;1UMy965&!OCc!286Mh?!2T zFyeWEx4h8rl`Fzpr~P)$IE}l;rz+}jBR-< z&Am!3=H$T6C#NR-h=+f(S`nJU{TG-lJ~&Nx_VT;l#-GLZ_V$MDFL5}2TC$|1{@)w) zX_~C5$s{FA)~M&GwYPaI?|iW>b=n8-!w>GL*SKj?*)Elo(55@>i}~jtR1A%7sm-Ua zB_&L4T%yP=7h25I#*9I=S<(PlG0Ib7s^a2OM^nFAGBU|iPe^>;G85(BjVX@!5gFNT zWLwHxSW4r1uZR@pu1DPL)%(x(7DWHbci|o(NXVRO>J6x2n3gzW8ItM_HtdRYgF1}y zNU+I$8>f6nFdgAKKzeRfJHKiE?_B<$hx5)AJe;ePg&WsLzX7gEi}`B%wd(#NGne{* zN;QZC6MjY4CmX@*)2D2?8CoUv#UH^r<735K(6qzh`RPb83k|lo3h!HLSm^x1?(f@- zMSl@7QATE#i4K?dl5e=I0kK8WFVjNSK4(?oeIbt#MRWl$JO;ILoMgZ%@d4UsQu`0MEXz z|8B+GH34<`fj}WsC0f>luctd2a(S4&xf>lh9>T7Z3?Fk96|CLko4Fs)SU>Yg5>e35 zR2X$|o*3c$@7?>jCs~?_=o+ON8l@5A67H?xe9D%-jerPppL_#XMkb)rYl`s*|N^wCV}gYf=eAA zu3ifB)x6vB8z@JTXluUX8JL_2kr{0#-u2Ixj|0Yl12kwnu#i+wuvn9sQ$1OnSmCi; z-=))|EsR76I`B9=T=JZ>rZoBVtzltegLOV?R7pM{%2Z(kdA5H2nG>EO39NbnB?ZP4 z$jLz%aH7T;Y@($!Y(OZ}(wm&30Z zv~JG##`bPIK5}clOB`^%ZMz%jT0@1vDR(n>8@+y!{}HEweu<=%c%k;K8aWM@!{|9j zhDY|BU~oumiUA^G&F^f=Xhms>J}{?_wQ7&I>AW= zHumV~WEco|~2_VXE%gPP7djn zZ$=)8wrtNc&*9%cVd>czse~#Iveti*uh|=V1V@|A)E@a>;ny4v+T!uhzTKVao67Qe z=5+)deA<-*LIdDDK}0An?lEbUSAKei^cho3{I5~~Y-O8Q&FlY}Z|NT0Y@J4{=;&+c z=pDIPhI&p(4?zv?Yd9o}O#%ekBu0^pv3+X+N@nzIrOW3WrQ{fHt`Fd)u94EW212}t zeP4$ln;u7La}lO}HPr-vUlF%z99LdQ#Up8O>nssx^lVk}bhY0%C?fz^QcA`kA)z26 zpz>*yY4iG_#6{TVME;c<|%#h2-N6mnq+Dk`mt;E&ej42~DG$^fZatY14xrUBR(wj?S%3aA1-*j)xwT$Pk))?QmS z2;6Ot^&Nme*pecI^~BM^HMFg)B3h_*J<5p@CxH*bV}YBEbw88lxn(copOPBxe>B`<3-6cY=eu2=+PTD{{O<+a zYWuh)|KCpNGfNE04*$0^%o1PzC*zi(0p_mwcz+TSc1O+ZgAk7$8$izm>S!IKw53H~ zb6n86Dl1DuSXfO!vzV?AC-xU`8jhL*Vypt{-leh+8(7gB8(5O8>KYoLi5qCFmz74c zv?#L2mtR)KNW*o-rPKz@EP+;qipsAf6ns9Y$^)l(TTV@&b!@z9D%KaK>mBUbIKi{4 zWQmtBn49YD?gv&2;33)GE;(nd{v(e$G2;z<(!NSva^Ou(jgN+bI)!2RZX3n=dZ}p& zF|pP`BGObTp=58NtD6bPn(|`1seE`(Qt_%BuDnnFL{Udt2sAVt+4>vS9y zfZ{nhIT1D0+Y9jbSK4J3-(=0rlM4#U8)Ee`Irm2(CX zq#qmTwV#@V8iChsrRrHD{SsR0w$-BPY|%~Jo5RFtWd*53mZ#X8zUC=dz%T*tXtp1_ zWS|RAPUbS7yw5IVdmLahSwDmMu^1hz>^h+aI7`nyAAxdIJ4H^GzC1!bRr8d3f(K5yw!jFce$}{8v&;D&1ql3R|hE<-Hp^!VZH)v zaNKLE4F0z1GuDl57Wh= zz+N%FCz7QaFE9n3rW5H^Yu&-ef@d3v)7Rho+Rc7zbHif-fsUF!h1K-&hrVf(a(rsD zGmrVCYuK3<-+A{wmn;9d=)-nTP^sEgcfc6}+!gb=9p-Y?`ZqgS+>xcF+6wZFE{-&sX1?sbT(yW$qQAr8px5}cT zCsV3p{wRk_F?P|vFu^FiBkQ=<^0nGs{i7chS)J2~y09U1=6-XNE2ZHSS>;-5vjb3J$?Y7as8($WUaY{L=Nyy!?V-0^Tje6f&A}oBe3X7 zLX&&`0+V}5F*QyQ2~e_maBo^WeLms`Ci|+i1~^@V=4P@Q0I5UA(~?&&%q-T|)hC@b zld{kfHI7dysVX%d>BTmf41_~a-Oa?rgTg20l9!qoU1Ts)P8M1FjD-?roK?eg-| zL3_ePZn*3meFa73DD-kw$$GV{Y(oHVy?V*7sd*4O8M9hJO-&>4_pOsgN_2L-R})dZ zij0hkj6zXPK}KfrOK1mIHJTu_sU$Sd=CdomoPiQ66!!){-u?sj(pOtES1BnXUopnL 
zuV!X?l)OL%KtfF(DwfVUI~RB*PjaUyqav6&0vZpV+s=*eiBMwt`HEo0{sOgs_UXC6 z+hIVT0!+a8_=NGvN#7>a*yw0Gduub6OJImAo+1Gp?u^HW@B%<->DV!Zbu#AVOJC5g zYuBUz)NA;7XkPh_lmYpb6NphS?USd?u;aC~WJOUT3Idr5XK;IZ2PGxMKw~CTtrh&6 z`{SQ^tC4bm6fWYTP8}__tD?dGR$i=kw2hXRl#1@%*m(PN|GB^idTR2+)=WddUyh)n z8-RsO#cf~(Mx)R{Wh*qq<9;6tdQX-+F~tUXnv0l@xXb2aLXN~lstO9D`I%Vx2pMN@ zV#CL~*Hl&2e%2X*1R4OZ4W}nCo^jJJR8?`QVE^`xHqhmkXI0YDa*L5HCnm<@;kr-u zNi?|c?(GEfJ%J%;Mhyc+jY%c!49=)nZE)OWq?hDm=RYGZMHGPH0c|*m!f^0YyT1+> z)6tzGz5?Czs4xTn+rv{pK)_XpDXOQEnQfqFAS@sxQ_fhk-WFRZ!P@#vXBFK@p&(Ph zkgP*`C3kl>!6E${E%sP`kieWS1~AG7vSRjWuW2y^(VmM7yfDvd1CQ*l(4G%b$KD_DyZ|G93@WoURtLc>MzFNNrr zEV(npMl44-D}bW3aV15!$)U}Eu8!v#xD<{R#}ryZ$%XFE;=+}>Tq zH#E4C2W=Qx(oo#3=>D8f3JF2_1n+gkA1@~d5`e&r_lgq}NBnI-v!7fy?V-32%r)2q zU>dJ|vRamhHSbUo?LV-x?Sbw}DbZ1dMOj&;xfr1C8J+yspv?JC-LO$5IElwAk>n6M zTn-}UNM@**$iO&Pmq>}IivyE*{%0VH2@)6Ufz{XNbK8u>uSzN^3Ml)gBBDF%E`~~7TbVO5YFekUry<^Xpf+F zJr_D@%gOVRMqO0)1nxq*{l5@K%kU=;KQCU0+x_x1MB6i=U^h4)Bas#( zHN4Vkj`)4ThuCAKAoeB7iu`?Q)#2E@Le=yNj$2@VGpMf=?vj5fP8( z3u03iQjy~5;LVpc2Y|lEOdZXDW?OyW_;rZ&v}&Sj>S=uk5u-D-zmMh^5wxIwC-4xo zCH1@od=PsQHNi1P}yT-^ftf9`62jnIk`iJk~BBK#Lk6(2FGVM?P#e=+FVR zlkJ&4o0M3JV8U9*M+WG4OH~?7O3;)JX@5$c1kLQ)E%#{d)5xVi(r%5#2Kp=C(eAC% zncrmF3oau?prk~r%^k*{etdJka2E!;xJ9<6o!vag3#rpyF`UVC`eo%hB%5VR`}C7hNdIzlDlzykd#kEc|@p&EWR`gcNq zYEp?4BO5djpiXB_d{`Zk94SIhbij`NRieW5G@V}oaubMzlL;$2T3p!z^I^XzkEGdQ z>*)Q56JBoXvuYl_x`_1F4jT$VeAmZO!@0u;U^EE9UL3vC3a&g4{?|+SS4^2|QQfUG zj9X(SoP7Z$jk2OLol2dstn9bZ>k$f`!6;w0QtgRmO9n`#&g30coYCY%q84>;$|TUXdWcW* zx##^4{t?(Iv-wvLID{NZocK+pQm=0JM(wPbAy7IU157xzqEtXG)ULN?bSkg2z1U1O z1r1bb8|vKp8b?gtJM_BpKY;FX|Pw~_FkvT;|hDLBoyF3V3uh+H%I7va0Gh^tnS9> zzgjqqC*X$G1}R7X`t{t?oC1G@{tMW<%D&&ISr_hry7%LHgWHwYd2kV5*g;KM`7+{* zb;l-mXJxU#gqW0CpzQ-?ANypp!3Vo|T4@8a_c^q8)5=ncLN|*c(u4v^MROYR^7YO) zZD6jRIEFySXO*JstDxJTR=JLV9bgQc+wR4L|B_Qtv3*#@J^89(1M+2hTH5DOX&>7c zk-p|Z0yak0_cpjlx6ykwLXDq2)SIn8X*&RnCP2N>ipF2QLx&Mi?rHlu31*tbg>}}- z0qFg_1z30D3_QJrrfDxH5*>DV$&vrjxGwf@`as(Z=##c|*V(z1wLgv`KXlCLqJc@y zyXQ1B*JXJboDZ-Riu$j*`c|jiTGY|K8>g)13m2>;Dxh}Jw^6RrA7UC)e`Wj9^Yk>Z zgJvohoMiPF$x_qPVvH1)ucj29od_T zBU;}yoFIZwx6%bQB^<>F@TNkEXMwa9kj^&gv^15pP@vFD`yZv?{bR-IAk^hg|30%D zLVEOW5f(Nv(GE29P;1a>1dmw6We%K15oFptO+uL?k&$vZr5e&XpeD%MaPf^ksAs<7 z$wlFr%rtf$j{ji1N;c>yy0@aEMW&`KE&YCf928@)q@x_i6NzuP$7pKpYJewZv( zo4u4DvyTh=PbOpz@L1YYLeBr?0z?t2Pe#4z8rxcQx_JqGwJ@>n7!w#L7Mllm3$i)Z zV)d1~)uySLHIUtgrBp9!-h-0>oDgT5_xHCmn&2Qf?lyV?{VaokUE=k+laWe01E2H) zpwUu>+umM3L%&w3`+kTe9Y&Kvv`c)38A|-|J}|dyA6F6v4y?`FrD|nsb}(A*kITZ% zygV3@a)a(jC$6A@`^=`fix2-w_8A3uU@U;F+_D@FpfE8l8ake{8yaF`*1I2FBQP%2 zO<~tXPJT)DiKd~R3aQys&&`d-Wm3W zvwLf1LBxR`q@SmV@Fpy!y`UGsyG$L7w5 zG%@{!dFy6NHsR;!)>hUHt2<@?_|NJX@GlJMH89~{h~o^_x(vv3kNC+@YSbIMfab(t zPuAjXs?r}z=r7s_Wyj}GvM?M3h@#Y-mknJJvVZ+ORX&WIoy*c7+CVCeuB7>OuZrzh zbeOWipohbq5;m7om~IQb%V3u0Qx{7zF6{t$PI-Kia^#cC>U}Ud`-*kLG%? zsd;kibU98c$4AV2Bg3b=iZ11t6dMz9$?dkkxnTnks+vPyOjML=6}uhggoyMe8Or&l zEzj)QnOjBisbUTORYXv^BP2Wk90EcQg33L;3ipdqJDS+-y`ASRfMBBK+-zlK?RvX> z`+%>NfVX>xLPSx#Yq8{_ASbswKe4h>EUrno-@Qv*YuV@pQUA1ZGfleR$pByV`pp&If8Y0BH)7EpNs%$ zDO-epU)(UQDKs_t_j$Hk;PW^U)GKHEj+yuZs*Hza+o5yy)CQT=exz=5R6s zBqr$l;29Hi?8uFt#!>FE)hAM;L>nI){>!>8GrCA?gwYCG9`EqF=Q8?4JT%tTE-(bZ zNq}9Q=f86S@d!XA8{J&_2(vs_5m?5y z=d4PE*cUYwkPN!uqnYWLU7wyDIN7+D6c@kiQgUrpqSyScw~ltC_$aX>}Zy6Hqg4Hd(zi z$@J!2(*+f4Bny{pK@z##+`n#|f!G1`) zaM1hb&mqwIhJ~eyRSDY<_7>P70QBDk zAT*pqBQm@ntA_0o(%gU89(j_09JVz)gAIXj3RF62=xC-UNA;NEL4}jfK_^!RS_bcC z4bnH)w|ZEOuWsmzc;EuzHRvAKu7u(`@#-1=NUXT=KXcUo07G(>7#! 
zW7C5)ik$-x)hzM0Nl8)c?C|Xc@Z7T!NOt;>kUR% z2p*TSqoSgnv1)BV1S#}WiZMj^r6=f`98SzPAR!6LRy|=o=;30Ni>3lT`~Pl(YgPrc z#!lh&i1?ddkhlJOn)a#10s7$93+fW_5r1K6sHj9MR9$&frE>eeh=Z5166*j$Lfo&& z#MmJnMt~X7l63$+pRJ=FbG%TbEj4xb-p+1TRu2B|UVcHrix>XZwcMv9pm$XaZCqw% zF*w$?xvq$#qur*dcr*#gY7Ph#|20q`lGc>0%KMY``?rdgR4DhiNSoZk%!y7{W?uJ4=!4@|?NL}0 zuxFH=xIEl@gGoeDkec-{2v))722S|xO(z2c-PT>bN3EG-D`lXjj~Et`Qpn7Dg9OPa z%5q$HRg;GaXUzoxB@YOV+D%Dz8aKu}NV&LhAP^^&E;Qg1ptHJmd*UowLSM&^NkO~_ zYNqv$8#$Cty|Ey5QZ99S_H{q+n^@qhiX7=$_q~*LMM?!~*9I$4B9b8Bie#pH{jEY3 zOk@6Uh{cZGyn}J9J+q^2^Zn=N91QefUHJt{86#gRO#$}{e9-vpygN!7j7V3RsN^!q0zO0Jl+vifH&UKYB{%qpy?o^v<+O6#5H z2+Ev2x{?p4&lfc7A`PvTadQ2oQ(|2ku)pLd)QGSHU(5iEm9}mn@1avnL4eP9- zcQ5q(Ba(yH!^~V|%K83q;83joVGQ|Am;w$us=U-|u}cY=kj* zWTv9xAp5}1{DB=40&NNmhT=v?I>k=a7_{$*xUI`GBHtyvHQA7Cx(2rXmsSp@O{-Hm zqI8s$FCem)PUWil`q`Uws%k2Qzu}>_w#=NTavA0byi7h0lOLx_;FG6_H*n2D|Pwz(j`6ceM0?eYr`!p>GP56StOnA{dDjX=V}qg#CQ zD@9+jZz8m`-1GNP5ZGx&t3_UZ*Q7^&hv*8-#>}fXX?1o=QyR2>G66u zl6d7Tm#T!knp&NcyN>SX*NWJ#j-|}s9U5e7kH3ysG2kFzNOeRb(ZU!I0`rd4{co9> z7zq7QY~egLRhFmgM`A8_*U4Ku0lD8&aS=x%i1-WcT7aM$051emZXp>WpF=}S-`03i z1&9zWOH!ZWNAdDi)<*HbJWA|;r1yMTRch*+=A6j>xVPe2Qr=7?Ten=E-$}wRb$$~< zhIw*<`Iw6Gm@ER+MIbHu8Tx?e;hp=0_ezXcULwp~8_b65Eg)IM!OOX z1&)^+x*EZG6PjG${9Gv*%E!})oL_e$`>+%-I&h+t|2#5%7%#?npZ0$jRGnyjNf1}( zMIaM>Kz~MLbhbIpX4zF$xMzg=r zDlsUMQAZy*il?XkS5_WbUNO+|ay_GDXXWV~WqQ??{0^dQbZF4L&I>CN0SdLc>oN44V`gV`WIgAoe|8mZoF2QtGvTmxZM_X-9<{ zhiddt+pWMtu>VOWXA2+t)oG%hkK7Kg|1p!5($YG!zGN{gGitrIv9>*3sVSjt(VdYZ zbtq@)p{I^#KP900ob(o=K}IV+bVkGDZdF)il&{c8kBN$?sA@^7Nf&5xsQxmcGJ4K^ zQSO~tYb7xtdPnkE0`(!OLs8E1rRaN?+f87CGb6NSPwW%{}d0Qui<#U(t53jMyQhnv;8@HsZK zl4i~Oy20pzdS=t5qg8Nzt4Yxho$chU`uGsBvvyrAHMQlIRQ?VcvsC@ta}fg|jo|!S zcQELu45mLyu?grZTBGJ=DmdS2qu*Ts*t#^d8&o!Wd@iiytJArEQnq!C^i)(6O(JF6H5pjtC3Mg89PTy`KkW) zs9Mt(mz}#&O3<4CP!hFLD4L^PH=c#2mXdM_n2dn;(V!r+hN5tC*Agz_aJbRuN8DbC zQ?eR&h79il+(+S+DaB{p{>9r0JEv4bFkT58fhjr^SCK!11aX(sg+ap6e+-a58Nb_{ z)2Y9e#0ESZNzY2#nx;x4n{)6j$cJ=bXoVi&sfG8dj zF%K?$Ch)|*4_qh^1%sf9Q^7pVXXO-_dpjBpItc)$ZFH(V!D^j`BQD;Se7;dEME-!^ zVAW?$@x&5L=1f`B@aO!G;8xUd>g))o=TeQ3 z4YGav{yosK7t@REQ35c!G_9{7B^4SSc*E#V(aE*@7`PZ9)y&__qgn^J&G2iUKv~PTt%r8+3%FXfIuUYSdhM7A@;Zq+Cp>CsraHst}tn7jUG(_)8DBR&CRKrm} z!~Yt#EkhY`fckc-#*-(>+SCFCvBQuu%HM5&pm+7f{|v38mHgakYk3+#cTgUlNdG-g=dnpGo80KttAices!n zE7RC`(q$*sq~waPTtQLzFH|(~`O`C0A9WP1c(p9d%xkc4o?PMu;GFd{1x=YkIoNJT zak{2ubsGf%RNk~i1Atqpt4=jy*S9SRCg$eX5gU{lRW*B%m$YZ23gVXk&nm0XF1ANO zKFY?FIXNxPUb~z^zqLwypYYFNWkrh5$FgWQpxF$Rf`KoCVCMJvgoNudEDwP51I(Y6 zuCRY_xV?+X^=PpAT~LI!i|NOcrNMz2PUdHe?HxmnsrpV%f4rurA^YHNN}MNHrB23B z7vQ0xnTteq2o1yj`GLs!p!Z$oH%`a$jn#z>g-j)%WX{_N9DeOTo}8khBD+7C$SK%} z5zahK6Eo}Ok?HY1!Pnbq_n-s4M0rO?%bhsFh#ur#>BPHc2VI5X%=|xRCAQu3$QOKG zf7d%4Y!J!5_8Y#F$E1;bGyzA}D+i~*hUxl#gAyM`sQdoLLsqA|?jaz7jR$mERyw}o zbu{ROfQCYKJ1wFJxb6!uf^rrUQ0RZzJ%pW{!ZIt(j2!9R)IiY3z2bH{k?-NLOSz}m;h{wvMt5)Db6Bn!oj-Muv{wePdcE(L zB@0fixOjL>+c_TVze72zgzLx*_NK^aVh#GQKEHY4u7F1(^2|(smJnemoaiNR zm`%MV)4PJF1DH9MRYzzUfipcn+D1j11F%DbgLu7W=-^v(bI5_gsHW!m)7MKXIApB9 z-?yCtWPE~ND|sceuGCzA`yA>{_C#H6bqUO?%(o7eHL4pv=)RDB zih#xbV*)35PU>nsymO#yFdhk|Nvzo7_Lv(4_D&@_8jRNvk!4ls{BU#Y<>^Th&jR7S zyR#sB2iHSj>N`sM!n8a-@9sc|;_T6@`q>Je<;S#iys?zbF|VfLZWnr90FLrRA*I=% z#N6B*NbFdqd8 z7&$;k)G_2LE?&OU`fWy_4p7f7I$Ez;H1pTlQEw(qF4?(hdA8vKK*(iL-h%1z!lWIW7s4Mot7QKT6_;k&rB1jf<&yG?(UHbwy=2GJYJzdi-AeWsdm2bXZH z5Wu=g=dHr*#XQZTv8Havr9o8Uip_}{ukf}a6b92TD2VTB+z&Pa#%n_Qinoy)^&SeG zKg`FkceG|H<-9{Xgry5in}VLg=(3@qEWrvRV2CVs7zg(YmDJTe76=-QSiz$U4A09` z=qRrvti{CuvNmTex+Hm3dz&&pu;lEjjokpM0Shw(&Zv+H+3CuwI3q`CDLEA1u2E}# zpS5NKmSf8+xu)COlhrGzg>-NQDB7-Cj977S4iWqM^Ki_^>=YE~YeQKT#sk&2NMp-l 
zvdKUocQ$HJq@he5YPO?vA@UCw`+qNf9Aq6_eYN`K zFL-Ps`@zu~r=*0N#Jfa*&tY2$Ot5y9EpBX=)o**8Lr3^KE_Qa$P!FcAD4E+01Ica%Dmao6$=Lk!$zOu>6CZGLPR02LwO*b)p z#iFpXC9p?JCPvIh4uXCfemytM0De8SKzzjB+5S^RgtLQ_qN*a}Q(8t^S{6>usiR4v z^AYbM!=gCdhddB@0ZxPC?IljvSO4-hE$FbY*oHR$31y$dYvrU`!)vAG$K`p@o*0R0M4D8`;CpY zrp^hWjh4w?T$2X{@PHcD??VJ$&Hx1Q_tTJwKfJJsT?NItv2yNQp}LSyOSK z@>_ava7Iz~yBR;$%nYN9%rG3BP;5b!=ipW`q-d_WxGv{Z21=q1TT+Nxm`EThnEnjq zdl>egEuIPZL`#cH5RxXB!V{6PJZJBwKNmqyt^88HcdTDXPd&i0zjIJWTc+fj$V%1O zSyh$098OuZ3_(n6EaX~zyuAbk2>bvm5!AEb!=sC3Bu_w_k4F3?v`q zm6UJ~crSE)ZHEknl7 zpVHBIJLOJjXa~-1__JxPL24pggy_oI&DJ|la z9UAIL@UC&rw&E-=p4Cpr%kXY^TS-Zg=m*1xB$lY2!nZoBq2d-(a0Fy^qwHw$ZJb;#V_qR zE~R(f8-#(ob9Ny8(D*7h*Id)`C~kn_*5*v`rX#Qf_=_|u=IF0p4W0d>jC{{0*4x;t zrRGfT(?KncMu~yJQ>!d1YdQJzkLMd9RWY*@W{*2kO)X=-ttJOQ`2M7tdpKLCIXQwp z^hf9edGbKObRZq%(Y2cn3kRn(W`v@4c2caG5;Dp%A|#3lb#(-=jVYVm==e~qvGp0RrFhg&L*>r#5&vR6}^@5CGS)1)Xm8vl4_#=zWCG}8$n zX)c1S@^a_M_wSR!s0RDY&8(s*qewO_K{Wcu>0i>~>zcv^`Tx@buoK-6nVv847b1U3 z+s->N`B>nH>PzN$WAB**+T(w7_YC*-;FB?zxl`Ufvkz< zDLSYu{2WXn_!1yLft2uT7$on#ZZyrRUZ)@rLLl5u5S(Z zOhs3j%VhwQMyg{)VaG?clNnxW{=9!cI9$fHZF4)B%@7mq-MH89u?;R8t~}j0kA-kT zL%c`H1aQUcy~4L0h?aMMDWI9||9j>C{Sg2Cc)|5TTis`^f>sm$pR}1fRsz>5g^6(`-uq&S6fwA=N~Dfk|ds$mzVp@A8h*EjamK*6T&)j2-}Ay zkM)Nox$u?qv#Osk&P~@(e=QV|&>jLWhL=9ezUe8MUu}ML4IkBCt2G(<_BBOOm9^|!K1=2yJU=70LsHciZrsn6%rkp5D}%mYwFMdw=I2&5Sw~` z`~TkoP1)U}$cPABccPf&`237)NI)6J@WNF?hwF23Q&JMI@FkDa#&&bleB_byfAYP) zxW`;y%og<;T{>UGVoyqXxIs+hw|^Up?XCk`RNB1KqVHKsV{bn3>rz}r!k!;R>6=#h zMn*=v&vZ=G3SNjCni_KW^g6NziBjHi*%CaPjUE`&(k3EW<-h5o@udxw4}6}BFR*v(|JrZO>ec-_8#@r{=o0af zyw3G>{XpVirTcqDS@OYY$YKRyc+_QZb4$*D6v z|4MMX@UK2$SaEUW)t@fPNKrL;();i99a-vV(~H=-%8Hq*9Ry#*y=|L2vYb^>*5-Pe zPWa!GDSSbOj`q%ugk{R9W!IZfM6SHQrk!Wsy*CAc{_zx>4wBJ!h4o;+;4ik$Ww^A;`SQCV?L-RIke2hDJZ@G|1 zNkQUEjq}hIv0yxlQyNT(%ML}Gt8RA2{co-Q--@}y@XQK@1VP=yYHy>fUU&yG0XviZ z_jX`Xx)z?CHu*QzTm*wC(44=b5E>^D#aMLVxA+BR!vvr}K=LeqnNh&#V3U|>o&0LO`o)xo1ENu+;#+-Y<8lx%>5)VddJ z9j{8p>t}!Pdaa8Jgok4)sVXM(8y3gsU?lc<-@8k2#`m^uY}@-}WP)GX?A z(nE|J&zCPA)-bQSlNClh^+o9zmBwP(6U7DGg@Sf`|t2#qN z;PRJ%wR8D(K%wNZ?OJEjHex8&6 z?f90fj+WY+L92X*EIHD`+%(yZRae4$2eivL2)0XU;xlVmNsSYzLKc30_^M-)$L1`X!%Ly$+Z_wxWr;o8ju&-9nhKUaqFFciW*A8+e$XMP;ACC z-4tGngvuN{DYMf`8TD!pcf3mQ8AwV?Kklq?-XIMo=e0R`V_ z!k>5Sdc%jF4NZDyVf{T=Z!k3Nz(lUAv%}+p9ly4GZS)u11H-x%-s-A46;(LyPfN0} zglrFbQR;rDKgg~y3>aV7J1Gs_1a8LB+4)+-n`!N}B;8wNf|pJ`WbKfB=<5DMK7Lh> zI%@T>_e+*O#*X_R4#!dsQ@IovmfZY`IVC_sthEJa$|KTfAEMtkE$Qu^O$Fqd<7eM0 zOL+XG`n-WfAZ2Hsz1t(yRKwNC`tP}}wilcg3Q2r*kWaFw+c}{s6Q#^*J>&J=tvO+^ z7?aoni-qs+{;x!B&k;w*{#&HHJqvpo8I-Y%2{y2jSN87Si6Y~34}Nr{8e zENKebn9f7a`Iz!U1g6Q z{r)c==fZ~}Bjra>O@kNSiXqJBYs=T=m@|KIH%u{+%MD@34bQFmnj}rkBkJ^fcM-Pm z*|$HxF8lwahCR392H~k7DrX|_r<4~%Q{6YhoBy1f$iKXn(g)IG{3m#S-_XKLY{0L4 zis`hpCe1h2myC={*{>dza|ilhLFpRYNy{2<^d;*&z0074+l){XVy`vm)#T+zrdy?S zbS}s&%yhj(9Av%~N2W0k4qcIdpS?8J@iY)u>(wZXFrgL?A?P);$(pG-N03Dtd-dLw z-J|{P&Rz>^vrFUgc`x1y1PKzgcd%pS+Q4zLHq&2To@YlSnip)X8Rcm^$tVJFRF$0^ z?4G|_g}hS{TjK8JwCj&^NJvb<7E=hKNRqeFwB&kP`Y*{O2=X zZXG@^l}K4}Ia>!in5&dF>!!yb2OKr7U%tJ_Rkvla7a+H)*|XAlS_T7^~|Z z$!+98TJAh_1$LLT9=sdd+oYyX+JKHpmuSP?e0g;6_Et2Y17(6eDNgHSe_vj2G8lLQ zdU*0ENj?F+w~{w}?F=PI1WN-X@gWCe@CBLs+9rmh{Lrs~D*L|~1s~Lm4lO{2agg;v zX>k$pm8Cw_eP(80gm=DdW$ituaA0b)L045>ohY52iWfbo6xlQZWj7iB;8w$Seo|7B zh<|pTb{V!O4p$a~X5e;7vi_k8 zJ#ysrb`1v(U&*`*`m^YRC(wl;4UN6apLg_OPy~ctn3=6zR+q~s@{vN~2mZ9q=aIj~ z-M`$Py=$~3GO72N6rUT**C_|0q^|U$xn_K*FGb)jJF12yXSBGo%10LmJEF8)dV2S~ z(RThxt^uCa+loc5eA~lCmq+Dl$d~*~x1}a2I;;{f{uMC(1if`z=>Cboe2SpQ*-TQR zCjHSttRf7GjO`^=|CojNf{YZt2I%NAGbu4{6k#mXz~BrV1kwkam(s`rSu3Yrc5WIl 
zXR{{+MZ8H2iF$uSQ9~3xiBTL61<^uJl1^s<71dLwt5>FmWs6yE?Q2P}SwPVQus9lC zf4lOcJn6Klfu)9)T9Nf1dqr>i&HnA^uKUJIi*5CC32YAQ5njBCZU3(Q)|xXslaIzB zbOG_r3fO>6wYus7d^^v&-zRhY6#l$l%ukYz%PPve2?=E~oi-=Sj0aCxP+WdPt-9Qs zkPsh5aK4-$7VQTQEk4IkWA8*yE5_Oz=<2E}pW!H~0yqJ;Nrs@$#@t$y+4N_V8(Vc% z9IViYm>6hfwQA>AzJ4`$U}x`aYF!UvnV55l1UOLa#Y~qvPvAa$i9W@3&JVV ziE0ugxjvM&f3({3=zVeV;PLG%^19$prJ*$fjb}c%K~gyuUgcPYRDZA41bdZ?n0mZ= zvCCEGv+_5+Oki^1>K&RFtiHAQ@l`#2yR~}VBpWdk=YsDOfQdS~uW)%k#k=`9UGLb! zpB{clGeDaWy9g(z<+0B1JA(2E=R!-yBJq1HbWZO_8GLTTwmDE_nth-^#;BbeT|- zyXn#W%Qk}&$Q0JxD7ieU4e+rE`hyeJq#YxC5e0jmvZm_vPWr{M7<@N~_&yIWKnZWt zt}tsG(M7)~nEreD^{O#FYhoftFgq{!R&<)YB3`mztq#1Fo9l46^_pIz-aet{_TaC{ zdObdeeY|*49{YKEdrp>#A>HKXm0M)CTO<5P|IjZxC(^%}BnODr@#^|ZtH9i|7iDll zA3fw_q@%z=mdI$Y{T{U8LS8lfD0~=GN4)yLdGczmxvf#$|BUOR_mLARlY)zwn4e7j zw2;dV41#emk*!_0`q;H_9cGPAK|^3xip#C3sZA`yzz`N0S!||E4WrpexZNO_cJvol zGEe#Jv{MT#72e8l=q`yS)%DtxZgP#^smrA|&kaWHhDSw0ferzc)k zUZ(fM-m2z!_3QHe*^*d4RaG*cmS1kJCeGVb^&9_&Yk7@a!qE^J9d(>J!ty{CehFNA z{QNp%LzOzD2psN-iOJ{a;DMP)%%UhkzPK0R0 z#cy)E>ZsTmHhzxhBegkxW0;hryQ4Iij5d|PYBwcTzwr=vYgwAnv#qkE@2Az-DUP-$H!=xe5kWk}xPHyOTccmmn| z1-;G0#EFBf#b_r+GkI&s_w$CLsx$x3Qx|(PFKKC|Gnej_n9BY%D1xWCtGCAoN~^ zC8k6r*1y`81tfVO4e)>Dx3097sFAFOYero%JzhXr?ELTJ_I}AIc|zk$@or)wDn2S0qMfp)o+&YE>sB1qOZ58kPE}{srN*S zkZ==Q0t*|zjs!>%f|RI{_X6Pv!ZE9`^S*HUu%ehbD(eL-1o3qlq`%7T!J!f2kh?J+sOT}U^ zzp|Ce_`l%Sv(7T@c+E;drq9)zE2#HPsNsLkswR++kukT=_#Pj+y-$7fQdN!tkxlr` zKqDpKcBIt3aq{GmKER6(rgFEuZgUIegGj!r+5#e|Mlg=e&kd9wBE^fL30?SQRIrIsTb- zXi$-`oYge6006iik?=HbZufR}RJ#n>q*aXmnLPeWzjtf?+i&|y4_s2RwPnk{5*G7U zn4ceR^`4T76QEorYA~BtRg6kfdS2MfNlC`pzSWM4KhfR2tgtwg8!O)+9~={d&a}Qj zlf`>;wa{BbQ`G?;vwY%FuYyOaDoW0)C-G2(X5%PcnhRWEWPCLy{lotx!NAI_&IUSy z>@6oa4_rEOUguxgjTIf+nwrKId>EPDh|MYe@T=Y$6#~L)D^BuAe$#Ptj|*K{Aj}?f z`(#?RSGO%^>cV^0Ow;e&8LJLDo3z?~_qO@^yXQZ{2>;k{U0&UXTKkwpV69|3*1IfC zHWa7tlr-i}CGM4Qdv8Y{(^C~Sr5f)gdcVhQbGF~dc1DQ^AB@_}f^_?jt;64oh~GYw z(JgwB11g@`m>3@1U}tEWSDJHcXcxMLy#GoDi#LHt!K&-`5}6oM5XH-V7P&Gs(R5+sT+bFdHzi62nyy6PYxj=GMR zxVT85>jIDQt`WBN^f~cD-P6mM-Ma#M2;wJ3m3Ey7PJg+u)a!44AN|5YkO$bgcMv!l z!jBwNes56;Q>NY2$##OH8xHE`hnp|``SNlLg4~ku9^MzQwSsLtrBA~D6$AnyzA9|} zV~;RJ)zncuic+2W4PUl}|Jfh@H2{DNHEi;(Cq}+*A*}{Dhfd>`xK&g#a(&WrJNH%r zYC#b)b3=s~4343-q;-*BEv>B)H<=jN68lz4PA^8voz2W>-p0ih&5`jOY0F0|4%kpD zJg9K!LQ5&DU%ZF!Ul-WkLx-odd_RSQcq9)&F#VpB2L^?N+{!E2<%VJ0K;O9Xy=>T; zMUaqYXkPBxRl?Pg>bv24_LCZcU%k{Mx|^og_E;gMHjdkBe>K8-&n9Y0SKQHwB; zX!ye|D<93eo0r~utUOOaO5HDo7FN8PnVDM0h(LMyMe2>Clx!GIh)m}XK2YC_h`-Ig zX8<=4kAzszVIrdkK9zPEef^i!@sOJt^u7-!{5j~%!7X_Q+Q9%A6uy z%wNZyuX@@Z9`@$qFKjjHwaE~?n+OJ8jtbGB*!rdc-pw0G!z!Ob%Za${JK48@vAgy?<2NBAj+$=F^p1AjKr)XT*yKy}=wTPRI zcXVW8U(8m>Nlw>NhmW=S;WIMD@}1W<>4jMbBzuWy;>yZwtan6}lgX*6DJTF2Zdf_E zw9!FBu>ndH|Lf_6zM&?^r zpgu4QFc6qJlar$YurHo)+fUJ_?s)H^Tq<+I0TUXW)dPhKKwm`T-+OOb2@9!WM=7q` z8fY+FXU-c<65=hJ+wzjK))p4(YDy0Q5wlFY_N6#iD|^3z+6Vucv$K=2@rsm`l$K^3 z6(#lP$S(-LNK7QcB2@W%o(A}{l^hNzbX+>jZoD014k&!R1Cjf1Fv4?%&YP$(rep4| zrl1fGbdZ9Af~?Nh`uU0UGoSH?vNw7ugwOEsXW4 z85mFzSQ*g9>oATEHeu(}AaI?am~4E-3^*M}*&iCSHP5T0+=`6Lhi(R0uMGC zTxCN;+7L=N>4)(7{txXO-uKOR>E{;8J2`vjESQ= z__Km3<2UEFWg|ZdDQMEGSx#xwhKZBoJ>0YuT_S&o`-Fz7wxI5f=wtX}K%JlRQ!%`2 zHMr&Q{abEfZEn`Mt#tz< zla)I`qUgL9>>rBk_K+7^Y9C&fl(WW0e!AhiIDD8L#{~vjC27iF6yN-?%#XN$BVYt| z0Kx{cMz}uYXuYpk*;pOgjUvOM#SE0po#gV1BHl#%jaU4b&CM<7Xzzs49Y8#)-8WhN zERs36kBLcx&wZET-Pn)!%Q0&B*3TtVVBRZ4yu&%@)7cmc9$@HNtE%GO2sZ-!M{rp{ zHdRf+-`VA(r=moScN4-<{e)F+XBrzVeNtE1N4;((2lHvQhqGfw%AtI6k%BCF0w=J% zg!YGL08vLz+d=oVV}mAL;yqwPH`O6Q>1RM~cD`?wuGnI5GpBzye>>ZN=Zmji#5h0x3`M8gd+B zuf1BEKgH+PL_ThI-3@8{u924=_l&w^(zu0L5i8jb4+GRJ!df z%-_9+gbBpU7MGP9{*W6C+SjLy3|&1QOn5ixyXC-R 
zgO*GY*bTZPF`D=CJKeoKqrV28KOW9qPZ&O=7f7)&(+|ew4WzvjrBtMmR}y8Y`Y*ib zD=27LrW9RO|AD@^nn&d$v#+^BaOsWjMN?v=r6+|2cS@kTaVAE6KBG~9fvSu&3>EBL zVL>we10(EQ93Tgc`Si)r#jfcUbKo@h#3cUx*!%aR!G1b6u7Sa3R5UKtVFo-31j+!7 z`QDR*g6+wX+;xCK)W0Od3q?mnp!q@(z8@@oNF*%!q}FCI2uRwDucpz_>Z|LodB-%d zJnHf;M4YYREH4HnK}&`T;H2w(AIU6AmmhNx3=S$m&5t1AZQM8;wZfaLU+TSLpYLpf z*2Z?*FcNv8p)xo)VPa`S&cJZ*-epxuiHwYLvsv6FV$8wG0f}u?StfX`t%@En9YHos zaQhaE27S^D;pJJF5JFl&@$}QoIq@T`CIc3rN^jGhtn8^9+7R52MuhC7az6T{9x#`O zqZqI}K!mR2B6yu0AN5y|63{&ik8nVVWkZ{D9GZ?^86&}jmelt z^xAwrb2XX~1BD_f5&%6Vvf6##Rji-(%hl4*A%Z%OxyUDIY}EHmE!(MS=dRUFAq3H| ztbga%`(D*Ya{l0&Z_nK4%9BLq+uS7@2VH}J8$d8qU1)I9>edqfPvtSiZ$8uO?A3#of_-V)rog?T^KKl( z`uIS6Yg#w@t(4(muZL%E?ggHqo@J!JW8`sOUF;vltg{XDJHb^bxb#-iRR(+X((0>k zxoLUd(*GQeJEVqUgtXqwZ?yX{Ln^-T*G(-DYiao&pV!eq4j#`W){8^8hySMqxIwm( z^O{b&i;lYQ%xrTt0``}3&KPv7^3$BBvskX7Je0c~Y>WEnZ#cdvbtIj?!PtoeDD7Xt z#$_@br$`zSQz({+g4_>S+Bp32tz0JzRdZvAOu#%(~olPUjX^ zY)VZ2?Hp~vQ_rBSD~8em?T@ybXVkY7c671*pSC`p6?u2!u?OxiZo}t1yU2GSE{3SX zZ(-nsdy`!8Jbs|a(FBb>EI4R(0dgt$Gd>`k6%sVZ{g81RCLAYQ!6S{vNXJ|88+w$x zZEhSK737b8Kx|gzKt_h~=8R8htAUiXG!RH%fBMJvCJRX91+t2tOhy z&=CnTtQ8j*ZKZwMF6iOt^1E|q!iW743)H-Ub9-trKHp2d@E&}MiMZTIxOYs3_UK&% zH``Zo!k)6kzfFnJVH@glDXiC!mR5`vMqMj&>q^`8?EEk)1QaFK)y>#0-3mJi z>%SoGxsE_Z<3f-na_)T5Y9R~pU+^EsV?`A(OtErVPbTWUc&ZR#$Q-}Iq&r>i#y_Z3 z={zLJ%jMIN=5~t=l|n8iaCHnT>jNlU5+ zhICsozCt$kH?9z2-0I{9#&iwjyS+$Gc%6o`xZ_AEQ6Le3WGD{?u`H=7e zbP))U$1OQSdLloPZ;yFAK+=*0e=h9(2@B&rB2^0ZYJw-7~f?8TN8ZD=Ufkf4gnw9Qu(Z z@gE7fmA=~#iM)^AlHmqW;N8^4{s{}f!^IlScmGy2yk&Vg`;!PtI7|k*fa4JLmUZ6d z-9OUB1kKCBp4xq%rjj5&G3$v%4CK}8FBNu^$)8GPKY5>BL<8FKQSuHSX-ca>zIK@< z(2x+geY1Q^ATI6`tuW=h?GHw@=AWJO-9hi5Ku6sPao!RP7Ld5KG&OYkJA51YZTk$~ zUP@Z3*`Vn98zFK$VBWT8%TPmF=k0!6N_cA0j}%x5JdxKE-H3q9QFUW>KrM3CG=bt; zBOC{VJC)b=Z9?qjiLgl-=`H}Rd3aVu?e-k9KE!#+wOrMGcbx{`6hA%ved0SuN{5Pu zhC{h}6`U2NkI$ea0j^Cp`FEImPC?yVtZ_)eY+bl{^5=vPKfle3ME8wM1LJ6KU#vn4 zsavP(naZnQNH2N3ujZnzzd=G;z0=SVuj<-OyR1!IxE?=&hR(w=WmWi6;hN?x&^1w} z69OA?mq5%C*L2*RtaP&8dt>GuuR63xr=E2?<1zr%WKvQUX>9t@jhe_NtwJmPrYkou zPy8L@Loe8JCPx`ZxY*(8aNYP>mfeOoI9P)F@DIPkJj@Vkf=G~A${LZ+m>t(~w{QkSmbvFR^2iy^*?ax&Ec184V_J!`;~WBxuV8vVauYS z>42dAX|LUzRMs=!P1ZB)5WIUNF1uqirDP&9lxXkVh}k~=bky~FXL~5qMcZ$~8jpYP z=m9c6OT7h%XFFwP1|?Gpf9kh%R4}saY@_le3z~|XyHi))E-hy#(-41mhKFd--=}SF zH5=P&_IS@!T2{vNvZx@Z5*67N#CEqm=XxB`ZsGt-=<}yYMmjn;GJ5su%1xFceW>R9 zXA3ny=|NaJ2on+C<~b<#Ag=qo$v|`-h;uN@YDx_>0DK`@;h3@@VjLZi$c7tG?ZqV| zrPv6UvnFjlk#;Zk&87s%9IiD$)v51p?R^btmy~%zok>ld39>d878dHL(@Zw8)&N?x zcR62#Xc!q3WTo@-a$(f4sJQ;B=Sd}YQ%gG@0@!(X2VMW*3-ky!!SFs5Uxm-KsmxVy z`2nj3hM|Wc-{?+I$HVsOmDt_Pbj8N=$B6Cs5-83Mnd87;cmsV#_@(_v_TX~~O(e0I zhMw`>6FgXf@TXAs8XILq(CLUiP<`_=jhzAGB{KQs%4arvov@LB znmBd!Sm9T>&8Gh~U#wv16Ap}~^R)T2NTt}@&HlU-P}H$kl;ZBPrkR43b9<h|aOe9LqsaH7 zU#`l|D}3lK)6dPV803(XtuqpkAvE$&0{y-6YvtMINh<$b<28zDUD4S#gQ8Vx3LZw% z%$ndJ6og6PmYv-mfF6*(0<%W+g^!<;orgQRxcz07cBPi^ay@&SQkL0TBvP&La3(~j zB=f83@^byLqf3zM{RgBEnT^x9^54Bw)jg+py}Z12ev0ZBMIGvOG$y#($N=7A<&^^R zDB04>kig(t$Di}Ds_XF@~M$YhDU-1%bI4?1$d z)oVW7{JS+zBaq^AafnKrk8)=H_hs9Yv)Lb>u2=HN&G0~Li!16rnjAt+z=fDjrxC=4 zNnufT*Wo7jU>Tk1$K{JE)vkV>{-G^Z(!F{+LK5QglRV*Ye_T}*1h&)NK&IF({uPfC zC(-ueRxEg8D4z6T27NCo3Tib$haF;``>JlCKel1Xen$|yDI`dj$K^t*f#I33X)>F}lwo2w$nM74u1vH-NsS@|ItRyRo@bk= zCDk5(H`n@7rd*szXZptu=Z)F_J?+)UD5%#P3oX=Y-2ZC4JsHR$OX-_+`27HWedDK7 zA;t5f&9N4f5&+{*Nw^$l1OK_km7k#U|1kSf*tNNhi-CpiJ8Clnpe;LVKNe0E0ny^l z=HdhVhn(LF5rmnI19vUTF@2!k70y6%zfVs&F}c&ncjwMV}ld|;<>XB!@rc8$HDT&x6>|5(&{-`j0KzixVw z?lD$!VWQ`D!;$aI858J}#y=MlxVk#VH4?K7izJ!fV^=u%X`{<+R$`u9tGHl1E7q_~O--Bii)vS0%3TUk 
z>?1z5n*8bE;fIb+j$WK5G?nu$L{WWq_i%1Y3~O14%;nC_8!luw08K3W7Sme49`{_tDgA9B6>OjE(ozWtrdSfS6;a|ujAMNYD4{~WtqGI(kKfHgvn^cE^w}}bb1}~o= z4e3uFNztr911Lgv4vUF}b+~^2JfEyo6gRX!SK@#hDAjm?(DT-!mT2%{mJ{q(KvRT_J___8FQzd_Ma7=f*~kic&d!RjP=ue0|9M?&RNFs=IZs zk+wH)6q3O?V+Y1Lz+oYExZkA|m|(#62caFh@iy+6;y;b<_uYTOY^EeCaPIe1-x|r+ zaz1KloVqv$0MM0hMk~c?*I19wk17-Tco6YgTV$o(9c{6D2^qe?G0U2`NSRf>soOqP zmVixWJz>J4V!N!ooZsun0N!}TGh9VYHGJO|v{_Qpa%5i&p%mtZu5gfrg|%OT8r= z-24xJH8-&Q;jn=Q+V<&boPUYyg(okdH5w_<`f0?xaA)R!;BXU@71lUeNE|c{fkI(0 zS+~NSFli?GQ{)ll;%V#$8y7M@(xtUJgN`Ptg*J9h4y>V-o@B=1)vxUxscL$4?sqYw zQ{MAQ5LmA;`An;iFkJcG4S-05;z#^!ZPQLSV%x;LVd4YnEr-&{+MXvg9Mg@xFnndU ziWouSm>ey2y$s>D0>hfj?{9e?I}J9yrQaNs;c9_SgilyZF>C-;*3an-4_cJUA)T7 zU^LfJEeMyA_=>ss**t!v*WQR&EwQ@QAoM(;bKRUanzOrcdn|Pt7Vf z_Po%$g_PT9%}zJF-#e$?SEWacj8_Olbt?g0>J$Dw*tKn@afGx3yc?J7SNf6*@-uIf5ch3v zld`~+sc&>_kQp_TnX&fTf^@37HnR*zN>Oab=RMzz$t`7)uNXXrhi{ei`CiUJ?l8>dvCbd@X;d|4dks%qzMk|Z=Tz_zr zjn~7#*v@t#K}LbS>UVT|Cl{Y6Q)!B%~CP5|9?@E&-8{4w3GbmhSF;r@#OEoZ%S959jQ? z?^tWidCe>0KHPjYa8jJwOWk+(em^*cearR-GIyeo$!l3{G_vF9_{SMCTZZN#702{< zG>uaD`{xu$Md<0y(c@WVWH=7u^n-p)SP$7=`xnjBOqF`!xcQ#%3FtGphWtBJ7l5)vO6ru#K7^cHN^J)! zi|g6$No=!ju6$@8P*E{<&B$k>+?Sp$gEuD+L*_lr89f&@0M8T#mI#~m*~0qIZEylq zfgz-DSnsv-A*PJX-U7xnbXMNO=#R<5?zlUYr29@QWDXDWKz@OD&Nn+7D-H6zk6B?f zaWyu(H()sefdGs`i9>LZSN8W%8v3UR#_~Q9uRbDPvNz=9r8CZ+E}cep z`woVMrLDDn_6-ClcoUjRtgKI;IMYgxC7Xc{{=JkI<)k}KLZFQW2 ziwX?H)oPR<=4fDOU_!ckdaXyFl0Q(&XjX>R=so6(_#JGW+VBh2P5Xaoc)X>uI zYhrwIL=3PZEF^aJ%@s!nhQN3Ms7EkhSxp%PjTw*qi(0$kfu)#{h=JF%l9EPTR#xkv z+6F!joJkT?uRv!0>w83|89ww6fSd(Pxw%ytoNb~-%Zita&HQ=njS2kUwuyBAd`B5e zzO-H*jBTXhPa87!k%VLo#u9rheAstSV_`V#haxH=ZJ;3~rJzs_6T$AK4I$Tr&iPs_ zjGVmO=!gil&)F2ez6dtzXv<2>nUWO< z*C+3*HkGnRK;GWtYxJC+(^J0{XIJn@?o_SZ918D^zjn%%P4ORqe!$t++5Q+o z@JxK+mCaaiQojAQ_uXWJBFI=paZA{qRC-*|-~{70&95dVwlM>li|7UO$Yj^=p+Db& zhyGaTw(aRBtOHPiAq`3fgpY~ZSjg^PgYXIhsbN+CBDowB>bi*It?KR;0m>Gr)|U9ccLqly^sNAc{8sY_CL zqaaN6383zw11uVfE4p2P4Woy-0K0w`jhY(V_~LRsCUCwL_!*j`L(?!SSYiw7ohiF+ z10D~kCgGGG80_cf7}{Pl#`=6Zk)eix5eRHF1b2tNBCm>@nu!F_p2!{MDnop(q*qpr zoSoT)6}MAde*8)39n?7!j6S*z=b-m4D;#8`PG|XFDk|E{pFMyn_2`KWInT=|P&_E| zSTEO6=Qtnq6gH&b8eM^mT1ZIfndsHds~tF#74Hxd5ydUfhF_tCh4<+}>JFM;pMVH9 zmBZl~DcI$u7evQiU-d_tKgm)U8XdfwR5M~fT?rhuTR{(lyYK4vCDxwkyirIh8Dx{; zIPJOge-|cRw{U$UwK$h(b^HD?g9oI%wz3wT;6Avo@?wA_d{$C|Q@a!~1avc9x>8_# z3~6Y85}V{NePlL#|H1wD_iZJ@qaw;Zx?2F=YgeUTjng{5@m|S3q1v@9KRCCrFeWY< z?;)|U%LwOVZ0C2PO8UyDomJgOzv>+biY`T5YAFl$scS1tNWUtm;MWD#nVuKpiPn}UO077lv_|zT-v9k37mkd%ND&-b6YDb{qhcT z5b{-xYXcyeKVsjGA6RQ_ZL6iG!Y|rjq6{oDaPxl~viCVUJTO=h`k1@gpHv4qU`P*B zX9N^~sJ>!-Uv41&O?p*UO_N%xkl79-xz9CmjiyIW$OYVw6Mt@@DdmzrYC!`8*;6@i z=N4IV@|P$M+r1CpQ7-qW`YcG-xwjIyJn#8;PJys8C%jOsB}^Sg5=MfJ_d9wvDI$p!rSagV9>IuHXe>=ZqVF$!CP~7uVL9~cg8GVk5uyG@v z%gV@x9ypvXC>tuZ*^h32AG3k+(=8r}GO@F;h=>Ramd`Xaz?rBYr#kon7zE4X-x$Vy z*)u^JRz#KzG!#n88aEKEB_{fGxfziU(h|1|N|6*<3yr{Lqydggk>Xq5%Q=&I-nuI}JlOO381*NX zRqKRWmz#h4JpIO0b~fzmdy@-I5FChpIy$LU>~Ef78a zPQ!7vE)gR_fcmwVxnUF~n(y82o%}xs>sA02f~sa;v#{XGdA-=Ty1(U;TDVhcXEBBF za$1iPm3}rUNbV7(nHOWoN1QwNB&Z@}aoq!Vn7EdAU4ck~e#;-@<_!iJnGE_MSt)A3 z#(fpI=;tUou%+HAEDE1pNO=o+rprjme&ZMzn6H&Cdhr9ihz@$v>z4a)WVN+ro#J4+ zrCaF~?%j|saEv0xNmu2l&}bXj`)d$_3> zVUVRAbRxjY!XV&qpr-DOO%^7tdYHd;-`)L%1?xMkZxRBZrPf8ew}0;C3}bq3S@zLW zlP*BL1RQD$QYR{9U8l)@!*JSm3lr)NxxTK5H4_&Q5^2i@$!1Y29DO#-)qZ}9n`Y%f<_y3V(7n}|0${85xdu)X@B`1 z6QhAkiRCyr9Z&^bQw~$c6@95yx=MeLI86|BN8{{nvW|0LF!izD153-?m=W%mE_aCU zk|kfaXWGCWm<#b9F$(y88sPMHRignNaT=x9ol*8H_MSd`hiehww~;7E1qdub@UTKA zW1Ff{5ByMX#-oddO?8cp!?2H!I8^4OKD?Sg^6clS_shF}bAf$jfhpOe`Clnc)wOSH 
za=osqPS_1p6j7K%qzazsa1atVpo#(q*sJMqcw{(OHdum^li^wl+@-s|@E2!gu?SKT zYX$swiwYD9bdh)n2$=h+AnwRm!ea%}-(aTgq4GhLV&$iXhp+*C`DOU8|HlQ0Mo0RY zc#)NjjlJ^?*|^?iZ@sd z1b8qHtk4mOVOf~4dnT!ysdvZj5gvZm(N%dQ82dGjIE-9?pY__aYu$B$z3yAh4FqHp z-V8l=34$fFJ{qx7)hnwWcq;G0yrOqdMD1uWNHMG3GhD;5axx(2Pe9I#{^RNgpHS$J z_K)v=)zKE2W%v)48WdLEKc1640Iolh@4xQ_!42Zx!ki2n;d}RvGIpeuWr&bT?*|{QBq%2&3;^aMWRy|PExiz~)12x1 z!@79z{K0<)RPk$HwT&ug{;bha_MV4KM#Uuxd^cQoWP2c|EQJE`-ioD`pf$B5A~y?# zbNbZ4@XGguq<{O3w7R60u5QhbyhpS`j={ZOsp)#sZfR@k+_v3zf5H4t)Tc*H=$n2k zJu2ac`*RtYv?62CBSRz5cgXANPF}a*gFFz%1QskXZElA%Sns^Y2 zH__zYsh;M9zJVN2;{)CKSJ8*iQsGtB((TyD$V=5JVXv+np#KA<$f=#rD1lH`+MDIt zt+8h8iLm|cHF3T%#ZKTgL8i18ul-9YkP$vLg7oMq^e_mx4Nv;z^0&EDlhzC$I@k5} zRf;C_+43YgD$r-e_9lvQC)MKP^-JDMDwPbIH04+YwHC-$U`Pg_BM4(~NCj-IjA?4- z&5B<2!f!tuv|r=tH-QRSk!(7+>0hzz zCw{@p3!IC8-WNMpi9k>`@5M+~p}jRwj=w%2iICvD)uVe+07V$!mP@@!N#W7eFy}9NNqfJ8 zVjR!*R_=CCPPRWSL!f`yMktL+q@r{slyYtDC2}v1SLe}z6$!}H?Xs$X+O7{FFFwB`Tl13$7-ZV=(z!JhHq$$hN+XyuWD0Ja>FP~f)PZ> zHxQZ}u>-`@4yD~Ru*fVYJUkkF@@#A^rNSe@B0eX#K)eT6=_3r1#BQF-EWTyfd~y}I zVh%_+z$?EbHby69WuM^^VZFSQADgG6J+Z@^5EYg2nHvngX2u%{MzDub+b!5U*D^0} z#N7$T``zacrT}fz-H}&7@HhaLAB_ zbe&_3PWXr41mz^RxRjOY-qV`meNP)mc$`S;3U3qRZ6U;?F{T z-dC=yiPLV<3nx4Q*Jh>V<#F-E+v7`)Q`?i}-Rl6~z@AR@*OSCLcr=_cc&QgikP_4A zS0cV;nQrRNk2wO*$Xh{(hkhsO*~#krwamC{vw z@x&xjH9^fr#lWBE;`#{tc7-lWl`%-c;phY5a+^uNXoF#jNM`iMmX{!Ue+K{EKSv7f zj#&q!;dh(T`)3)>ec@X2Dzaw>l_J+aygC~eu^ig7veRLP2x{}Wd(*FY6Z#16&J0Ml zahYgKar-yTzOEG0va&Bh%GP@~k0z(20)m5~LB4m*EhC4;%6TOw+&^6-4bHgoJFYoXyAA+IiwHqbE86?r=ak;b^I7oOF{qrce;=}Kh zzkNQfje=yXlZ_SDNq(%;R@#rwN;2#{nm@2!esoJTt@(K9&)Gcl86s1z$ zC<6Metq9S3_abV@Sr&{eZ`)M&cI+=5GqA5M| z@9$0N+PS#6SX=8Ds}Lh85?J?h;?HKOlY8%A0G;ujg8-7En>|uqWa^V<^FC%|reKGf zeH5Z%@1xu$jRg`jxY7aWUtcuKRp$H;s-mNQtf#N+mw_mq)}s^yrh zX>dU$oSB(;J60n>|$GW{KIo*hQoa73hTWwnMhh$^CowMsYt zMnU`sg?3T{B;J3lB1ULvvQb25ew#DMU>uxKpWE14OPlmJ9LZ$6P=w2ZmJGaf&OQ%z zqafJM>Vjl5!^3_fU2}NXjw~kB;y(hOsXE?Px!=*vl#_3ddzUiq2+aZ!-HHGC_J~}n z^1o}^K2$P15=Dx?ffB!3yEY(ydD*L&|D5&)-Ws85Zb7C(w#)BkIzEAM0f9fwv@}n& zm}!WDtJo>|@SR_p9&BUH>sVM?eIMIsTj3?7drOBt0Wmt|Z;1I*Y7@DT-!+P&^X4PV z3GlJz$7nUjFUASy&3khI_-Emx-E4wk1iE~ebeOkEs@p|YtQJM20r09Bf z=K{6ElHy81IO00IhE5gk(`*cREC)O7 zw;TWX(Rn!jJ!3?@iN(kS+<*Wm2~spE!D8v}TC#X!OUDfaGbl*;rrqSZ1WQHI0PNzcLX6v3G0!g622fxTVDG7Tz{{IjmKQU+dJovZeEyOoVs&flk}~UKG(24 z0Na(=NLyQLMLC7c{uL3g7nI&8K(5)3`L?(F8d&5t*DG2ovczYX=LEEwi_(e-U_P}g z!| zfLo?EZaMU#M{<{NW&n#nw>fge2+lUrpqvlj32!q{^imn|?i>OP5rx;OmH zRpSj#-ivn&3gCOV%%Xh@nfUpQ#BfYYTia^y1N$+V@Ko_K^3F$^XwR4b#-v^X09YVM zGJpI+fdaQLFVhg^LdUUl`Dr7oZxb*oYNzV1Plngoum~LI2P}~k75Zti!e=*sk4~Vr zvm*^`_F@?9X6#!ba5pnM9oy3?4b>+&(z9*C1!ld^ zdryA9bez!t`EY6#&+|LXnbd>+8^Ub5=G zYntt!W81DK_W91qgY9;{)!aD>vl}zBXqfMk3%?}e_1IgMw|+)r?9eu0{;rWl;phR@ zBQ7D3_1+;LF-ZpS8wh>C;(_;7LoLeQ*0$PXi`P#sN${18gqjUp4@i%(9+gyN$U#g7 zZpjX*B+dEeu#3=S$FBoDr%8Kh1#qWOYBObL?}waf)n%3AASnTC_wRfOyCyf3 z7G1A+B1WOmf{ht2RpZ;#uf^A3_yzuw^K+3&b*@~N7b_58A#xjQx@MD5TwF-a2#=QN zZr}&A;AK(~Pou@N0?<)D<24yebITA`k96F>{cEO~o`L=ZOM_yg8sov%IX|c1y5m+2 zZT{k-DPH6t#65qGxoCj-!dsnu5PIM09LdXL{P}YbXM3g~hayaE7#hMUy@$X`zE?dw zGKBbG%uD%6N`QMd#FW7A{*`tQ%pUrwFVThXMm)E+v~_lI&QfYSmAK_COMe6Z7isK(Sh2&WzL&W9batF|zZ;U$ z;BJM}{|Ji}z$DhIHr@?2@R}$)PVudkcav0?G#egjpTDI(ok+T&kLC{_#QTH^iV0r6 z@$R@?RY|kM!di65rYgZhK(o@XxDb@N-+jJ-b|fw#@z3uDo}{qX*{`_Un(kgy^#V%h zq6!)>TNLrOh1|LAcdbx86Ne7M9$=wiN9@%0)&>1YtNLmM-#OWwzp!W)AFs2mFCyY# zybWFl;04??(>yfF6Y=af{6E0M6On&-JT-ba^|!RNbg3)uNmLP9o%`Sbzgp|WHR2&$ zU;2$ofUc+_Lk(A#ZovR0td)t8rTZ#DM_Yq^CGWrBVF*kH2}Zw5pj_LG7|h0iytICu z{Yt9M;8Q?A+o$A5S5pNi!SwXZ1kC&66WVc}=VsOc&jNxcCbVXET|ry)__3vl<-42Z 
z;6#{1?>zLM2v~7~g9=&L`r3w%L^rq$ZrT={3hGNsXD1c*4EQ;=d^#XMFbnQne8u|^ zbjt7g1m1%(X?&t@g_MeBU}IHvsXOUolM#I0yQI*sc|qU?0VuQ!Pi&*9_Sg7AgWCHA z`Wz3sVn#G>x$(gP4ntv5TY309{9$_jgfaS;aWU9a_q#L4wwjcT!NEsA&^j`%qNu2& zsbf7J6nT?nPoB4sFWrNeee@c&BF;L3%BC`lJrRsH@g zQNY~YH=gSQ{T+^fIt6QXdV!^BPEl%MkTz%p(!W89XjqdGcyrQyB4J<}q_zZKhT7Um z$TnUn#UV>x-dZ~7?0Nj2|2Gzz>QnuZk?}MGGh?_#=x<4|M8VsTAlUiG(xl_^4c zdU~x229Oh^|I?}+cP0?l8Y!vGyWUt(x_mF0F17P3k9ir^_$=Vuy(+wYq5S9#V>E}^ z9MI-sDVO!6&B4dA2FU4E2|YauX0(!UELt+iI6lDxl^aX!ghIM9LzFU#SAGnu#6Xx; z9%E1Fn+FJSt@v|Q&siXeKMIxH%;*>393F3wae}gkVva9Iu6Rb4I4V*3+BDtL!)KoKPZ9&5HxrZ z%5AkfO^dM(oIvHu{@_z_YF%cW<-g*>*V5SLyVck)ujcVMxvbKt4Dc1M8Yl1LhdC?{ z$B@hSlBZ+o7IL`%o{oG!Ma!0PWIGjHGH5y_ZZx)b>S;)Qlu9>Yd%${U6c`$i{e`_K z0n5_#?mdVa&r~^Za)y^MP$G>!dc57RqLLTl-Qf}+66EKma(9hJKBM?7DdABo2y)=@ zam%!{A;>`brdMg&r>4QK%vhV=)ZX6%9T3-|__&>ie36f@r_V?N-IRcL&()?=V>A9=##P3En4PD%)SizURv&4zz}e z!RTi+0~SvBW|>eR^?nt;g{}K<9Vxu&6Uch}mB$)R75K2G*}o1ibgXa7U|@h4)WkBC zg0!!ClW4okt8-e<(a8%yb19^hLHR7;y@=2j3 zUdubH;=f7tZ_jS)guOSPo~d|j11#WR;c%&&!BP~`vpqmHwR=hRm{ulz5H_L2hZ`^e zPww6Rw+=Z$xp)f(uHxXhi|t$nwVuL5p0lfTbBP;wqY?54-0{9(_}cyHmR}|=xFW-s zeJf;vxwnZ~C3qM&vi+Hw3TZa}{B5sq6ZA4b@4&;j`H+n65o1t`I%tbci)ns0!?+;) zb~I$x$jH9X`kJ3d5Bc=44qmbEf`}HhknRgR$vlORBE30-*=RR?DP%i)TT2XV&x?4b zJ3E;;I9~T{qckK)GSycr_~TNFefaP0-cM=MPJ^?(1EUcNqG)pzt6Zj5ySQlY&UVG!iew~O!wF3mobT81boTO&QApG!0;hs0=Ar-uCel~G4Lw^ z1L7;{l*j+~!~Q!Z{PAY-9o~Ais4r`qh*WPo-`W0N_N~2U#WfnzUD*43r|lIfsl`N>6y7tDgJ^=UJ2h6 zUTRuNKd@FqSvxsuo)|1EE|$?(80lS?gl=ycs#F4;M=ICtZ}fv z3xFxe#cek^ar{-aHBb;&6D~ZR%#|{Thkn9^bZ-Bq?OKC=2bOXvYA~p zvsnk-0VMLP45W|Nexar^k?sEe|DFWZLNW%9|LvV)?49GwYLdOt)YL2V45oiKL!!+9 zn1#=;w;mw}d9|=EuBWh1XBN&qId9&$>MN=y-S}q7SyfT-!pgnn>~ZzYxgA*EE=9?) z;W21Fw>7~@Z|5c=b#iK>{qy}Znms6zlf5@bE@6H-uB~G;QF`Row+`X!s6L-chc@%g zfdTLJwd`4xtkBs6*%Q@yfS_Qe_eoKUqKdPM*H& z)(|of*DO3)E|LR>MHyuj1hOUoA1`^?d=F3uYwqti-(svC_qE=+kVOu!p5iSPs?w^ zE&1Pz{_o!W`^%M)M}1felw6=C*cpxobe(j%exkX3y5kqK&^+FFUUs^W^(-c)*22=_ z6Y##laK{&;!1bCgg+oCxt}G{b*m?FK96BM00tc$8LA_OOtp9rZ6T(Wtq@thb1Yo%0 zb{lz4&;v9kVTXl<_0JAG=B>WN+uo^Y z>!TpSgk4u(;ym{@XbJN3)AERsqYaUoH_Z{@)E{kutlZQp_tdr~G_>dLd=?`Eqi(%x z-uAA%{HK37bG$)?NB2ulz0KR%VH?xsmP;3AsGOGDg$4H+&CVuihq;_dMhn02yww&{ zu~N{#n(98Ut>S1RcDg_Oa}=W(m8bR7 zFYTtyEp+#GT=R+y9vz*Tz27NRF*3VZdhaOk~KBFKL84+<-P1bdc(VmF)(KoRy6JE?}Az}_2Kutsv}1`#al!euh`YzFMEINt#kv+12lJj0Q$VUiO_4GUQYlJ z#{NZ4T7U!tW0zcnIJpj& z1{TxSCT2z^X8rAkh)=q2NS`$I|1AJ#i0W)xseorb)tp;opIKbYAk2`Nni?pUdE#CP zY~|yrb%0iYg?gFrh^|K$=0ue)^S_c8(`Doogj`nn&o&ke&lUIkLD#+|}m4GP2U0KPH!d`}TYqRa!E4NMs@XsXREcT)i@o zft?E^0siN1$NT+#76;3$24M%sn>v&y=RM~P>yFOyk8%9>_jf^<2yYP(mY{JIONs)j-^QJ$AG7;N;|w?Z2<2-^SN>N;_WdVXLJt z>#+EI{FqgX9t!IXt6zGbf$6+qHgcsTJoW1OOV}e`nhP_Bm5$8YR~+QsmCr|+81oAg z1g=>3`agV>4hjrPlM5%WyReQ{=h9?NxJVZ!hTRk*b74l|#cyZ9`Nr|p?`wyr>({nk z^r1_mLLw$eWTa+F2K|gZQ|v_K!G}p68mhba#pY~UE7#8!B;}$o5x}&hPR^n1)X-2T zAzEps%i`qviTHfUp+~UDmqMcKf`XopPpqPH%E-xqfFfcToIW-&4_M|lXWuNIks^Qh zT@hTo*gvCBeH)CgeVDm9l%$HpXZmyrQpF9-XXxQm|^&c{XoS7>^2 zYT8d{x;R}t@#KWMFFchM1}2Xb;z#Wxq1QHD39hR8xat_|;cNgLrE?xu?0)~lpMd3A znwv;ws+?VlK$LLDp#bP445qb&ufuAs(CXdJA2HuRlyW+EpAZ6?0vcrW!+u?U$4_@I zNZ;bn&}3dsT^q$U#Xni!p!7nfRUNp`ue zB|MxloSg)LNJwnN#c0LzEB8M(J^gsgHG+EA_sP3mRCQI}T;i?az z(*NTE@W0_d@}!Dyq6`lBZ<0MA(wvoD>l?HdVMMFsq$FSj7qTB~A4vu(L7;Cff6uU zQw@r+s%m~Cwg(6}B?D@69r9HIpiGF&kt6u$OVQd{upO9}XASv1 z5ZcoEo3?Mo2|^n0dI_8UXhXWKt?lo46Pj4rSeWLpp3p_MW8%04>pxebj*}ZHY0w0o z6m+#<5c>Xk=T$=d=cw`Km#~K1+$!dFT@kQYIsFn>P?VjXnR>Nw`W(%7c6Ey*>H{J2 zo$ZNMxrH4pfs>Q5h=DywrG$_pu;rEg2X}W%T6Lab1AcmNtJ62F$y>35f;K5G%3g0&k+)=ujoU z0NkL@KewOJ%&pw@3JSvXEw9PXXX0m}tWSCNK?2-ZxV9Z@If;d+7|Va;ePyNs2U&}M 
zaU{Y7!L)Y+X{dHP(^KUo<}|U=3k!u=D2Y6?KDXyF)(4}=Jd=g#KxSSVNXN(QTcMJL zqX61Ah$8Trt^2_-@c#S(DM@m?W(pi`(9o_#gY6I?&Q^IHm+wj6zZVBxFH8zJN*woY zyNjhk+jx_1?sa9aufD&e1d6Dsny!K!3L^JFK;^F`@WnwI4Fw~Znbd`~BQVbdM*PAM zCu6j((hKIn<)?r5Py1hCRJz>~B=V{-dn185-uag}y%%a=(Wa33*`Nk(xbGDvrivzKVF)eysD29 zH2eZ%P*PyxfZ-eI9{0Vl)(~>izHM0|xtD5P=W`Jpkh5Z8Z(?Z|)NFi9he1gA>lL-4 z&fD|+Q6pe80~#>hGd@yPk9|N*$8+uYKev@g@m^4a6w1pfL`UJ0FU?~hocuzJkATwh zUX1em-2FJ1?$`V-^f>LgtKg?=B84rTKLiu7?YEnevY7|L%2xk-MfFs5SQ!{*SC^=8 zFMz`ff=bbov-?Gbb(9?ZI2fL2Sma0ZJ)wDPVSn? z$3SzUa}Zl738qx9c4c6@XuFKnRQeK~m|u|jF5$#O_}WwK^~*QlV5_Y3nH=vgYXz6Z zcd_Kh07;FDAfN=yHuo6l9|6^`+l63Gdivm0v$B$5K=)D&4mQ}ae=jSCY++zys;Ho zkuG&2Z}&1)F8$q(NmA-$WwoVX02kQ|F&`2pWto+^f6;Tw3WTUQs=3LElC=q;`%R@E)Kg_bS5r z>FQZsasY%bd;??r{>9Sc$Yx@$Uq$7G!kmIXs;hMjNpx9wU}2_w|I)jhpPP3R>-ci5 zw{@oN_}s$b1v4lltc{Dm7zvZXt^oM#EW(CBT+OD1Js=+7R0QPueas-(?*xdj0D>A# zo%IkFX_$#^noUWT5>CE;#B2=w^TDi7_Li3Tu5&Yztv$&?o|{3<<}CUpY+U+fHb#aH zZDsMtb;C5QcLtz30zK5U3-#IJpA@g9z|r zIkcDotHzA`G(szps|I%QNE;LVr;z6fJl5mjnSJk`Dl9Lr>53&syQ|JoqlkNRD{gp% zc(=gVg%mSx^3Ra2=FI2FczYXDHW0-8ImQ4l(>KmDSDnSX zkR=GX{U_Nk;zi)JHfi?i6+1T9usN&#RNe7P%i{Ml3HV*w=%Tv1SbWACHs9WhPp9$Don%A_94B{u6(+L9 z^f-LPKOv!OO2J4?;uXSVnxH#oX)n=;^L@SbT((mX@caPHo_He`*TF%*Y5hbEInx(y{9C9#H;5MT~(c|6y*i;5v1gSiWnLO7)SVrB@;cdssNR%%Ik4oI@XG* zDP1!AisYXSxvh(PeZFXjBC2orpwZ;Y-S_PnWMMkBMmn$^{#Zb*w;?aRzy$aEp;7tk z&w;P`A(H@XPx`N2HU;hzkwUm!@8WB~xUL2>UICeg_{0K81U+fda3;V@uV_%CqM>o$ z27>ggx$Sy;+9PoZ(S7_C^#LeP)S_2-=(Wt_wDU$yO~-j)K0+vM+tD(WUV3`5N7sw!~O*1uVM_bw$y&+wn28q?vH zvoka;Eg}H-w`Wzajiv>D781(Ugjbg*S3CtqIarZeiv_dccVYIa)jKCze@hzvuEJ1mXo+Fn))!g+ zTspT+G-y&b-l*pX%Mg zkYUhq*=WN14|;6adq?f2>R)eFgqANscI{&-Dw8+Y?hpJyLC^`5)tQR@zZdOIZ8}bu zN0-(-+cLoF2o4l>Cw@Ro<~Fxe1S+YtA7}8>!Q7)wI`U|C5MZO0Fbq-QuRxBFkZ5UZ z?|^WN`vlRGwbv)(AVMzwHw5N86nAmq0i)|mr+0PwVcwKHMStN#?0Q0J&CEdR2n%bs zBxRjYF_qP3s*;tDD^Y@~)`K*!C?CEeJ-%eVfmB^hx60y_pWC0B`Z4|qV@6R}R%NEy z)j*lNd^f-}(dxs%@Zf)CksMMcDX(`4>tEmhqsto(&_n^4r>Q=wp{4H4SxE z{mOSz=_D)bF#LxA#}l|@7}6t3D%y31tHkgfw6v!jylQfCr5UBVP!Y-~r&eUxG*>-N zOvD1asE&>`5*MlCMU%IvR-#DAes@YJW|WZXwI9_s{naDuxj)2N-{L3dJ zYljy%rQH=Iejdh{$QNRgqr?7$TGxNviWNss@j<{d(Civ&{S|u+>Ymcz!n!3Bxn1Q5 z1b|{iq&=4$Y3Q87 zM^0vNvT?9*z-uvq?ae|h31M$C|EGry@goH_u1hOma}W6}ip%hgguM@=KYylYW;xwl zVIWI(+3$$co8SiK!^ZjM1F%=sO4&|{O(NA91qXGfUw^`p?{vn<@uQzMQpT5G-$3@4<)nlC{g1bk zxI8ftn4YmS9!RRK1bg8%16?Cvv{!+2>drrC;DN7Sb=o1NGgtKT9s8~6ats7`R0CWo zY!8v&b&6`5s(c6QJrkO}fU@BeM0>Qg-9)(K_b0+t5r?G{d&O;oWD#?B9RX}J3~)h9 z;CG1=qnxRKwF}q#_KgjQA?rLYj}Y^_c=*wg(NAUiVAcQJ5}MmT#m65;D8AhC>^Kv2 z`?d0AC-xcJqFbLea`HKx=>EgB!mP8Ctzm0I1t?X7=vYN|hDneHP?O^B3%}Ywi)Od? 
zg%MPRpuUnro06Rt%urxIFPy45KGG|#N!VB@%4DTvgUSi(c)#-1Dg-Sswx!3HxhxYL z_!Xy1=?XgDM@l(|`?GB0N7Cfs;D9io-ThrG50CTHruZPtz5e1|i(+j5R#Z`4v4Kkx zLjC#*#njLF)>e^kqdn`J0hMKB%=f4jxx|!^G)p2MjNfp`MQ$T-dMToU=|LcMJ(@^;HU%qB6gSZKsp8w)yX(Eq zg+rvIpDyxie?`8#Hg4qf5$0<$Z~Y_;e-?o0`EFzFB3FDR3-|11I3x>$J6Y?8`UF8E zEF|S4HdcH$I~RM$NmO*KK@MAvr6@=86wJ2={cg zWWuNW?_L313MDl7Ktj>}vnXkz#)}qep7AFP|9Ko{<>z;{c3v(H3RIWd0^t}hVSQj& z2f$MksWWups>6*&Y>-ximqFe3EbqguzjcY}XBEo`1*c#{qp6+t&%LGSCyHBoP9iV*^F(Sq{3cV z+b9sM@JnB~&i(4Dy?v7Bcx98TkTyT4hEt(RG zO^!;)%F6TD`rQ~G+Y4Q8W|p@EOMTJ%3yOa&I){{={Nk7|2@h-XsqHon)ARj^A4#<^ zXC=ZvGgk94sP}y33d@t0ra-rfZOQgC6uy8(likNcz*ipHNC~Rl{>{!_@<|(Odvt`^ z?sNNgNs1x2KOWm8&q{k@h_xo@jxo5ws~GBdKx8gpaB+LNXW@0FqJGUyWPt|uT)_%^ zI)p9m8%zWd(UE0c#5MUdGxcLG7{kDUC?Cfg+Po&#B!CFs!F=h+3$>!YaAk?fy8DnM z??VD4G)Yi8+r?()$JRhx`p?3I@R$feSD;L8KRng38T;wbOK&14QezaJBivQ%dZXsYX7oz^D|_H}1Iw&46u3Jsg$Axy zJ4^qGB!iJf^)WfB==0~+UPj?nU)Ktl%)x-{W0SG3_^(@-CB#;Yh{OI7D!{v)%b`;h z9>jce&@+?>FT-F%7Vvq4-*?~xIZWu|?DwC7vBwoW!Re|2vt(fxz-v?`w} zXnd3-u(YyTJ-z{v+1*S$TMoQa*Uj^!ohpZ*dIpgT|}g9a4kylBT}0y5-sHv&E)4Y}8zR z9_Jg)>u|im0SqM-l9HZz%Ev>F02id?(?bZRoz#6uO=YrjfCI1T7c2yJ@Y!u+1W{~A zca%?JBM4Ob5+9esdmF~PFljsp?Ma;sANUbeLRfeM0Y|LY-|@iI_GR|fvcBi}OHUJ3 zvDd`NC(Ir2Mg;QA;MC%5Bq4((Iedw7)yS9dD+{8@mJTWw;arpNEp(5vZ;1NT-ysNA zSJtRM{%N7;|Ev#wV;N@0Zp{sKr5WbF7h-ozsR zSY!FG!qeBB5v93jy-o~;A&O5_hNkjBl2nI_^CoB45wh~9jPiJm&ArP_9-?y9~7EM1@D-M+f~GLKMDe5tdW?Pr+&>a z4+Aepq;FE7Rs!QG5JVnewLt8#f<{)cCJjm)9x?mx=HHWgq`*q1$@W@_uRh4E`0?%2 zeMGnR)z7M`mOpKVhkMPa%X~M0sD!aY{$_Q&AfWAq_4EDGJLD7Nmf#cs^dPxMOtq~H zT~d<~I?|2wD%9eTC2 zOQBNR!n?t-*RS^{EIIGzzVkqtGd9*rb`%gGGRShr_JffWvqo&yS4{2XBre;%w%Mk> zlfc$_)?@*FT@{U)z8A{sk69i`rT2q%P@UOu&LYjdaM?_lM%&LgHuSV~v~*mIPKn$Z z$1;5aA-41_zXzvi`M5~2BcK)n9LGB39`%KXAnYvY>Phb8$yi8JjS%pYf+K4yD!v%@ zh%h$vHceGlRB#Hj-;%f2(bh&mB+b?=tkaT)hLqBSR8o)qO~#`zs4qNaWjkl+Xs8oM zz+c=+^g#T{uU^97X4tJ@7Bw_7Jnz5@Q^5CsAt<=vCD5EPZwH_A3Dhm*r;9y*&eZW$ zVm|w0RbFN8ZuYm{TuUPOK(x&TV47i}nHbqIGBEUyjOeTBGCpD)9~og`pbRA=2kT)s zzA*@c=;d>xj|&hrJx~@|<{hK_RFac9i*bJJy^CR`&7Y-J92| z6M02jOs?9vsG8PmTU#vGwI#NHAs1zyhsK#V!7kNT>i>8x6HoyHd+%z8eP7Ga!R~4Q zCX>1UJ#J9LM%9LkQ@RnFIYhA5C@TTzd;# z8!KB|bVRm@8#_=!MPseKEv4TAQBv0OGk%96DyEyzZ2GEl5=DQvjB9xCiEwm`I`|?% zZQWd}_^q%YGCby<+V9V}gwiG9N$ZRIC@G0j=sxfQ-dMbg@#@t=-P6H4QBsdj90O!K zA)JV0lm+y?ca@7r?WcOXt~}N|##s5ybB>R7wtS9`W)3O8ruDfKhl!64{M@~0R2Xwq zb=Px#PizD|jKweNFOQFtf^odLE%-thWjFD4PJ4g76$n8?Y`Ap{AFL3Q5P$7UkXI2C zqX>TzF4OLuk6|S$`0K|%y~Xoa1nmy; zSyQP3x;TKoVZB}oR$`%{A{jJqqm;gTCiRuSaVkvOPVdU9^V6;GsOU$538}pp^_GzP zM{Qo~ant+vcoD#ES@o(9g()rP#}}c3M|?0pIyt?e8mxV@PL?%F%}B{It!b;O^5dAD z=+~tfA#DWOH7ggc(?bF%af%c^BIWUWaGIYsJFczIYD_9v7;* z7b!?d5xZYLMQt$Txao4!xAA~$YGLTk+dB^}t+OE<_V`ldOpXTkbMu_TNu95hsncYm zNj7Xfj2%H1m-@)p9LzHVDeb)SLST`mP~Jya^s1*uUjij51g)CYfGg=pKc3L^MjE<9 zz&su1CQw{fZfJdDQ6mkQTqlBowi@Ft;-`Ik@4};^e5U&?mCm$?qh;?NJP47RDSzt? 
ztRcqew2YiS)_XU~x`E491Rxl&C*RmiIUFt}eh;9}SX-Pkg>b#B?YFpB&o4fQ{oi5% zV%v2bl(+C|0@(K@?BB+bUER5yP*d|h4Yp>XDLupQ?JeMhXK<^Q^bNE~IN_4dP(4mn zN0wt7QvvD^vonB(dYyY)PG%j$|EYdPj{@3RWVAJvo|zUMT)pC^Wa^Mv4m5ry-!%HPcvoK=Q+KbQg=&tgO=AfeW#{TJW=$JAQ@Wwl5B!W%&; zNeKl6M5Lv=OF9)rKuTItLOPTV3F$_VlI{ip6={%?mhKRcj=MbX_ul(G-e!km6^$v`wpn8R*UwE<3`aSaK>1lRZTHfAD zzF~f&Wu;VqO?)vz+{^g3B#$0itfXOCePk~X!p+N$%WbjMZzc{OZd^O}1j|P?pf-~M zlnul&v#0%SsOjFD2mXC3-=k37fhN^!!W4qNVwR+O$Zg6D3YK;RHsxhd7(!%@rlAq$ekCr)~|TM8qqL%jnHm!KzAi zXTLi?_&azt7F@A3>;M0r5j48!k@3#eO=D^fgB$<->&psP;mdH5maTE`MR?87`$nal zqGZd-9lqN5ASaFd+XM)9uDCv1%SwMo#9^?o1eku1m41Mi{*Uf2%`V_e6?Dc#K;?{3gECMH8ixRO-<)vAw}`pv6PF{1 z`1#A$^9%3`{30eI(R}taOFS6G&b}1Y$65WgxjYx+gRS@r*~l9m5k&N6WzWGF3FyBw zl`sm#R?{;wz8%H975yYp=@~4AF&y^>_^Aoyk%7U%VCH&-u>aKp_>mvC=+EEq0Vkjv z0jU+x&)0o+od@9yif$DrqY%lqZZ6n@Nfg=LbGn`0Z);_xUGaMIUA866Pz=W9b%~e| zvjKm9AmU!R1^iaIs58$ifOKAx0&J5mP|9HOT3A~A=vc8!2)3%09g1|Grm!U0H$RQ?ru^^2nwv zRR0=&!C!4YQ{D$wL)9W-DgmkQM>0<;B9(E;JW&z3(WChh9e@g8UN&D!S0X8^WWVkM z7DlKVbO1KQarM_)wcczi)gx_ftfhBNH*v4~sA{Fh7lWx{zwhl4CwLZa-GUGunLFZ; z-2+BXQTIPd;%=GIt-ng0{^h`^sZU6hm2>S)Oac2EF$f#fg|B9*#FcycA|W9 z`d9tlmYnzkbTw6VWFT!^__2A$uPh6!J-973+`P#Zz>F+e^5SK;UnycNnExlMg`v`S zcfZ&(e~s$8H|vXP5@fk2rQYSTa7l3)io3U=!?8nEUtH(zrqw^whnchH3TS2@$#g6& zk?y$&V>=A#irF5weg}aE?0f}91?WEV3$oB%I(06o@d`bgDfg2JT3GT8#NFAtjGHf8 z?FURwV!bbZ4Gs#t8Quj22H%co3_j`pp`%X2tIoLvGnf*yKNf34z=lFLs}46Fvbba% z&zj=!XT8W(addUfoS2CCN&fIZADc}=4xAGMLW086DF6k6MS5d{yb}EuNm*6(F4gi~ z63UaI!bcD>1}_s35LK9+qalq{E+xv!SNyKuueGK_FL-oJ?Wpx0$*rQ~?WFg5K))?=kDm4v_%IH=MuQzze zKe=kkhfAQu0wc7z1yg0R!259TF9l`#Q6M=EyX|2mG)^Vd`lXvb%yf1Hg&(wjW+-p~ zg{xO)+C7e~sUPS!HZlT=kp5I2ViVV}WX|N?dga?9wxg9%3%ODy>}UX_ zCPOIzzt`pu`JrX09m3aAMz`P^LVhtRExU9r@{UDm(bf9!0-WL_%@dVguYI_@CD!2!u?w{YN zzC%iXcK$Ati?n#!^H@nsT|!?qH>1>L=`_7RU6^|zHyDQP)_b;a(!H*47#Yz3Y+Svc zO^aPfWZ@r`4mrB8alk1JdZA}c{)sLAg<(pNbD&y53R)`o?w@KrOH8_r`Mnaw&eQ~WZPJoY;&87Kj$l-A_ zfpKW2ITRPym02Nsd5IKSPJmrSe1P2t0XkY!u$-pNRFlIMCW0nXC-UXKX+^-aiD=Ua8^1wMyE;4jP%j}^3?`82iU%W|s2yBAP=LMtwk2ZE+sC_%mj z@EkmIaDumNigB9!?ABf_)uAHh`G*iQy5vQDhHX)H$-DZRvNDVH&2MrjujY@{ z83@g~14e`nDW12u=7JyawZ$F8N~=WyzjJ9}?QqX&wVj@A{#Nw3RUbQ5ze(sa?ziG* zY2~LTP`(NiNn(vlRc5A#g_b{XJ}HS}D4-+U0#XHhrMbOYQ-iH={$Pa$O`J^*PEYc# zt-bPQPEnIrzbC|Z1s3M4T((^o4`{UymQLdKiusN0@%#RtoqGXNmwfoA|L+WEf8|H@ z`TYdxKY7b-F*@kDWZU+9>1mY35c|sJ)#lnZO~P*&MqV_yFbT3YS7b#?etfQV*b{E5 zoZbd_YekG}MQ+)+6_bhF_-}OY={p2>o;=a9wX~jFn!`fMe?A6X&edKrpmFeBY4+X4 z!@^y%uggL`2R9EOpRX)8)Hr?vXLb9|I!uH=*Bs$A0Q~LUNhgrD)3J~Lk`TovB$Hut@`IYDv%RI+lT`Pqt`s#s(W1$9^wyVUD=HLj zXRM&S3YVj0U_8W71xkLZ-i`NMSIO`-W|TM#pj8!Rk`qyOMGk zhReAo-yO2mYhPfp0*Mw^g~Q0O23;7Y%Q*|ikWj2sX@#rZ+1_N(YCsMh0E+7O&m*xh z5#B;w!;BIgwda|+1zQ_il*fBGt_C<(PykkLpyDazn_O@o=}G((?VlLj1i{@5%#2=D z{-Bt6SMcn~)5~MVWIXD7tfnumf&}g0f?v~lsu8JQZx?P5Zwp`eI~q5%usdrJj;e5M z<6YD4kHuORFl^|7MjnW3R9$M z>-ongpKIMr@?Kfl$j{770Qu|BiJaU55${88KGKe6lZxi+!O|1e7S<4J7A42c!HIva zucNr%TBK;WlL^vx1{#_)%3||fG@o?3_gRDH@1hJr&zzc?3irB88{g&=eNqHU_XaD} z^k;uBZ>}!Qg{(7&1_ygNu_@#5AiGfGV7CEZQQq?CGjirou>E5b8AG3Idk>j5hv`=k z1cTrM@GnQf>HabB;yuw*AwfW4+Lta&K|%m&Z}3usQqPRy&#){%iHv{bUzvv9l;i9e zPQei-NusX#R3dW+(B2i5yI7v&!I`AsENv2*9%whQ%DWob0P8h)Ji!P|@B)L3>w0S} z2q4G=Jf16B7QD}Pef;Wl(s|JLRR*kJx}wf3VAe=fN)vQ_aT?fj8gN74js^0o+$%Pq zUfbF_HLds%xBviofu^#u*?8L1}RZd1C9!4$THWiiRvmP_R6onmM;;Sd|0J`h{JqiA@&qjtpJgm=lBr)=i z08nl|)>JQ2;xQ;wgcs6;&^2fZ6|40WfG@VVcxG@0>}MlC#BB$%F1RfyqB|-0=&0dX zFU|Wp{MF7+KjM2S7_TmpNkQJIqobpndN<(2C1c~!uje{1MBO(%Un-D>J|fP}v{N5` zNR1!cz7U7|u*n?mcKCyp?c7RKTK)wOlZUYSb{gEvf27Vp=@J|i1Z~}E3fXy|nVcQ! 
z)oDu#DtyB#V;+@o)*zr1Dp&~*YzhTL362UNE}Ca3J$0k`U#PZA-{|>tS@Q1B-d;#p z7)6)n8Qm?c|1oA=34*pnPpsw2do$JPIq~Gt?ieHPbda2Lkn(*Jl8}&)Y2C3=OK0~j z6%BEGi%(L5XcqWu+W`)^DPC8<>{<1PQAhvI%nGHih#s|aak-a_SD5y1@-TbV5MALn zEzHzdYd1I!p3goUR5h4C05r0_zde7p#QXNY531Spa;g%^#o2;sCP)P8VQTOQs?Bz?B1u_3mi$?@eW#EDZqZc$lrnR{6r1>1}+ff=M z-446Ls@V~9UD|$sdAV_piOm)5h1Ys+&i+52OGS?-^%6J#eJ&jzGXSeXkGpdwW|V<~ z5D?6PQo7d9xz6>3^YLRU(PePe_9aYs=dXF+^l#mZ8hris-dx!qc|c2{4hy=+6W-oH zWhckE@!#jWx4&zJS?RA$Q73MWf4`X*ngr)~`#0bOmz}ZqxPVt(QN0VbcwK$HM&${- zLxuFJWJe|CKL2Q0d$-M{h1p+Ig+4wCjuMrz$tS*Ios)*_FUMnVM`phN_jCW>KaMX9ytx0n)VFV6 z;NMLifc8m4Q{gY=g{YrE!^=dcry=bU^4Bl@Cyd~ zl^}dAkRG|ZL(qOxt+loPgq|2R_+W@CMhwF=tMz}bgb&2PJ2P6OU$N3NbJ-Nug(jQz z1x9GHrJdICCItk12eOt4SN&LvHwi+Q;XlaUQ=agUD{Hwf#gaSY95{Lp$PE4bxxTz+ zX=~QaoOD()pEy+T`SUB@KvaaZk78?oJuxmSHZ{5C)p+KPB?V)9m0~V>-c48>H@9$-AB5wQDccO$RV_aN&Nq~^C8*?ZK6%D{CcFlTGjcySH18$6NO zF2+h8?(PlKg?GdhU0CDEZxhZ;E7zo%|E;t3SIE8|*mND?islHBB64YIIXrm!SYwsz zQ^egAFvkRvtzNQ_b$&E>KQMy)%p$*Y z&d%E=7an|38@64)bP`u^$oxN$^B-^Ff1*6~g*|gq$KbK~ED0Gqqs(CTxJJ0aaPdzMUo`pw;HpB&- z_LmPfz7NTEa(Z9I-eh;mKzUX0lN4O3M~QbJs0jk`va|DNcFHLAY%^cZx0q+W_S}k% zirwF-9Hi!-N*A8KNT+3>g|;})vZm>yaq}Cv@!g)g$8})bg^FAy{Yu0pqRM05KM1WWFe==v|_x)SjpSx z$EpjStr#*eGRVn2;C{UL$n;HdWQIWtZ&I>mAIg`+P)%QVS4=|>-2e9gfRS~&$%_E_ z@c41+iz!HJyP9gkf&Ez8akTaUb@~0 zb5(D9{E_OJYlTZeZRGBc)6?&3Pv}R6hXZ7!!RFNu%@NeI?BwLh@Ki!mCm5-7wN;1J zG`EiM^{;*(9)@diPHrb% zzP-yV`Y)*nNfO~FXRy0mQs}6wN2M9EU}NCsKF;?=@9t0Fbsa!99k=zo;OzY8Nqqi9 z2M=+u%2kvLf^`54RSCBvfWnn=m+7DbUiCehfYmRxi;}8mFCLSmC!~Cc!$m*^&{s@} zP}4K}+GpAGrwkwT!28otQ1|Imlh1!$?pRnjW_TNzD_Tw#-gz=PAq0OhNWt(=#O%lD zMt9;Ffu0(!+hHv>Qx6$f-s~R;?5)^14q3d)yH``?_h~V6o^yfY{@=fjpDka(q_+_L zLCcEwCwH0)fT)9In4LRAP#8Kfd={{^z8)SH+}+pPzF>Dh;7)iAr|eN%L(K5`yO)4h z5(cl%ZAG-V7Ty_ZJ|jOns|>?)y}rM4DN=@rzv&A+=t+UQWk4)gS`uCdtsiYWjo;Q) zm@zSWX=m3|RtEUV#%XGN=zJHq46+2-UA{vg6X~kL6UpD(&Gry z6MBp}M>L-1Xse1x8@sy)mU!ElOA{fDhX=&)CMG0&!EVdDj(s;aG6wPw4jij;RjZck z-a-_V+wm~~lWR}ta7uqVo4&F6*>D7ZbR`n(wJ>O@EJ1s$)Yjdmli(_8cp%@dpmzI~8?sI(C zPB=J3yGIxd7YM#5P?9Y7m&Hcnudv-ns_D>908a#zCO|R>Ym!**Y!;FMQKLg=$_|R5 z33+*czrL;))NWLUOL@;Q3Ub&aB_-kN8_B%?4L!RSnD2vF*-G$9NYX@|K7^0r)#XKY z8l#~6gl>ta#Mzru!l2b>^6wg|4Em8!n1ZK7(mfy!6)3^dFRysfMMEUa*k3M0|Kuv? 
[GIT binary patch data (base85-encoded, unrecoverable) elided]
zUz%*Xc0R1Ewk}ZzQgBaES?@Y6Is9xhq2u)s&&|BAIf*lJSXnKJNt5j9MQy_K**<{}O@+-L^%7g#g z@cd)}je~BR+6^;kXe044e}&(p0k{ZJ6yrA#I7WeGhd8uf4W7MafJDScS1Ijyn{!SA zn9->(ZLQG}TwLC-Vt2o9`92bk7joO*esGmyFc}w>CXy1Ha^V9b84|BTE!PtAQx zJlI!fZ~zy_*LQ9p4bE_ot~TB(;2}ZQr%_*>J02j3LRMrA*iXV+7BRyfDYGp1y|lKD z2J3N`!|wuRF`%cPu>A+;=)g)K#J8k=^sgLaG!eFsG zIWWF|+_`5+c!qzHt{Qonat!AIJ-_{7M1g(U4IBq|TpT=)Be6Mc?bY8pi79DEPMwC^s@t($vx-1j8LO*tJ6|ek|4s`D@Ss&1k4ST6&qMmT<}G4o{G& z3@#=2OM!oxs04)ZataEGDM|1(7)}=!709&HKDfhAH8tyZwp;WEQyH(u1*3Jm1Nk=* zW$Y;>bkEx8G*0$>w>*ve4^f#Jz(Tgfq+r^->8#%9>l2BK+a!a#<@0QmpNGw;(rWay z1iFXTV4Qv85hJ-N1bFS+^LJcas{VbvQnI-@lZFZMa&*rIA^4a+Ji>bLg32MpTfAyd zwV$a=`oNt)wr5RwTu?C6CMuf~(^%WT#zX;?xPU?T`>SpdRQ!}y-Po8Lanhs5xqN3vG zsbje(^0*G}Z5%A?7qZCP(5IHx_O<0T>)8d6=ox{7*%x(4`^mhJNg?>O0p6O9WST%o zibQ!(aCn?2LSuZB8y^O&qGe-6m(vaQL~hB)k9{|bS3vU+KRiAX7x#X8a;UImR90HI zJg2H>aQcHpW}vK;F>&x|OzW(Te&ZW)G1UW5-|u~CWDmILRgB=kQStwe3lj_NfCViY z6WI_Wfr}~x$ydV}Xm-21P7vwZA&%818F|9oin?j}!Yi@;QHfY`07jkbf74VDV*7xK z%6D@lX2(<}a*w2-EbnG&w&!Y&Bjl@lHcS95<%a*Qn89;CvOkMMV_o0C?>oO8K^{qh z&lW|%BrK776eDBG32Z^N3I&urIyiaE`Y(q&|I%ftvK&3BM@$RuO-%pL`6eFT5!q<6 zV7-C_h3(-yWf!+9%2iGkNEa2_66O)Bu0D17bwA$9);#JXS_w-}Tzp&x_a}LGh(+=d zCBHYEH!7=c^fZLhIFQ5vK7cbfKCN`nOs-;JU;CTwYmMP!rWHES{b{SI={K9GCe&lf z%m2s9s`OPx>q>3519XpF-yLC(5in_hmVvso-yb+4SrB~X4@LlzM%Fix{4#sQf54p* z`7$+HkX3=*`s&c_0`fl9hPi@>g4*W?7rdwDEqfv;ID4jgfAzMNmsQiYgrD4!Coreg zwrFoXbGMl}rxyy7pj%s8SzfkuWsH=`gYLAd6DIh0!k&}$B)L3p@M?fApsl>BSYJjD z`(l;q2umYUurl}eeswaRM^j*(Kti{4R2E!%F0LUYBmY5k>D-w_kID!s*AZyvK~Dz4 zxSms6E@XtC2k{TAs{@1V_gU14oVLldfLzxn|WT{pWf8;{BiZR}IB_&d%L6Q<%IXqYr6m!Iw?d zY15Csd`DjW1slx49gG_P_83ySH4+&z_B$zP229xz9}; zoLM?qz4i9bDHwpk%?|5&`PDg-f&Sl#BR$Py&2L8}!qoD4e6mMMjvT7&DId#uYx^=I zemm+`bYNfNwR`O7NDI3@TOClFJNaWLwoi!3z2@S|GJ1=Seu>0vzt+!l#*UPcRZ`W{ z5_6tjQtdV40{eVbaRqt#tY|hKtU)v3U}NZKP@a2iPyU#q5^5*--DzUvOl70%R@}u! zs9)wRy{p<+bmi4Nn=at-#dpE9vUIV080vSD`L=x?c|KP+XpnVT)DZEqt&}FRj6LdJ zjHL^SAHLC$++#yLWLdQSNycv@1I_Bn;BY9_(6+GcNECiXpi>x@aOmJ*-p{ILZ3)c*uMSt&E)v} z$wg$g@89<)b9T*F2j?s2W41h4v-SCxw9vp_g?fg;#Pr>dj4eT1&(Pf5?!1V3mwUD) zWsg?m&OL981tj_Ap@Sfn+iOVbfB@gj3slAZ(zVlGO6{d%@-(&3Bwlf3p->YF`hpS3q zAP)8VRj_n%%Bk7YDN{#d+{D*yY&>6UjT37%(ad1FdZ&RbOoT0B`YaUMq= ztw{ecaxo!TV1HNzyZ%Jtps&vN3@7a5)hgmO)?=MqM90hGae&HLLY&3s=t3S@P^Z4q0MySmE2Y%<< zj-uonZOmg!heA@#!r?>5oHX_`KDY1O&xSZ1?Ci@M7-etUVYB*jldC|`&F7(>!uN433(m3#P=G&Y^Q zb8EumtD=0xHTm3aBLl9vGaDY_U|^(+P(-t3hHG9kaAI$~qnXHV|4rMI0KZU1iuw+k z|7d;XAI-O+;b>nTxsSy^A==f>Dw5&H^ZRnea&~W%lB{8Br!2}94%9-XuH~nj^yXq` zexVQ=?;if?>K;%eZg=PZud0sa7uX*A(aE=7>CLw^K7gf%>PKZ`)zF6I!iMCR3NK~! 
zCAemYUtcGBhbhJW)wK!F&4(Yz!-V+RJpBJ6{-E(IEM$XPs`0{XVRQ z0zQH>Vksxq-}j@T{hI2+&T*-Mm9;4@EGB|1dw{q4;|D$0G7j>&yym)pN1_0dF?=6R z76(cpIsm}*^>Jo_v7MU&bn^Pq?V)H|hz!lhD}wd#!1##1js^}QLQ%`_`Y2W6 zldCJYs;ZWgyR))Vzc)S!m`}^xx{{DBfODwJRlMa4t=YSRO0(ea2o91lWC@+}n`eVl ztckPv&d+tw9yu*-{Nxaz~4=iQ{HG;Nx`bWO@r=(yuv?C zYo8}bh@(Cmo7t7yV%cLSRuf)%Wio(z#j-Dno0$Ci^v-xF2dJ#TkDX#62U63DPX26GlGx{rZw=FILgKR>HzZ0qcE zV!CmXvpZrlN@nPb?NSbglt zseZoMbLh4TiyI^(RRJAB=T2Qy+^>B(=FFa)`q(H;6r`BOe^$GfU|&2_;kAv_ew;i9 z?IS28q6x9|uSnvsXv#7iewU)31hoZYc!D)ebU>O_ei5Tv`ipO0`(z-C2u9hRt^@2{ zCtCA`0qc+@SZBt}=a~}!oK|k*P&9hlK3!_$<<(eKr2#A4kK!NpUL__MDrmY!X83%q zNq6-&=~_Mb4G}$hzbAf{>c6S*hx!HrDTwI#RKyQeou9WBd_wi4b*+lWw*M@#OU}r0 zBq}0rBcjybC8P_u-TUH>(n0r+a-;O{sE2@00&Bj4f&$1C_oSSt^b@qKl;tXtE5 z8kz>c5`tt~@YS}l%)mr$F81lSm>)sn8h530a>_I6?D=Ar9+_w|48-!BBZ&pc7+hJN zF{VJ5eT9eS4=WQmXO4OP@*)xUr!vbyG*8rfbuq9rEPO;k!8P7omjnL7t)}mU-{Wth zLIH5)ic~ggG1q=#o${s$co%IcpB>ckX?%4)+y?VBasqOqly`wIB_7<4i;t&^BE5jw zA8@MrdI7qJpYA2ZvFZw72KussyV12r5^7y==ZCo~dzF-cWV&@MuJ$eg&=zbg91Q=V z3J5&+qI|9PW^=p(?^9GwWjUS?Ww}SY81v1_pEZX;doQwBY!D8skJ_)KG^8{l0IMLP zPZ-PGvYYV{a2={MRAz}L5!?ToO>ewKlc@MY$u_mFPUnd=^Ho>_v)O9Sj(w|`h&P$O z_6&G8r6qoxkX1K59{74rp8QNNp?LBEGjnWwEHfJ&--zi0KXoPBsa@Wb@82121howf zsdYSC{cdISH!v#Bhq#)qG@ zc4OA5zT+t5t6A303TH=Z*6l^*7pQZJImCZ{rSOMARMnVs{g?Sy=Hs;}r7jmuG7JbDl zq4EU@WoHD030(Ihj_#;poHoz|dtiyL?j>dpBG0ex*kcC)N3!C1pCXm%i^cs;Xc8D^ z)FJ8#-j?uKQMGT=sHNMf%1iyvGN!k-Rz-~08Ol^!FE&Ld-H!vqvwaL7-@yxM<-oab za+xUZb+#Gs&pKC5p`a8_O*!BzACL&*cvjFrk;TKpLCs&~S2P5-&L*ai^xqRgK#dU_ zU|{1t2l!p}nI$zn7mwez55`SMTyfW4dYhlv#{84?{%S~&Eb`@zAUE0rMp|52yrTT# zYyKU}w7_zawunzhH!(<7 zinq2ZE5Zs4EX4gzWH$5839R`|Q|<3Hr4?TM{dE&^+qnKt`fM+6G&*o%FMLU}73XN* ziKSXzS?NvU&f{;y5XFUyeYwdE)d)CGcI(TtrXapw;x+kcuO5Mmn>BxTe9CWhcqIbt zf$1T8KKE#SIc8xk0x0T4&&y+~E4%Am$oK__GQl@Bk4q@Bp zaT4?`vf7XN05=kZ_-8S*R3E_^3Y^pq6D9tKkO$Jx(Lr*+Ln>ilvyDVdQl`s&#oxx> ztxyQt71!yD-exTOW2(WT=T@~fcJ?uDN>xXvI6HfR^Rpk3y*D|~RZt&*Ae@nZJ*2%q zSq zd_$|*oiD)h;w8jQp)QxJD|p6+N9|tPLE?0LT%1m`_no_H1aOuWqe_(xd?dYuZ4V)*{;$7728IW$yQlpXX!41R92jA!Yy$_NR%@X%1WIN+LkXFJft`2{>$31g%9wH_R#a5ju|;{W;1bb$5r#{D8d_Eie$%AyM?}xb85US~{J>pX zgu+NiLn3cD1c&1DPCo%d7^8nA`I>z39UsvDtM5(hCX;nJx@c8^03)Mqe*5 zQ8`}x<{LF%xM-jCcVOnJ$c2YhypPQ$k8NXNVk02JPZ6-&@A`BzCg3Th60X-XQlV6c z8;dX=husQ)u|d>5>Tfhza65t|21f^n1~&tOV_&1#?n=LpjkmJ4*49$1Dz70E_Pjpx z2qeJIUzm&xO=v-o09_0r>#@)5r0zG{+h$yyKYui2WrZnZ-`XRtv+H%DipmfvPG%1u+W%G7u*X)C_WgeNIkqu zK=9Yqwdx)Lfr@p8F**WwUY{abA$xcQ$I6OPn;tVs*}4Ln`hj607YtL7jtNZ|VNDt~ zITe)`cx*)}sl9K;CQwy*$Ir`ITvB46HrPJS z{>8gUIYs3sO5KS9ecj(CA96Fx8a;JyUlbqokB)rW z)zfui+a)Dxurc}c1SGk?`L7BJi*b-pc}5ouXcks2kKpQOsW5=9OG&eBqARPRK}JI2 z+aQXPDW{?VfS0b$uUeYY5DxmnKjmhS1Rp=|bc3;ynu4CR`KU_tYwyqndo@*ER(j_7 zb+hO|4Q3`Lf_r}rQpx`=EQm?{fc6VJ2*kCYvP*(I0kD!)lCCdFUH$uE8&F(~6Dmcvyh%qV18j7AJ1a-WikUh$ z+AFGIa;08Yf_P16WmjJo!rKiYaPW1Pf`a9roy@^yrQTk`7j$w)_y56K;j!qkoi^5a zTsn&8=F;x`7Syl((**gnJ}i#__$ENs_hSOvLli;6drJ#T2Zu*xesy{Nm)d7(^OA35 zOuC;|N=d1tuC6X16<5u*ps+Ol{d?GoQ=R7g9~YojQ(aS4Q`OC_!jx0@Mv$}(pV#@g z>Tmv%4bCffAR2b<6ti-?(1?$Zk*kOABe|4VP*?yNbS_Sx;U8*gsli&+w0vB;jn?E| zR~;wK7>iK^wIj9Z90v}F0ZXGklq$<^tyvy%a&lGG@Su~G0?9#&+SafoSti@+WqxC$ zAjBJqlG~Cpce^oFf6l)i-QJ%h=(;czv>Z|mF|3-JpIVCWvH6+VScg@#Ne5=`>$XEb zonKG@&pqU>gD?JA*T;kg!D^I#pD_D+4eO6wBt4-Vu3IE0>0((3tp7f)vq|12fWE zTDe(Sg%yQ*@K9fs_IA|E=TY^t0emj z1EYiQ<724qhQif^SsnICbm8Xa#=(<(DSCRaK&g(e-@fMckd5lUdPX#V7&nc&fBWIu zZU6ZSLEX)aK7EwTpvpuwI>CvtIn$u8{%mQ(`LVpbG0bDod4Pu)j0$4N{@}E%+jiK1kv}-^05e&QL;e zm$%e(H6ukXz!Z&q{qzpjabS*yFUu*`(_5U_w8tk!j)pYg!#*c8e4Io=ibpZ-2dA^i z+}$iweB{CL3A67zDvIcI0Xr8%Ut@dy!jEL@bP?pAN^`@wOcn|5eabb@P;zprpb5s) 
zQqy8$E_#K3m?pFKc-m&@RoVSvI$FBCteng(aVhG~27t>jMZHwj)h+CQ*8&UTS2(}- zijAfwK|d9`xw)B>*aEb79Rx4a+}&N-Sj&i4IUg{CGA6@vH;{8Dl(8e4D5MqlfEe<{ zA=6_zNOpG#pJ!xLKb3sI=}+v^WS^i6Fy*+C^i_3vU+gy9?Yr+sJ=Y2S*1Ps?PxqgUjL2=`~egs^Rdo8%qZ*T7yJyKRaJehBlaC^>?RJ)0J zbtvGnf# zT$)VDXJu>UdogtX8d`dS(Z2~Tz^i62&o)nyOxW>b!qK2lmqHl$M$2EMB#~Tj+JAVC z3!j8d;XSO%-?oGnu5La3LNr9w6?P%pIs?8-D@zG#6WpZmALmOs6C-uD? zoT*-T6e+OUfH4-Al#78|_^7jz2#3yj>4PiUq0AG#yXGw3|CFB+zmpVJvYk$ zoNo`5NpIHtQiM3!TW+`c1J7eTT1LWZnIGldU7^Q%==7d03X}6XuG}R^N|Hk+<2u;jr9`--hl(E%FU9gTGdn9zQ%H$(DF`Q&Mu3N%)~k zqKVJh%vXZIz@ef&Dl{r9i_)Zn^Om+%qsLY@dfyl8KJ)#aJ2p`+Cn)Q1!o) z&q>kl0~1&fj~17f8h&PYDCB<^&7tMqgX1WkSAPFd$2>^<{l0tQ?te4(T=TnoV%Ty9 zcmI?6fE-bZ^mX*yQNy~``tCVyFj^T+BPj*~K|}z9$e#KB5==J?2_73cT>lci!|~OH zp!v=p5wFXxpQ&daBGd1KoGGU3O@5b!c7hmGyrNMyP6lC$sNJ`tkrsKw7hT`6Q&Oy0!&60s23+ zb~=@i^0HvfudOx8k)$Ys=Lb1IB&o`oD!`9PAxR0mr>3EAVs9Or_>p9P{|1DwF$^!e z{H?gNySwo*Y525J2z-V$v^2B-u7Xwvo_J2j|Js}St*j4pYhHKLWGs4A_y^0?b#(jfvf+o(?${4;@kB%k)3>lz>vTj6>u$erUpQQ| zF32k8xj2&USf{38Js=alBqx-06^hQQvB$kyZyxTTFI+s6Q|kY_^NR3AuBG92i;TG@6wK6$_i8}DGcrg>mjoI( zu#verSCuOT!NI4~1l-&n+-cqVqi%>6FT*`Bys*@HO3TB;msPa}R1-EklXJl-9RmX& z4-1T2jPmj{k{4|4e7w9w2_HJ;o*X6qC-)Ba<6&J)H#`JqJ<5CqSMKYADrs`0u6;zA z`ce!CNyoqR;_eZ6#?CIcbc&bA2E5u~a_I^z=kqB6N2g;tp7WFLTCm2OuCcP!Ic^`M z2?keUyJ-|VHPee8hNt(zri;KFLY7tos=78YuZ#NaxP2VUZ@anlS;ywajguSBFLKSz zN)20Z5uosbd7DTG-iO;nEiM~;PBWJO*ly0;u@L7HNhX$q%2a)9*f%)M$3`XpwoK8= z>IMP~kzy1Ie62s_T4dQMAdep0w$m4L-ZK94YfszLxtWJ#>XFgYqG&+D-X|ngG>kHTuLOT<;CC3nccdDR(3Ee+8S17zK zSFNCz)lc`Wwu0Kw%+_wHk1T-A-`l=xXvP&4iT_rxbZMymRA2F)8XztrB3U&7;1DXO zb5U^#m1br-_VuhN5{BCk-Z6GI#m!4CRnQhvBG!S(9PO#DxP8^BRfWJA`GXUlxdxH{MIezrpu`G!AYa`Dy8%Y^Vg1}-}C88Qkwq$@{c<-3vsoYfB6>C z{IeTUdFtnx@1ffaTnwK3R4lcpFp*rXGGG1&a7wsLHsHIGn$joQV+%yv>uo71zDLkn zfRWY>(M^2=gV?xaG4JK)v-ib)S)aL6z@XmXx2EPk6WDl*4ECxGZnISxs@cL&_upQB zqh!SVHJ;wP&%*3QxPLVNf-10 z7oQ2AKZ)V7twv#c*wJrATn~pp7Ex<^YHroy*}6!628;L$d!rLSQ;+i^j-;M}qdPFj zDulJzV6*{0kVD`)>r*m*`OmIlqsPv_2jB!npt%>K5g7Hk3Ak9G^q{UV4L?;Kvv z0~qq`*|QMb`)1ar2CACPHobH?rE{xu`K5)}2=v7WhYj#6UmXhw0Gu@SAr(TxjOuNu z0=_n_Q92Cwr~75eKJI^^l#T=IcHt@mAR$Dj3-NCq&AhnaYdacprHifiJjsHQ?+5L} z_;^i7=Ui|^^H;V2oc8C4>-P?`g}-V0g5nO~x>~17@)EG58gehBq%Mji# z%gs~qER~KWWpV%|>(h6yCaa*u$hcqzFNsFSHqR3}8(5;$Zi$|Is;fE{bcFuv&&BUt zd#z%iURHE*<(HgaUCp{|h4uPTozwL1@d5wth0D0}g_)e3Dx3bVrleQ;JC!y(Ti?V# zXlo>skf!a3u~4z~nUx1__5a}r7F%ZWHO_k(yX|#6AO?Rj{>Sf_87AQ~i@JqV!G@xkezrLs5aEvHX<6VDj0j)J-yizlnEDtmqdpdIvN zZ2Jl@jm`ksr=GgUNy@buC5?C)L7;`tywT!y>*D%4$*`Z%hAGqhZkchS|u|R-NASNjd9ao zUN|!`SzpflwuG%islDZQjtB`VZS65?b~(&OU$Bq@>U_p$9d$_qAmLMUfW<_UlznkVN7S)#J=;+$<|ty z=_ZF%eq(t6{p+~N*qG@s(=4Mw$(POS-N`=;Ul4w49j7e&z;3$X$B>+K#YqM&tSk5R3-FRItGiu{p*}KZ3TM>OcWE8y$pf>zmWm%`R$1zho zh;M8cr!Qz+HAB$c8yLV6nwvgWd;G!tUAa&vemKnwIqt-!M^e@!&yYuEH0(IRlPo%CF~Z+hJM5FRv`UOe=!3A0{q=%6?QgDE+!Sx;lS# zDXOV0;y*)-EVJE%0*p$TAH5IO%p`-wz{|yA5At-SHg93#A?Htwm+S>WJI)J2y zzlpTic;SA`s*sbB`PJH)l-$NFM4O*m4Wo^(Ki)hSf3D1+Cw8-XKcX2}q!Q2DU%q^a zN4hXWUi=?ZaCB^>iJ3mc{esmez}y}|0p?~Vz#K;N5dsYv_$a0QCFF`**f0v!XMWku z6d?T)Mjfm@0+gySiGX_d;ubi8-HsRneq?Ja1KN2l*3rAlOUm2$h?H)5btXNl5cVYx z!~*B6Jb{R@wT&H4ri!|fE|@(4W#9QPa8I-E7a`QFLV+(cQ9ScY(PG+x2RuBiKoVpD zHy8+gG&DMYwftB97Nlg}ox=lzwq=!z`5yKd!E*9aGPiEuF0I@oC%!5!Z1^$KnM8&0 zke&VC*I4kq@1T^XXHD$XOCTi&FPr~_hsekiAcGFj_J+a`Z4uKSCjZ`#_O?fNWoX6R zG|5eWxbO)*5_oug+pl|QSYdtpFP-|I#;>}!*Xf?Mh=itntOK|{?X;9#rx@T}m{^p38 zm~`{CW4TGqYR1Qpn$2BdN=elTRus?n ztmUYiRaXsjP)1)XG_K~?&J|P>yK?)TU+WvH!DjxZd(`rF)T{* ztt%;M_=V!9EM&}4rQ>f-;~{`>F^^j0<$P0prD$Pfwzpevd(c!!Y5v2` z?oe9I5?o}!RUc9yo@rg|G~DaRi;YVxecx8=R|?@x1srmeR5}7O&mBvb}&7M 
z!Gp4*Dwt=Cm)LmdGGmjHu72*gTei=)-8^gi@~>F-Bh|vmFNymv3thO4bIpP<8jFwDRK2Rs%&8LBcu}00d3Z1`?P701_GgJEgG`1PJ9x6=`!eLs z{Yw{h2W*2prdM>?ax3SSm~Oq3U*3l|$D>b$&`B((?GWRqTGaz{cXqjc>uPDaw$C)9 zO=}--od_n(^#3k1SiXuIdI(&L-6$ai5fhV~^+N%R*;;DIyw$n4q6MMEs@~^Mi?t2{Q`8u*0r!|_}vX6vKhn=qs z-F}6+_)#KvJ4*<|b6U|)(UXUNO?9b$la2f(i6L1x`Qt~LY*{*Jo-X4kr#(KGD&NS} zqNhmR=nKzEL)UU|%*IGqPCrg0Q;Sw%?S2$FblmN{Q;8bL@qb(Zj!MJieNTCLq)E)h z)2|o7Ys2l`K8>+8ZJAk^D0|WN4$LBmF2b24!-a@kxN2zd5Gh9*L`(kIMTnlmLiO$~ z$ zarSGE3FMoorb;)BJ(GcGY38c;&sKr}Dl-Pwd}@OKo?h|75D)Q$*u8(|?A10Ro>DZ4 zyTTNX*I8Au|3vX&r(b@&ncDZjLHv#^@Lfnh4{Fiir-1iUgz$1^1|8LOJ@LVxQNVV; zC=FhqeMG2fnIGO~lu&*C;Lle1I!Pb(Vhbg}m(E93U}m^5w;9&tvh2p6aYt;{?LVi( ziqNdKA*7My$5r%F7VQ2o;rWRzBi$$SqPu@B{g&hPQiFB^^n!o@ARSWD>6Wh-o?{>| z)kpK>O!u9)ssczUBg^2)@9{e4Gmif2tdcK!K;h}=_7$a0Ie#rDSJ7QfDdIFbIwS1( z=DI(u9TYt|tJ-Dfz;_z^rhWqfy_#YtC_pi{{s9yqV*;!VwaG7ZAV)dGDY+o$n@)q{ z)%-cHg*Kn$edQxc{vUTbw$5Zjl8?Y>IF8?Vsy4>U#ojD!U=_5w;B|l3cXP_+)ARHq z&H9b$?i6iJUXaOXpNAZ&rx0Yx+`UccXbD39h8f%WBR4oy7Jd{ghi)ecmZ-@}@Bz!g z{56N+d(hUlVlUb9Wt8NdwXE(#7jQ&>I>vkmP-~Q9WtKp6gHQ`|o(|NnL(mf+H!jkvQ}tR!>o1?af>B5M zc_o{_zhdFk&UvKno44=N3Npv1roMcM$3yP=*5gUc58Xz3PjuLr*iPNrhC#@DY@q`j zUOvYKqIBit)MM&nf@j$eLGgm`vU>t%G{_&=`6=fElSLXJ&g#a$MJ6exru6Ki6t$75 zv8sv+Y7=yRFqj1XVaXmA(myr?%s(NQE37#2y;^(WgVuS_Ap?u$^yG-T?Irael$3&c zZFx0;jn?a8htao+i?dWBL(RVk9<}sOQ%|!p{Kpia_m;$dInT7cX05Q;-oc?VN6ODr zZ(v&JUZaRGM@gG!e*V_6gKlWwyw&{BaqpB(pHnh+_^mlTfBoy!`Mtis`=_8Mrwf;I zxtg%uIzi#`w_Q9l&v>e+saa{?Ow8Dnw3$FT)vz$Xctvx5=>KRL<&U=9%+WMPtW1=2 zHoh*ujlPxsESN1yt63o6`ZXRrdCc-l)>22;Raf;b4sX7)T(lvL=x zNFk!iDQN+p@4a}_LjNGJ^)DNbD&CPcSeK`C{D*XOxDE_Y$vlXF&ol>11)+}U$Pv}( z5cT_%w7zZKLUpNmW{ZnxIlDlF@NYbg5q2t~mfyLPNK^S&%@s%e*rxd%UYb7K@1Iu; z2Ajjp*(WX1-*b1q{aTpTYnAW=QRpdvjJ8*SU5wSaw8@b?F}HSaP>8Y?6>9v(K{|Nb z<@;Pdk03(y(+v42|9%+Vv$TjEMKp2*p$||>@oKD%F8$UMGpmi^*@-tF$?%fQ_Hd5J zd!lm3<}+Pdpk>kEXRIq{;kRu+5UZ`8OuY8xy~%ZiquU89N{U;FGl*jy9pSa!?jRHS z@wO~Kx)i>O3P}(Z8xu(SOhak9`b9gbWWPAPtiA1V^nAgI%WI0ps9JM}09mT;q5gv@ zXXb~v>0(M*E>52tt`n0IfYp-4Om(Au^Abd3g2PEbs~$vgjdMn{g@V1_^R`>53N3S1 zk975UAM=!O57(lfp@};75yXFd2A*S2--ds!=@$#k^dVK2AVpw_gm;QE5J4D6=SDeR0#s}ba%n7-GdRUBh!fva@D=-^xz_{%17bRI)9wcQs zZusC=*3x^~3g4ni3?CWFes--<&-UUE2;9l>F}kr|&L}j}_2wgxKW~32BRhLm2BtZn zx3xWyql7F!gKFWZD56PS;SPy3H6w=y%06=Gscr3T^j+-zKN`A&10PnJX=y6G&JxY+ z@MC9%j=BCx;52-;_9_hw}}kVe(47()pAI0*crk7S*LGDTWVZtKowc zD{Fsft3JbVOJZ8XBt+-lCaGki8Lo&t*&KGw0*}*+$dAUzSo&z4E?& z%ShQtWpg)!ipjBxFCifwB2`c>&k)cc@L3tsB&y5H%?e7rD1ROA#^ZCQj zlvP!q9l@k?)LHS*E&?>(&qRt2n?iN4aF3{xv!>vvz2pZbr`I+l|12y$q8fqpE^gtgg*gptpnvye1 z(|B=Mjt<|`bjQcVL-mZ!?n9aer#3cgr-sXLlyAQ?s_hHwgqIZqR&B>k15l*8yE$*o zR45X&obG$K{%_9mg(fihzm#BUZ(1hrpNODP&Yo$qrpc?HZ;p(VBcjV2(SfEpls737pVns6KZX5x7TEh-q#;+F36aVmL~@pfgQ~et zO?p`=kiagpSsVTFf%isfB9qCl+Ve4x>53;TSaX-RRg7);kPb3WeOrY;Q52LUt*SPr z!I8N38JiJO^fxH3EUubwx_fg;oZ4hVc=KSi{OBtbb2-rooPvrrWkeK}5Q$xSd_R@t zbgy=Oh>8V4Zr@4IOu~Nt8!vZm1dBec@6HLn=ofl8{yF87a&t>4+uQEj-33|0IOhtV zmlQQM7#yl7{R=Yy^Ae*HpmQh6K`taWx1I0Jv*dhiA#BB5qOv~&dC|DTUd<1|<(ZW@ zkkppHQk#XRL}YOF@xg-3{RlAOx(7KD<^sowwT`ctqL{euvQmU>ute@ZmC(Q}SBNOM zZ;HIM4~KNIukmaG4Jj!E2;f(UXx6S*h8TbIa$(!rsFz9oRzJGBrrN{CvVg9ho#j_@! 
z4EYyngiYDqh*r$~x;;z=O!dW?%#F;%DcQKnCB@xg1Eo*`Z9@mfZN@P$wgGpFqj%jY zLy8HzjsSh=nQKok4;r}A?;FZ%HyW=`pL05Fwr|nK% z!7^LIf?ej0fh{Ji(6EHiJG*c+2b7aTpc5EIV9Z(PP;if^N^q2^F@^u<12bO4JU?FM z!5pMlF`Lw@FcKYbA$)c;wO>RT%uGytLsiMhjwq+mz_VqKj>r9K!x*y1&^>ihe*p2d zeLkH8C*S>i-m+9zVOXd7g>bUjW1nX?5&mFb)S{}Xkb5@5<1WwuyWs1=7(GIH2;R8R zT#-o&tumGIrHqwD+ zYT!gzc220KY|!~QbfBCdCX6U1j81@3;}=*18OkSDE&o&Dv3(V^@d681TkKd)1Jtgd zi~apZ<8)FUeb11J(U55j1+6q35vo3v%b&)N(J5mb=DLE<@u|``qlw?UIs1TaOVF9( zyoe{LRX>F4(M4Z@0J^oT|12?&=DXqVKWdMufaZVFl?I=;U3qy}GOI5|mOSP4fyOfv z=e9ADLq6NGa$sZC+VrI+%W6KXGnA6XUUpC5-3x8~IrW&MNiF=xAFFD^KHcX>C)=WL zJd1qe*K%#{H5#^^Bug^+r2XY`Fs!AT4x>UVc?qiq|J(h?{J6-;#W87QzaiP?rKZ;S zUCPFJV*|0q6OZ3uH{AvHX7P1MtrtR8tUFx;?U-*9X^O4sJ zNJ{!8%zL&($IEJHKUwwR6NQN>Mm0jMOm?LNLLh`1cKB~GMedgdy?J&CUm!CR5*Sk4 z*!7e9uZ{e>RKr!6B>bBu*r)qK{C{!Z^ouz4sPAxh`_q9*Pv3ee0MSv#z46!iEMXbu z0UtK8jOf|!pKkat$D0KUP4ecJsHZ=a@!03V*aTMqqVYeSV|Z+ok;MGdRO@rVjdOr7k7A5q1{_03q(rmhzm%Z`Lug^K*%{L7iI+D&5X~OMi zNuG@F&hL z?GK}dlK4dq=|jBD!NK9k5zmLVOwkf_gtz>~^*wHR{HdpjLPe7MB#A{&U&Y_G7CAO1iS{yWvo>U>SVk8zK+5q^z!oi+3~ zF3Y3AGIthp6mJ{!`melpoW-FmD$b3QOCMN$Y%7nwP0D3DkrCRxF_x{?M>!p#G^pF) zY_q=QozJ_)@k-@E5IN>GdV}e=h>GZ?B5XsdiPFvfBy(nWZO@GF*O>t_LAYW22{^D{ z+A+s~Jhi>`VYXpIm>WK1TgmZq@+cT^UT@xd!?7|aB*tG|(s)N4RR`cYF+Ne!M_J+? zY^jV<&r%<)%$Dx%4Gik{#c{5#cb_gXFf!RM>70fF>}jgT!p6uLF?|ANiN59C6W67t z-Nd_zAFRKZ7H>t4{%H&buP;D5{+$N1b@_RDTR}rD zUA0s{jFfBSu9rH)HA`e3ro^2no|sIPI*0g`_XG$sW@Cv4hY#QJ86k3m{ASl`q&iK* z&Jq|M-Lqn}QPEn;sR~BPNBJ|gj#u(Ltw!O|35_t;jdhE=|0O%Ah*ES33a|FEyh0yc zVQa?o*(7^&>pqI{3me9WF5CA4NYmkz0HV-bP_Ij{|8yRolCRo*Xf$$oIWdlfKQSw{ z(0>2O?SV4=69!RD>HVWiNs!jCWYzh+oo$U4r6wYdO0n`BE(>aFdjR)3o?W+f8 zro3Anz1B8%sGcD4PgMY8vzOgnNB0B+ekz6CFnea5r_Qp3eZ#-gIB^xP?DQTYIz6j< z1KfF$rqUC-1G6AoeSi)ZVnFJEhdgNWh3J8&&}tA!g9zSGU5WttcS^7bhZm%V{!d@vfoo)4f_CR_r`9GjDynX*( z24W{(e?jkcwSd?))BsI!<*AvzztT@%!KC8A{9{JQiR;x+)eXeTygSzS=dN5*wSt2B z!s^yX!g^42(d}QH7(;pwP59@SqUk60Yu_$NmWO%RqcS3GA)#u$*9~yGt0S?$8qc_p z6zNye5dgIm*RuHobX!x?=;lHb3LnNcM7cP+)R4qPHgvcLPhk#3`Fv^bTpAc3Xo!$A z+{Nq!bFiattgH(H-y@@=&^)8F9wvC`_y#oDzXfRiv4(nvzeqX0TVH}Rs6nr zkGpY<*R|kwbK{TWN5nZf3{*46BylngF59s0Z4j?)uP$-v>)Xl-w1aPlJoO)uFg0j< zL6$&G;OctoOQ(LZ6x|gA!`88gaCkV%i>+-D*E%;sixv!v+6g>cs}{$Mv2jMhBnbK6E+Gq;8I0+0Hu$kER-a-LJ#ZG7qqQeul;XSu^sb$hT(|(k#)g9NtiTiPK=- z_!t_hTYrUrLS(&kVDrl3i-p$}RL;v2^uB6J`i@RjtgkD4hmN9jRg})S(bzn;0Q~n7 zJ!!hPVI!9Th+UwLZbvl(znQv=)>jIwz?Z58lLxCuxBZj9*7n`MRuxTEn3zqEk533q zdU9Ln{YiRzv98B)(@7B=jCm$&k0&YqT7X9URkA?Sp)qAmBMYM}4$>0cqGpvRqY_j@T468loh5uc@Fl^?A>oGX&{j`( zjx!1p^%eSg2V;gPDZ4T2ang%rt|FgfEJ{|lng>}7JdLT}tE$VgKUg_57w*+gQ5#Ji zUNl8H@YbxIzp~ETA`vW6EVmuAM)5qnVs`$z$$K;!88L2{toHtc_>@ttVl<15Elp^< zfa3ujN@ytMqqK;91AQYPr4Lp*03Yl4$X38*t_#CSQ^Z#;JD5tyY^L&uIq6%t_Lg%4NizFd;c_o<4anr->|DI!qw!1i02?xptBynl# z%2%cqf9Tn31!bj>i*2Lxyquo(MAeH&9~@ne@_8p-$A@1!ceD5W=;h&No&PfsA)idI z@=6Vi&gz^J_Sd^+-L;=cKJ_w5DKdO#;N@P79}*N6=jSVl>q3mXJnOvaRQR z1R>)6tB;8y1_W5d?VS&Phm&fsL(ljnDSV{=cjV<-aCYr_N9x{I%?(r(+&^iV`LAAS zFqBuQUsSe#DkWhPai&PPT*F0D64d>gS2kCp!w>f=gEiT88(SCmZsLDl86A<=l#Y<0 z85tSDIB_;MH6|k^1Ij6NF%tkb(9yNa&-;ePeaq~Wlap0TQECq*orzC@AQ8$VOyeAGoWS ztn;5wti5xs6a46nS~ycxYB3)z8T!siutA-Ljg67HC`Yi$5Z*v>}Ejgqing)WE8pi>4{;H0pB)&0+1kJYBHR1 zrRaS&TYXI6L>e1^_uhD8Gw_n+@r}J1?Ds<#*RCn$yf)lTN!eh1!O*P8U<*hZ> zgBH@rIZJqk-6TEhL;39~9Sz*&c21}01@-%TC+{x1o`$>m;gbTCS*r6sojf4de_(Af z5C%|>^j4^Cl;<-R9v_5J{8{I=9F1I#(-5dMi{-O>HoKGbP%b&fj^c0&X5A(I4d25o zZnp(L=6Vc+`!=`-M@ELj>@=h!+|E{XUJ)}h^jq$Dy2Fdst^YtS)E;t=+jeA7H|^%3 z!-tYax9!w#i5^!CYd}Tkn*RxGc3^NrVZ)G>^Os4y)Khu2%qEJdoRL-Por|Yq|6x03iL9G2!woeqzR6nQj{b^cpx~~N8hM}cl!q+dp zQ>Q$Z>IFF+cLlgiK-r+^eGZ}6#CIymgxf+XEA%=)K1HMP#re#@!t|VQ1+n_uzcoOc 
z2Cu8g(#Y|gO-;4DAb2EeRuwOhj6={QB|7W*dsUuz0ojVSJcC9V3K457{ZHv`F=fkf ziT>KJ8yvTMy2mC1BTC2DF3%1i#X}6@af~&bj5VrVTTiqnc>sj2NG8%G&o4+AO{#T_ zf|%wc;SU3KanN4T{A`8-J8<%s5cHJp=WM)OX~$WI%#yfN5KA3IY`*>dkBEov2zy<2 zln>u4ufeIQj|;m8w&i05T7*LH#fO{Rq27tpVCVaa=gezA+;0%Le29llDwtvkLPi-y z%k{Q$RIjF(h%FP!yL`6kE zBcr?__1+^ICXOaZp4*(Qc7haYZLNx)h??fpU3*$NU_yoBSN^iktuqI9Y0l~S=J){zKV|W*yHvh*37~zG3 z+PSJaisLje1%9q_2>NGHN}or^lMa@%Jt(k z^Wu4T$Vv5mlhQ&;v(odkBEn0&;O|j+)wLDa35A?ZY4}l61xuqVOeTE}3Dtv!oN=Nz zJ#V_FC#6}0c{LWM;USNA$Z?_$ym2nd@5WP36o!!5Kg<0duh~QW==p^BCFu#dvKJ=O z#^gKqAk~o~HAKy@_p$a?y;b45?Rdls6X&HKf#dT7yv;g%S}&GoR|az?jCc^|9CCOFm9f}1TITBiRZ+2O<5Fe`X3m-uIW@qQzO zjpu)ZV39Q(r4*%HaAzT6^6`8KiRyRBRs?=K08^HT%RuQMC+F#9s~!YbRTNkB_bbd> zJMpLzuSnEOu`KPQSMWrG5sOJ4i?q$722t2sL5)2ulE&-ToTO1!q6>zBu7r zv1RUaHXd=pTcQ6Vo=%+X7uk{ zad#`X*7Rhv2bh@i!8RA-Ex@=8LJj-bBwaLNra?gj&DRqxw$S#WfdR@#djGwaW@Y^f z87a7Sc4$J{%-A+CqP2|%QnvTg<6Y~qp&{crs}{ZD(CqVzapeI9e+imY1yBI_x48Mx zgsZcx>b)}<+e*6N+$xkqZ2> zo<+QIGI>Ro3W}OQ>MLCEB|wG51$nWZ?=~^1UWjin4-42ia%B1X%hleE^9D`qII#!5JDY&xp z1i8!Sp67~geX6bzzdgBYsI7eoRnohpw)@juEphM8VM3AA(x%&BZ)$yD^p4h>4?Cvq zVHQrnhJf4N$vW?L62C*Sd{IAqEZwTt?%Pj;{EO$jv^AOX^R9o_42^fb?eP2zJgk(; zML9fsle}-7wCM-7c4v4LUgF@fLnNe?cHe;T-_AxKELnM zfE3S)Iht)}|7c_8(H9Un*sk;2tbN*5eh+Zc#Du!}bk+M`Eu^RD68c=HZLJZHmRWn9 zPXnH2UvpWufzd#zj(&txOKCg4R-=>Q_1tZJF5|-zi6<1lG^%ib(T$6@IURjSbAFL& z3Z8H(=uBif3oEG^9aXMJD@||Tg`4ogC>$qF$Zb%!_9z+ViBMNw^cSKBs)1c9Sl6I4 z9c~@k3@E>E?CC^9;?hx+lmq|6UmqQ@yoek(qKy-;feTe=pNGx<1B*MGU7kzGyFh_N zQ@6Zw%Nu}yZAFd6rN347tzS~#J?QK-HL^6UHJ%O-M)RZ99qRv+qSy@XTbw#fQ5Mrx z6+X=xb*=>}Oo>8nX%LYc8HfEZlRo4#*~%M30fzx=M3j_sQRarG*kwzqcH?#RC3JEu z5BB?J)j)`9Hr{{{+MX<2bNuwzkhTl{E&opt{TkRxctEVTeIznGG6FyVM9T3SU*z&Cay=xQXz8=JxV)SQu9Z5$`T&JzbGtk^vw6zU z^u+$lDfe>aKDM|{hak+>30%OLVw@huGBMeBXgu(cFg7mfOMF5=%ANJ?qP|&&gghU2 zJ-Sb~?B^&DqX=ePcBC4lu4-OM&ouN1SuK1rlWr6i0?LVe5~s(-%3JMAw+AwblfzR} zv`?&Neleo?1&du-Xz~^q^sidZEK&%4s7Os!#2Sny4vBtnK0HuwL&65Bi_cBuIm-fu5UnY z=TvJxs3e{`sip?p>bVg_mR`=p){f@5|=NCil zQ%R!1V>3Wnir;>Z1^Hx)eZw!d|rMFpm-;LRcYdDqm;tj6Ygp!Ta4(%~HEwtST?a7>l@)f_+^$mV@E(75>& zvmS7M&`;F>kzS;7v)BDlwL*=B_;`cFE+t)N`#)Zi_U2iefJxODW`9|_Xt=Tv1# z9ye8_Im7fomMJQaftgQW975+r!F@1u5T293?{OOK4-kj}rv)LViHV=rvxXT;unPhq zBm!2mHL%UoSki)T5-#o`{QQedrBnsmzX#~}yoUNFw*p%s`0J_70mJ~lBF2uZuf;(B z3R~T1ZEE_%MH1qQ5a==p{u4f0pd-X=^F71i*QO@2II$7n>i<4YHmXWI>9CD~_={UF zoEYeqZIQ+FrV|UvUBPnwt3^Z4KGu%yGW}R9f9f@o`xB&9kAOfMXxp0kpc{mLoI04~q%HLV-IlqopUuPIP8b^Igu?{D+UANAxXJKXFQC3i&^XVGw7Yrqe z($b~H#V@ju!Qa0>5z?cUy$0rT@VYAopsxmGP&49E(LLD^gjo@pAoVjz&Cb7}aywhtha3l@sqw(T`UVzM&r-vBRQuS&NbaR~ zym2*4bxbR(;CA|MeEiUc1BXbn5j9);*Dmt$0rQv%jm&AUgr%k>6zwOU>d& z^aD-WrOrp=$*`-gt>u+OX9)F0TE?&9nBg9x+hoHin)LF;gNX9S$ zvwob#SHejk;Xgg~0eMG60W?ka^M7C1!c2#O)$&jhNLpHK+crw7NPquty6z{Rcdsb~ z@zgYDQ;xQohu*!pXf$XWHZo@|C}Lms@I<%CntZL<+C~pjJ)h&X0p`bd(I3Oce}NBC z!I(&koMGT~Z&(K%MBbljeC6O@$Txco*d3QpZ$O^ODaQJ$3Hi!;rbu7Shw~ZO#$4;B z#)@<%MrO~^NMC212C10wX@z`5<<{$$(&%zHVCbITLqww+WhFJGsR>ldv6If672Me) z{R#YTa~LLh|J=I>9%DMu2K zsuj*IeQ-*azVk_^&pjM5agnf~@j3~$@ggC7;$zP~pw`j>^aP44vUbP+CAr(0(AWWj!NfVz>F-t}el$L__5 z#+}iK%e9`2)qQoDto+jEm0M!AW!w%Me4m|p->j&|(dRq@N5#-|bG?$KX{-1sxb?06( zu`;qIG&-Vt%Q@JtaM@aAL`9vlHx@<@jKXeVRi40pQAC32JMCFdi54BeQ@-izO z{l@3lc>>O#D1SHh0Y>T($14`qnlWU|Afja;(2W;Bz4MRh2?~Bhv?P5R?J^-g!Ps`< zfN?&2j)Rf=9s>9v^3R=ZVQ$*vo9?N3J32S=YHoGQdT%GmsWiLe2nJQ$-6VjMugfTI zTU+1TG#@b3_;VA<5a~PEKZ+DGJI2`zX(@a>PR8J*34yaBOio|(PZSg-fZMJ9T18F{dx@J)jeV(RibrMP)tKdUgiuMAP+yqO zCqy(^Cv`7E1JzT3p~mXeSv)yO=%aUUr2z}L%>$w1==kutM@j6jhI%oBY^e+-uG~`l zHki2p-JB~XOk?OlZ;~)3!lkPZjwKp5C1DQnb;|YhiVVip`d>a_sjTgK3m9a35K2h2 
zdAiR*oQ#^o`AqYEOkpZtPY<(^nc3d4={*Mf$>Hwostgrv+4A~&e2AaNyL@-al#wIZ zG%qx*J;6-uA@f$TIPFVXQDytk?-N7J7lpkc;KRi*%6RD%T@?C|Uk22N@b)@AH$54A z=%X^bv=EmNmlCTE#N1MEp$WOBc|V*Dzh_6#`cd&a5B}~~a332?GK8T`$IYIBRB7jg zpySFxD_QA2xHY27twbBG;yDMZ1fXT>&Z1(!!fe3ou665!up@bRpqIBb>;Dfh{<~s z!avKbE~4PW#pnF@+lD7F3?*^ywTvoAl5ry6U1r}(p0fd{FMbr5w??)Q^I`j`%(1A4 zyh99jHXb80;~x4)*jiCNXWsr8$`(gMln$QQ>4%ClO?&M8&b5gV9?MIJkq6{}#_K=C z`=+?~!K#L)rijr=)zC8Ask6($#=vyv4>@?hvnuSzW}3IbixCJSUT+$1Hl2 zcy9T%M|>8~Kt49lBXHYLc3! zogX2PZ)I~(=!iLM&-XBDxIn5@nfs6`TMWryK|VAPde(UXB}!7q zO|l6NDFZWA2JOJa!#O;3bagN4{}TxMMaeq17*L`?EZD?4gHW(E4(@8jc~S6cU+FJM zK*Kr6?$f5rCP-CKQBrzuU;z_EC7SS+Rg2v(;iJA|}`Fj?Z(WM7xt6`oMnDig$KqI zpm=5%V?yZGQyX9&Q<5iF88oj!k7r_10_8>YH}$bj7*cDzZ@Az%jZAIO@k~BG6RCLG zep<_MmCaD}E&64q`X`>7|NHQzXwcPp2u63i0JMQ|1Wf*Ti>PV$(Xop8+-#EzLsZ`w zHHvyrF-4tmEcwfre-fv0cB(Qi;a(#rqiPd0r_%WI#?bVw#l+MgVRBf|Fuz?HcaMxP zPE}72I}DUi5GcrK(dyMKXQC=g3r^0R-F95!*a_0ahi?eZO!sK$s%2*tvI*Zu7iy>o zA;VkVp4U)OnwW{C#sH)j&%T1*U@P?lTrL4 z^T6S7|9u`qrPULl3({s%ehN=*C{6d4q6?Cj3Ag(sL*wLB)kdF1-KtBPw8a+L=%T4I zHvGQcbwq&W2^WoP*Dv&G)JqWj)cb`_)&_@3wQPBz5MfnU6bgf+Ah%jW8&2Y7sy3QBYGp+_ip4@EHgZ z`bVgr;{1yXflbP#oZ-_mo{Jess!nTUWuadn0$U$3igoH%ais$ER(40+3H_g}APeEelR@`75V{~eIL1t>< zgLRej(X(@B&Xt3yI@GxMAm5t!^2}4%iM$j#^td+73J4>H^qu+#rXjvS@Us&WxjHe} zHya;)_)YXvRCgO-4sXBEYtiZPe$hS!U3vT4O((qS0xBwyD5I4JwC+()-)lQ~sb<``?E< zi-_IUCf=!PG0xq@`DHS5?R%Rb9Fz~CtOtSIFVUg024dY!hRa-BTxaqayeL%D42=+v|THyG{-NkE*|bs%rb*$Kj1ADvE)Gf`D{`w6r21EseC&-JJ#@B2v=b z-5}D6bc1wvcQ^lu_xt;d_hmTlP%n7)*=Oyw=6vQ8aoC?ENFpNez_6Co`KoU-D+^mH zHI>VVN+?65^WG`b?(2~HQ?sg<=^2G!>s_;m1tm}4P9xB9+`5&Ck`fb5;`0uEHvog6 zE&TCW%=IZ+@PZot$oaRr`Z2uMuW=X&BJ%Pdmt3?jui)~_C3HhSsk^6#r&xa^fp?_* z_y~EP*R}4wZ&LJ>3DZf#}!>UjLMaprQE@Qf1+JFKbK6xIOS5MJl>0|@MHA`1#ji}H-1_HHe6J$R?9 zz{ZtYnTUzNWPviv&e~E|MiCGz8E?Pd?tW$#CNT-AmA~BIU!>LZ_(5NKeXVw83s6J5 zAN^_!0L#ingiA@k&&w-)C26V-r704y&y$p&E+~bqgGtSrjDM`AIRSlZw?6bjej!4j zl!kADfkr_mCU@~9%rDT;4!;PJ$lLa}u0kbREKY^_2yfB$?yfnY4x`mdniKDT{53#_ zowI~(C}j@Yew}xA>u@^@p086@hVH)R-*t71dr|gxwPO=`)J%SO<&@09`8TEpLyU?W z*J=UhO)4&-X%QRybOpAz(aMk4nO?)h9j3?xIMMq&Md?3ny8xR!bFEzGa{&4z4BCoI zijnx@?Ck8yMAtPqs$j$j`Ko+O+le&)6+rarkei z+D{W>Q^R+9PEZ=9rp^=o{t%%&l<@lnlrZ-;JrMT|ICkT8pOf7GBT#N^cxhDw8>C<= zg|V4lVq*69@Ms(;NdgnxAd{e;mgDyB9$lNnFr&QqEfzL~{=fP+3GA+-qR!}h{h8Z# zaG?p=hRxuAPYTO+RSX@N5JeNL4Vhr@th@G&eL8(jFoe&RU*lUe(<5YkurZpQ-ZZ&$ z1kQUJu(xn|iIBle)Ox z|35B3qyKVSAdK7oQuu3DSQQA8ykl0dGaZZ1OY^m(*LZ4NJ9s}uS62{oYce(m2Eqmv z6sj+UKszWZ1M#}Am7;`Kp5H+8ZcHN8myGJ-lW1=0dOP2H6DaS$QMwF^2~_)|A|Ez0 zioH~q45^ou*NMOKXl+*w$m^7TMCz(a8!KxCmSgBwJ|M`KFJC`Zl$LXH4?tM@ueK_YcZIADW<5f@=y@^m^wM@CXgCVKBqM~wS6yZ>5 z(w}iKqJG<7>;ofa7e4p1WnJR2^2D{5*Z0i%Yd~jp4G}`q?)cjLiS$H8HR1xIBumqx zYVroUUYw!4wc1^T#*aCoB`cKSF(KQprswYKZPWg6cFQu+82bPA zrMT}KYp)*QhYjYv?#!QjGV6(g07tgJ*GC}10`@+r3Sc&V7yt24n$luHus-;BLc^F% z*Ioo%Cwp62S=mafGxo4Xe5|2?mL8Xb>OZffqTh8by}G*^&PnbxI;%_oaZukC-@F-( zuoEN0{NZ){k(_p7ZWh#F4D9_dD+bb&UTOn$^f0wxfB-fG&yQVCM_z2$8hPe+Tp66C z4)T?kuU^}xqVvUF4CJ06UEFlDpE7|!Sxuf5aPl?BFC?v^wIZ5_QwY>}CB8Yf3JPpA z`Mj*xmcJ{k9Jzo_b^0jyI_#(5?F6b|h$vuGJ*#*_S8aRnyPCj#|276dm0v37#fUFG z8mj?En6jdL<Xmmu>%?cjusv|Q zjeuVpojU6QAg*gGP7}fzSO^_`n*r-^Dm^zjW-}y}jjBDibcjAJyRmz~>9h^%(7VVJ zcD_K!K;V2s7x?tyZW3iD4K1yyT7}E-s%MqJOTfsG=Om##-~1}9H>S?;alzH*DiXz! 
zjfO;iter`-#DzxE$f(-&U@D~O6~WpX@D(rjPPNysDIrJPX4gg;8%U`BTCu_&2cyNh z*OBmuhxqu9+9KW4SXR(4Hzi>zIHN-Gm$fOB{#vN`tT$Nc4e!1qm@IlgchV`M(39h;NjNt#Vt*O{)+Gf-j7A%C8@z z{wl#QMfp|VOo)^Zs&vKyjnx8yO`SFJBc0l!wOXZBRj~R4-+FkknwUvK#8#G90%tzO zIH;9qp z++CfxPC$y~lbzExl~@T3TSEa^^yBD=ketNqw)nsJc=+|0HxW?$O`#_Fz-tKU&INop z>xzRB{csBjJHwW;i_7#`NT-^pbk&rq0!@t#7d>A=$#UIyR&-A-6O%*W z|8>S}Gf;+g&a4e?JGTYvL(&2WLlQ3T{+YjReb5u+3}sKgIN|N2E1mGMe79 zva`b|$w%^0!DUs*a;;kVorL}(q!s#e{U^H_R+7sKi_wM4a8tc$@wf?1N ztl5N{3zDy9O-=LHO&=9)4#L?eldcV0yb z1g+K@VgSxTdGaiMrRqA9Wc#+nT{02?lufIiE4cH_|FrwY3|yA=zadv1{!gWbJxURYXBU zYj`cFEmyQD7yJIwrQ*LYO2MY^nRYO4 z)6z50hlr&_M&gP5R1y!yKt$4%NkWvX1gSso| z)@W-IL6cdiQGMn8hqDt2JpN^67tCecgdB88HGl3{$i>^i{PBuQD=p1{>F5=N_1ZPG zvnr5kl3YVF}bWG!Z1gd#VRr8oS_E zU=b){v$Bp_yw$Mg42R$JU?9|0*YAFQ<~;q&%eEB-grpX4FZfVT4j_2u+WT?dmuUMN zU{FAj4AqdVTQnkE9XT%tI|h z0oRC}gpPjc#wkm>RCLvs>LK+W?nZNPEcS=a(^1HU)U&d71-oy?I5WXx1_e9+L0Gef=z+KM=(1xD6LDB43jaM3*244@j>!JOOFTmdR(>LBgOOq z*1WvFhOzk6>GN65wmw8d4Q_W8FVCPwAz1BB;-c^|W z5v?vSZ@8}hl2TV+W7MF62ZtXoLE1xivf+OpD%r$qmN#UWpaW4<7yvzBl)ZZNVRHJ&hpi_xKLBjm_|M372aO}0{4|)Jqk5*^Vr$S7Yn^U zGaZBzlpz9IR?Ekds@+U*Mi&)7t#FvPaCEG1pyvkDLz#!e0Ez+I7x-~s|4d!I@_WfT zY*xGn%qaS5&X|DR<5-j`Mjmvj#J;e09`PVlDRSusW0W0q^aH|Yz^=Ob_zvTJaTt82 z$WHD!)YqpVUA?_u6RW;_5qDIpmZxcNWyJEWzw+VU-f_#G>b`omKhdzVx_S{%E-th; za{nZ%3?uJrMsCrK-T3^3{07x$n<0Y1F;9B%F|wznNlgDt^6}wkA#FsQajdMh|DdtE z$^|Wv${}xKqpZ{Qj`e(7M1=LdeckD`le(#wXtTIj5FZiLICDS6Rg?%_#w7|lR9X*;&Z6}Wf@c}D7G zX4WtJCxmLchOvE_iwa*ymvxO|zgXlKc=3Yo*M=p-8tdJefcWHyAj|TD#Fe@UTs+NDQ3-{(Qvs;>PCXsQ*&)* z=LVhDA9`WAYUb_(-!oeB{Kn-KxyZo zOHKa5+xYKa#(Dhe6Z{NY$;ZU$iK=A`xK59I#{);WUy~qGj*vCqT+HbB?D~ZA$QyCF zB+WQNAJJ9s1pk`Igcx1bP83(2$A zjLBzg)b`blvxFuxD@#$JJZV`gUH$K)dJ)MgTx4)ghZdvAQGYNWW54lWrAACKbZ%LP zv0ldwC+QKlRk}18Oy?rWgcKBXd-Go$e0o`fm&C-v#+UCbuB+R3HYp9fm$y%s%cwmi z*x8tqSr$#(C6csfE`pgN6Pdx}Brk+~8v+b03g0!ww1Q)T}(;{+|6f%P+$cbwgE6_6hD;w3GuPC>8-oaA&kzCPar^3x`&Xvo=I2g9l3$b*_^ zi9a0+Rl5A$tWYg2y82!_twX)WaiJo@_~+=*cq?%5P8s6NS{RI&zkESoXOQB@ODeNn z7|-E8z;iT4j~LBzUib^cY{xHx$GRAgSBE<5(%C!(L3SPktw(@lAon8WAc?T?;GvHaxrwq5sy(C%y0$R)abRK22#3Az@*shVNU1 z|9nTsD(;Th5lM75T?Kk}$(ZWE14oVOZ==?eRF)nl@DN0Kuqe50p4RLcL?q}9j1q9wE7u`Qa8c*{8Q!+&&dJGdw67c+_#K6_c4Gd z0fr5@Uj(b1Y#wpibPe6&gHLwF{EDXlr~9@vEJ0jXmzt4rp&fmzXPm;qIi{w=3JMAv z!}ia3AiqXBqnD*|ALdil_MK*^$GgeWP2=JNg7tMM&l~Ub2UC7kR8pAQc2IITb2WE~ zRs~8p8+4OriuCnbTYtJ5!KD;ZZ%~lXuWcrI&36d6k>~Ofqo95S5g~+oE_?J8S}99an+UHcx;SshR+UWsPm#|_eNdB-uApq&_1Qi!uTeXSP$Hug+S^$EztNzqF_^}7E%z}(4zhLzC z03uE3X;q(@r$%Q(O!okUHQYr&gu~ut4iSZXCY#TZ!E$R&3AHQ8p#ADo<6|-ex#$0o zBWIo7%V8g%HQqll5q7`se)SV=t(ld)pg_=`WIWj0+q>#=UUi)qCSHvxRQHZs=jL$1 z7_5i!wiM~VJ;}X{U#oo*B&T~De|I#bn>wnzv*|{1Rern|$;-^%-l^GHJ@O4LAI_v{ z2|87dHqoMv_$u)&?t*~-g4?##^GAu{#Bh9{?FBb1^l|GdFpCuv6F)oMxM!^~E?9pr zOvmyW5)YF}S*56-(o%{)QgVNM{c7`a8NeP@8JPrPPHb%KhOdvE`VRE`-Tverjj1g2 zdP}9Dz6YwBZ@oBfheAN`PGqS=TXID6syt$lkW%J2woO~jaeNz(&AhwCNBh+??&14F zfR%AmFjP-WcK>OR2o2HQTkVe!(JZu(x<%iP@~2^+i0cR7SAEPI{-ui2piFCuN*^RV z^NOi(F!ecjtgMWKRDSz91Z2{<&bYNIlOG3#UaLbkC+lbMIA2-k?dp}zi^|PRy`ZCGApP1EywvT?H1G* zbTt0cRIAqsPSbL79X(xdFJ1z!OXwzb!*3)mL{mnl%x=amL8N!QB{3%=Bl8P-pkIBE zW&X-04Kvr1%@@3VC#)DN+~97z+FRb5(Yw(XMMWZD^N0WK_F*^s8of^fD{K{obp5cd zSMFq3Slc-q5HJ1y^Au^C)|vR}T3Y1260Cn8j6g~0!EfU2)1PYNsiw}KPkUr#VP6YQ zZ6_C|B;a)1@7VK&1xikyLJ~;D%oX~R{VA2hcy!n_QvMc|vbOYrHc#nKe0ITRvt!MMzD6q|{!E@+YUr%mJV(g(!*$LN(`rKfU zMLVe<`4tpsSzJoMVwn1Tv%vOTsk6DcIW$bES8hauw|unf06_>nJoXm*m!#eXBx9#G zc#$@~Zf$GHjjG8{4`-YNyL`8PVje>9hVJvbi|J&y;@N(g9&-lAbIpbQN~(8=)X&C@z+_E`4}; zOfQ5L9|* zdfQfH;+R0NR2qcIB8TgP8&1sN+wMtn9|2!RR3t>~=(ir*6A{#az=*z0%pz+mpHC`$ 
zHHC$;%<`|uaJ>24j)$Ug>FNF0DH=HPVo9sNhXw_yRcjx-*l^SUOMgFqcie}&n{F2A zU`45*kRVG{QJRB$!!sxrGKj&q1^bI2d0glu_6=b9*laKYH5P&g-23RoTKW1Esp6%Z zkoeqOF^b@WwLuXcInkBr2ji|;0g6ual71KmL|o&tN|X^XTu{f+OJXG6t=F>D!aONL z+Fy6|_f$Gx#FfU}km-9VF8)$OLuy+BOvpaHxWFFszjuuL;D#rJC!`7=*JQX{cUCo| z&R7N)Fe=8pO^x#MGS#n_rNVAl;$MSddRF|PA{nO0=;+G73w*h!k%7&IlM3r&Cj;vK zUccX(DAxoDQnai8B#DS|+G@LR%huP>D|SddOKjE@@DM@br88=WRi?Ff82zLb?aeVO~S{Z#uJFrF-JFcYC66q)&y0vrb{82i|(yzSbzA9@+q58xl79U1h z+N^T>y(bO%pX;VInJ43vTGM3RC!M|FjF_GGk9?Xl752pA9F5cQnjhn=DioB?HY>dW zy4&|JYoso7Hr?rmy5uQdvnKL9iP@E>N4^7f?$N8!akPROQi{UD<&~8rQ)Aa*5>4{_ zWm@-g%@@Xd$ooQBZF~BhjHxSdy02x(8~NwPAo0X;Rn$7$a18Q}M%P3IcXedaTXqSE z%PEUe)AI(yV5`6{{HF&jATR^%#N9l)!XozoQr(gGze`Yb5C@9D%i@==$8tIQ4Bq2n zWaO`j?@FyA4~%=E|C5UwQ>c$+ScD8mj%rp2R?W!qK9T z@b+E$f~}j8ct$Fbk%F|lHnz~Ot>``EcRFl(_bD><;=X|U9lkhCWreeomN=ew#X57$ zmm4i{?=F>&)j=-dOP&+9r|vA-uXZ95r-KfBL}72meNB|mo&KBnT@0_9y_q_P^Y224 zCo1XcA)98~Cos*`=AJ2#CrZ19wX4d@LL|dcfLFCAUTw; zr>v_x=c^PrOkbRig^rlKDQfE-3^?pE(=(D7by}v|@LvfD5OH<3&C1NZjD{L(_%gktz8&^M`S2_RW z;`HFf&~I+Ky*jY6Z0kN97Z&%=J>)63fBJXt$CGbr`vZD|KH z3}N@xXFQyppL3@z@+!yM5-aI9Psz-@Y`T!22T#bDd@wfu$i(*a6h@C$_O(`O&rGy{ z;-k#@!d1(P_XxEA1s1tsA3wSX3C;a-t4h?GCi3DtSJZ==3yeMKe!skN)0xY>pVjw= z9#g^A-t8mD6Fy5gT4`unB+y7ngJ;|+0|XKrBx4YyMeGagECzc~>EYn(7F#t5X8*am zzVj3m1>6-Cz5Ux0u3&`&YvIw4{_DVn_cV(c11o{dEMrhT1Vo13w_tSGDaRf#ABWll z1Mb3Ppqy7&&6hIh68ejf( z9(b=`szY$A8BGfY-iwz{$|@?nJv>;+RtB4iI0%moc`IyFWvT725V`!ga!Xt|)qNti zE@QiJySmExhNr*<_Xp6LW;5C;qu+3O-li868noPBuP!W{T3&u^t0N(-7g`vUm{eto zLrKZZg##M9VZ-z+UMEbcY(`@pgST3}TwLtc>%~C27;PR?R(i$FGp(SkTqfzFp zt!*C)6iaHTqp0-xLX1gI-}Zuz;zwX=Kr=k+H(gJ&6Q^g&Yaq@M9*;V$ruJ6%ZMu&K zYogELfDd{{ifNvx^=zmNM)5-R>~BqSt{RJ(_T*@y6iPL-K^ZS4eRNwlrYtLur` z$={>G+z;HIZizw5Xp)d3!@=*bL_5?nmRc1O;^_!^w0~ata}Jf+7L{>v!*d1e(Cxq$ zAOJ5|;qNC>oR}>fE3NrT`A&%UR3DY}?^vu49$NidHeK$|gBuLarJ@G%nwpxpTuvqa z_2uVw?r*;e3kw_V22{6?G5u>67yUO^1{#GlYp3Sm&()c!UX63-A4}T)+tl5k@8rGO ziL>9WQ$9Vzhh1JthH5stvQ`lszU_tny_WROA4}F6brmN)-qtM=z$Bu!jy`5bk2Zu| z;uXXm=cwm$On5+SPm!gb<Z-WS$-_5ns3)`F%99U+X=*E0R~E80Ie0fp7|E7)IU0qB{P)ooC7PMRg8T9p6f0vrX{ER&YgG7{WI z4r<5n!~T3cb)QhzQKo|iH?CrTdWjTT-}b-UaZSz5Ue}IFyLwC-xY{B(Z6`V;{TS1r zlkL>f0OrHH7v~UwTa%dy@f_aOJ^Y+u2BSo6@@YR74ph#gu1^D1%u zyGosIRgo1renJ!+3q?Csmc|naGk%Qv7afu|Tx+d134Pwo|BnmMlZT(ps#iC;5+=2` z-d*zHi1Hi3(C^eSrPsXfM|xY^witAc#_Q9BXWrOYIA@t&PdvbID`j?mrWE_~&fhcI zX8SazMgZq}OG_(|J*}zJ5?2ppf6_iyA7r)L6E3hZodUtO^g~<@6wXNWJ#=~sHFd26^C)txn<_8Xq^${ zMb?&WZPTn)ELLcrrlxh$G82O$ns2&qiBVyRwV0dhzBB2C0-Wm^t3(gjnH85p95F*) zx&zL6VsUn~NU@>$+^Rw2!Va)^kSElfl2b!Zo#(lCnPwn=xA@*4d<0pktCVf{hdA!Ry$taBG?Rm9l7ewf1!(p=(O|jl;t#AD(Z{DtpXk zOFw}&2!Pzr-~8MU_+)}|#{^n`=b|D!818{%?{%r6jP^H+aWx-Z?#}t^r)p2W^n)D? zB3ZzIuEKurZF7^LaAWmFz*Phei#57-M;?Z)XCz5ltu3u6&&B2b45qUoB80&nSiwuv ztMSR{znVK7Pj?+(YTO02y4-`LmX;O(!)zzscy!srlfN3x6N7`-u73Z&vZgK4<}rq! 
zu&rIbFI*nMBki#c0oXT1G=CVZZwu`xGVW-U>72J2J^(IFQf@A2qZJv{^9{$Wv|Jyd zdlovLeSt%_P)CP7TVuGtt=OzND~x`%r#a+VXmBvEv&+TmqhQFuDRJwQB`GkbMSYl&#{Ii@gR$*r(VNOLuqsHSp*w_4m3hNF&^CWO?Y#@G;N@u$tBMU03 zcg&9nOon4Pe`wYyh))NB?AB2-2F5+ntK6}iZqF4%g_6PQs<_uMAtl4i+*sS(IBCnU zM^0Ql#yCtWvV;lcQmw-n$?q$o6cAS7u$(W+Ev`#mPrsrhCdyQ!I->lSGUp9*+>12T z@F=jg77^c>(f0v_qb-it^!U3a5J3PB)A(U=2kGtYr3>dB8%ju;-DV;M_UmkFN>68rZDo7fOmnDdJ7hrK-54>O==KtU+qgIC^Qo|q4zRL7*O3ZaJIbC6`Ib-& zTKMfmfupJF4m&J}lC2$2{>Hzj;+_Dq1A6Ld*Amu#H$ zG&|mHp23A#)4Np>YU+##P*Fs&Rh?U5Va@s&_~uA*JFDVH?JL@L0@^k<+9nUNt%Dz8 zg5==1<+C>77Z(PE%oa4*UJ_hIX8NKOf!|`dCRA!day$5)f8SmlX|;YHhg!jLusAHL znU^wKvNZTxMO3uA>v^05@9ZpJHG5_z4P1?@lbw2jj@#IF2`gK!#xK&g5$Mt~GQ_05Gs7hlg`U>Vdy73)MJJ$jCA`YNJXU0UJ<9GrbptK7*!oO)>-FL~N+ocrb<)5I{+_R5hw&wE=o^f81hpq>VA|NFWw>?e) zMA}-T@(f`djm9naI^9qX2+1f4ihxPAINnFfaK7f1Q(G{c*$wJE04+ztIZYF7xmy8Kpa}sZev8#!PANMEKjA97sbrXcoLwhIEaofyU(i-T z+5NInYM`oq_GS_hziYMa0w3aV57Tb%bZdpf9MEDYreswVe;A!9A`Zu=^<@vP!zx~m zj1+1epde2R5J~>~>HruU>bqOlkOXNmV83X0fTKk?G_vcaD+A_33atw#W9SbD^A3_I zr;uN7pO)L&X%@SfsyKPk`KDwiL%8(Jw9W+uO=|kjy7&-6?!B2JUmu@?t*V;{VAt^2 zIzeWVk6`2so9#*Lr-P3I$orVc9}ef)@!tMGu$8eCmBg^G9Sqc_Y$rqWx`&3bC?}&_ zVACi3;rZkOk;QrMRL45QC0=bfb~f2{@;l)VI7-E_avgzyQc+w~ekN{=DjdkjIX8(rxTi)qdUB zyl=l@?bl>Le)$qKF7Rt@L>W+o?F)^lEtrQ)Bs?uAu}`gz!sgH z94=H}5B4nA5e3Na&Nnm<39rMs!Qb(5=%?xHv19wyw-|`2grDlN5h?;XSBB=)oU~Xz zp9jUXLCygvBl$O&v~YA6%ilwV!6&vD zso)?jF(&?Tl_yvYmEXuHT0uvJAfH}9_CuK?t;OypF} z^mBR=F%~=hW6pO4;Ehse7}oU*OfhQQub-b=!2iLHyazn}3;H&!UqRaBa9K#K1nKjm zh{xc;D#&rV(TmZAyif6j-d&-TfyjI0dq~)Qf|CcGB2-wyl};}uLR#7x-*znlV~5VH zt2dv~^OcK{x<++qC4Yw{LkU8+yRuzWR5=bmo_jFT4GG z?@U_f12!xl!E2sSpP-$o?U4vwext#gG<31V>I|&XuPSA_C^`16Mto&v=CAC-$-h`~op`bzl^Qu(o(_x*O-RU{U!J{({Xk#e_`%jT z1_E)QrR-hT-vjbCZPiReCP9IUEZUTpN77s(57+O4T^%;+`!{4)5Idt`{SZpMb}Cxh zLY-ukC)4%D-JNdC7dU7!QdiS%2z+=C%8B<_!OgfXy4??ymHRVDomq+s|B!0vT3W%l z*xvkweit_Otm?AC15Gqu7eNnp0EM1!c4CqxGKP{K-S!jawX?wWZ*|^1RJM~cv$ge` z{r-jy(?umQ#cVV2XENC{*6i1LW)NfuTFj^oKW;%MF_rT2@=VxLfl1))wD0fpx~aWA zD>s*zqI&CsdRdN<=^+xIk&s&__e7TA_)x5$5YtB}A}%!TC{6p(ZIA^R+S*7!AaF}3 zFKOVL(@s5FKQ|OIe^uFg-CTfXHmk!ApJ&=d-qwQlemW?K4ZrjfEXMY&uj{%yjSB7V zLR74tRUsQ|FQev`ub=xZM1o?bD1#PIAvMBMv*PJjx(l)}e)IW-TY)o4;pL^rK&Q40QR4+fu_W|;l$H{8zi>JrG4=kV23%-J7VACr5i~N^1KtYOBW&?x7w~-TY+`0LfN2?+J;IyoP8?Qov$Pa# zPdgHGseUp!{z>K!+L-e_66Hj4PV$oE;{LF)xB zJ~nRHy&2um+|j4*`^DX)X#PYjJiMgV{ii8)#nge)@@i_b8X8;5o&f*O&gs0my*FBF z{Q%67?U$A#tix9Hs2CVBa?j7v{;=ZhEqgu)6925tfAJpJ5!ZQLk~82_=rTiz196F> z`Cw>dCp60tbSgqXt7vf%4LUpJ+{0Jmb;Gmv|`n=(+O zyLJ%4fTV`#F&_2<%53{`IeCd_m}vqN9byxI+SHq$tbc07>1gk0-Yc_9N-UJcbO)jG zIJcU346wIzJf&%KJ+<*z<)QaP9D$wal9+ zG@eppyvSNOyyB)up+9g=dcorv)WgW}=bOXP{I978BkVa~@B~h!K;Sc|Gtw!c8GN021Z zR!ek+hQGvYES2Eae8*;ulLi%5YO3xkpG9(P?IrW1(n-!eJ!_i6#t&r>gR7>qxMnJMrtcB?E^rKI*jhg_#@h7o44Cq$%4G!Ye+dkiDbSiW+9 zK?=*V%As4K_s!>JH#mb7BJ^Iz@i-p?pjb(*aSJdiuwJ=reZm7j8Op0zEY7IfUzm4K z17qW{;HNJ8K+)OhAZBdc`v2{M>}2-#2LS#pB;Al!- zl=*LI(u*;lr+jc*><8^X6!sDk&%6ddq;I)Rlm4xt(%96OvN%MoQNDM4_rVuIm_R;E z`q!x*N1b&u;b~!n0r%l`OjYk_C`~(+SPCEvbeaIA0LjJtgAfn=&JF7Wk{PReK@Y+%%7Q`ehA~=>7b3g zbkBbfryDj@AdHL)T?L;46JGquJ5KsnD0FEG-sUX3R`XO?PPKKS*wtQSF__=9jP?@$ zNvNr7X2FF-KpwMQ3rP41IF&3tV86US%f$E4S)Os$4pAL;m ztG>>f4@XSGmk{z9wUyN^Hw77(Y~i}NL{azKg zj~xP7`IpSOK~!9vKAKxj>RaFF+RMv==2#VsGE4>weGgsfI2idE&T~R^M>aG(nJ!;> z@%0OeK6jSEZbR!*{|c~0&8z?2VIq%CTpis7HhcPimmpY}RjX z=`dZV`Tze%I%Q>Kpo`}Cf8Q+k=l{MrL7Lj}NC-al%hx3C?S1;+-BmtxLAm~a=*RKN ze}Mb(jEweA|8IKn z_vq-MWaoMe7*>bFB**oB*SEhJi+wg z%L;MCe*g&XG!993W1;~oe4iwQa8?}nsB@QziZcC)^}xjryQiNLE^)o$4)kr0BBI0c zNXfC^K79!AEc&*UfPN_Btr2XL0aw-t8mY{PQTtdJqJkA?r&N3M$h}j$KusWJ{1ezk3<2(tHty+>?o>^YH0Cw`- 
zIXl0qc@0_dKvo}F@dk79VOMF`!RRA3hSJc^&Zbj+7r!rGwttVQLO>U0@KDa4Dv+2Z zwjyDut?)VrO0eRhlGMM@j%Hj0Fu{R~{L*1A1$P*5{sPom{FXJQ53AAPWZ+C{pG6tk zvD0-s(}bRM7%Vpkd%-hijhlN9-4iAVIj(%aJ$&gnXxeIy4m|d_Fny(@2u48&vU5&1uIlONK>*JY%Nn=cXGw>OjmfR@8G9I|j*BZ@^k0Xq zd6AJ4%ZK?HXXt#x{}2E{H@C-<%3%4H|ArX~T`&f1nV#+C6~EHch#w%b7Y57^B);71 zWR4oXkS?=?lmXdSc_|%#H*q%}1iL3@*3q!DK{8;?RRk^g1sLJ!Fw0bXm8%#G{sm+V zaX8|#>O0OcG<*bmGkq+7>%}l9F6GHY*9F|UE{?ecT-P5G_?IUPoR=p@maLXr=6$?s zpjQSe;g@fI8$Hbm6`mBx4IvUlT2}Td0(P}{e!?le_PAOuU%mv9Y^6j;XV1#xP>$r6 zoF{0fLBouI+p^DnCjM1?%S0Hi17wO9Qf@dB3m^oHH7No;;`)i9Fgt#?sEzjNo$;92 z*)qAd@EzCF-J?d0Bw6TRjE+u$v@o&#=5i-Pm_CcQ(qLiF!W{JSIt9|HPhGyw15E<9 zokwyMizU5*K|##s+Ol$gfb68K4Dj!8^O~zxP*RMkBu1E6%9Zypw-3=(y`=9W2w>_h zEn@Io!(6F>ywccMkIQnjDLdOl{}ng#Bv1so_i(oh-VD%bq@t_ecK4ec&sqYto8XyM zp`TAdYAP23h7%OQ-OXdy5RxQq-Q*NMKNZyXA3yRSFrx>B3DCRv@n=Yhv3x#$By!x} zr~-@5m2>-Yho5)wy7U`kUvijTYm4MF9GpafEImIT-!zs*c>Lq@rLEr4@{R$WcP8>T zyj8c-=*lbq9f3E_$ldsag8}!=+*3`HdOT9|`gMZz;S;jewxd>P=zyxNwzG4vI-%dc zD~eybDytx)C@X)7lyQw)>)jgw2A#wH1_p9K=uzqXFC-8nD@^Jk3kmyq4)7vB9Ul;T z*=cT7QInFc4b>b>xsDhcAHg6G*m1aslg-ZJ>N+|CWh?R=t3qea$<#C_LXJ|*jLGrT z0D0zm&P>T2Us;(5t;0uVAR~YR(2br5*(c(YA_)YJ)Z*|^PGOv9D*r{~Q1@=4w&=~}XZA)m6A>)sRS_qf7ZPnM}Epc$KE&rgqg zP8AU}AmNkFPM$OQ&1_)JAYt*iv-_PQ*{Av5C~GKKoMtQ|DEF z8y=q+F2~x%A9|@ZOR!NHtk+^A`K~*7XfflX^uqk+edYAzy1J_UdEWy=&DP6Leb~(> zqJpD1%=?QKoys-a17dOZoNkOUF3(}po{ z336M>fm0;GY4h60FD*36p%M>9SD>{5BS^QVB!8UMj+c6o}ivnKg4V zQ4@G4!*nok<^u5ei{JjBdW?$Fgt6j4mU9?15&&tyf~KJHlHa(428CVg-1gDwL{E+% zWaX|}uQdSTYHVyg5{?HwkkqRRp$DS}z+$p7ISnXu+tcTEJ9d${r{x{>|F{4qS1jA! zc1eE4IN4q8-~2hk=z_9bs@<7TynB0kaIDYgVf&dkAl7Q7hTgN{hKAG-^r11vEl2fz zL*s;x$*`|HN0lS{n3?Jx{)OG2P<7`Y=3PC#hYAXkT4}J)v+;HPupKyWllbNZP&D}t z%3JuJGRGrRiLZcNO9P>`uMfPMP~S_ggtj)fuy1NeQcsPYK(Jf6YMq-f8K!mFC?h}m zO^;d#C4wWWNOOY_qwGEIxO_Msx{Z)stQzD||T zm!-jkvkl{qb>4Ne(Cfi6_R^sU6akGy&2sh0-8gCKfQfvkrMv1>IKEZ$^uRKHYBU&6 zu^7jH)nBP~&Pphe-%{U8^WMLHs>>Yosn7sKQV7dQzX^{F)lhBC%gsm#XodkPVlbP> zD7p%Uh4x?&ugQCIizA22eQ9~?FD{suq}=fiE;^Vj_*hGl_nK&Jd4jODZ|Ha5&=l^$ zeBQ4)s|5zN*vL>~+SZ`3;9Jg^=j*Q1TQ4gVzK29s7!jqU>VmeRd&veZMg!uTwzk{% z@a?4@kH1_cmEmWOZ~@v`YKFcZ4kc4W``osNK*BrYGrjc^ucY?eorTkNnm`yTBxy~4@kGf4%3m~nBbBAHk-Xd&${(sXjSw()bF@rYA7fJ`G(i7SGsxe7$$<tV}7~)nqV{_@qDfw3x*C?!&+Io2BMzoc>vCE%)Plkc5(?^N8(ZXufA(; zqI*-gJhzgbPdOEP=ImQP9kCM%o?ENqKhD8B7I^#ufd;t)SkrcOnsRb~dp-y*8~k-! 
zZ_5yDcg-F9N%WY@W%E}{X3O6N7eTFVs4g4*@PutnuqUFF&dff%;c=?FcS+ph&>vlV zr~T>zLi?!C5aL|SzpQap#gQqSNsr>@p#HGzy(Z()}C(QS}DLqQ4wV92D-LiU>qW$~9| za4S;RoTo9zWvITRR{Xd}>R(H$aimre7+#m!Z2)Auna1040b$qQ1J#H!Etx^%=}O1g z_1_8atP1J!G9KVRoXVNo8896`I<2QA7xFwRO8cjn8$2w9+L4$U^f7$|EQRk{bCm+S z5sUVX{y(o?BX8mEVnlEQ)NsZ8_+H)tIPaSuPDDJj^N5Z%fd)EoMxR8Zdfv)W`2VBn zDuAkNgZ06}KmiG9q+0}}OG1$D?(XhJMN+z3I#jw-=?+0U1f;tg?*8}AonhvW8S$Ji z_T7E<3I7KpRN)qf>nv{ARI6Yb3GB}^&!*tAgRYD;-g*}}A7If8g0gHAL!_UX%NAyD zk}6_GfzQIP73;OqAk+4Z{Z143Fxo(<5K!gX3Ua=&0VCN)vz#7OAMl;-d;U6OY7Ka{ zq2trP?wFMMv88uB65NBK0snF2c=+To#qR0{GAh{0Kfd=7+9a6S*vQcc@J^24)4(|j zqaaq$4P(agulbd?SCp4{^qO$AiB6d^)ec>d5Fv zDj)(STp?|-`-|kW&w{1Z<%ui?!C$_-d4F@=u!BneI|2O@KYk77{=sn$op+F!<>T1j zzoyJ*^7Y&qZgKxoGBDiNX)^@^gziu)9Ih~yzoD&6jcu}_sG zp2So*6}Eq@^rsZ&=f8AQ<*joD%oG)ILcoEC5&yE_3QR4)#(uDC5#b9*A5??q%?5~{ zN0U!#8^T-1lwTzl&x?n1@Y!9oln2H~cZQ1_=V|RX3gpO(hT|}sz9n9?g!RuVqZVik zPBuqAQIKKa9NBjL^yaN|x>%Z zEFj9$O2tMFOX2Z3SHETt4hwn_(hlh)vt0FeeM{vw36saqw7GI}3k3~dMt1j(TmF_+ zF(@_E*Ixpiigfn3g2IGEJPD>mQ1SwWL+k@g4@^OXxLD=x*UGJ97r_1k&IJLl8#cmE zaG@o~0L=v3Kf-UVPze) zvhwad@{>?0vYDG=qT|wn-1u-!dZR96WO8BtTkBbwLx9!+o8MvhG>J7!&5X&2Z@6deuQ*KdF|2a1oo8Ebn z6zDy~gCCdf+qi6V9|%=aWE!jEZ6IJf@H`r14eMw1{c4L79E((_C`*ZX-Ksop zA(+Sk+AEND`1`z_tpn3suamq>=zVMFdZQ-`gm^DQaXGU~@?C#L?KbZrUiL-}kIIIB z0ZE~&$%MMjf%U(Ym45A#t>Gn;)R|PR)NPnnp^G5x_I^2k0E5EKVk~UbmPDS8{2r}Y zaoKE3e9}e(3MHUCmE;Y^f7iS{aEbx9$jpyC`+=}YW>9Zp3yJ0hV}@6$nm{sDFW1Aj zqYa5pO=z2I@UqeNZVHmKkapey6&{?fcn9~uF1=^T-pQ!~8XH999VLZ z!=;DVKmP5z{@1v@V9>!oA#VWsJ(L1tX58eeT{oNy9E9ncq{ITIeM|@^fO{bIMO}hu z-*R^umVw;Pru-#3#fHuszm@ReDD(Ez63d2%!=IsEI<=9!S06MHLHMsN%c|mRtc{=J z65_}2#X%DD9b5ngIOlo+vURa{*jZc<2VNw0LcC51^q~O8shGf6f=N2?uB&gc4=(t6 zd7$*p051^K2yy=ts-rw{k;JS z(|@Ad3QvZ8<;{%>*_imd2qmNCaQ=d2z8%WMK53|qPy7TCe<5cZCZd=(i=zO445XYg zSr{-|{jLG96Xf0S9$bz50ToeNwdwM9y};Nu1DNWZP(lWK14W`5!J^b*QvI2eq2p~6 zuhsWyUKFne>%-12dp$Xjf!b-;3xhN#st7ZBW zUS!}@2JL>IUkJw$9r-mSPVW!QnC8N_;Oq_yPD#r0!aA;h!x&B)`5A_r zU0Ww-YI=Hc*VByscWMdMOsq_Id;xYOcGjwt^_?>(@~WVW28X zok>CR?Oi^fe{}Wq`qT@E1VB3fkFXh6pXI}-rlve-oeEYxWM~gJx;u1VUmWic5ggRi zWM}^}%F0!4T?T+$zHb-x{Kkt~@i8Kyl1ZDx#@I=!-U05hhwkOCG9v-R#MguLf4 zyg=|`kqNbwvhy=AJL(m?aaVJxc!t<>ZHN>?cG}*;Cd61}wSh=fZqnJw;epBF|464y zc8{D66~H=;nwp-=T{q7SKN=`57VK8xK^{*Kc8_+UKg3Z8^tek9aVQRJ3$7wY3}+6y?Xq>UVw~z9@$A0~lrryavn&oQmJcY#s~nkwNoa zkazb#9Cj%4v$2v=rD4HKYzco;!hLQpNzInDZTyX@_}*oJh(yGLC-Y3IXJ#5;))^$z z_A-Bgg#eNnaLXlf(84e5HCOiFfsy+7k^u4`~gVFE>Ug#%36wuKe*fQIin{P`zpE+FR*!e zL?uD&4IDz4WI!~5^c6%@px}E7AWC%o6Q(gT#^#qC@Dpxsnn-#auMZF7Gn zozJKtd%E)r&f>xf0Y?05*mzHz`8`l}f_zreNDjIX18ZaT!M~oF@i-{{r<%pUXO2}; zf)S0C`N~9v16!g4TKV!-Mj%cCrF>)Kisj+*|1F5HSFC`|^#Flol#}X#s&!buyl5(u zA&>Nun`##*8PEU~($a!budSn*kf)^hpS}KhLLD>bB&+5tCL&Q0^(oLPz}RnpcV+F* z*qC}kbx@E*L{wDSYa1ay4y3=a zV*#cf$ZSWW*XFI&MhT7f&0sE zqH)zdCp-7^XwXYEFyD&Nkxht7g49iZC^`Pk(=V|PT^rQ-(kD<=l;faZBSm`*9M3JQ zAIK-W3}g(1&rir1cv)_?JwR0gC?lMoKoEfEyMUSw(z(6fOIqiQKS%sCG=jh8I=rzs z?3?-doOWKI!`hBD68aW4#(z7(ckdMGh1-QonPeq}-^sxbCOVp|FiqsE^{5@5>l>z$ z-AIm?r|ylryS;}Cr~_e>7K7gtdCQG|n0TEXeHO{G(rivDk82?#1^+1!ouckmBO}n! 
z7kbBMysqb7@+r#6$syj0i@|ZX`({M!8Z)I`|LO3bXOFJWSot9S>uobnFlI*dbGlbNa*X_;k7FK@kfsyC-tXry=s0< zOZkfSf{u1*_gDj=3JmR)rxwY{8h!B^dip&3UfueoV;X8js^-rS0E*b{2Q8qdbE=hI zrOoV;dj40dP}tej_4!c4ArjiGw^ZSpdchjB5g{i*yu>F=Cl+@;b(5d4QT$v$yrg1J zZ`U_7&u7r6@C^ufV?4Uo(LqV?Xx%v`h1g?Y%6TGzd2w_Y;Tln%(9d=WpKkO>(uo!8 zs4|JkD)>5#0flnJetX5gL*&ka%X$5k2V0UIY>`-TelP1PJ6oH&wvEzY+zQ}_76wOM zR)zo@;{5*SE6*z!AQM7G1hk!h&8Ydzi?H|%^kv65eowz;H29@i4v?Bpn@DSKFB$dA z*Um?QyPUO+lcun#9RE@E*E$O~GtYp}=Y(HBi{$VA$2)gmvD+c^^d>M=L|C9zME8BfK2v4#Q>j-1rwn{+x-6jqf$bQFtH~EoqroZwEX*GgfaDX_X`u z+s?cAxG(QE(F-TGxa<&+W={oA_Wt_$L z9k5m&6h(`VbYpRi3DsjDA834SxGzZ=rBy$BgE)~jTqKD; z55Pka6BpB{=>y~3KNDTH3p^|p!Ear#u(7ANo$nzk2$N!>dbChR>D1_z;e3X@;jn;| z6YJo#_0YO1lUd8es|Pf0oN87Kp)S8NhD-MqX`dq2QkM`(zR%c+{Iz9)(JJ9cU@rPk z#5%ti8G-p&O+=lRu^H_mp#HRRcW$>UAvdFl%Vt3Gi2qsmU0)?^i5kY7I^ESVDVhRY zoB2knzxxBCCr=(fJi48`$vP5s^YXfjfH122YV06M24$Xz@<~a?gOtaNWMuuFozrvD zaGmboqc-ULBH7zDwQl=fTDb#2kAXh3KmZGPF(P&oN?s#iywrtHo?Kn$t^0DgN}YyfQi!AX(x+l0<-aCfcW? zFZ77FPe`PmvbKF3rST-@eJ`b?rmjxQKxR>NE)Lg3j!bf8FqMXubY`_z-t1_BQe?N4 z%$6es+PI-hTSp#Fil+$2%E}>`ina|5^`wnutk_67CXx)|4LrPz@=d2v93oUiT<;5) zM_fXU#|uj)jM_~|zQjh$h=`P=+#fj~+|lGwh=`(6QY5Xkw3E#qK5CkZnb&T zz0&(*R7pwFA9o<#q661xEDMLmjDPb>6OrQv3gYiR2jcbU_-6Bm2ft)}9wIz12pS!S zuTmPGb<&ZL{EBQ1IKOD*Tl9>R&Kp^CM0_2_oeAj?F-ZDA1b@_(RmsS5+ux6U3v*6Z zXMB8+pZAdQGqE)frk!rqFJe|uYzsO$Nxh#4=*mwR8)HL!W#JkPa=5}o`}$r&V&w(q z_0{*V62ea;?ocw#aMflKdqqQEH+jrYUv?j|fA}TjSeXZ2kXAjyn7Qm=*rD=u;hB@w z^B}NM!o_%zo)-C;bL1FsRnM7($e`|mXUJjQJ@4^RhFOz2r8E?l;)56OUHETom}VR> z#3gCaZ+Dr}Fi;c}=N(eM{x?)QqIzp|Vx1*$u#P&6g^3n;^_;~c<8xTmN=<3jN?+30 z>Ug%pKXw6q0Qqu_3^Qc-&1J7ixcy{ zAD^nBbr;l-?CYi=j^tEK|Ham2s=d*$%*Y%E<4cx|S59bDVLf64f zCPZHvr`5Fa=JY8M8!P*?YO$QW;@f@nTeX?P1Gk&Axi-5je$R^o?w;}S5hh$V$03nJ zm$!K=;u3T>-$VU?00Lx_3EALxO?0cmMwDM=(+ksUQcm)}}Z=MtSV@;<%O@dAkCc#4*(cT?{p*IQMW*{(DKdKUdzx6@$H6%p zg|x+U_bBsnkw5$0|LoVF%xiF)MmUSBG;HbDZMevNs_yoleC>_aF)@2_4MhE&oSXpo z%I|$4sG*=GAdprbiZ=YfB-+lbiD=L<15E_0MZXMzu);jN`t#y~f+sP3erc*b-sVm# zpKxxjNp7~0E8me_v-^koRl=68h5w90d^_=<{^@*x#+5pJn`cmSyB2hrihN+tJd!6J zXKp1Se$!1GyqklZk*la6Cz6r$o=J8_sQvtK9yF6fAZL%Gas;N(yQ}1>*#?rt;eT)3P_(RPs!xS26dD47&XxvS|v)g>`Jk+4J+U1}l42>3BzHfA~ ztcCea4-*2`HfHLbmd|^~ne65>^_xW6J&zB(-FM?OeBQ@+e0xleX)`t=MhT_7!rM} z{YiR{eW!iSTmw=iWlW-v@o2UO|g?zd*d?3n2C?e`BHl!HvebBkMKW$H`Mr_x>F3;ydXmWv3Gpvb93;ND~torsp1VDUUC}jn9y*Tb-mKqJvu(RmB0J^ z+5X`14)5N;RobUfr>!+~=z#FYevlp>Ha|i+IoSCti({VOY<6ufhrSRo6wd186!m=3 zg@@SA#}Dzj%=X6edl848rJ*TL-=2d^RWi|(wCrCg-PdC?*B>Uo6iB@2UtL{gq^#nvv*dJS*9;`*fb+hOmnLu` zu+|?P8uH`#$FsvLDJgv)!n@(7bwdYwe&!0!`|!Vokuncg2cBa;w=}gZQ!R^V*XKXj zZ%>hh$iN@U4C9I_7G`G0o@*F4OwS49Bjct@oMVS&udAv~e*PWml+pRh6cZlqGP{9g zAR11NwpXfyvE$0uvO;?Mu&rJAssEb#_eoXFgS&`qa*CC~#Lt#*=;9b4Tm(Hcho1Ho z)U1V!!SXp%Zjoo~X>3M|G%r*JK22M)XXlvs`+QMUjF>KQ{)5JwIyP4IHRAQ|Ks8G$ zkARBC$mM#--Dli?6k`q^2cun5GBWN~^=}5?qtek8Ly~D=dEdT)=#NrOO6r%YfT?Z{ zeEQP2J3#}%Qd{TWl<4B%EG(U^C@L(R8sBBT5POL>kji6!+T@}xf}NP|bFJRHZy>U0 zTO*^R4>wk%*s|7h|D&7n$B%acy-pkyx4xDd>r&sy-I&}zld@Kx&B%Wl-nxJzxv~)} zf$sJ+R(yG3VSx`rknRIGEL+0R9mVC zv@WR9W2gT5S@tYCkh~mMAwFR+Yj~}A{()A5{Zjik-vR~7!b0jhzBLIr;;ik4%!!i9 z>B*TwPyO4xr5--Itnut_S?Ca%=%Bw-h+)YVL~`cEflyVaSF823leWGAC(cqD`ufw; zsHm7TIc04*2z*1KEH5Z1xc$Rw^xKuLcX>8EOXTuTZR-Gc}iY= zGD1|lyd#(LLF%vG&S_%%o~kNmhbP~1bB&9+$NwnCLWpWcbh3hodT@Yn=I}a{A(A_0 z(o#!h`j8syf8h>^c*dowX6uO_1gYB1PvfKKh%A#3wig}=r_7?~{+xS6S((1J_d@m~1N0Xhu_9#X zL8g@_sJWS!UXUt`e7e11|N1~e(!wGcBF&KkQc)-|-eKt$3z4IlNV8~F=z_xB2Oo3M zetjxL1<7a9)cwxyL~%)zV2KO`ldt8w(cZJWcf(tEC+Z6FYlL-VknxXO`8@Ss<2dvi zZHR9g3oAD)*h z$dByu?R7|kdz3uP56f6epe|>|$G=T-0EM%$p?7h9zMx+v1qFY})Y2WcWCj|VFj8~i 
zhJgz~It07DNcSyezI|S@Pz1TOa3|aKu})=B44n##3L@j8;6?qAE+sAG=+qHL)NNco z!dyo1j05_bNt@~%$1=)4f7naZ=pTRnGE-A?ci+c=yNAhcA3aj8^$SX#RAQh$S=_5M z8!IC8Ge)F1J0Ti>9A~FZLYjYBR7AG6#3$UrJO z^a&vvo)|(_8G?FhGMKXB`mgQ5bSmWbEOVeoV19$5LF{AmXi3Xm^ht){PwOrhboLK^AkpAxZOl!|8`yH0c$d!XuCGg z>KDqlf`T6(2;C&T`p<;@y_kLR&+C(EF&^^i?PsXOm#?(m6!ULnXTK~i%xuu>9gwhQ zcb>E&GS3eu+IHU&7LeQTajMGG)U38v=QsL&_MzY#oMIYmKlpvt3qSwK1>h>gBo&>; zH?U-{f+m_Wl(-%WBm5HfA36p+eP{^K>!yZpg@njVlo+YNV6*LCIm3#pca*>IhW1Cj zmb|R&Pw#;3(&*L8@_?V6of70|zE8;lQ1MyJIWogm!}NrnvZ)nwE-%*28GG2SA*;kk zoBEE)s_=RonEtGfz*1&I^-4?U^G4h5+uMuq$H%vtD>-SKlsLHz4IX6rg2bzIdZXLv0y6*Z$Eydvh`0mTX&CY9a?XRtEFe{5HL3qUYYp4|EjzXn_yv&Jbautb!qnNt8rKU8W*07k ziqh#%tqaap^E@S0JCGhO1(5ywL_3a@uqtD1ZzM$E!zTp=v<7T9yOXt&lnSEXRQP|7 zjg6V*V${>~Ot&CLxAcCNy;H&Uux=9(rOFu`U>skc`P*t7=({)?G{G#4SrAbA%( zPcp(8xvZ@!f9nR$f%UECD=ny0&y>GLWnTUAsxQ(!RW&g8%l*awnEbz-0OoZnZ zW^6)CR76ZvO47i!JM>D!WKsad)`qAY9z6Lp?~}mlbAIp#zc$>}zKj{pln1`4&jO&; znDUw6Z2k%ulU>at_M%`ge`6dj)yXs%UQ<-k`sB^V#%kxbBuDn;o!{rOrd1x#tM8!w z8cRjaE$Epj zVr7Xz(Ihtv`md;8MjZ2+ zb_OHAAan$e_r@s$)8zHQUBdyKJvQ$=Ki-hDV~xf5HPyaKPFl7b2}gR;n9#86jipO@ z1)*)<)-b1UDu7?q)MzhZ8Z`EYxk)HfkmnwWBD}r%{!QUK|t{^*-$|@ zMkmrwxKs{@kNJc>)9NSefBpUgqVu6k`IKL}eHvxv=SiXIe4^mD%357fHbGB9BJeyi zzaS$w^Si3f{NEkBTPTQ|e>5a19sGhKYZ4~rH77h=`5L?iH@o&(OvQEeEy-y}KCJm! z*UWpIUu<`FQU9HwMMk_?KR85vFV-6v6vDXrw^ih@2-1MEvw`*L`ObqpnktMeGLV>* zuNgHQ(D{0gTXaNU3ZKzWu06K3>k7^g_y!U2I;wy_kz!s%Kga1u#%4(1wmFepb94eR z2lEo~-VXFztN9Zl*6i9%&oHshgN%=JJH6hRYr)e)aMEqajA(0X?`)@n2^dyfkgql?=Y{@%Rdwa@P;@_$Dt*1FxFnD|%|JlEuLOTjfZc+2ouPiULn zYr_!Xx=;I(a!12)Ykbw)2BEv+sIVjTW#cs;2@Mm?a*q?Lw)R}r$qA5ju|rr}CIFX9 zYdnif@a$dU%ZwCxN3ycwp&brg!hC=i$|Vjoyposdp9uJ!yu;4L8Fx+idwMqU@FHgg zGKs%PgYaVJi<2MJWQ7ry^RqKbX`CmB_~Kdgt#tG7?JV@J$r!YT{ec$DTA zUuAP8({#%#Q3v19&;&JHwX8rEU&ylK@6U&$on4I3vLW{Vo%eDYkGQ$H6)0{rOtM_h zHwZPcFlygV7))oeyGlTMjkJHbA4p3d*lm%ji& zze%a8E6+{CCM#Yl5g~qhbaL_c{4)j;HyEmUczD7+J=YyOroW!TxVkZD93K`ZVl2Z| zWmBCxzGO0Sn#?Wxcli-jXXfH_2+=U3=>`o#RzqxKbtQSKba7#!nS~AN)9|-#H^7hH z__vl&Y>G+(TRKu^^SX8Nwyyh#@!4HvMa4Tm17y;fzIj?}3ut(FfGbV>0P~=w|0b); z(PNdOTI6Q8+``P5WR;)#rREl!5dp)!;~zBGHF|ryuc+zLxs3E7G;D)gr)2SV1J-kXTeeF5g4QRRirh*}>z- zaKmzP$YJU6tiDRC*@?aQD+s_y*^71C_VhfjJgaxzhu4mvu2D5HuJ)&$uXQ^pHo+=N zVLpD1%3lnlgKy6}7_a5B`^nY_;P}rh^9u{DZSnwU9$?$83y@XT9v&LX|4}fIIG)SR z_fBMT(mLUdH;Q=JYnzikmEya&DE^4g%x;Su-$I=M8AaGGUu*}s6sOnP<}{|N!@#Mf zMaMw#XAsE&t?|QKa*){5(W3C|8vCHVNZ)%^(^^_umy3NaIcZt4i1gdHIvnN(%%dA2 zdHW6ULPyd~S||T<=PNsb-Hb!qx#U^r`!~X<_~s=p)r7_bTj(4FI{-|XFxG%5ZF%Re z4HPh02hQcT-d3TkWATfKbKcH{pE~oRZ`ki6PAy*4{YziGmzaKgmAKR?Q#*516JGTZ zww@wix?q1`#a`5CH|@|G7bay1x#ff!Z2t}26S%9b(9>$Y(gNo}A{&4M+`q-nd=r$H zU(6o)a2Q|QNk!gHMGm{f&xd^__3lr$v`QW2X2$&(hwbE;blAUF0ubZac{t3Kyj|=? 
zY_H}`ucJ?db;!!fJ7QhEbXJ-r@{Aap5Pv?sA7Q;_l9??(brI8sU8I60@WK}$ko1M}ZjBf(m6x{=|;LU8DEPifOq=#D3n`_nHv zumCYq>i@=WV^uYI*Y``sXl=A8_bMCgKm5gW?4Jq1v%9tm*dTyZcYL7^w(^M>8yihq zyC8i3yR(*)&B1^yt#7Vp=bBhgmbx6hkB;tqw#DmuRd>mYuD1YxWZRNmv(e3DY3$jK zi5B4xEoUDq>q$#hkF$w&c=^)Ln(p}moGT_C_>G#5lhCo|pYszrqy32Y*b>pSYRDJX$~-4e$#36YTTyfJTi|?lcS!Zq^7z^+^luyqa~5>*SOdi<4oSK zn9^^vzPAkW>mE8IBLEklt(=WxIdu-7Ft*TAly?WR5GLc`V5fz*&43^3UsrgnE^*|ucZqseV~12x z$z>uF?euwBI1t40zZK3jPNKUrFfeqlg%Fw}bW0~b{L0Wu%FZm?IMFKBuwIq$iSfCP zJG)xO?>pa6S-55EGR^;m&%X=VRx&!7*LOeWH_C>P;3h~?KJL*ipBQzfU9sylWKPyd ziu~Et(!UX-CMY=5F(s}h*OV$Ai}pga_Br=lSpN>uaP%=R6*MPMKx9011`^mfS?KUf z&Fwy15*7*h9v~{6o#28n^02{!G0WzU-TL)NZ}$2rS|IRWUNs@ zTs-RSOWjNUnHLcR{VGA9^+8ojI$cthndw)oX>Ak*mhe~`7B+>M7cQS`5ufA`0fp3V z$eBURCnh15go5ur)sBV0{AUEpvk?L0Y!p|Xw6`y*X=uN`iTiXW`rit-ht{V$)OeK* zKW5(#!?U3QVY~BB?m;vO^^jhzEw9hVZbN$}LtV(E7}pwv)F4*&>WJW7k2Dq z3i_E}Bpcgq(N8Uft7yr|*Ea0SKyqAIC*mt8C#SqM=&18vb`K8E!orP%9$EpU;4=A6 zEllRg`SB<9l8V9;@K330DgUR-Jnd?j&%#enk6TpV!^3y&)%e=^t(KLP&7cNk-9x_8 z^F@8rXj}wPDstMRJ|CKZ$Hbjv23pd-?pO^3d)!1(7cxX62RBs z!OPz4D`3*q(_>p!v@@qy-#=F8(G^}5*RgcDLy^NTEbO;zXf$DY0w9&E>-)@1QaBRS zIR?LkR~u-SsaJZQN8L9~SX^DLw`-^1rUIsxMgBLo>WU%LH<;LM_b*Q^tLOE4=X*u_ z567?ulsjy9$FQ*LHxv4LTOmP`$W@c#d0C5VuM>L2ALG=n2?-ipuz52ND3>w^U{6Z%E~G>KK`!nvIO=(G7AVf{Q5VzY>|F; zEwtXbY6uCI*mq3{3@mwX()>D=M{n_r3D%1M*S!;qEO$`T07ek~%5JCQi&h{4ju%z^ zU6B28_tH5tCp|m6Ej2m*Nr?E_xof0T>sy`pZo{7L9#gYI#`X4INX8-NFY}pAirwDc z2EE@KJgewqb4%qa%l(h{9ZZ)0EiWPR;C`|0XWn<{Gfp@-aJ>3Q!1DCyIE|-4>mo~1 zQxfGJNA+gW^*ypJaV;vaZw&7x%+Nvz|HjDqJ}jN8N=t+1-O+! zpBE)X|D&j+uCC5rq!T(9A0EE@pN(l{rO|RMIjNk8`eQF;#LxAwcSTOVAbA;Ce*f`Z znW6J&Y{0pda`TU>kETtfeNMYR`NWOT0AeWp{_M}aYRfmr$4?U^e8jpDQaBHJUyYi* zxd7;?o*;~6^aefeb|Y(gYO0R7`F5?Fh2IBGeWomb{u&3X)45iFe44d+U}Lp4=c1}D z-O)B-I_9VVo0%VM)BGse{9+J>hN&xK7L5#5|4C7L~#oLGO)0q*Jfb& z;o)MlbY%Ynk zdHe=1)Zs?>Qvb59Qrm%=Bxiy*X;0=Mp!mAFfAm4B;s zSJbBs=i~GgM`+#xF&Xfq4iEP$kJBPQKsJmW9~}*ewLXs{@E~DfU9D*|JuxAiH)62# z+}eI|M6PsNAdZGN7WF(q4^v+vk{5K}u=NCwErD?E<;4xV z>)$}ti$Jc7F%%trMHK!51Iz56_$DBX)D$#Kc2{cv;xnc>NlQ!h_A~lgr#=ktG&sR) zqaL0bt3>D2_;9F=9kL7?yX|^y$_^0_@_8TZRsHd|qL#alY?;^7C~MO;MwW$e8>8P(O4&mR`jit`ff5~cL^wzl@laWFF4 z_)Jis`F;Lehe9|r?4`T@8?B+CA#n5)SWGAgYNCXokvtDdacbcmjEVUA(q8Ri{p*m- z(U}!EFT$TMo_JkxVDz0*+tsz5iLp#h@XdNgQ@xU1W1e+fN^8gC}QhCLHLXj0}CqVnahyO_dE6B z>AZy)E`Mz~)Nt4NmWDWgU1vQ&!0;*GsBux}Q-JjL{d=@!c0cQlspjql z6}A?C?Id~lhiXO0uXO>jfiG;=|uayK= z6%Kh}Z0yy-?cNlB_hGiU`u*gD*e4`Bw=S3MT4P@$X-+p|? 
[GIT binary patch data omitted: base85-encoded binary file contents from a later patch in the series, not human-readable]
z-~Dw|REH@g^_^3P;J>~~1{>Xqjfh7ReyIP)cge{z!ATPnHEHt^WcAmh z$SqGSwww#P4@^=^su#nRiDwrYm#;1Z&%uGf^KaM;5rd?Vv?G5LFA!%Jad4Vw)sI#U zhLx*rG^WZ^0zTMXLJlQ5Z2cCy&cEM;9lGxCQuOJ7G~##E){YCh=D8ZHVoe?SiJx@JIEyG}$1vAKNA;b!73)*FmVhWz_hQ=gY| zQt&byd$?v!YnZS2356#=ADX~1)3-EiHhcz62mr(5aZzy#{ke0mSS$qeB21^gPCH!m z(ru*N2>rK6al{h9x}V)6z?XOn)i-zLGjZ6ymDk!ytLgBFQW0Gp;&V9&=)4ggcnw4h zxCF#Ek&7b{Izhn}cg#P>=gV1gO)`Crn8APwG3u?0!!Z@6a`RjoFJbcdz7kCBO|%ue za)N>agWd%OJ>cm64ns(jYMlEffI}r|`+MX67_2U=xZQ4gKEg5*(`0sZ=gyt<4c>;@ zh5sJB@dM$Efrmuv@zU0$Q-AZe@G@Kve;q+ZiSlwaJ!SEEBFNrZysOjFY<;9ioK@r2 z*5b?YiUYHk@mGF_{UL^c_{69L@1sdzma#o0fTbtj^46iUf>dH>;yXJg2|FDPuUB^< zG_X*aL3yJR&t|(`m2?7BohXVB3Go+p&W;n_hr(MrGk}#H5OGF)jN!5A4N3jcKS1_& zy7uI~XohQT9o$W3(r2g$Y-fNtWaOtS>FC(*wI!`!_KAa68Z_)M^hsg14@xd}a+28$ z(+2k?RWXb18#WHGD+eTmJ2SB|67YUgl$U=*2C2yTx-!;7i3Dh_o4;-vKtTW#TWEEH zgJrSj%+dXSZmtd1sX7yc5)cqOE*{ejgyg?{D;n1*1ND=+xj7KkBR@p@?*Cpj4}Zue zEUnDJ*Yszyhs%^tR=^8-fA38WpX^_^9X7GhwP4nYHSI6f42XggeRw;F%1s3H1HJB(ybky0>`YuV`Hw~(KFEvKGVS5aeF-a z`y{cg8?!yVEi=j*K}qjFcq|?l!MaA&_J5xO1PREX-T5G^BWp}!gZpt}!<+28+b-xC zZ(Sb{K2TCpI-aS;!@WBaHi_OIN^EUop{=IoFuklLZ+o+rpHDjB4aRej4eH64uDAdg zKQF`s_zv2f1xS>^{Wtxc_CxiLAXuTK+V-6bNWtSG8u&UroK-TEd{s=u{gQam9?`Jo ztl6CdMA^2GKgOlxTf8nWBE3r+-J`wnDZfUz1=jiahmWG|sGsA*39o$Ij_U zlnH}~^Ey;xi6^^2WDzvArl;fc!TePbKKl~+$^8O`Lz@6=9%#1QpebY62 z;jjs`iRw?U=pjD_$IQmD!hr@yug%ieHTd``z1ODf5VV3XA5PX~l$OpnM?^v9?}@Xu z{Ck_Or~DZMZ6Uj}pZD3pUwPJ#B4R+gG$AM`zxjQCW;9EmK=JM4IX;6+WOclOj*V9j zE*2bzuRGbq0l!Uk`$zHy03u>xDPBA4i?Mtv@Vxv#uHG^#%dTA)z7PpfKuKw&q>=7W zLApewLqIyD8$?7(KtMpcQ$o5V6lsv|QV{9x-luE5Yp?x%V>tZ5^9VOr%z4fugX|6< zh+WzZx6!S2>usOAnsJLW%T)^3Q15Kp@bK|*{+_e*`9|VshRJn+I0BC@<0IoM9k+g0 z?7#P~ug<(L3OEq_+-J<#-lLE`UKSdEtOoV6rXO^!_Tq{`0x9k|`QuR6iXmFiyXzRf zG~0M&zEZO1aj=YzfZK2FojRzaF>q4%>)l;DIUF(S4j;Ee>Wu|h&G@&b#T_pl009Qe zN%{r5ck(|;Hq5=AIOGdV9b2=dH)i{A^Lo2d%1FNozOyw&D%D*)Qaqwf!4CB&f47(dLPP@K z=y8XV#QX2pt&SJ^#?l^Z=2juIdS%;7fyyao`VE5Ky8;}2env_OkX+~XAraofW%?Mo zWLQ2KRd!`{i>Eg!$hgbP;FxA-Wn*D$N#kKPR2Kt&Z&;STf-VW@Q;-p5UU+*r8#GyJ z+1SjKMw?Yu@@lXdygf`zxND<(HU+wT9IB)IqI^+j6OPmFjN%**x2;qdmH>~GRqhk1 zaK#AG4JvhRv1C&W1JMX{9aO(;VZQ08R?$?SdC~RqCOzG!cBSN)B?tb(6YPzxiJ2EG z8SY1vVj!Ne8{O};xs=L$nm@%?R9f-iNYHZ!;4lFHlSCRVNM}H+8HhvabUK|4r%Syv zVGg~0?nyi2m$l}X)@*EHkWz=*Toq;o!A}1Kl2nuRwDPa*FDg^V`xKRmv%iUtVm4%T!Vu`@($jyY1QW|r(B6w)LFnyH+&cP(uS4}*QWx>@?)xL6ZeEgK#M z$ZMqMOMn_{s@k!;*gFv1;+Stbes+qw?6M-{0v@ZUt!i)x1WCPJ3GAMV3yDSx_#yK0 z7W+AdFI8mjB3>e(uvXa`O%B|-AqyF%6U=XJUjwL$gm>DiJ6!ZPRb#{$N6KXR731oa z@-7T0ueQRy10ysbOD6AGgdb|v)-(nJGH93pee`RR`iSXrvy#P@gdyIFG?d`>T@ja$ zt`2eq=F_(iOIl)$WFN#Q>)rfse0RNlF#T>A1a7L7JTyPkGnA1rKPD!QiHr~9I|gZX z8l!)tUt>YY;5%N$h;X1EpebU$yY`poMKyiXzUw9h9_h~U#La9}}T0Ufyik2{eBQ3$mU zpPVCnb3BEdJZD`&!N;iHNM2D- z|6akV0rSmm?{qj6g7PIQz;Qr6ys!{3o#=0QcIMQWk1FKu7*OBF-t{;+-RS?eIp(d5 zf!>6RdFzg%5U9q+hE<4XN+ANlxk&lL(FAvztXf(Hypzhf=(P&^i!!j{ z^oH80qEe*?bJA5f{w~QMVuBDZPU$D@8L~EzPz0Kdd-*F2AaKndFt3}`hL!00y`JMU z`EM8qdNR(xS{xv^r07F~(}I(loNOja_R#ANZ{EQ6-Cd_oILYo%`v6Nd_Kwrq)_WXU z*};AZ<(|~`;~#%O1CLvM2}8*8rukfd|LeazBqYl~&Bt^nwDB|T5;!?kOcoLz+NBro z+C&c+{x_9(^#q(KU2@fYY=uGa+Ux8tic!Q}V@3z6uQ0TD>d?MgdURhl! 
zR$Gi7bQ12}Kdcao> zmJ$Ki-Tt{sG(6jKMmISDo3%LR#f~uOwI74zRyPu{>?Q{Q7IpptP=c~=lqPh(p{hN-h_(_ zVI+75Tpw=D%$&~EUgb&i*NdJ-09!Y>s~w~yhqbzvSbYks-M)>~#lF4jB4Nys>D6Fm zXl+B$$a|)R`{3KQx{NwK^@yTV8@k%@Ch)BWd8v2!Mw8pgDV&f1(M=q?5?#)#RO7dk z6t>G%wwxIGm{SblgB_!T(_Wi5QbjyLF*rH%?dR<1>GrI+atf>i2k3P#QX1W2QVoJ| z>g~^b9OM#?GrUTUd%^cVI6?+|fL_xC&#Pd%&VsIjzZA2P^>@zFAN+$n$us{m8ECJn zyl1%2f0bA)nfT$8k*(#q80)o$XV0G!Eb?5?VFhI5r^iAM=kZE6h-Ur`vR0=iz(CPq z4&*t24-iRH%{QXFrsLV&HpwMO-@7%chp2p5V%{Z?#C4&f{5&~Rmqj2b_;apE?Kv|u z>_>3k=vY|%^B1&)eF*j3p_G>-Zf68SOAD@4v1LDDVI-%m(osAaimx#g*Ee~Kp!#^Z~CJ$idN|Mc|qZI%hF zd_0E_0UT9SL<>ta-xq(2(HAep2r2)m6n+qJ*|hZ{E-V@sPkC5|vS0MdW07 z=X3X%ExW;2h$y{k{yO*e9eJ9FjDj2ph(JRNnx?!=#%EXwsN>qw z_9icxo`HGO30Q2{f2)g=xOw?Nd?G6&2LZeUGo^r3JC$l1+nPSY3NU|uCs_&N=80z7 zijswd`IXfkQ`nG^ELToQ-3abvX7180*45GyM8>DI;e}2;YKTpV1Z}gSNm>L0Iedj5 z+NQJ3B-Xk^=o#n}09|8SC`B+CW@OqnVS{0DP-ffnWMK%W|ijS6wEFtAk~X|*XAop$Ta6L zVY;4(7xjqnrT+ht$-mG0uYNQ+UPS^D4_bcb3f>t#JtfKsca2qiqDi-romn#^x5RXn_)4MsdUR3X*RqLf{ ztKS$L>GzR$(MxB-} zX`$NyMkyZN;kF3M;@r;{zXrGwMle0Hu_cNcmjy?D1787@ELiZ6B7hlsCwuHWucnuL zrtOP)JhiLKj(pw0B^5V)3!fSyCcAd0Mz=BKydSrm3v%x}RVI4~96GP=%#V;EGD@;H zBi`#*8bQF#Lk;=RrmAA;V4pQI(&Ez@(2L~wCszj4uAGi^k33UX`LGCjq|89vmDye} zR$6s_uZ8mnBlAE444pCRz)mISmi#M5^?cRG2jY)yDe`seQ=x<9g<(`=-KOYF=-^Am z15~7G>Ik4kqMl?ybHl6FCO|Sc2-)P?RbgT_8=L z$*au_%@8swmREqU0P2vq0TH3FQLIL}fMw8LmQyZg}Rq4lTaXRJLf6j zyLk&dVmG;?4SX(FMrSb0pQ>A-fwOq7t;+$FIzYYwjPjmB@K)I5(7>qVn3SsOL!HkS ziAuS!LF!HMD`X;Oznq`*cmV51-i&q!gai4JBBhcTe zzkoiUK7+4!=mWz;g5~A=j=d)yK^cV3TCwNlH0T6g4M>N`%bgvpj5H)Bt0_yvzQ(t8 zdp+<`?m$-d`2Ws{fsg-xE(-T{<<7_$@B@b0RHg6)((0(Fyv;#XVI-iZZw0!1aU%V{sw01QAXX_LtJC{k!9HZ3Xhr6wV{@b5$#7XqaPjsl}VK zBfu;+>)#IM9B|b|K8k8rb6cR2$s7QZ6`mkgKFHg>$1L<*R`nj2Q1REVkEEnJuk;=~ z#DowJ_y;e6f*BrlzqMXcoKt6Y6}dcKW9z~f(Ao;ZNhhIXT9&=GE4|Qo0^=thYs=;^ zYD7JKs;(YW$-@g8lc*>)&}R6*dF*+#I@@GG1uTDuG{yFc^l!QbVitJtG?thG6_Ol%Ogn8yoGrSV)Xz7^dKgf zpRSLCrzK4VW_FO_pdw*mk|xg-_&s$X<1OvOgDcl>@B~8^(CTWzrtJlMtbV6^fR_Qf z0MsKe7TX=>fcL|S5ku%Mt(^2y?g9ML!SRvbOiM{g$-r{Q^KxDjLS6tdm^;SEs0Kjd zeqRjP{K{?Mv+X56|WTrJ!Fr!mqe$RL}qMcQ~}Y?NSDcuBU5f|JBCKXHN&q zQ?QGHTYXXZXy>;$Bm4X<-SSFJSN(E(Q5T z#tswcjTO}we+@+U$HFXi{Pu@mF*|tv8a&8upW;m@9c>Nx;^Yi_>--?kd%A+)*L zq9(<-VS#KUB0LOVExW92cYpsDXh-2h2-OMRCzEq+ldwbOWaV^rJA#>@%+Uem^~2n5 zpW}I8C$R#uj&9@LCV~?cfWH}Pl2Yxl z8qaVj|4g)1js|&^VW>RuHU(nyj_$VlmD3!#sjalHU@_5pvz{V zaV^)<2Wy({{}Jacf#W$WJWP73nB7|{8vV!r<-W6(Qv^$T3$gAVUO^|1M;)%);jW^) zdqBm$W#C0n?ipw($k%Ng9QPi#&^8q^!U_OB8&IyoE0#RNx<19l&+9_g z-)C%irL`C5&t**j} zcem~9O{tBpP(XO9D4Y4^3U_zc!6qm2e;gX(zkM105xnEBkJ4Y~)XDwtbo9`B>~2_* z^B6|87uEExsJmNV`_|#+2pAh0;Y)S~qDvc#JEAN(9cstZ5rsOnPABtd2BkQSW)V|U z+TMN7uj<|0Yfe%`N4+8{J`&U=Q%>*3M8<9whQM8dZ{X})(d5wyvd`|Av$~f`P+D?P z5N?`%uqV0!0cN4eASPh&A23>4bTrs2e*R&}XLK1AB%|&8ZFcfL@w^RsEEARZ;7%J7 zHQ=q3VFPd9NqgL)sJGgB z!NlYeT1`h!KU$=H*e3pJ|7|?zhkXy{4_rSJM&y2RoO;)HF&s8^v?Y3+(5d?}E8CoB z($(sqNl8<3I6X5rVGH!lJ9mX#{!U8)%O$bWQ`adYKV)uj_1R>mvU}d(ug?q@z`apX zQ538ubLk5MoTY3o#g{gv>3;g5$%9XyItgVqELJTxZ`yzmaYNjv!|>?$Yzmu;Tp4^S z`o`xmAor%4X*}&r+q-X(_t|FVOI@WaRFnWZHJ0K4VCW*JQz2x>So773wi}D%WJIbw zJzqCK`)`Q#%N0IFue7nLQDH#=n6L23@t*L=+ZJRL4#P1}f*KRk0~$a2m7?fwMA3wbzig$fsPCpKfYuN%L#Z|Iv5LwN=S8@B@J3=la9z0dLj+ zc{~850SSl`X)nNtp!e_JmiZV0&_ZW+34SQO#Y+nm5ZsS|PIj4=hZ*8JpNG>Xf;|qO zOgV~fvgC7SZ)6M#xCd8>iezNvmzX*LXJ7jBSE~K|qOjG3tA1kQOZNBxr#%c@85bbZ z0p}V7Yye1h!$g|>Exi5y4cN&~JgJCOpK6c(-BCc#O!Q}yjY9FeS0|(mhu+p!dWx?k z+G$Vo;X{k0czHZ|?)B*D-UY1Sy6oVTeZVYK<*uZwh|LS8VjFKYXl6 zh=hn+65k`h`yp<7pCQ`9)QEsARnx#A;_!UDZ&W4jl#a$csXrEzP2iigcDsN>UMSq>lg&JXQZe=c@p2$$#8NIESA3Go5K z&cL32k{@>K092-+^tfuD)d-ZDsIW7b`ZmZU;~F$G<7Z^(YUJvgX@r4p^g3_cPllF0 
z2=iJ7%?#TYl1CRzPtxyLD6mp(C}X<=cJ zljRdx@$x0~1wTqp%d2^IYIt8~4FVRGY>FC^u_+@;3JQu#mR^|2(q%vxS!H%WKrKnI zLX_Bq@8inz^EW{}-~$2z z>q52z;Z^6dO4D*dJWy9Xo8T~dX)z0$m9DO>JY?@?$d@w0;8v)B6s>oDQ8rG67yY26 z`S&Y5Vb{UIL38~tMK6AQd3b{=x`-9R)D#{llJKglJ3EiO@3A(feGD`VxFtz|ZmOej zdV)h%D3P1V%BjU#m>JDHe##&VZDQjSntR9RxHn{zCMVmT#stwt5glZaVZ}a@mA!!4 zpwX1-4-0m007^d+h<=PC_~*la{<{9Av~=eU+Q#m=@8%iw<~=#>s8X~_Vh980t*EC{ zvgpu?jRN_+9H^lyT*v;BGIW3cZap+fXgAl&9nZSXj&Gc~skcETzSkOFsNYx<+K?O$ z=IAn``#M&h!ii9D2KfpgXD)YGfhHwLzwX=2Kknm`Ao{g&8uwfGtF2awToJ_c*3T{gJ z?Y-s6+M%$|ORH=DN@)^DOTaf4r|1wVn*}uuiSY?4Oa!^I3(%w9wm2$3#Mp$3hE{i$ zJv929=ux`)&s+CBg~^^hhvMmt>EKz>x*Gu*@k0!*rPbJ>L2XzrOQYBa2fAv;nK3^@ z8-2PZ`1)ICZR>`9d_z4|bRc&HmkPrTCT6Z2Va5GJ_xCdJs5)E@4+yanXQJjPkczvG z4x4cpba!k#wu`7P-C|*GLIwJ$1;QqOp9$SYlqhHHUf2p5jZ>3`)j9pvrI&gFNjJ#y zipzGRbAVj->Wa%F@J2B^D^}B2ZukZ<=35G<3JqZnW{#4MUJe8aEX{m}OOcgcoHO-s3aW9b zZ+mrje8(Dv@e-=Q5;ljcT_gIp8G|L?wEV_dpIzm#xE9(Y~h>7w6{g;d@A># zPI9eAS2~cB!rA%w+Cv#lNh>K%yU_|1?=u-PK3G7PH+BX&Uibbk)T^zqic{KK>S1JN zSM0O?_37@d0Jx)2e96c~0O?rk8`ee(q@N}PxpN`xbGD(5iHVEPPE+ss^OX1)R0J|c ztBduro_0JVbzVqVq~jv7XCKND5PDBSLO>dxvhIE{xKuJ15+c99?-a)oUo!Pj8d?!Y z+d$IDwXc|EeN&gm_j!bEGN2UL*g|lTpdA+ar>-^ke?#RN8G+44^2;qnNPoIWFx*}hkeosSkBNQ*P zA0`w1OpHHYDd$18vNC>YZSw0rVKSV$x5ER;B4x6MEY_Y2he~E}Y(Q}J)@!-!pYgxx z7OthsKv0|FAVc!W=LejFAYTC1HZdiVkODvOa&sip>b`JdQ*<%8L&v?q|NaYeOPnsp zNA|IA1gV&0lkwC-v%yj33r0o-a&edStaw%;vTW&0RTK$z^_K@B^j7>EAbv%A&C3#3 z3`*SYpBG$|l~+NZp#}4(w_Es#^7qZ_HVig4FFCzK77Ee22X@bLoLSn#NnIu`K+-r< z_oDY(X7O)TPDDjttJeLZ3k2%OYZ*E&p5+>&4WdSg4 zIgjC%;hcJSki<{4E9>13ZGG`uRf8kNq249B(+H(`Ls$%fd<`!37^;d#P;(bjwj16M zNRKD$E)_78bt5QHyV_E#r`|0tdI_O^z}U0UHPQU*OV^gV8bjEXzIvs_nHaJR*QQBK z)VG$s%0_!_<)ZoQlJ*~9IZ^sj&+U(n>KbPvMSBE&q|9f2YvRz>WZ?;Yu`Oo2P#>BG z7sJDSCNC7pWOP<~Lwt#SDfWFfEx0d<2saGW$5 z<0dv<5JM!6ijzx|4LG;vjb~erHfD^E4i#;3ScpQY#N1bS?9aP3P?kI1+Gl!Gik?+g z-wu{MrEu1;8)v_bsV1IlGxznd?2HZh#O-M5(6uCrp^F$!f4T!Jz@ss^p+7n#dtE3Kn~Nld9-rWH{X*?*`iV|1rP?}$B|S(>FqR9Z|F6X%zy2(XaEu@VpbJI<%) zgoqm%zU#`mz4%oc+{xZO|1M?_ALOmo<{sHm2zcO?mfFn!(c-7DDFT{31DypM^&(vU zdEwIV=(_-E10m1z>1-=&KC?*z>1&Z+JQi>$BbHV+9JdZDQ7$~B8@%t+-heD6NbFs% zwgpK+VUxK@RpLJwQjkH0PB2D01I;alwq|CZLIkN|DgMag{GT$k$Gbe z&zh2N{_p-Sc-e659R5znz~Gr~neSNa7|OjHvP|QHuEd`89+VMEx%USBdW~GGs#|}* zYN}$*&&mqAaT*(=POWjJKT{8HbjzW1+Kn5CQ(1Z2g8iL=(&eSZpk=6?11MwUftFFJ z0F4mQ9f=b(DamVo3N)R+{JF}?`s!VBW@ z>RfJIR1{6XBNRjm9o3#EUQ3WAj%{#w7?4f5<4@<^vQ0RcF8U%R(MA8_(>JV(Mn=^A zbzI3T?u<8bM{1G-rCzo1hmZhZ5tB~7=R_R(Cz)2z-|T8PHGVvL2nun?Cs{UAwYMPl z6#g*)f_4$gEuW#!vX47@x>9((xhNw-m1X4mI-*2TFDw@4zQZNKL`=9QezvV91l||A08$T!wX7Q{>p@R%pT4D(aN4i|2tYVG@%?m8f+*bTi7e@q>E_BK$v5&6k;{$h=g*3N`p z%^#PhiupQ|^H27P-&K>e#C-*!_^NL-S&6O!JEF@_g_7S<#F?Hh^CZOPpdXHsD}nKY z(!~|{Cx$Zr-;>xSkTT9m8wiTN>aSmS{|NF=$g%K{Dr(<+*S9c~$Mca#`BWfPIddSv zgb=xY&ey}rQS$mxI>enEZVvRW&9=bQ`?TXvVbZigUQo{BkmhRwu6POR;k_(JmS%xo zDwc(~=oqy8;$MM6PWx`JiaNn#X^_`ln)_ zC%@p6AOH+I`o{K9bi;}o1NhE2#eQA;$Ku0_ZGA?md(#3cEknAt8cX~W zy3M|5T0^YwX`)vJUJ$NRQjqcfk|DYilGqfx za(SC_56>RQXgq_YIK5h*`pz97GvI)4H|2dBfh42R5|=im8v);8;uiNjNqCZ7th4bI z2G(~)o|Dnf9YuWpt~s~#UgV5@_;x`oSYos1aRVvl%;OGw(!!5!@%^`U6&X=t=D4Qo z;O|n91<@nYN=X*?ZUk$NKMnmLLziwx^LS1YFZc(HtD7^W77*=KcJsNA6d~bDf*W~- z8Q=Bgtbs5f$v^p`NiLa~(O_3l&{Y=t2A27!+I;2`bL-RR+a1*yN46}?%okCI_jwfo z1hKSQdvFu_LvFo;e@la7!L1H zp12&oXOfc(feEfMa&8OhW?>!~c~+WAO1g^WEI{-&pA`;_~0Z0w76>n7ahU=ttRRkV?l3e5Sh#U?3DIIPG>x5DX#&$G@G8l zQJ<;M-x<$~@H*790~7=e?KIt6*iSMf>ZB2&K5=>BpL5$v;m0lO|+{ zn^z;`)nw41BK@0W*9p?%o@D<_2IfLre<|*fm26rkS-b)|0)$ny<9(HMZ@CLDsGs@R zm9=BMb4;~5f1aP&N9h(NV-sx-5Bl-jh&?mq`@eROeh5Dq%RI~GE_WK3UhdamRDJua ze;e%hpyGzYt)*6gAHMGYE7clEF9DKo0(2SNNg0m~pns|&H~f315sd0-Bsp~nt1Al_ 
zuwbJgfv8vk@7otTO?JP#m($_hVAd20bJ-ZQps-;>evOXB#l+nVXcB7$M#_^a)$LF7 zPasSf?uBSykM5MDVTGR*j+0qhXWnc6RNR+@)A~WA%=Ony_!L>ILZ*gIvmoyJx4`3U z&H(tp?^ICfz7vlhC^Vss`dWn%($l|vzlVaP_HCtQ23R9EiNReW?6WTOV^d4(lk>*( z4aJU3qu>qHWDO4wOXO?4qqUxOV*#34pLUrcdHF9A z5);ypDVPUa@V7PfsK*5T!#yDNdtnNnB(RUzFhkd-ZORA##06$h=)>x`r5mwXUGX5= zz5!++U>$H%ze-#c&up`@l-%|E7_7Y{0k^*{f!XrCjp+*P{BT&J4aEKD;HZO<=fp9m zyGEZTQc|-0XZ*@rtoFh0>w^;ugU*~2_PS2XEC7k|g(`*nUITrlf%4M7@##_zTz0H{ zXL0<(-2CKJ%M#4qa{YrTuI$x~m1>v=2R6FYwDM&$NmDmIx|i12lx&M9 zZeGp(+Hw2zqGP-1?;aW~=6lrduXJN4w^X~0%C*+79I>1r@O@-*i{l}5ZQ|rsRTXdY zr|wz(+Bqq++2Y8B8qfCM!&p}pq-|I1Z=n)XjfFz~l&ZDlY{10C6rK+8uM4Wro&qzx zv$$5>-sg=1sd>Pcxh;(z;21yoAd@vXxEsC1jsm%ejqNez2vp9=NE&^aGe!hFcbG%m zu5Ij*JOp-K^ws{Kca_~5tU7508F&Lw$R8`|Fy^=8p#N$ym$((E?13LF1b{zpJ8Rm~r5t z^@Mx#WS3F*J9R{YArc8B>u=&mSPjYrg>&N#r5F03Vti|+bY-W3EG9S9&Hu7$9ZCuEmBY-?VLffF5rB1;!#CYFNt;rjIe8WL^9 z?RbB2(eXat*7#V;bc$y`*q>upbjULXz_ff*nIQPh;}++NsSm8ZKRY*uK6^e>AV(zI+t@yF(+Ud{ zBUN_#{D3JTX&*Y_P^I0}7T^0zTg2~4wl`}(y&MsKE2?_gWSawPCW3@_7f&;x5ZOPETEjJ*HTw6%F1$ESq)>LGOWe#^}XXpUhH4>7p7uy>~xuD-6wtFg6NM? ztYWTAhwW>}N0kUx6xyCD;(ye529nEg4~m3TW;PbU2dw7|8Ss5_QV4<&vbO@oafm|oR8MAxf5@Ev@|W%_nb}U_8hEjH zs_y%MEmG!Va&$ZKcG#_31f;GykQ}jp+AFMfUwOR7Maqhw-JqNvV1U(t=&PS;A0TsY zF(!t&tIMO#w{zTRwsmXTHA{#1EYvLgBH+)f$ALK5H*Z??W#5b~vJkUKAF{ZG7EmX| zo*TdF_wHSt3Zu2z?N|eZGD1T`ZP8&(M?t%pxFh!XBAwus0KJq{dftm5e^?tqWnlG* zHC+XYIv5GcDsVdb-lV5VWUIz2nBq@LDQiGq6MwMzP^Dw8ao~|ceCc*>jnGSadYVWT zwHAU(V~YT#+>x=pewvS^-p*Y5pV^aojJ+OMIF>oIbwCJ!{N!}ld4 zC25#hn1DifM(~UojYX3y9>NZTMhSf#xh8zTU8qnElUSF70C7ayUNY>Mux5+k1yMC<+Qb{1x)UH5Z_Sc%*LfME+( z6Bt;GP4Hx;q+oI2890UdZ;(Z@vNN46%=Zt%SWkcz-9Q)u)8fd12` zr<1Ys%A5oQ+tL(p~L-mh5Vk{&Ezo8{l8C-MxR&na6H|2K)e8)L=B}l8N1AD3A zh2;_;zCcRi)+Jqky_Xw)xI8M=7B(g#(y%E?&TTn9OObdrEPp}v;bWAxUW?am&yn$N zE(5vZ_jVJa#D%?Wi`BRaBEf{9yiWiL!XQdYTCwh2v(>+2@XLJEcJ?G!AR$m$KS(k| z7QcR_(WZhNdauuiuI?FL%m&k*q3^GGPfabs`+;mj0qMlLrHAQrKs$z~T>ZYi;1Wn! zYAm@Tm39R8N$R|Ln{Y46YYpYiOVc_92CL~mTF9@X^%THTm1pX`AXPQYWAo2|Z{4d0 z08YO!&TUTpWezE*-@j*qw7va3>1D*S70sLpJ$Tz`5_VTeMs07dQZcdjH@UdG7{F7C zJAkA9b$I2Pc&6t=1p=t1o&nvj^L|n!Vk>1(wV-b^yQ`CP>)#P~lp&)xSKpV&sI(>{ zhR%o2B3N=TsUOhjyUsL`GCqz!+wzMRE9hn?=9Q8F$q{$|dshDQ*~U`VGS6hU^z5uy z#QC{D{2DoVS)aB!r4HGkh6xQ#N?ZyWGrUDh+-q-Ke`KKqlK$+ekU2gwpoj8H2a0XT zAWUh}CnLADuKbR)Giij70R&MMlKdEIOy=;HcGrUF;95iYIGHV z^h}?6Z1%ZbRhgEuq5ppJ>pbgPvBb6`4`)RC=i|Tzg;pc`vqWYD&cdl1{qR1V-JlS> zBn36%6~n{5+_b_%9-no_Qpxv%4Mw*^BrWE++zv!&X}z6!d7XfJ2=6pv>R*9Y;<-U? z!6a=RGI{0$asS*cb#?cX0O(v-){tImN4tVG00gALl18SMg;{w)IAxmFz^_JS&?GCJ zogXBsFs_?e&HVJMfwD#5N1A*~d?@ZZOkBjq=i5Vh>F3W@H3tXqL$JX71zR6hz$0vD zXRi1kM&-Z^t12}AbJQ9#GN#SMDKLCa%M+mG)=`ZdcomZV`XwGB*WGn9G$j8SP}zc| z_O+Cvy7Fq7jVVWvX(6Rm+xb2J&6{foBad2CtPI^87nc?*PMBP28PEMnW7qo#fC1w* zxPUP?0x~xAx8*~efu_D{dU~;~N#2}N35}S+hsc=L#Zj_V*N>Et3#kt0To_OZu;?M~ zsO7i0l?BQ2+Kc$UjxN*Eao}aYXmLjLU0pVWJi7P!b3XJT!mp#9keFU(gAdT>hRzck zz2wMr6rwf!?%!i}E06s8`&A4c>tPGew$9=pMn-eK3YWuwEX=JO9b+Vryu3UD%mDEF z!XIX4aRtQzc_=Dlk>C5v?@)UHw}cKM`kT{H{3Xe^yW?80 zzWp998;7((=W8ei&!OiDB!B1#S3I4!F@RVuN-3aY3q1^O`Mp{*y}z$ecWq{RI4c<) zj=LH*Y`q*7DF)EF0H3Tb)%s)nwx=O>w3bG@`tzqxRnp8}E1yuyl+(O(ZBHrdEd97f0bf46A5Z z4c_Mw(N*)8p~I5O*xkh<#|d1IZ8e2qGS4FIQbjfL#mg5sh_fy`DLW43cm&;n_npvI zpmN;Em87NXzh-c8G)WjJYn*ngw=bHSF6QMII1LX;g~tNn!_^4m-2r3>!3t&+d^ZL! 
zULm%afE)*I2Ru*L{Q7^k$(QA44r_|JI`QuOvTcimd(}6rGN53-ci#~zfIJ9e5xRaVlMQiRtPo3RH-x0+j73BD(}b8z?% zfrl?`&NUE{)~gH~1Cr)bQt(_|^&u(2zt|_Je1L~9+@nrL9~Jo!35*noL*aUZ{FH$z zAtaDHFRrCNv#@z1#O}Hia$xnGl|;x~!ro%#9h>K5(!j{5 zy>ilt8O7Bl0VpIyUe3FvO81#~qIdpdD9?ETUAj``B%Q$#60R;L4wh6DNLc<g4$fyD-5b^J6%z;moF}V?d(mtdLh&Y zdt(-ku?X___q_9wU*EzK^5K{hEm>Q{_Yf>qNLpj#CHT5OJ+B57u(*iq^kC%%qT@;d z^ut8Gar7D6R#}+?@`=~GD}hk{3weehA&~@};y5;?)?wzk zu+AEUBTzS?L&$^u$@}2a$aQP4)ANJoJ7YlX>^nj)GaOhRI|c*h#!Xz3L5N9?!*6-~ z0o3DFWT?1ItWnGkfkDU@JEMm>7#YJkamM6k=W`oe6My|wQWBq|C z9oKtB;loVLu$j8aY7&h*i2hfs8kd&6pV<5>MmvY@PD`_yxnk88&zQ?Zudg&HRc}uF z?(fqhU$3_B=#>MdF12^H;021Us#g=*JgRnArXij96zB(AECg6_B@;=pDI@@wj_xy! zjPRa?C&^grPOX7n1V*UaeVQto6>yFOJ&%)Iwg1n{868knA%((+^xrnFZCO-zPhwH9^SeSk-dC0u=C6uIoWY9!R`Q3 zB7vmIK7&E%HF`=m^+;|t&R<&D=r51!XvLu<+D_vYCfYYZ?KbABbNBNhUmV|z-RSef z$LI%hO;#-N3L&A&}$9jAF9z$!yha;UFp1#cjs|G zet2DqRB{n`cLyR;ba5%gn|$_g%6MoZ1)O)^g4#j`-vmqikPe}al=-Pw2ZwDc2OOyL zbTm`ct1{R3v}}?JlB5~qj?>3@mxjSbKrmXQ|L)f1WlOI95#_b!+_p={pFR1E0#Mg@ zALQ(jX81^tK~)YobWc!~AwG^Mv|;HNr?pu|NXRbaC}14ewT!Wfa$D^kUI6}qd@=la zYeg+o!`iS+v%{vKuE1k5i5?4UpL$LjxlMTSbMa3tXXZPwpXfI&Vk6`KE=5bbl5v~= zhBF>!z|db-z;Mo7>_i>ToxDYH16u%I!d}7kQIwx6TSE#iK>Xq{0+mcqsBGAFJ?#V?KoTCe!uddaAMbOOi7Nuua^j~tpwF`ZZ+eGZnFUoq=4EfY+pYY zP_mN~3|L{Pg&5djc}ASpEPXHksESZ>M9Q$&&w1?cqn$Rwj=DKrhvOa!qxD6gUa4}H z(|snqU~qr$?cqL|-2o`|%+)UDqV#j|3t+ENnwjD*02Tt&aC6-8lTxlM9UeK(k27cYp1w%bqy#&U(4(Z;tvKYH8w}z(7H$0 zr#-VoRXt&M)($0u8|r9qbt11}O!@M^xWg;w+riiMlCMNL6Gm0wy*qm?uSYR9vr#cu z4(fQPgT||6MM)my_63YF5 zyRFx0UvQzK_%_+mZ`cdbgf8aejX?wgv&~WC6(b$%HP2t68YrW&MDXINn>>IIrhkG_ zc|VnvPZ(sEoJ{n>{1Hl-_b@_;{3IvZ!YE6vcSMi}{M@y#x)#4P5nFS2)3w_JZ+{)p zv66SPTwg{vI}X(l$uOGz=Y0Q(UF}_81$)lApj)9|1I=yZSI(F>&smF>kWy9UTky}kH^@?HMMiG0cXZX6X z&;E}IPys=fjQ!DQOz=WG*wa8~TictPEZ|)VM*i%*jQdBn%v^VsQ<}b;)9!D3;-)^4 zYF7e5#8;Ecju(%S*X>JolcgnjHN%U)Hebi#<|w7y#v}4$_>;#77Xey5T?Q8=fZ+y( zgU>?vx(J}FQ!Z4jY%FtSFDl~#6JrA6eN~pr<9z-#Li!$$uXxyA?6i2=)f#-Ms=&PE z?zptsV!&3Tf^K@HgDScsLvNd#Hh#ZZI^;m!;a;yOE52F_CA zHi!b-rj*q9DKQQg0fefPDIfjRV{GQ9)0%aJswtPDwF#sl0-N&lwj!_& zp9NE?y}Px#bv9mK-Z0btN7)DE+p?VU76u50$VLU4yu6l%OZ{VF=I&5oARzveO#BLa zb|$&D)+ZD=woQ~CflKS>I*Ag%$BDe&a!R(Bo2b%cY`u4pbN@%Fjc$tqKJWLUPC%Au zqnyIlqX;`ah;`)Y@b=o&oq{Fj=RK+_3cdDDV{kWhb5$5;kEl9>VuPAZ%0z5EQJd2n zrd;jjs2eUdbf4C3ZQB!tdYfD+ao~cI!Uc}TqP8;+1JDuJ4#uqyW#dr)<}g_EEmc+H zBG1&-Qw1FTx{I2;W)I*u0C`bQD~rs;>00%B(1V2BY70nxQq488%ZMoQ@) zIFcOfU4@G>ix&5m;nwCkJrBes5ddI$g%;3u@ADp@(xxz6MZCUF1G`Qs>Kzri{Q zo|M$ec&FoCzgf~KH8IrY4-YTyMDm*sKe!?m8w|bfch9azK{1?sJUgTCV|)z$?P+SC zc@L90xVE2igP&R4M)?If<&QCN1u9&H{Ul`0Kkf1ziXvb4Z`K_B3XTal*$dRKp62+q zQv=&~`j(-@E6ry;^)RW6c>Z!d!_yCxg5Hky)2qA zJ?>$ofX>S2@*9k!{o~K#S)WV1`qcex20rEO;8x&bt|6%$uN<40@Iv`Z44l`uDOqFJ z=KQ83{)gJV)WQs)DWbY4y~^ETpwVU|z#$nnFi^%tfD0wrV;B3Q_y-h_{p+f-s`f*t zF_PErHKi@j6XO1mOYXnNuOXijNICR)h>?FF{_={vMZ#lr5JRRBS_p+z*&c9_u#=g)<>pR(>-lkavB|TV z3X|d@+yfL;NLoe)tG65@Ucfup@IqeRRCpf-e_-+hGiaDQ@zw(uI?{7R7jk~nKM$Z$ zct5}Zikg8Q=Q~t?`gFS1**m&t*nSO#TKIm+*RN$daZ2AxD61XglH&94M9O$=o+U_7 z8zypTvhry&=+!&qM@7xA5{81do#Yl;#VX^1MQ#K|;likuWkGb%k77L`sVD8GW=n8v z2Fsir%4_gt(Vn^DP>3M)i;o{YRm8?`CI1*ifAen-oFmj_G=D(nxr!?0@f*=8exNhv zU)5xN9Ohf&6#GpZ+FJF1KY3jM2XV2R`}AqQvpvn*G@}`}41}A!d~&Apz{|AOMn_JsZkVa1wRo zq!AkJ=%=P;yoCUVJ+7&vt(DNUi)*>(t$Mu&4ryTGgZ-QOxBvO~|HIQ;235U&f5RIT zQ4~-@kQNa*q%@M!NOy;TD2;TNNJ~kJNK1D&(%nddba(f&{5|u(XMAynIl^Xp?d!YN zC&?if6;w79SmE3A`*%G_m)ag39si4!2DxW?z&bgolm9?k9>Vcth>@cpDYv$kH#;9L z+i^3Qt_;Qw(+_XvbP$DR`?Vm4X2C}bd?b3y#~Y!jz_j|J^dUD3?O5NRUt$$~$v3i0 z!)4{HVpFIf!%eq|jfjs)h{))8N2kdeDn&`J$$EO;JqAlJbX{B!ABXV^+S&E`vh~65 zW9FfpLYCI=rfZs+^OlxYmv2?0ec8m+80-FB7w*loFKH0-^yiD;W5MRsyU5AGQ7_S` 
zncTNBFEJBS6hZWc?Dc8Dvpy4-?mNHfb@aa+kDO>mQDWF5C+iJ~eWdd9v#hLmRW&p) z5E5Myp*yon`;N}l#KQVTv0iA3|ziP-)I{=`ulpzDoUzsSH>+ZszjbVf7%=GQ?8HxY@Csr zm4}C>tz|qD0C{X|&^D?lm<`^KM;!Ba#8ERg6jr8<3{4-kz9?X~>6jmZ2R(8l@F|bb zx$>=b@CoteJ_sT_?KMV6UV&!VSBs7b)IwP~S!@}quM*aACGkL3Y+GD(a)_CqdBuPg z0GG~^X2pm^>Mk`ep0>{bSGc$ldcCcPR-cIOuPUg-}P+ZKx1Q zY*V(r^}8_J3GGKVlc#_3239+#QH$!SzCzpoCVta74TRYW`&;qEF2JrGSE7Ova zI=Z^1+q;Q#^pA98A}FE5{l%SYAh!5j)?>b1fF*M$uMQ`o6B$AikPtc!q#r)8y|M)% z9XmVo8)e9p8`kpPUGvh03MPV$?Dx!7Fq*@4UR-QbV^>7=_yMHH-mfrIGc-yn$jCXQ zBqv}S9j|nJ8kCop$2MUF5D!Qk{>4P>RqQb?7(vnIN9jb@?(Wr7ZSi~PYdJZo!LC1U zmdV2X|4nlHG*EZdkWldzpIt5d_nS>|>#Krhr_L+baZko>NX1rV%H5ogEA3OYgG%{> z0G9_u`QTyTjix1)AJHuq^?x+1x)(QdA_Aa({_;hHOcn17l(NE03_v8!$>Y)U)6g&@ z5bOTnND3xqo93ssy`9Hi%NS1E55X|_#HppH_XP2-2j$|<_NbVROS5Pm_Bv{L0L&0~ zE>E8X2)iRa2XXW}W_8%wOwWfziv^0j-OzCr zlE8}19Sge>4w}3>Og3`ta=hhL8X@mZi_F#iQG`73o<0>~i8q#C+&%RY{{5KaE@DU@ zEt6iWX8ynHXVU#0$evxhYYc8iNTA4z>B4Vve)JUN0qO^{_5TzOOD zbouLfL+%+Rz0wHJ1Bz`3$`53>1vj~}GOop2V~8+!7vp8EQISjH=DPaA!-4?guAu@R02#BruDG+S?)HfZ|%O* zQSL9_fb@`ZjO5Z+eVJA1#aUQ09a*^mzT1Qydq3VB^x#&w-{28K)+qw=R|-@ z$zR`y3K%6hYGLDKJ|corlnK}t7UfZWCSE7ghrA^(iG@7uS0#RJGFYQG>9i!V4>4I! zAin2=jMP`cVUx9|lWSpvA7|tmlBtwT*4kPJ)~@}w@q{`$ItW;-W1C~2zy#8n$X75` z{Lrr%)+BSlIhOte?o1XRqCUF;df^!n`odt?oVtc{n_mkBLxm3AQ`w(uh+t)@R0XEf zXY1`cm0LP$O60h*Ldd}ej}Qa9nW+3BOaznesY@zT(I;a2r@mhSqgP?j#3wm_n#VaY zH6aD7kKOL`6)Q2y2sqCAseyA|*@1!fEU0<6fAcSAhW> zX?=rWl+Z!lp7ZF?6;*d?jVZO|3ZAU_=wL?f72VK5frWTP%=4~%gq%b^@Jo|QXBsct zC(h}g#PQ&jf)b8nQ2LxH7A=9CL(S1aRd6e%Xu*L8k?n6D@Td9l4 zJ*EkMYF5xSxb>U%`k_gCHqSc)Wp~Dut{R#j>%#Bl>1SV}fe$KShb>+y3M_Ezn3x;y zIRBA$_QrIE^lkeHTuUWGmFSgtkFU6&>nfGxOKAy><||rzUbtYR-3@MK@D=U8T6{BM zgcVaV9~B^sh=rBRdR5?!Y3-U}(Ya?D@u4aYiRJrDP%r+B*L`JmHQxC575^<+{hNVb zCbClndy@1g<19U*2zeFP6Zx4~AxQ77N827+~vQr_Q1m|ES z@RA;+iee|bBil(V9|zIsE&?CL3q+rgHsL&T)hyN$MxH;~7t(Do6s~iNxi~j%YG1k* z(^|LX!s@v|3G=7$dPDvAW?YrygAn`tXxf;_?-{VXlQ_Gy)CS###U6o%+fk|K@=GY6 z&d!i^AEdOtATNp)Cll9@eYlLhIm^VifcHzDT7G@S`=3POp^qSWxPQmR$s2asaeb$TSW z&s|zs83hrEgImYL;3h5=0{ZovqjOll06GEpiSIpP5YHy|&(+wJ2#E1+F-XVBw72>H zm!AnLHU?COJB@ATF0+6EguQ4)^8Ef?BBI}L`7}GP2E$Xh_+$HujQtcjlUgAx;?% zq;xWqk&L@Qa=K4ka9MhE^apvXx6PcD$MYXyPk8eQ9|KkLY0qo7tH*&&d*}aTQ}XtxW-? z#8m1F9*kA+_jq%U!Cl`nPk5nYAOxCn7=i|8FM&@P#QxQ<@s}>Xth1vBc+hHMF1Tyn zC`;q6OZ&I-%h`STaETnI2YxkzHHXroa#l8`E=IbA!*ehJj%5$_{UJaoG^9aA5R#|v zo4xV3ETPpuQ9S$X%hihRH2Qs^K&+X~l@}i{BF^Ppr7xlGjYL+%7Q4<`-3j)MbXYl} zBql`LE2ScTN%0P1iLDP0A|ezoPq#;FR59nq1O_TmCD`A`?l3lY4dntS z4-QX!yhiR~dd{Q#?gG#K9H&{KhhQ%IVpG%->u>oi8yEDU+vC7uP{gIAYmV>V!#YusR-pEE#4TyN8zf}ddr`a@!%FV;^Yu~YB&Dn<9IcZTv zf!DeD`2w#aJ=1Dau+C>(r6oPq@!nZ}tg~NM08E>mG1Uz@Nd{~)>hGB2p0#jG9J#;2@Oqs%?_9=v_jb{Uqd-E`ti+u12J^n z>Q5Nq!f-x;ooQc?fKA0rr$Itp{efRI1i%48?@w^mra#Mfi3zI)^ z!ggBUKxZ8Q>}Wo}KLRv0;BQwMdv!`=D?r>*t=!!fN-RS0ByqX{1%V`xEH3#&Fd3dU zxV(p%o9a@}=UT@1;PQXu+cX-KF%Xf_tNxkqQO*y%_ z^YUzm{yoQ$z2E<$_UiHY0iKf&>qCE~v)+o-CdlEAg{QS#N;Pq_z08F*0`A;hPWwGadUBfZCl*mWZxBJo-6#|gdUNVp7q*R&ZE(5YBauh`87PC^R+m$ zQuFM(mJzyS2+xE7$cZ$rd8t#&oQ!aV89dV zY?aHY7lXkh|&%i+I_ewp<1{w0Hg|9fY8zv_P-jGs!EA(FrQq6yOwy!2ce1P!-88ot6J?lfJksHDa0^4pqXm&18Uu9STg%w;T! 
zi#@qNwp}o$G&Jws^@A$5t*NTkp(d%ePZ_1TS8D+!CMMZncK~QSe$VKixt0(i^$Gv_ zDyl*E%`1}72yqHnS3zwVfZg_c*NPSH+ud1@$7mwdKYGFH>VH=dZn|r4K{s7g=C~g4 z#DLnsN2FCyuUT!PYltYc9q8@jfudmuQPwZQ@61K^430uUv`|qH+wtbS$-usUm*UlH z@YMrNNXqX9;PGTksU-+Oz)UVj_Eooo2gA?hs)u7m+tpU;M?$c&)mwE73#g!VkJpO@ z2Ymam(FpuRveugj@ic(8=8kF|I+jP^8YEBA=PWA&gSmt$_LsVVz?h9!y!k(b4u@*5 zkbxhC&?V)_C~)wj`qUP^Vks&wHtIzOw5L!v@h2F3IV`588gvUPn06b|+%#+7*~j!H zxaX0WLx08i)XVfVi;1F!h=ksVRS=BSf^!V1@vp!&aM_WO->Odste_w&#@!2YAJ5+c z8kwT5c#xrl0D{Jw9c62_o@q(?{q*Hs#M!rK39bg5gwes? z(efrZZ@3c5t+oT@U-UoUDs6mXHl*|Y^SXO2gIb4s z!i(fyZxD!82JKWi>OU$gK<1z#2u&X%98dXvFE7333Kk^~50g4h^!Zl&EwEStJ;JVI z8OKNPKiV9GClCt|zyi7__q{W92v323qTnbItGC9GhYKa%-?JqQ%r$u3S6up6AH?@Q zLGytOfC$R3iYhABw)of6V;+m;yf7A^KMcK#$asrQ&?JAmLH-ovrSD%2%-(I$`R!3z zel)zw=iL4a7a7hyG!O z{@{u8^A$VZ9$j=%wuAiQ@7dXA3g6z4eZ}WGi}Dz_Nu~0`*gEA)PTmz%+yN0K=>324 zte#LFnE!L7y_<0{HBA8#7hJUnPa+iPt&-mcWAsGpY<^jFnnD)KFX?Kl%N@`N)b8>( z-H%~UuUArv>&&;m=(Cs^lme(RJ7R#!ao9I!vyGcfbk&&BwwAE@_ zq~?0O@LyO!Rs_}EyMBhNOT9l)s?3r?FYyJl(86aTwS;Jv{l<#ubm{?n~`L&xsdy8^96N&d!p zxIB|t8Y(*T)(2WvR{sU=k~}3bG%z>TAIUD7P}5vr-@w6*v6!xIAMf~UZbrmfU3iW1 z_-k-59-Z32Pszl~`uyzt^W{j^kx^fC68r-)%h;=|!ptwN@6S0L zCMPvYQ;z}s0u6(HQOjf(5j`InL^bmpOF(li8fxn3ZpBqvzaA8-rfi#_q$zxl!+(8CJfE8DN)2Y8BxOHDyReUEjVh~ zHX{J$Zlrf_=4OWzh+=Zb*VeZq5+Z6$&QpLQWhRI%{`T#1nFW3pfFN6|q_v-Rq!h7V zQ25(H1`7ab5Vv*D&)0IQ6fQA3S}HsQB{%F@%zsB-S3n5GBb18+!-sHQqxJ3qFAFPj z!A$LDZ9I2q`&7AAZ|X-z6y)at!dg{kPkaO?O3e!@`WM5S{qUb{yx7kJcopC(vpJiN z>tCv3br^6-fsqS=cnStb5JQP{kIfxahqfvMqz2GGd&W|ysEyCi+v^!PXloYC=vSnS z&%}y5a6~+MG<<$tw9koWx1_UMr{h7oyST`*Wz*MJ2AvYyWwUG!gMq2-{iSGTv%l;l zg>(39F4Ee9ubL=(Ih{_(XD0tuN{(4f51N!|0g(uB+Im!puS z{e##6^2ppyy`-n?8yt^6v@ZC615f%Oj4=`xGj4JkZ9CINGgU#VMMr(aQ^y;#FAqkl zp!``peF*MIo#1-IzeX~YF>`Kvjw6Kv&?XqnQ!{Ko|cNA9Oe%;70o;>hNiv6Tf9f#S79M=pN17DWGWjUIU0>8g#O*zDKtCf+Htu@c~*)L8m zD}JQs#B$Al?hZ*i_RC`!Um>Yf|Cy6}v?~lhEeH+4@j)i_sLDj1vP&Ux&@1 z!OlUw$t@J@CQ#<~Dh^!fd!YJuYh<=Jk*SPE|D}I#p_0jhV;*0|qgX)Kh_iorF3b-VmZ& z8T?3Rd|ybJ>R?pEJx=~(gr&%Q`*aHZk*MfWWzOl&$4+RD*%UR zq*##peaUM|N=hg-d6>wIWWEIo^BQ&mjXd_(Z1iBU_ifc`a5m0q{D6>?mseNNSXo{V zSx`AZ!Cf{B39V2qp+qWYr_)Wm2hxxts|V^6RnQ6pRkS*rO(Puo)ZOC3I80%{+EVz)ea|z$Kjptr3pE#?W~|e=7GtCAVW-h+smaDybHFY*<$yX z3+7hVwn>AT@D3O*F9VZaSWz-)#Og)=UC-c1*{E!CVPObv1~y9qD4oNKP~@G~cCp(< zMpW$b2`{$3A5fR_6{f)r$c7xZCner-aar*}C2!OMI;jqfI(qt^|K0yB;Kj}kt&ja3 zlo8_3%|Acl#z{*{AI=>4n-pE9U-NhWO!TJynfe5r+bn@^uxQXwU!+ zT?j-nv9RECK1(W;Zw_;npcwiw&4ahP+Y!Kbc7?|7H}yO%c$sQO&gpdq9Q;xPZYnWU z8yhGp9QtQ7s;lY$!Old*k=VWHiqI$XE; z>T}A%5c9{6?N5a60oy1mTd^Wd2ny-)-+3b@(hAll_7*=K?)-yWQY`QSvf;BZJ*_o) zx9l!L&rsRQDl5ZJGpBI{K4tnvUwY@QIH#fK>}Y3@?Vsslxy%B0ZDV0+Vd00aoEiiV z{<7;X|1Cfso*C{TWXULtjjgd6O`pcBZl)y&gMN48N)Y)6=M5jrOByZh9|;LWAh??K z_}br1`>C)Z;k9`RQZeS;GL3}T$R>T}o?fXU!{0St8M;E2+?s?9zI;6Oe+^ty#Fb{s zZ2UwHKj`OR2a6sb*op;Fg!WgI7o%aI-SzaH)p2!4(UtRRo>!sQZp6D!-PT4tRl&x_ zBoNiv*%=xvNlkCSVM$Q<&Z075+rf434#7DZEiEMIrzuRN_^ZE6U$!;PVMrzxr$}ii zkd=R%Rao3L)Z}`6;FGl;_sEo%)f-2`&_u)6yUsj@Uo`KjWZIbJ^pk*WI21vhg@v!g zQs)NRy9a;Y2x*Z=Sj^xy)2CV`g4YBao^EHmsIlMLxaE~**gX2HqC<- zM`uhg9&H-IKM&lKuI?gM#_F&AgvcIq**yXc<+541oZ9;C*y9Pz=;$I5a``te_(?4p zZCFZFym}4)_vX3_z!w|n8}h7e3t6Tmg~^NDrv3<%d-cvsq$_wd_y(=^3x0)<&sJAg zwx~P$HG%V;H*eMB;H|6-Pn5mWzsjiladin!Q1sIkI!d7|CHPPHCgz`VQWmhXlqO5x zzF@?3Ku}ZDsdV7de`mM1vC|vKi)VX~i50--Dg_wELz4;pLrdDVKYtJd6pe`z>Cj{E zaTY75r7$^=@%!ra{7yat;@wBn#oaMEvd3-iUq9MZCQMe$RN9|A80${YXw~w}IJtVk zo82^R){*2+izk-EQ7KH`-PLDjZ9PI&f{MV#{vP4&X5lViW$Xk`bCD-3L6TjZTJ_#_ z34ZaWuKLC3K>=Tc(-o**W=u{_+U^boydh(KV59^GQ$fYT*qF)iD)!qDGaeolYt}8! 
zYG+*Hb>k{dm?p>!UXW1ddJsJ-(k!!R2NzpaqM* z)5*}*dIW~p&!(>!Qqs~pBDzF8*2u+3NC+x1zv1>1dbR3{=Jqcdx;>Z{Iw3qx>@*kU z3h5Wcxn9@K5a*z0?RYo3c=uHLKjUYpAwLiZM%mLvKl_T!A=NyU-D@{RYY8rE;$l6f zf}i|+ZYP3w>{;PHdfM|~m)8jAqQun55B%xDarhXBF~*-?dF*!Dwl5I_$=dPi!&Jwl z8mU*`SuG;{YNk?_O&h+i=H-0!m?~CGjF_J8+3Gt!yyF-8lEnr2|Fi&?G<6W?AOwGf z)nVTHD5I!o@R~5@SC9W20VZjq(n}6TbbR*c=4HE`5eCV5dM3CP_4PCWMQ_!QP)J|5n;?o-g)h93Fi~7H+=CjaAsFq`YoVoDPT( zKQijnxm8du7>xK}YkJK2RX@QMpp4+1)(YL48%);pNYlsX zaCRKeOm1%eg)i4Unomzsa0dkN!<4~g<;7Y>_8?+Ucnt}%?E3pOw#`9e98^*X3E@%A z`pgUouetQMB8QmEvjW~8MGa$T^mcYL^0=AZc9!7H)!jN?jMtlT?EK(y*}rIwh zI%GIe8L8q$4IvNK9P1+nxi7m}&wE5AuAK!WUK@G%v@h^NtS70&9u zVRrrY3A>3}Nn7xTUfsZ(A6!g5e|*bKuA_I^3zgrM;7XD^pOAW`(`j@1NhCuw!xLyl z-|&u)5&N2OG4nT)N!B7Xk*g*;pdG#jH5J!O6OxFvnbhtuFE=sS`|XSS4*7|?RrZIM zq+E2f(&+pehmvIN~=FuIpJp9@le|BjEsvFl6W zyk5jdsdz|Py_i~=tFTa$rodVS{ptG~vp6jfuW@LT&1faOz})%X@cy{{O!zuPq+29j zrcQ3*K;*Cms}mH`zohQl*cNKme^|84iTSX)0*dE(sU@##zZVZ2n`$S9FEG&wnQY%h zAR164$LO?Q`L(aU*nBO;EHrtyHqj=2MP!z>i9ZcH~E?O*L*SV_2y zFW(kzX!vR+7wSBcN^+bj#(TpIp=d#Y64%$RX69-jP=omNU&DE+j0qPL$Jy;37*If1 z_vxJAqRy^4`%fNn!(|8NITzcJMd7E}M@N?%$MF~Tw7MuHesCu&?xU2mvj@EWFV=gl zLJmUROy}mKBv-Kgzl2%^tTKw|xyN>onf)N$>r|S>BF*;o`L^S|BUEAt4b|S5tZH7`Hvq+E z4ArO7itekHmry<4PH3h8Pv};v0$=~zhx@G=PBsGqT!m(hzRnbwG#2F5H zUz;!6$F|64ij+x`tOTQ-gEzf3n158F0h>a@FbViizp!Xv!}gi_Ydh6}QlmeP_GdVz zwB5|?@ozH>y14UHXt73P1IfGuFv5r!c3X2eV0qyGbmkZ{aI(H0C(a$ME4OB3DP{&R zX4puUP)B;s;G=uQLCWW1MZfO^n!eMkw_bPcrMi?dc=19^>~e$v^&P~u+tKHxqX#`3 z=)jARVvbYVc!a9m*^)VlsIF;BrnK2C9-2ibe??orFoI zkE^mZ@)YD%xp--!iuJY+;NkTGZy9FKfjraPVLPsw3Jte?lcQh%1MNm@#KxdbHl+o! z4DWgG%n%YrAW>Cf9K=gE2k@b8w)+gquU zJ#>$|&S{vLOBS&sHmx&qf^6U!02@0kHwR!Sm0u{pO^JY!b$5^r=bv{Qeh0C+>Z!)z z>jJjtEVQUL-CLt|EUt|Ar3DoN6X+i<79X4(7-8+XMI+_OoMz|;rg}SIWyuh*3iuSJ zmDcuB)iV$TPnNiRv=yGCXlzJ!xC6DRv602*t9(9MPz=EIoM~csnD`Kxn3P@r_w*-3 zVToZD8$nC@M{~n}^X;u_-WQ;9hT(*mi(H1iJ9}}?en5fh*@kp;aiiYLmkvvFjCeW` z170wSC^iC7=VA+a7>IY4mV}9NLIj~iu|<vJfUb3d)SOS|>0t;kB$B(i@ddMn~(Ww>mXzS)>08zE0Usnb?e+uFM2Ok=;I-gYy}U0Xt1)13$@HF zf7FDf#F||8r=*k&+z_Zx)A#_Pq7sdGldj-O=uC+5N-yaY-W&sdO0wo0dvncA*%ZtF zTGN;(px+Pmc$avxNL%SBPeV0B=FvmK*GG&;n!GpCYqsx<^-fAm|No2Se~&(yu+B7a zy=igT{F@1>LbjgqRo>p3+@Ey5x;#@|uwcw1p4jivOeW)hMV^#^8yYaHmF*g&cYB_E{*wt3sTf;hw4Bs(WSN}q7d#M^ z^{w`*Nm&w)f+9UO?)xmb$C&?hdkb&XY~AfR zbD?@q{HZoq872nWSEs|-J#UbF7pT|ob?gtvT7rN|Z@BFSXq90j)&iY|eDYQK%I?7>t+N&LVy4HG4!ok>ilE_CA$EtgGZ0tmtrW40KGu>%PO70>BU|wRB*0k8g%j zrfR4s_m<&cONQT7(_tqkHy0TZCE>Ps;`V|W$ZkQp@EvZPxHzb4x=ZdQOkrJB`?Y85 zlysp|izKF-mW~Fpz+LOfXu0ut^Hs7A4<6ba9N2Ls2$R>jFUlCdzJ&7?`m;XvmHo(e z4d#)PT7ApZX%Rq>C*|x4S++0{#5O<{Pwzj8SsH>YhSu8VkV!D?+t5Miet_uvVmY<* z=j?%^Lu@Usb7EAuL03<4`G{;Np z4~wkW^qNplHtlW{lte$2C!lZMq-LmXW@ifzR_w&Z#|;^c#B%In62127l71`A_|ZmM zT3WZQ7W(9^v;?sNguD8B$BbA<^{wn>NJJ&f@ip>J)`Re222GH0SZav_CQDgG`NsLz1@! zuj3FB9(7X3mK!5fH_gCE-8-_!T|v*wF6+iLjY zJ_;_~J@4ja^J_O1fT0yS?TV=l9KRFTo+7|=7n6)hw-}X z6Ifvadt2vp`g^ZZ^*-$`kKf(jseXkr_gD z>9JN<@IL*6ewg9yC@v0cUWRX?{125!3zC#*#d>Su{@m~1C#tKLHuEvsT>Y(oX(_a` zx?CL@Lj19p_!~MNK|Moz8HSBR4G6A<42NKVMkGIqV#MIZgei$R<)bz=5RLs4q&Zei zOuX{o$!YK;{knOi*tq!cOA`(G`elB5!{bs7bK_^4O7!e3EY}&`L+iH2pTAQ1^bg;C zM)Y9Jw~hRXd)!Z)E+50uWsQVriIf{h3p*s)u8%?^h}G63q>voZ%Jam50lMk_JH+*x_8r zF0{!WN(9Ptc><)%W-`jkY(Dx@WLFPD!$`9fh;<_YV2^1>CB`SbMJ`$7CmvgGc;Lc4 z1u7Pc$@-e&VwyDRf$z&f&PRkQvKM>1cUtW(Os+tvh4HiM({f*(7=KNOU2EV=kX2Yi z^)r7B`SF?V8*p`9oUXsNIFRa{BQRdOrN3FOl%c>Xaxt1pOiw-k&nf{H%fP_zi$us? 
zjrv2kk>!RvO}M)Ti+>6<6lg!KFro)2O{R~%L-;#cUnVk`F4R41|0ZkT;f{Y@S|vT~ zkwR9ca$S3-Z1CZQY2aw+s_))Zd|tP1UgD^2SH8>~ro>srySCKx%Z1RZKlZIdDu=fT zSRO<`4Omwp+ZRqzDf^yP^7Lw}dSnqi z+&PCsJ~rm&u@9qjvs4GJQU)^Fq%P*ly{}*HZHYYkg^vsoLetMF9CvcFeo<1|Ik}#N zAte(F)e<`1Y}sEWydsZ?Vfb;#h!gp|gDcu~@~fG%$6nbz6_Qa2rUd&nuD$7+1}ZfzV_R^Cfs8ge>~gvGX3P5yq@ECmy^T`~04Y9~q%aZE1d+ zmjCByuK!CShW^%x{jGg(G#~w?@hpj`Y(O@nB+=Isg1-izY>nSX9LD6{Fk^HwBGDXs zxT>Gs=EHNAb;@Cz-)-kxT%0kmfht>5&R3>k-MHAl`fg|rkBUbLX=%QA@O3A8o)%fb z5pr@ufd(MwD^4&IRb1Cs6lSLBSY2&4_4IsE<6zjnWg38z%otnepJ z@-1`-2x%GIutn#Yft$xwuvj$Jcx~Izpc&Z{q+mV@h9)NZCMJTZe_ZX>ZL7C_@C2)D z8d-kS7rf({O(Xv-UWhXMhMArVIi?+M$i~Heq+nP2p59|dC|Mwgy(v`7tC=yd`tmSlpN;MnZO;=g>${0G?+PfiDM$d%zrH8M1K+BG2J|Lt=t*^ZDWWX zL*N2+D-L&J{j-#Zvyh8o4URj3kNAH2B|eFBp3G(Tl5~1XcAWv?G@ciEF^#pt`8wA? znVnH@(jOwa_Toh~r4LbpO4Kwv^M77uFf+!*Og85QF%^mOC^#C!cNOL-N-!w&DME>c zl2aG$3mLM4`&LGIL)i~SMCfRS2YZmT>GK#|6i-M(n=2g543trAC`dwe)wI6-3}2pU zWZ`8YM8H#7t*3t`Dn=JFGQPC@+R+64Dlrf5G;@g!37>ntQ~S@AUV^E@*eWL!iKwp8 ziNW!4wba4)%S%X}vUg-z@qlOeFG|8I30b*$_Rqa4Dx9ad4bImxsiOyH$hMuoXP*xD zo3MUgiez>785o-8A#23+Q64oZi<_vr%QM?OTvwRM$;~m>IXK(Bh#CGy%7_(_G-y<2 z!thjb0V@K>+9xhfz2p~COZB;a=I92Toot$LZ*T28vY2kW_{oLLY`luea;$p?0~}Af z7^%O};^g;*IP%Q9nf=g8{$yj-w%~^kH&bs{r*h7Sdp%!|tUX#&7Y)0gsj*(4J~MDT znfdt^v!z8ZfkL{o7_x7 zR3(;<)VijN;*{v4hDo7ZWqx{R`R90(@#82HC@G&gu`I0Y0t(9duRpiD+mdoT9IJ7{ zG?!MBRu3*x;iq+0PsgeH&dpI_oV{!IGEBLa#;G}lNpqr1d?lTR70s-5*ociO-Lm_o znG1H!yGxb1JdZLpsExj1Uda>95wZCqk<7{c+SLLtH1T^h5(3`5N6rb6Ta>IC0(wmG z&GgiC3fW53pV54edbSR%%ZrMvA`l*E=^r}DvF>*SuI5<1qEIl_a>WW3HOKmU`|?^J zwrubt{`>#hZXhI69b)~^(uaCS)#;v6QKo;72fA&q{VD^h1_bp33$wc>yN2e*#BOUc zQv_pweD8bD{GlrCEHn`Q$RitFgo|^3bti^DPpGXlUVa%!l=cOUv zd+D~_vYqkw@;O&w5_bWOk;ZicN0_ssX zLK8VmHDEsZhkwum2l8US=oQCQo!bid(Xc%+BgI+5k1bu_jxyEtTzNn*%q-OA@j5i+Y*eZhrf!>4peJZdu7w?|^W`o3;r3cAnFZXUL8Dp{Of- z>bZBw7T>bO%S@2eR6Hn=*BX~>PMza08PBV|MErff@%L2Br>{%3!Q8P%I&~Ns@kESs z>~j3?y=$bT1#&zan12%avfUvWNl`TM`kq@Z#Fd zDtC>ou;Fn7~L9X+DfY@l(1#5RPd5~XIX>rDF@0XaW&91)W#WP*4@%hQv|q6C?Y zAg5~`nuz@~WWuL5`+;__zhL{FIyzT&;-V=LVf{^T!AuT|0i2}%@-Gu2SGy=v(eE*0 zyw*)t-zpo0$~4ds z4_ACoH)rnTJRjnSyOQ6;Ib6A}mYy1%MnXWJGVtv7)>k3kQqCVvQVdP+Cd%Agxw(~_ zsrld(K(X$0r8(XL>RnIZYNZo-Fs}J*x0YZfK3x4WS8GGsdqgrL7(@TVv@$nyP1EIs zv~}%jr6-oh@r^FR@p|RjH;=o<*FH=F^0JYFMR{GW$h#lj6P zhx!*Wwz=^*B+?=E)~6HmL-M+7Wq1BtBm+Rki(^bHYUi?s*KLztV0Xj|{^>F1Bko*n zabY1>>`Ri85-**)!|2C(?~W&2*iO|i;@;FxpN{y)V`nStBP#i+vx7{j?u^PAE2Ub> z*2g{*X?Zfj75sW)>wT!V8?Os;_p2_b@uv%HRF4Nak`o;GOP z-tbL&3L4_q-LEy@oV3aCQg9ig5@#2?l2SIXuzqF^Wycp0@Z-Do7E;nO{bx6LP`1YL zT?Iy_?Ww9~!5twpsQELM-m}u2L>2%i0-<%0KifXsOwY%3Zs4k~qvI8k>MgD$q?Yx^ zNF{IT3a0Ig;gON|V}IuAHz`5i#%HiY-V&Bc%8!R=(eM16o)e*9a`9(;+{mgWw=ZGx zPU9r{6hMP;wej08Jksyk0)iGA5s^F@Gu2AX&`$(5Hx^1iUW!UeK17I%OQ;vVXDbeS zW2yHDvM1qu(*2kBAVyEmqTKje!eg$VmYu7d={z-rlIgWw>-XUCyC0d?2>&pgp0m;0 zcu=3%{~vV(bSUUn61Hy@iP7&7k)R^ZWU1^(EFibjuw5sxXz<6gh}HfF?ME1(8PA(~ zTl{}o0B$SCP+g&24M>W7on{;%%4jePcF`q;k>Iq*QjG1%k&D<%MbJc^14V~%Qobrf z#8c=3_1lBHHJC}5zqwCI>zuFqx`(ejXEK>y_$sLc$Q8C)mSgzc@m$k%0ARcAEB?XJ z>@ma-f(`>-faP56;@vzdL!ld=R;W_zQszkm|1Wih~G|l46;wimj_bU)Y3w|!3deUwEoAO6fN+sF+hLhOBHz3aIw z@DQs>$A=?MX;VNcxMG*0^a z%yYpgsWaDA)1Nkk-K>A&FuR}hm*u$u7Kh?x!w8m18?k^D|pAL>Nz6NsH2p9Os?dtGA=FmV*)tM4#0&xZx$(w0Dgk zG_Pf3IFnTWa3ao`pKn=2#eN|~^x3B*uJ(Qs7Kqc9afYOd;2F` zR*wAv%`Uze8-5Ji{5a=Kd`z6wd$91&yreM-#MY* zrYe{J8q1q!S+Mi3xQRMj^j#1^UuycpRol&>CkQ|oyckf-(l2BznhDJG^jNvDHj)(!1vQrYCi8{CDz2|oje+P;bFGQdmJd}vN!}l0fzu=CT zjFM^)NDrKE$efRhQLqBGAS29IGy9Lx(*ia4R-!#W?GL-+PRuo_GJcVATVWuSXhv7I zT|n6t(ra3v%L9RWz>$AGN<;jeUqM6oh5W?1PXjNb;+=(KyS+Jj%u5^<@W!cSDI{=J 
z@BV)@T?16!Z@hoQ(pqiHURuk`u4UV{?ONWJwQSqAwrrcrwtb)fz2~0uo-$bi(g;wGS59^gfZ%UTHw4hb*KA%?P!o1X$R)2!Jvf7zX3DFvAZ#rK=(QQVx6-n1K;fHkbZR}6N%6x<@ zkG2pcIrR1ni?i3*$rjKW#2GCN;jTc%30+q@#aN;n+Q^#gFI|1xC3qBQ0H-RP^4^`> z4&C9fgTo0y<`!mGg&$2(>fi@LtH1F~?qA|NTEJ^QmdH?q*wH@Din?$^j)GQ(qAmiM z_jK9GdnV_hi?rKA-`O@OQwmSv%6>^q)gpfUS@sNS0rJZ!u%0T8Zo1l8;SB}z#eIi@ zAh2^o*i$%8P8yrEI*We4t+?VTgK5+~!-imT>qcoNb+pbv_i*cw2Aoc9B+OOYW+St; zFX0LnY&#)%RY#{Y>OD!J&$l&d5((t`WUnLplP<;kIF%X zi-VKJQEl|s%4vV}SyXv>ER7vdZoKg@FDGaAKxBni16!d|$j>%yEdhn^CiQ?9 z=r0gFNdzp$j)cQXLN7PDR4Kv;)U|0NK>x3$NxRAAXCGFr&CJHZ{U*wdi)Wi1CV-VB zB*ZO=KH_`bUx7h^fwbK$%{6ZMR{Xk$dM<%JLakX31+v_FnOR$k50Oc0ON@#`TdZ2S z3}gd{M;tf;qBlb9E{5L}NwK1?z`|hY}G52in482i$aB-;pJ^bFL8uO?1yZr8(!K` zdw$$+kJmewfNi8qtD)&6(rT@CXZW5PuxwxD;`U}$NCQxuTpY&DY5{W7X8-sSxqF|W zZ)(W|*i-&Pv)PKty3l|FFhvom+2m5AMsmO4eD4DV+BWiCa7`^uAbcEP0AIek@Vp=* zx4T@a03Tc=MOk$6q|my3|k0})tU-w2S~sMSc(#Ak+F z?=O^Dn!)EukS9>%J}^Td=w_#y-{QU@Pl4Ndx7vPdz2^nq^${FEN2a=@N#eZkX&9FX zISXxvFh`s?SBh#90REFH&~G$O#O)0S0nebaO5Z;m|NDzUtkP%FfA0v2qY$?s`Fx8g z6Jpbl{Eio8CtMmOCBmbp$JpM@sw>qzTXu!?lBP>ghyWUEb>=;n!L7GmUz|#Uyf`{U z&V0s0u%St#alB|Yiqdqjn?-&Lm*Lrr3`nxo8>gubHP7m9nqqE&&VxtSnStrxzLHBg z`y=G6;1SyQXHgsY(3aK9pjjV_xlOK)Tcdw8HkqP~3{6i69)kMk{bHDvJvlV#6?SR| zJaa36x$dp$$9Al@J5r0n5bV8~h7VH};M1bHn*!iTa-V<-D>~8QXkajA!Ws^h+1RX9 z9?G(cDQ^NIYT|>@7E)pXo^QU5v6N+X&IEOA`m?G842kcJL!~Bsu zg$k-~iWH4}i=&Fj4h_*74ubZ5(PSUTC^OMUT=mmr!|Q2`8nz?1)y zPME+L-&&MfGaMM*SXgVm}U$yn8;?UD?V4uWR1!IAs*Ln*U!z( zzr2UWjG5?=l?4yebZ6Sd`X>&u<$pf64cViEMh^Id1u8|_X=$JWlaLtvO^u^L1UlKz zEjM#LJ%|t*T3W)jCV<>#hzr;f?59e2}G5%UK z;Au(;aWca?+Bw+RIhd>#o&r>>Lxog^31Hp=J6ZDKmNIvYG>ELMLR7XLfP4Y>$ev>_ z*lCU}3?=fi4dr4^b)43j)=ZH8(*D_@acoHH2mujFa5;v7c#*scwdHVv9VBr9fy&a? zG-KmH0kXHfYVFfGK_NT_&$cJqi3X)-ki*blnjdF<*|1$@Y~W&8w&VGL0ZClqULF{> z7hH;i{}|!bP=!&ARGUa+#xS(7Y%ZOWhsBgrS<=@+G(YFwqES+=qy%~*ytLlIi~iF& z#3+CFu0L3<4X$>hKtLxPvW#mdo*ixGluYISrJwF=a6+051cKq+O zx7QLzKYCOU1!(#>P+wY`m*$uDa&)%757fiMW#W8{&?sTeq+w;qYU1RAfc%(w-bcKKmgd^&PuZbh%o^4`^N7#V{r{OVkkX72X#JK`|0y5vhKGLFs5BmBeXYs!>*6j`?{1y~Fbv^9>aS5_J@& zL-)8Jkmd-?_X7rW4rk>;DiC=v1~c0XBOvuO>Yfr7jOu`qQDlw-_0qmasEIe5Fa!qc z8{fV$9bi=l^eh2}LQj3?6$fU3mI!v1m8RRT|LIwdKu92v^CT*-%fb@|S}tVc>=fUw zbzcntk!DOsvr%aGMs)S!dOX3=ac4n=jYsEONCg%Q{11Zt3c-Hyl5M(8XGdL)n6C*>I*Ssg0? 
zipp5j;-d4m5<0`j@awYM?mUr~95~c~J#r0W?SbztDd~mJlmEpSAc&vv*r7uZ+&p(> zj`O`9ehZke@HD%W=VdY=H`mR8>t(ZAL4?$FQINjf?LbB^C(H>mpD0CiDM}q#07q*p zE$Da!EJ}j;2G!w1*MXva@PWMlcgh7=A%LC!j?cf5N$v;_Sh8Wm)`D|&P;n3BPv#Vq z@3&8&{@>c#*+let-uACo+cB!C@+Q5Wx-3D0`V-JAl!B>PT|~K1D?mq6PS=y_t2LYk z_2-um45aSn5;5>ZHv0aN!E@A9YhA3d;qpWiR~-~s!HrZ;*a3TnVz4BT`AIdiC36W@ zC`g?O3-R%*ekypv!wengN`rRNugwpd0GlO z!uNy>O{|cK;ex=g)L?egAB(YEZ%mKzr-j*TFj{xlhU2@p#UT*A(U=}bJq8t(kM!3q zD|gC%u|pUjG!10RP)5H++W(Q{hy9;#;uG%op{)>L@;$8G=(#<4pGC0bRe%d8@@mOK zV_F0CXL%U8NIMAjN1ngGn9|7l8CVg6g|bC*s%omBAYl@lEq|{~qzF2^r%the!vtV1 zBncrMPuqYot5MHd=R4v>#)RDL3^^`?0MrpLO+3s85@Bf(-S1}j$Hx{%0RlyqVs2#dig>c)s^fns#5HeChML(j z4D*P^HznNHBT33lym%=mh*V|-nsL=;)Bb@vwBkJSoy|0 z4M>7yl+IIz@%``@ftDWO_JT;-hrOckp#}-&UA&ljI3}!zA{V5Rt%~V0_5ceKH%jE9 zCfzzS`>a$4G&q)>z*$GM13Ude4v3z$|A>Jl2(N7C13DdNz01!HC%h0}(4#j@O~j|O z9g1k{XOi!bhXa(+5+M=@5EDT`NJ&Yd2_t{iG;4(29I17G;aou~$$G(i#~V!d{VOmu zpQ>L0&DgqP;{Qk-AA^Gmldn1w2064KnMvbvB!rOwiQGDWgO7rWOo$|vhFxzk(!1nJ zwA%jo``ag=5K4c~s*rKAi`CgtoMZ3m>TGY{>_*<4Izs#&0n}*^Oly;qoxeo3?cDmE z(E_`of`sBU!UxbK{^D~DkVS{TtI?qOaDXF9Y^WD5VknTKuC5*=G}BKx`c+LTK#8iP z$5Q%pl-#})+)97Y@cKpEAnqmuNtBsoSFD6FG!LDo6Q94qu+caNz7ByoFQ7Wm&#ql#PXTpsT3&IRS}Arxcqbgg|(SSdbZFf2rE_x+h0wBPUa^rZCkq6`kd zZ)dmHAXQTM*3y~tNZC&T1)sP4#o#rPQ$>kZqyp)A8I-1g6W533=^70@ehf(siEJwd z)-S6aFaAvl3E!8n0LrsuLw&Xar4}L45;EGEo2Dk7zrf!|BX!X~n^1u{3SW#E#pQ@= zYiq|XEaD?=0TYP}^b3}jvRDtk6(~Iawd-9T9-h8JB?!$O8l4p>o3Lx|)zo_XOIFA= z0>yAK%p_}Z!@ZT?b#*R>0jd=>b=AaQGBi8` z(EXpV0DfDz(p73CaRY#q=f>@)kS9>ko}d32BBxHCJyo{N$YJtc=+CA)&1V$&JmVvc zjRK{jGdJ$g0RRdDfnv^tUfY8)-&_?3woZpvMq0{Tz4H)I2Z1bWbysjqJ!jVf{H!4r z+dJ$81c)=S_veqnvnug`!Q5deP6k#7VB-&N%et5szCxjw0{x)}9}BP? zpV-g=O&8b;;kmfH;*b_`t6p;5qZEpo@tt8J`z-4g9kD9l>MZ0%9mXr534%*_{qa>S znPIsJm8~Ajp8@p>cq`%Z7>I#b#AW{>OP)|nEg)Lwe7i9iJ-s z7lpaA#+}hW7>K2gEoX*(9tb;GV847>o0^4?9$Iu?WclU!zNW7s<`1<8E66> z?!&JEl(nP4!;P{&TP--2q)Ei@g8$$v7E^(!VD@!MVAKzvw`&%8Q6JQOu4Zsq;_uQX z`qszwe}b+qZ!mjy#KBa_(%nRtK_i>~cOR+vM+EzO+Xh>&&! zsE!f&T+*>Ja9kfh&iBdcf-)zA$NQhCm8}ovi?wb+!>IxA`l&R$g?bh;G|XWc!fImY zI)4N0p_u}tM7-WN(L!V*x*Wg>p$cpx*-HT^M_f*Jul!wo=CzB2+JY1jcs)CI>d4;# z{*e4mbE4c3Xv^E*wdL5ZXtx+dX_QgY(52%O>Syu-Qb>obwynO6iaPoC%aflkc~epF zfL*#!EL;PeJ_fcfo&!lL)>_{}{v)Gvcqst(Vhle$wJE)39h5a!PDN___hS`1xlfS! 
z%EX~rtDsO>Mmc7Hk+a*1gqlbXF5m;v1EBP3HRHa6fFt4kRH#c& z66|z5DGqT1e3Tn)79Jw!b21co@bx9Lw(d_?Z*IgQZL-k%A`BcGX+D$E@;HtJSexZ+ zZywiuU-)5B^-Jv27v;fsnl1)g5xPCI?@B(s){&D|)NInRr0+iPvCj!WQcXoEN4-WFE=iXBNYWwbFJ;&NVJUzXzV88a)5G+^K zHFY=~Hn@*V$+0)Ye?v)4aH0kZXN--H#Mdv!$w8;9LpIVzn{%>IsZN8**aX%fN_C+in?^)x%@Q&_(@&CCLS z+Z&1?qo#z}a8+7TJEX1D+6aR@s|;JgB&nN&>lW}FKzb@*Gy&f7&iV;pybKvlMhc}9 zPIpA+`QK+oZIXX1DhZu>P?mJ!9N(`ss-6H?%I_7@B%$wayB2px{%E}s79al=OqgNP zXrf_dI6a-601cjjH$(UnE=X5tKmEs0H%;UW6*#nM=MJDTi8v~976M7#Dza?%DNxz{ zH@%9cK@-P`{9$V$6s{WZYSgR91jvd(xV%BI(dKL#P@+TZ%*Jkh<)Q;=F2NqqCO4eS zhjL>9@&{X~x`nkhd2zW312))>zb%JA-m_ZF1FhXWetBBi$gxKx3L%Hzj&=bGG3dB$ zUfcnm|8naO<--PSlqj)0g;^Ti`9DKI%v+rp25c+1J??QKz#V3`OzUWWeRT8u_}HrI z%0wC$NOMjdro+ckh)wc8Gd39v4kH8$0rf87YBLZ}yy)<7#)unnu2_IxXHeS`5Da=> ze*`S9shXI)7DV9PUaW_@`o@6B7igoAtwexG_+zvJAl>ZDjdRfvBN7NbtEcrRth{Wy zvm+hHe(wJ#eveG`th}FM5tk@ab&>b@X_o-z(?$@)w~5;mtKG>K!k+4uouk+f8C!ju zQ4w2$0VT!PC5y91E@Xt(C*J?w$X%mZcd%6@!QyVhi_TQXNB-%%IklWctnCo5&t2XH zlU#9AtdYqSj##be~i$@c}Cd^8R3XwP%@w@t*m+P{EWwIA-q->%o%Gp5ltgK)mMdnGsLtspI)-J+=8P`d~e^f75m#JHXZ5k;CsH~xa zg|;r=0Go&ilpDZO&oc(PuDzv7s3#n0Sh!%Nhr4}AQc{PxaTK73f0x0TrFaVkNOA!g z0y-b>@BIPp2wEj$y6rmcCZq;dz~K@C0Rj%7;zvnPl~aPvc>4U0o>uFN;99_HIh_m; zP3}>nlu^KG9PVs9J#8NXLa%%mJat@gEnj*oE+lzHck~=Z#yTURySsWbp*95Kd%Im_ zpW4lO!(acIwdjWvWN!nwo($XDE)(D7;GSsWK!0;g$!~zud0}k(so0-4Y&p_*CledO_(ve$oOkux}(m*yn>q#VfAAorI4I5p7p z9J(fpCt$S!y6#5AK;Ew@AzPiz^T*S-|r7#b4+0Xq10To4KE( zQi36o*G^Jr?(3uCV&iGRs1Qw(5u6_m)4oK^&OJIjeDxA8=?1N%i{rOy2F!2tzV3le zl`m&v0Z4geQ%yKpZUu`DmTAHC74nf%q)5(p_hyjB_?hf;po>+3Qn3MFFU&U}6#loQ zKpQhK5nt>jhUe(W)NL6-HZnUKAoZC#ZV(`}z;Xx7jLHH)O9KFj8VT_ExV3XNc5DQs zG+^|R8CBsoSfW%6v^zyC$?a{;b?{|QH}3TCy!(*4 z6yRAD8;4G~()4iBwOe~7^&jO&y;{#@ey)%K&2*NHIn$;K9gVBe^|4BEm(TqXTq_er zstS#|-RyPoF2yN`FVuhOt)InIqPorjp;O)DWE@gSq}0@x8|T*RqO!PrCVTIf)Grg} z*qFcc5An2nu(0w{a+4INESIY{ZGnuRemI8%vOuWk>*igvBdb#2PG>TzJG$+Vmq#{N zrgJlUsmS)C0(_FCq^w4>4DwCV(o#p~1=yLTH+L*n-O2%iNAaJ6lL}Lr zj4*p-D}3 zl^|39IH#peMO{J&oHA+2n@s!ie~wagz`}yI%Un$#)>wxB2b{H@hnb;$r-#~wP)*II z_?>)^fH#uZP+D0yKe-(CS8rIOvbCtl{ckD`-fa1||E?&(NE`$&^iZ>bVhvbCNE7ni ztfRSJ>021<*Mm4|-i%X+qkq-Hj^K<04S9629vB|x*P#H}6@0BeyIVMK-8^5wps}!6 z0NIl|b>5crhs(W_o~zL_fR_axw04sn(!kKWng%>Rm+6bT6r@mzTr%}rTkR0Zy{y1j zK|h$L*J5mGwml2_p_NT*GdEaBW+LS+2)hCiOiD^lujRCVvvf0>J0_>)G?{Hxz}wiH z>tDJh0!s}pUY$C33(qYA0@6ymyEgshSLf)TyP~#h^nNV_X?S>en;)Y=7_4{?>0Jrf#eKpI8EQsnD9ON!}PPVh>DA{a$Rbvl(;$0h5~RqI^ngu zy#HusZlj{Di1r?xik7^nAmd?lU8lSc1K5!G? 
z7RPh=Alao{IcXtiJ);< zw=JD-DrmXc`octE8F{+%-8mmOTx>*t_qc8`pE7GgSAngF(OKs;3Gy#x*XN;x8j`DO z3jOC0d8{-UAJWMApv6{aL}utKP>a1<6`ePf`Rkz;FL%d5&jk-$Fb z6gC=8S3wayy^!`u@xaaKvjJHdk!E`3gvTzT2L%nDSrz>8Asggt6wB2JD*MNSk6P%2 zO4!AuSxG8sO@BQe#XSn*L_#A2GsMeAmrs`GddL)X?}%+GVRq@tOVt%i3$;M9(=@0$SlYq$Vhy;E8>zA(9)FLl(p)#w6ui&V@l7*8~Mt@ z`28E-O83?@o-1Lgc2jQ3MFb(o6-YrP*8c#){R)fBmr^$`J<2o69U7O-CPwZa=_l8u zyg4)@h&}iw!ez3(lC}{cpBY8;d!pcfM0gLLBhp7Gp(zgN?^@q1I8VNOg_6BP3-I8g zVQn9JaHBbLqT`ALA#F0=`NZYC(xxO%k__|=i47Eg%tv-r$AQtQsJb`goZjT@jpx;}t2$9eZli11#!YvX zd}-HYHk$dnvhYpa;ld3i(l=KHl=L(haN%?KHA@adxD~XAZ4E7+&T;vYS%u1wKe=BU zP!{c&kFNR)VnEge0u^%Tm+)0iv*#o44rg8YjSVSfjP2#tCzcLdRYm3U@^YS9+rS_M z!-K8!^bAk#Rkzvd!{p>-fld-5V`Kc@o7P)j+(S)zJus(%Da{-;8=vP5NL$kj3C6&T zTW)={+wW`w+!B$4l76+-{dsdF|5Copwaz9cg5C)3!|yZK7bCzaC3g#5{zFvr!}n=K zz!I%-%$TxIc&kfls4&F@)-teginWxL=#1!%V{B~$Z0?4TTOv>Hx1@xS(ePuW&ZbBt z0MZ-#@W#C0Bh`^{gWh<9jww(St+%QtRIIF4J9d=S37fg)e*2WjVEr+=<>0^b{Kz9X z84dXT%{s6s=4f({Q=XZP|5=&20#u%CC)~f|;?%#I_C~s#b`XIYOHo0C5F4M>dJxt% zvSFSdk2(|>%pu*zs@5rY{W@ddlx_qO3b8+3$^VY<$ zAX!rb^6#Q(i3aW+&^U@>vVo@Y^%5#Ui#zmRt})plo2R{PVC;h&|4WGN1QG1x z?Yhu!Jlok7!$wR1uU#W4CC}98{N5AlPn)PLRAzZOFDVlq#-*)qUX!|xH^0Vr5zU^5 zH}e`vq&q*l%&V)vhRRCGNRA1WJoAO`>|k<+yXZPtkZIvfZ z%L`FrWG3k@23BhPZiBT+b(`=l5LHL~D?(OJ#Ls9@_z2 z=6f|{#6sl%@{EG(@q&jO3s5PXm0Qrf7Tmale=tZ68M&oA@*|4E$Bp??S*mC`2ABntvx z6tYQRsSfg`wlB%Y$Mql}Z1voYp$d@(F_4;w6B}bo^cgTnU@B#%FP|h}2R$?#U z(&%!_N+@*leI+A1qV^ZS0VU8CB^p2&|DD5f`}tDLt7{@*V4^~L10ED;Fl(NlZ7d85 zb3f5jksv`#V8ZEBc@G}R<)oyXdZ)!LS#1JW?>WUM$SBOir=s@oHI#pl@QPdFf9yYk zo%>J_W{kWXz48-DkaioMmNk)0*^~qVlnvqssCj% zzC4DNT@Hu&>-SkMOYacM!pVo6tEVc8QKNJ&XV55|GdA9)Eh~F=S~M8cK*n|ZyB+jc z;yOSHWR1?(7s864IrXSx%JmGa#)<#^fWa{@S{H;THH-nigjEaIh~fh@i09IHDrp=( z{$1vs@%k{~?@*5xZYG8ra&u!rw6nO&Z~oc%9S;|)i;Hu5D!Qzx{Y0VBNnu9}j4iN3 z1&7Njs4A-0S*`CybuHRdG49&ClNqFJBI43C+H7Y41ODwx3ZS9Lh)$N5QzPrtE7feX zvY_v5vtDQfaqwM3Y@c|E7lxXbfI#W}d;J8@{wj}LEG z0^zD)8q~4Oct{a-0mobbiGYwMu!p7MyAFs+rm-JusHy^lnuKzox~6`5!u+t8kx{{A zHvaj_3Gz){{Cq14#`T1cuA(rNTc0LP{x$4d|NF)uD8>QQ8R!wG&h!8kK+!P>{s#VJ9ai{iL zZw)g`?f)G`f_EhnE24-voralmt6?pO!jE}j0?8y>WPPJfnK%`s9p@+HqLJn^GHb18V;U!s|3 zmx^Qwm_={ZqD3n%r@Lo{JL026nrP!mIv$A_(X$#y3pz>; znc8+-T!cKg)O%mtP*YM0M#zA`F0d4JcwW;{lE=vuRA|;YUHq~SYG4^ZrTnBxU{Aq>hIg%xrm}lpo^%0~;`kW? 
zTw1%q{txpQPFB=xgAKr@2`l0m4S$7wQo@AC#cRg544rZT{~10q<9erDRCakRgY)7= zFmQw-l$_W0##V8OI;wATqEHFaKQa^G=`1-{Ygh(1}e&nj+riuSmiH+1!E+f$TbFz#5G#~(O zZ@@BwoS4BRHlS{N{!{dCV$6=FA{$oPDTO;tOKBI9EPhXF9#TU^Z*%QMr0}%NPDQJdKFKTAAZ3s zQDe?Z<#UE@0aoQwLcb=&F>}#=je*99f%VAl+YPj6PRf6kWZg*y2Avp7lao7D8|Sl^ z%L84Y>7;~Y{JZFU17l@DCyUi!&gE`=W@b(@DAd&_^fH(UFlU{4yu$i6y)JIryvE8r z_%p`O`himkeKOyW{a8JC55VW_Ew!2VTX0cs_WUidh!&*0vo7Ow(vzz$AlYCQq^$@W z8+&_a6&EvJunU49Pcsw@5nx0MYS{s_UJ;SL2z*{La#|So`)3V&4ttmVZ1-L52oR7f zyXp<7@x9;Q+~gD$CG~9*Cp`M~25>dlnAzBjjt+G3YC?UJQ21y+h%i5{CmtjzDURrx z(rbg!4#-c5iDqTL+1Yw;&4D&^smWh}*wkoyypRB$BBRgJ+`PwxgEu2`7S)rG2qG6N z=Y;RRNveV7Aw(JoB=%%v-$8O{R&&0fL}`0FlsHxu8QGOn-D15l1oF?)sNzTD%hV^* zo(HxEzE^JGF$+G;oLtV)UDM(yYh1kE2f3%3`bYB@f$H{|RqNk2QBWg*F-!Bq1L)7( zDtw3GR~80KVBH!amMLS}jjP)cDuDr)QxNS#c;BWfjdrEMKJ?Wt=&Do>0XtXKd)MWf zvJ&o6%{rw!S#(7lVp>urwhFz(wzS|VsFw2b!a~@{zM;XJe{WzQ0gMlXitT7LT8aXj zEqzeFK@{bTI?aP_<^`L`jDmNVh{Zw$=>4!4Iwi**W%al1i({y#aj~BREWz1(fiRXI>{yH?xNK>LO9Z&P-;eK`q+3l*tes0a9yXCs!q-!5_w2V`Cdyg zq9_fgRI!td6Qu3V+#nPV;7CMV4_sQsmwqdq2?SiN6>LdZXc#LOkI6)3JS2YOGiD$g zX?Y8gfF|R8VqAA)D$B}qt ztFyFK=CF_YIYu~N3rO!uhsGm>`M>eoi3{#qZ00>oK5q_wbc1fE(Z)qQ|7X|u=Ai~_ zZ!Cc5*N;|wOb1QuVk!wuwimUE)uuW}PEpi4%A(-lV8Z1k2I04^n_!D}iTf1wEs@Rg zp;g_wdc1SZ5TeIAjy;i_R{8y&WSwuWVjn@ipg7y<#z+)3mCN#B`KBAk=>aZ_+8>?( zB@mCnN-XkWJMCMJJXf10X!H^Ep`+pP)tiR)_&|Gy%t}J<0yP1vW>g3Fp$% zBiN4gtxo*nUL`YwCUe9u2!4Rt>mQ#$=Afw%WYY~j?SpmH%nAByX7G=wW6y{gml?1v z<#g0suArU1mR?+5&f$EMJ=7%0xtI(Rr0kb#u5Zgcyq^#uVsU49Xc#WqUIii6z`EDk z{8?QhpDoX14v@ux5&x|SDJBf&$B)1oysYz`SL<_K+PBJS-a2Z0>O4Go~W z4jw4-883Xau{k^_6d2rPrm8E}X3})y7bA(PL{P!Ni0hL{mKF85kG|(>u->B@ z#e&NXq9Z2eUG_c$``-?&`p@N@UkRyX9}$H)ed(6v-e}hnBnd5oUN<=L^@rPoFNA~P z-1vU>=EtmUF9D?0?RxfjmBl`P9z-4yHS?T$Nj^IlmKO8cKXew=U^zS2#?vOZm_GJe zx1but{qzQFDzu+3dubWooZBOuR+~E(T#bm(62%109;E@;sl?2H@ zOos%#2Ko&8M2Fc9bT@7p@ha1{dEO^(uI$7NN)Jvp{FN<`4$~d z5R!;H%05wXA^qz5uCapUK;El$TR}DZ?i>0?3~UFmH5Db~}(&An+w+<1JLo;);x+Q$D8+9*Mk68W_RlO=Q-;dw1CZ6K7cV63;09hC_ zGfFHh)8iRAqR8PvM_(DR4NSwm)zlL~>tklx#Wd)gh2;BmP*(q9h;JXgIy?NErNh;9 zuyrwDJMFQ!aT8+pSBiRuhs{pRqSFx+t3eZX*smacfCJcmZoYA=`>VXh0emSD*LsUJ zItE&wQHXv^j-d!jXh_6n-RbJdR1FKWIO{lgZbYH6&@}^AweNHHhkD? zbdHARHUXu`?9$R89x!E-$+~-lg@qvBWJM*Fg%NqOOE3=(-iVOI?ESBm0VIg2Cjh1X z!)*A01ww+V5*)05xo(Y?Li9EH*W9TEODyf3k(0krcRp`F1~-~NQ{l!9?q0?yPJ(=- z=#RMQHv)?MUS5JOHiVF`Nd|9(g+WlFTyGd|KpZDG$)a~93vPiCK}NFuvYA;z>0xnrv&GcG zK{Bx6vO0CT>d(5qeg`<{d$NR)eS6ndKfC<$nkXGG|83_%7AhYq*vASY2%oa%Hu#7@wM-F-KRz`hqVNg8fy;H*w;8fG zuSClfYis6JMOk&{&b^OO7J_Z2?NtiQulGi;s=xCk{YNj;%h-6T$v(G|IfS2AXEtSg zM{6xY_1f#=g9oRD4yK#xki^NUSS$zRg~Ob&)9SnHlN+x)`P%wgKxol_hz;)GByH=d zun!D)_=`>Pbn^LOy2V>V;wc)J-xF54zwVZjNQQw_2b>Y%lm#F)Mhp6oo%e;=EuD& z`vQ8Xzf^-E7Y@~yzYG%OC}VR1Z&c={!ED-U9P&SzsR1GK=DcSA!X-_CejyC`3OI8S z@wrbJ2DEuTv)QWgSOpP;i@ZZZarF4Fz~-*Cg$KhX2~?<{7=-*OVM|~2hJw8RfKg+2 zGE9vnL#+BE?yw&6r&{AD$q?j1K6R%&lR!8Wp%1IQOsf*PL*ihB}?4~3or#iPSZjypvt~Asm znX!IGJTrV8g3zX2&ju&3MeNe)@EtW$fBF(j+__hfv+Z=m>@s^9b9>Qq2|x!=PngP| z6sZItHa$Bjg7etY+WOri?)CWbz2fa0Yy6HXU8iwl|t3&65MNGa?`YwA)>LJC`HI+NpK4#i0! 
zTubwL@M8&H!YB|no@o|+(k3{ry&{zQ{XQSycK`${EDU0gxj(xxhU~Rvg?!b}Y;xm1 zc`1_w@nuf9Ba@w1wZHd8wY}Pgs(u1u327i|)9o1|(x>B>i{s`v$?L~kMTJT77bO)XtL^$lbxlGJcTC5)kce>2X1me4P&z9Y9!UYuf#MUDI5&J=3bt3!qX2{z zR+XKM)+azmhx~hnrw1+al!P&1A>qRVv%1>a_Q$h(*{#dDAAOqxVpZO}X}niKGA8q{ zwtCha2uq#^f~tB7iK6gC?3{OhDn`W;#-qo(pK>3@19_kD{QB0h4#w0A`F3|>4!Y=J z!$k;qozBJzE*qfOoc831h%#0|pw@kJ#SX2 zUwecuk&xsIzanRokc5O>fR?}5cif+;rD5q2m<2@~RG%?pUNfi6z&zz6(;JBXjcsJ2 zpskHY_#v+(CoVzN_v+Kuw$U5tzpjSd)cr&IX6BWLhlU1dUrBb#9tC&NOy7ovuuLlf z-=b#XH@_xyWmQEqs!LlLDf4Lm5T?l}Dkyz}u7zZl-n~?X{XZ8VcYJzUOGiFX9Y`lGNSikEws{ftc=UU~kFQAM;UXVivx?vpB(%^EiSpbJX z@Xux_kW#ftN<`9qcNg{&{Fz?vqM{N}Scm{JnrW!w6JoX< znzk|GKxXj_3yYNt0aPR#6AST2L~!+>`2NnR0n&zeNZ&fwG&et-)>kyLPi+pY>^sIQ zDXBPf<`v{9D-NA}Ch-uZ7892cf6+gu(fYdSF9?AYL|w&?fCutH|LfPcG|nu_%Emn!F4|IBKfX>; zkkZcR$`&-`ku!3ZPsu&dA-J_hMYSXRj7re`gs)JRkseVSYb0Uu9i&fZ|40q1b<*-A z@fY}z{uQ)2H8nLSXtD}Q($ZoA(ft8mfrNx4(G2AP&W)S*Gi#Qj6Rmjsukr}Ygds4j z_IN*rgQHWz(6(JNTr#$bvU1=?Pv_>*!=6q&{u=?wx|jYfyfJ2*wX_5_Lko5WXHsIRnIjh0UFtY3@Dvvw-y$ik1^VE=5JvQ7c0`2x4~*rW@%`ls%;DGgXLiajB{jV`sikJ~ z>Jys0xCl(-KnEn%>_I=midUc?NqR@d0}2{>+YmW?UC4I^tl!P(Lkr z6nxltbzx>^&CfGc=-={*9niP(B)N&6nV5VES-kq&p080x`fZ9VvR%0&u5x`I-)fVhh z&h7R)s=Ilrdnt%7L*U_rT<`=+Jj<^42iE{55)?4OHMg+fb-v1s^IwoHc<}1(B|mX- zV#PD_Jc+#=E4a}%+TAHzs#TO$z6Xwh+S-8klmgV5^t#-(6Hi1a_vak~GB!FQCN8S> z3^J)M($YhXb`OPisii(B?II!|g4R&*IYSoQI3kk(S)l5HzM@aljq{z9$K>D765 zaXmLFr>LmBZ>;c&`CpXB{Sezw!muv0?detwR6HG+~)bURTR`u%-$B!?5#K2MGh~b6_at9v(YWB?Wx!2S4mz=q>7V8up z!N7S!m*ts#=nmh1HRY;(5xDKHyF0pR3qC{-?8&b<-YG4$Z0QJIcRs$Fw}cyZe0dEk zfkd6Ar}X@P*ZIh#Ufx#e580qow4=UCpSgK*;1p%Z?!LYQoQXA`E0UN2z}*73zkcE7 zLm6sbxc0hCY05Csp_NqkC;);ZS4VpV_+L~5Zkun<%*EVyst|E@Y34bvp1Bw@?YGj)(i+2VaYj{tG)YZR9?)g^dos zjSdtX0G0pOExsviJW5yl9@$~ub)DpU}34mf?w!?2DU{Ug+OxHu!*4&lhxkNfr8 zS*@)k3>++9$WlQU7!d&uf{17jkj_0TyFFDJLvEcwOOIo2N%5)7_H z@W=}6$B=KI-jfyN0)O<)b9U-beu)qy$oTj?_6MAtlCq@4R3r#!5d%P=q_h~9$%6gj z%lhR#h`2CGXXa)}J-2N2DO^PO`Ii{kqFNHn zHz%uBA3X$!k@0S}6*lr-pMf~!8z_AZKS;(E7mqKi zd-GEU-ghTHe!LPvsbT%Y1WuBAGOopr35-YBzm&vO1$p_Ij*Rle!hTWuj)#ZuV6PiS zE~}3xz{c+_FHuyAZKz_#I+3ZXI|APmH1#oz$(a-l_2DxHi;MsKI}6_gfcNA)>s z)+$AX5A=TXKQw)1RF!Mj?t?*#h=59$Af3{sAfU9Av#3~Z&>ygaHw?xu$U zD%i7Aw8z}sI2Gla?y+*V5q@rAX6nGFv}0qh4mgtO{RS8I z9;RkxML$h@6`w#)|4To+O3MOdn2>CW`aw_Rw??ewqRn@Z8o{)JH|googjr2y8n7YQ zc+1jDUi{k-t$0%)n+5)U+o?7w*ic66sm%+GY#iK_*u;qVg4{I7Hz)uBI^^_%H1@>{ zOmdO4-IH_0=8a;E2gvR&Rx@>al~zIc52UN@uIv!#S|TFw;4wDeCn+69e&2^8ZuX8M zh?YUW0aJ3-b2CU4%h7uW(BTEq=dM`q1i+uoxdS=`&}_hY78#*r!WI8rJ>jl3Knw2N{Nn+rmRqn_0$#9&zOlx z1)NJy?49d_w*uV4?66UKmzEX@QoD|}vKg5lJ|Mdz+c*z$E(~3gyD^QkTxsto`}*$V zl4xjaQ&aYol~=6O+#e?ZETvx8dv$FMUQSZEPnq3BPnhRxe^H8}lKbMck{GhM_^u6)R@^WhXBY{eKm?BHd*IWe+|l=i{B-liiJ67>N7PS8`-{ z_$U_rbdnM;z~248phKcQJsmYMkM;`GEFGcYPX8ZNHQQ(siPfZyZ4z~L%+d*g}RIF!?%tPLWvm}#E{ zS%CD@Yt~%Q9GUTKDMo1Ep~<<7jr?{QV&yP=~XTw*$oY&yK&g4M5ud)LGJX?}_Ob0F#IY*7;fV z_K%Yx{3raRBJQ)GXTrw>8me^69uAC{p5*17z;n3CVc$(~f2*^(V0pQsQ|A`$TOBV- z!Mo;i05t_RDx~#;QNV9jK=S3 zJ~bL3N<(HPJT^XF?RwTa&vGx!rlB?wYG!3=={1YCX4sc=b6q1NBcMwSYf>~cJg|^7 zg0mc|KKp}%@P2EL%O~aU9PDK`(w$3*h=_T%ey4=RGIc~4zgtMOt*h4WMD;7CM)}j4 zX)!Aq|Hqcr751yIq)qH2yWKytA|4F|id{Nsc56 zZyNlFK)e8%Ng68FJF9QsGtKG$QdflPCPhU11s(4W8mQ&$%UV& zkMEwX=0>(UrkIOd{Q>@WsrQ7Li-|*s6AO`%4eA@{85L>oU*Sch;AoGIHp$7;;q%0N z?s|;ovo7ES;vZ1hk%eZb6}U|QeYCsna&Xw(!C4?q_2j8mZGLUjYte>17Re3+JVemx z9Q*87lUtI%_#8-+6_}*YesCiSI&coP3F8GEpPpPF64{z`%uvkbBKr2V8ZgV?!OJAj zw|D+6(5ie4FDSH4x5imlOuL+?`{&XgNT27(?^7`_G|h7DIMrTvt*|%5@||4`u8G|~ zU)eKHw+Gb+5glFUUz_4MG(OJO1 zgdo%PWki1UsV{u_hZ~cEHdXFNW9`3sri$zx?d^lBcuUjLX3ty9h(`;>WwkU^>mBAp 
z&BNfBdHpmI!m}P`eg)17hl2YtBc0%m@yg;p!_7YvB52|5fPz<+7jG11JX7k1|M`P| zSfB-~+r6TC^hirv1Io0%?f>gdeS->^IIqpPR5q71R;lg7&y>OrcQC?h+~(CNd**zp zu8hdBkq^PadeyHTm#1lgK>r`KB0_;j+j7S|OioVr;QA(UHZpm-a9JP^PfKoL?llN< zo}tUplnY{Y^QgkU8^hzHTZZu2}|CCjHk(pJ&Ccw})*73a| zjf0g|URT%VSI#V`c#=z*kwgKHmohq^UXzLV=X^390`jBDJ!7+LrV@0$x}ec zyc*mK(l~A-InU#R6Br^zL1Q0PmpGYzQo6*d<+a>A|EfYmfp9%CSOA%<^VU!|ehn`P z3`!R>e$GTZb!GW9fIuodKLGg}Y;jO%4YalA#+tChIgud6Ed%y5j3qax&UX(ej-dLjFzmq0>W8@;vs{;P_A zwh5ykeC^ZyPQxO&w$ngf|Lu7`%P%;j9%LbNb{?|gKkl=u-DhK`|})-RYMwBY#E=|PdDrEO1FXL)6* z$L28(!fX4I%k6jB@WHR1iQ~h}dxg3+D)7<377Rqm?dNZD&~AvnaDh^sj&hx2c&R~H zU071$`SZ07C>)wj9$kN79lwUF%FAur>P&qvw}J5*tF4ytHo`qc@AknAW6ptHo_3TQCjvG z5TvcWLxC0X{~a)-h;F^RoU%-9MWOH3+z5zy*bO)vk8>rpJfvXB0g)0g+o-p49@1y4 zF%D1j6f6?FKzY+lLqoUhHA?r8E`o4yymhM6fsEv2eVtT^Y+AtXSI(2|8z2g}ck;*% z4vcGm0f&~F+8h{p=x7`|fA9e*K}Sc2Q1S85K6m&85EtcnZY46lbtaz z`23cCULFZ#5nkryha9vWegk&p8QrHQ_O$%;!TC<1M~^zObzIF&m+9#;;ZXfM?qp9A z+F-LQNVmzJ;JUCx@J>ehYJYd>ath$nyASYcy)SQ`A|o>;fHl(+n_2s;tq7YCGfH;! zwZM(CHB(o&-o)`r@-cNiEXnlBWLF)nkY(F7*72B*XId^Wr?3JM3E zL(3M*f)tu++Di15u~E@naSCW2eBjVP3_+#%ccNNZP8%KhLPVObY~uP`103Ayqr;PL zv-06^Ss4VsMq0NOEh`r(0&6_R%^S$e_yqX*k20SkqGI|uKEF!=Ht|@}Qq@93fYn9% z#nMtWZ84)Uk=#`+#Q*1G9*Uqz{{KkUZrwy5TERkyU>O=)#}Lvy|Jt2QNm-K_pbBa89e2S4%bvEb8;ED0=aBjVd*DxT9vyWZ zO$}9I(v+M29AxRr=8wyGcwo2$(FZ(Z>UgwBM0E7P&`@U&GgK6>O_Bb9RZNI>eYC)f zrwE^^KGeiVpGzowS?|9mN{lriQOfzXODy5jr=e>aORw7D0rEwG5P?-u zJXT!MK$BFefs4cn%wD9|<-?uOJZxxZ7Q%E;8QOSWQOxBiXC%i2sTE~3qshO2^MWC% zZzDLYtzQ)PP(c395G>+BT~;e=pFqGeknGAEQ@s0eMv$7R(7{-my6AHn`YGTC&ggpm zRt>tX63!@pAp{4`a%uEaV=`$Rp8l(>+QnlIk2!Z!sn*20^RsZ>eas*PIh<=mC8Ovw zEl#8lL-kM7E2IH1T`ukS6K?TQQFHT&IJz=+R|$g+H@#@nK+K-sZF{@w;c#1S$JC{M z{|DcnIshD`T$s7@jgJES;qOB8*-4MQ;A<3jGvmP&DDi*ko!6dp){{O64aHK$m6KBf z&IrvPVyv!RlH#XS0Olg{guNUTRtRzfmv&f~fiJAZs2Ene0CemQc1CgK&v2#nX&i$D<1z1wlI1|5cv7Z*(i zjGqJazLWX}NP#%5>Lb_R^^+_?ks(L$79WD~Zl&s7A8nMM{@FFt{M2}8aS^mZAJ8#r zB0t=SBXtoLTD>lG_-|!>Y4Jg?R$14N;jzhICT-6dxN%bP0DjGwD=96$=?^i?Fc6Su z*}}#G>Q|Vd;Ol^+4ul7-gMrb}BjBz?JLI%HCo*Bm{8kVhog|+zpi+;DI7I2Aj;Yw_T~{Y}r_R7JZ*om#y*nqmHUJp*RS4Hv-MKf2+tJlES>T9 z1B|B;DI|a=hwDy)1lM+=j*W&19%Cg}Xp=#K2f05HCU;pZQD|eKOcTp(c4(#D) zW85I`fj-yg(a{&@GcxV;fMZX(`PNuotN~QW5vOo{P`$-VH>6Y`yi<=!>m!F7LG<=m z z6+1(OqU6_o?J+weI9zR}d?K0+|GWgKwCh;!zak74CMHhP!J9xx{Cj7zFv@FNGXDgX zxDNYv-Rs4es;V#1fy4^e^{QjIN*o_%o3$DY4CGwH-##YKLx5ca$UyAX zJ;cJ=vY!}>?&Du3iyMI5c{TQf;}Tp6J~IvBp~?Uxcpm(;g{lomj$EV1K#=fbG>`uB z$3$2kWxo!rLV$s#M3rD+Y7oGaDE^&>n9khb0B$7i%w+Fx82Rf}#?39ZX0?-)68L&* zt$zRAKla4CQx8Iwwtau-V~1U;D6LSId=ZW8CDJ<_(b95H{L56oQy%mCg}Q;=4hZA3 z!?uo(b^&tf?COQ4Icr->__9AXrc<4ffYM0h*f;ZQj-j2^d5?m!-#tN`_BE!^Eh*~9 zz2g6I(cQ<;nlwBcL}`5fA6DCgHs#-yOy3?T8B_(e8i8)00T=7hBj#KFHUIqw%9Z){ zFq*r!``2a9%Ku#14L^TDR&yf?@qhbP(D^X+H!o*!4>|(=GcN(_H}x3kFqu03`%aO) zmgzHHeRUoB#)Gv)sV9uqmr5a1JjQ|m!jhBbYCkPh6P~KpK)k^R@@VAzTmW1t zhu7l*D>H+#rjERU<}D;D8a|D_WlDjv<$Zirr*) zjg8fz)ij#)7{>89V0TKIAn<|DZSQsXarMy=4np;LLW_krQZ`j3$A0G4vuCmjpse#& z`17a3LK%Mv32z5|z-#nwI$y(|PWVa~hZoIS19i^WlLeb!@LUU9;Yl z;PwVd&CGpchU3g1&}s5XmJX!UlY^Lgs_veELS5spQtDyKvJH;=g{9xlV71)QtHH$# zytZ8aYJQq1Qbo`41q%UZ+CaNE5OFUE9$t@00)A^9%eZb>gmQU(JDOr+7Vnjmp(RP#)lXE+IuJ}gKc zxdVqb%vpb{Q4yHIvU?4Pd5p#$xPK_uq{bnbr3sVvM5KKfXWB($L zTwyx|oRl0ts3J>xTjmk-qi8^Qt{DF-DH%NO+|m4Z6?ZczC`eb(2Vcqn58kA15L8y$ z>N9{E<>W-H%Ry|7jQUC|FYo%#ja(NFZggS{V1u|TuqteJqvyD=V*b+qZNY!zF zV2{~@*eF)L`rT31TC|iiZXR?Uo%jrqh0Ys@0-^QDDXYAuJ>a9N&!59hO+X!a+g%Vn z07Oz6FI4TUX}#P}KnV|8ArR+m3pju3TegN;gxowR+BEO02NwcZMhj}gX@px5|AJ7E z2>^^2{!#wx5HSp)h@rJDH9ga*)Y!;d}ZEpnv zg?JaC4*{$vLuL%Msl9(EanhSDc71M%&u=w;nO(m{^B4DBMtchhbtllc1Had=o{^b} 
zjX+4f3hQHH1q8xq3i1jI)`JSDu6@VgtqaLd1j!hT`C^mr#87XIy^L#bW1!FciVaxS z+Gnj>l*rl|0MjHXLVhTSiK9S~)7G)+EKD-HDiX-f&JkGDU%R?&3#w9Ls!D5PA^@TIZ z`8Q%(IS_4B}QCtGSxUCOODJ5uDoZCR;WjE>ZA+cK;rxFf5IHo?U5B|?> zWn$rfpg84%Qj1HL!0q!UyvWbM zF8fK1r?{=29_YH5C>I{IE$Jo({gVVFgOqgpbj@*&dW~?1ehwBOWCDy)@qhR+12K^d=|8BVuV0!x zv-Geg9S60wNGy*miNxWJ@!^tGOBx!}fnt=L}`79fVN?B-@2afv6N z$;yJ!4KwgleDe78fU3S~=cdFMJ^YwCB%4o-2M^Hv$|+FVLP$^$r zzhchlEkkDn!L7dFGS?y1(t4(9pqw3>AReZqgx4@9Afy^lOX{+{gd#7*j}JU{2)zJc6~sk@@PVsPcYeeYi6o$T-p(C#TG&%%i9 zy)k*Z4#47>4kmbCg8=Yr<&pt_<&i83^MXjnTT^SaZs0~PbgOM<*}ui0G_6FouSpCjrz5Re`6H|`t6L#rTO{70`;?%hv=2DFoH3$!{HanEBji* ztzXO2MKMKF3#U6v>bUlAvWc8CQ8d(^yKgRHB0v`pWh;5kU(LcHKY%m)rr|9vYN@$` z6uat72w}(<0qfIfQaAf4rv{fkkE!^@E8}ez)by1a#Jd90homJXRc=cC++y31RxRfE z{=K#ql|bsI7{26Pv6_U;TI1ZF?e@TnBc9g=J_N?e2DMJ+TZz@jzmlrWdxdU=w>-=S z2S@!%X8`F`l^KiPLR8|XBmSocF0XSGuY6(u0#W*T*=!Ta?=`DDz~k_;de9L0;Aw_% zcf>y?%Trvf%Wwh(?3usYPN&Xr(NAk~j;AVZUu$ikbBiZ48l!xU?e$Y-|<&8!67j<^w(=MiobRYyQ6vXM@zsQNK? z7lvILndjy**}q)(p8F-WT-e|1ih34KDny2O#u2d^2yO#h=k!1zBR#T(j0}BZFN3e% zD{e!?5tEMwhOjZ5?>GE0jAiJvvu@R6kUwd~*yOc7HCY&Q{M+$pn&=_pYIxe4>5+Yg z8UuTa)a;>3#<~SGq zwutxMSni#6B|XG{sQpOiOF|qGaq6_2_HZ0=USss){MRdZ}<-N9Vlrh2fl_<7w8h9K1M$=_b z4K_H-Lr@nyDw}`Y{8br5YP@y5AV z)A+~E<0kQ1b&B5DQ zv386`M%?w=J@8eQ-~GIEh%4xRz?h1Ff84JO?+&izouGIFgLf@vyM4vJe)4 ze(KD$C;mX_TZaj!rzUX00Jgyp4KL1z+jrFW?|?AdrHrTco&X2mh{vWG=z*A*!{5tD z4iJZ+{En%s<|QE^HHKtdg)GQs;eP~TnUMz|s}KH|R^$g;hE(fAz=%33bH3-$30)I_ zWsT7f6MDizf*#(zO}J#X2qk(_xa_d+OO;6OEvxOrjv#CDsV+Z1xsZ2M0W7ZE=ChcV zk2atwD=#xT1m=)kn|QOqbl*)5=O(!Weo1QnUUearDc{xgM5O zG}R+xK8Ytiq-G0K^Fol%twF)cFv0_KyKO$6?WIl?+m*L=@N46Dj0W{9Y9}r{42Rt- z14nIXRfmrdGh6Zrpc(#3+gsSAeI}e2^^_|6ce`v)3i<&4qg_n@3O}YVZyM~pbiA5HoJgrduntu6P`-|o#SkiDyU?NvO+VA-3WX$$ znSp_{94dP|v!I%W;^L3-22?aG`PtvF5ujcnv#m~hs(*}#{4*HEu@79z%emb*<;q=L zAORTkNfLVHXLa4Tql!&`ey6t9Wc8vXW@V)6pXs_k-gp-+P|A{6>!x5scs2289CK_u ze8A_bFD#xvVtZ5oeGzIAG7d?1g5|8%*I;I@YA<=251&I`o)w6t^1pVt^kfWZs||61 z2EtFQOj zU~7QPqLMExK0--b?V^%CJ`%pK24qN|Nv2zlroM+QzBhA8{zLF?migT4r^9tn{$X_g zBOL=nMs`MM`@HI@2cH(Pa}g|-ni?q`!!T1d^QCmt=T3(t!} zBX}T8P0T9GE7#Z8Qa`9NE(*Fuu&^*aQ|tz4U}LJAj`~qx%k06H>03J|`p?AV2_V*E zV0~z6IZ%6MYD9)}E6{_NcX;{ek(D=RTz*#}sp}95{YICB`XT*B<&#pUTs}Szy8E4j=}$Zj zeHhHybfuJH5r4-}zrzTTehBw><~N>T3;%%2A;fwc9o-g8Hp|^&H-n8V- zr@U^s3v#E-0pk{rJuyu(27V7#`5fvhckg+?GiYVKuAEr?B{9Ciq&e|KiL0@(w-MVU zW7o^mKu!Go%=%1Sj5i)`Xtx^Mf0uuJ?}D$pdU^G5p&2`hb*1kwbShH6+Lpnltby_G zy?&okO5PGSz!;BLyWBrKEukbevCtK?O-`|k@ z-mBRm8KK9h*lqUMfeO=&UT1z3R`*cnap!ND9$hH!c&^pLM+K3_Dbw{!;3yfIJ8wJx zk#x?yYyCuBOl)9b*Z7hS6&t}SuC%q^uJYV3oL_mA-Dd!ohMQLL*X%&JT=)UV-`jIk zrw?1V6!WFxrj! 
zP)Zl2HE0Xrj(1M`oS1(Aes8{+L@gZD`S)p%PCd6HZ2GV@neZm+d?TQ4n@%%p4aave zZ9cwv(=4T?_V$U%9{Y!ycuadioDh4%l?X8*)*tr%5I1b^{4ylvV?-)dRwfZqqp;tS zHiJHZ88AuVaQf}eK7Q;=0?A4blqj3AxQYCTU?4pX>yh}^z5eF^|I^%^vLVxA`WN}; z#DHTrR`abmH#gg~xe0+pJ6=IY!w2@#xL6?~=#XF9U`tC8j_Zl{XQ;&RfJoRECD$^J zmhAn@1CDh|t%gXzbKv;sN#K%@Iw1{Rg(N5@aO?AXBqoQsDjW1P26P&c^Fxv%ul?&; zuG%3#DRH%+t8OX@)cR-hGRMwb-V~ILvALGSjlp){j!3R`PdamhS*4CR%2%DApFb>F z5cla=h2>Q25jbveZe#Is4yQb|!*`os|64ivZU}DW5Q^2Yq1Zjm{^8*ewO(2%)az8F z9xkO(Wn93vPO2SeWpCVR&dBbP!xZ8A@3f~jIWu#SD_e1)$MP^3FE-*}Z-f-jIAGUU z`lyU$1}!CM7w;dl4L4$FlH+$W$uZg^AlJ?0sZAKtm~^S(b6joT`WrDmZ@!E1E=gw; z@AkWcs2E@5sz7a#dJ4`YCtLBYQpHqW0bwHF1eoKFjN+cn| zH?m0W4i8<5D}n^PHBbR~%4rh@zfjW#F|67kCt`RJPk=PmhyK@TLVdB(CatcgPKp;x zMYV&ga}9$$v($1?wDpjBND+ReM5<`!+Dprx(kt>bG3Uch&a4E;3IFl&GSP4KUbKS+ z^@5wn@umBM@%FNxTFt|+?~sVZcpSu9lYRZw9c)n@uUbFDh=>ToWA$CYF|L}Zj3q9v zx}@<4DNcAdgpjJ5e84@G6y!WEY5MI9ubN$XCw(}%oFGG4I+W;io=xP@!=Cmhsr$G% z3=GUdO*IBZepw<{nyC&6ohFxCyzxh2IKKGv3_F!$VB?U|S{;ltCY^MJ>hOay79v0P z7BX~@IETre2ipmSG~(TO^WAnx`vyh$^jD5c-roiK6?TUu%22<74)*?oGUqFf0$W>U z?{jpV@0T`@5?Zz_Q5wm6w^`1M{9%Cuw$oQ%vIS-7kJpq|^RH{e>PL5d@5waK^P7L3A3s{R{|ycky;wMIJw; zezaIRz{jj2CwMh<3={9wr@ntiUm4k#3FQQOF~qDRH8tb3Ht%XU{&Cw&QM1dOe<#P*CoEyZeOG zbZ(5NSAO@OhzJfoIy!Rq_PYYoQGgf>t~B9{Yx9$SW;qV4C!FW-gduLmkfpOb^x!`} zEZ6Uqwa$kc`eM|7gkNpsz(YN-U3dOge$-B;{U>VE(xw+8`b_mz=)M|q8w1<#JDpHJ6KyG8x!1{jQOcx#m*tvE~ZQ*^YL6x{Jowmk_6Vu3t_xft%C zYGL-calR+p?lCdZK+(dviKfJbkI%o~0Z|?v9%or!P5-{GdUH{brAW+9&x8rbWjnjY zCMV1PX#xI@I5moL#+6xI+6X2WO*Zv;Ul^d(n?C;1`N(pmM*1mEF(r3IEh-4%T()0z zOuW8#Zx%#|j*TxsF)Fl=&B(}d|2{q$$1nIvU!8y12J|iEw2qO9jQ|hNg~&$0VUqDc z0VDjw-XU(;s=eu}>Ghv+!I1F)-3ss?pd!2p7kh^Z%f9TEtOXFS&XuLKaQb9m6^l{? zUe8KfTU#0$8e?hxFf!pNHwSecud`V^^HQgr_%$oGUU}26jY0$tw~~Bk_B+pT2OtFl z(rovae*OKAk?j$yh~Ei-UzwSK)w-I?V2J{#A>Ojg6#nu)m(V(^qV+<^uNO-KRfwcu zv2F7V(tEJzhsHV#2kORU>)w6E?`xlibpZ~*M1HsG;@t7xHqTCR2JSfftbt^A&`o|w zA6h-F;de4q8c@-ch#nn%B6xHZ8Wd*wIF4TWKls<6`~J&&2DVD5LSqOcZ(TkFgR`LL z+-LcoY!yaX4uw!`uzjBLZum5PGmMRvcSjwR^8RqN>s9TzD(=D)4YW6GnlRMSM2;2N zr@%YnQ$D^e?4I~3hQoI7AC#9qN!vF0nyJYWSCoC`vRzR1@9V@d<;ch=EJLBht51!- zpjI+IOsZ z&+7`%iG`T8Y1DRow8leREDC-T6=bj)39~z<% zf|aF2^8Wep;JJ8I&X0*pEzKL&-7a5x`)P%oQ?66ecSLySdV9Z zw|)Tp{Q5umJ#zguz<)5jqjVMN6kGU!AeQBe!2XN=i7!<*2}~a@o8Ea56^;D;87EH= z9W77z^yD8J9UY{uf8%G@$rtLv{?W}z9P|hz!*IK?GW)qL+^l%qOEAp2e-+j(iDlE0mjH5^JtiXkoxdQQRx>Nr zv<`Q1$@tadw_aqz18}MPvd2G3I3_I2&+}b4WMPBE*thTTgv2i|Ue3?tlC_KE&XZ5&)_WedUz) zhxDkoSWdt6#0z7XmfKrf&n!B;6csgWCmaR;jkI*@9I{+vhUO^;@~X+2^=H~1(Duz{W6>=#CvMb{v(|is46HM7iEz1ZIIES?xJdNQXk-y^v2{@!~ z6jhrCz+E7Bi3yx`R#wTFUg+y@HASlP@VIzaK*M5o#!s2iz$%yHSMa(fHOtD%jaM&I z-4|vNbnN-h7A8hOI`3d$fO&IkuzbQmNOkP2gNGEN7akIku8oe2kc;@tPGcXcYZ3vzPu3JP$2&$bRum}fHhyl}~CJ@?t-6t+x_@QLu} z>g=3uC54gLw){>?+otI-ab)AUYD-c=fiIvYC$@+`Rv{)TM#N(Y0L=8v++MTNQQJSW ztA`5G(%KSwo7FSjhdQq5F+~O6aKk@fdS93UmU#c;OEqoJ_30LW&1M8L;CSsPoIvb& zKQbpHlX!>8eK*L3fFK%VbmjSuQ{b%3E)0u_&J6VXYrR~aFyIXcfahl0CPeM;m2l#w z2nGj*E-!oXk%{_TL~6j*3&)q0FUoeW14ifUJ=ObjMgaFQkJ?~*;$?j+c*sWYdE9Iq zduS)?Qu`)e^Df1`MTZ3wPqUa0-(Nf1oX3!RK^M@mj8yB-YMa;Hzq*JGr;5t*y|6Qx2;nV$&NCy$Iix%=^*&{_B%I}D=Z z)$*fY=z%I8pf;cYSCsB9BfgxwBtFSYgo;g=gO0ndE?rGp`t%zC$;v9I8VT?Ti(g(j zKVLxJw>(pj!w&zDw>{`|T^Hr7Upqdu9#>bF9}*H3Teme|gaigO+gRA>mfP)v*#jmAFv%G-c$PZ$Wa(N7 zBmEYv@`-va(^6lEh+lq5fQOD%f3@qI6h|?JurGTw0RbmxY2~)CKLsG2Jj}Yh!be9u zTs*w%rsw_Z<28x`5j$_+-hgKW9zghQuOU+x&?yKraSnjl2!gs;>Dg4pbvIt9bimvU zD;$Io)HJ52Mtl#otA$U-=JdEbbIEq5T!g-m>+-pcu93TBRKnRMfX4hum(=`reV4{7 z1AQ;B?nhQl%y%5G?E7`hEXoXd^xXWBD}0f$)cj_V<$i43)mLCv*TJHW^ygFo>^H#;BavU zqe)kTL_1*X+fFm5t2j4q+<4GO1_g5SSx=$F02(5QNmle4T~bfATpkJ 
zcnqGLcpd_e1MiRPm0PGyF^6nG1OZ?xpETOu%HYz!iqA=0odR+pl9l!88c!u@?I3|h zJbe5_PKS*TuWIrk*sM7UY+QwbhcGMxAHL9m>vGe((_^i2MeG*C8&$V4(627Y?s z*mwgqZRoHL@#cq@VQZ=`CokO_zyReUbINoh*s4fKQ=+0iWo6}g!yj?uO=?cgU9$Ux zN0%*So$~?W;&(8EZ%yEgmAt}+@(e~we%I5J;8+;q4}I=+%)Yj<%@=aDUtd}IB4|x5 z5_|ozNBXGvM^>P>6#x)RfR zwj1$YAX4;u>1Ly0Za3^ZLax<5hhxz03g8a0g0#31RDE5>zmRVnQ+$dbu-QnQxu$1A zHi-Q)_H2GeLG`H5Hvl3bcqKz@uzlMUyqOs-RQMAe^E((!29$B;xA*;u{Y zmxG`d500C3<9R5Atl32!99BW~pZlc7#~0T#afDS-8prG54dh77fBZP_t!=Hr@LHE! zJPTVkYzz#&x>y)q+bfTWko%w15yPdjXR_3&Hv*o~(IoU-F5yP3=+-NQY}~?*ii{ET zIC6v+UBycP5p{`Ahy&4FVP?6@+<^yi+&J9$upX{>`MZb&A6jeh^6-SG@IIji9VG2g zhv-#TPwjP;p@xqTIm2n0hrGsA-Gw3S2#{1cu70#V4Rq4g@N#Rh`tk{WNqxb^!V6rG zd3kt@13EF|_wQ#{N#DXp<8#yi{RT|^zub85Tk3Ie(EJ@TWV|hR$=NG-f5S<8$sQ1O zMZ94%vE+h$w;4GdKJfc8cH>3>@ISV;`M6tCP{ZQ7B@`v!6D%(aH{Eu#@pi?LYqpm9 zz-PDe_200m{H=TiJHv_Rd}T%ESJVF0gIe4=q^Vz2Sw*F(ojn!Ija`FtD{rO{D%OW^ zt(@%+E$cif>||!NvCBH#9GVz0MSCOq+Lal%46R$t)u+~6?r3j6Cr2C){G^9 zNexUxy=hA$BO}2oDTNai%5DPZAidAIZu=22|>T8rF4V3%1_)#sF4m_x_ z;r3zm1lk7G|4lLs1y6(K?xjRvApp{N;gF&Fuf>xfvI=s67{vN5qT;puwEBmYu@t>|&P`MRHLh5MC4R#5uJSO00VK9URzi8&&jVAC& z{jVhhPN@czA~xNIW<$(BUlG6D5>QIW|(`ZrLH2AtJMYF1jHK%9F%6b%@=o3KC0G0U}al$Gq?~zX9A05wbr^_PW4Wfji*v ztkW`>@D~t_QuIm-X}|k^kZszGXS+$8xq=8!(}w#x1E6L0!_Bw3A62zH&>Txv{u8S} zWyE)Mlml^#)hpF|NLjfX|0R{Re9CkdLPbM!{`a@3v#g6Et0VfZ=hE^fSlY^BGzb%t z;*(!lzOpm3BHN`lYte8smhizdZhWK9bMXT8rUw>V8WDAu_+JhzS=VWw5~(T92PDL>XV!Pm~T`*6eN zRJ{X&l!kvUl%>xZAr|)b+g5D<`=$}mm$I_TG6K|(GQXBh{+PtN_0%pBq?8pg((nO8 zYYMR9rqBuHE_@{YU-!|GD}O*_WE?A%C};->@b8MEzRfMBz7l6*VuIdCaLhDNAXI(# zP=)kpQ7;aDSk~b0mEuZd&t5I$M`Z{whk<_d_ zuu9w~O?hhiTU;G49wfnBq&Go@kB-96gatfpw;@q>XVqvd$Aig;4>@h$f-)y1t*dl ze?AKTZ-)*lL`5Yc_U?nCZ$Y>T%ic^?4sg8@nCe zAW^8@@EYbgrYt8=Z)Z4(ux|AA1pk=vtw^i-^nE0udl6hm6V)}Q);DPqiIy;z+7t8 zYE<^rdhi`bN0stbVwOukF+xSn<~uta$wB+RyaLLn-)#ywdk-FX)KwT3(m32ftpra2#XW3&Y9vbd{U_%PlRjFt+}JC0_i;4=t7@8QiYIW%1GZ zs&(EIitBeS)0bNphJoHB!LnEDg*B(A$uVg2$`X2thyR+VCK;oNi2V5QIZyshlYi5# zG+90mmbxTrq>3k51I_NJNPoKWc7xBa^Bel*<>d*7oF_||wH_^+YR@49rgU}2es)qT zB?fl;jXxEUJ!W8bOltI|1ws&TGd`~D8~z_7n}v@Hpm8Ez^Yzl)$iH_BO{3H>F}+ZG zo|9Kd%;h-NF-ysg8u{rH3v3Am-)=z#iP(1PRVq3pO5xqVZ)0n#Q(;k91Chlt090dQ z-YzSzgbav`xTd7DaENh&K|xCL-GGca=E;0w1uV0`E}3wi%*AEkb9(f>F|K=(>hIqK zH$t`&$+ggln853AN2m3Z?KJ=VE7sXaRq*(sd-8)*8P)&))%F%#RdsC}=mG?!6$zzF zP$}sWkVfflP(m7{JCu?Z5h>}Gl5Vi*ZUiZ5B$RH>&GVk~{e#0`INXE1SIj-voOfIS z%m)Awst3N7$$Ww7|6xpX1}#Jgzs`MM)7_LPOiu8{;={e93&A5>6SZN0gHkoraNNcf zP5rVs-E*6GaS2HWTh?=h{*JIHxq;z9s%N$ZMfnH<%@&bf8AkfMScr>@OIqCDLhlDr zxxu=Mc7_g5e*SRCl9ZJ6K10>E;Z34qzTX97I6CSg_!6{#(U3HDLjA8#yO}|jPBl}n zfIm9z2)PoHadD}n@)TYW#Jm|Q379S*X?Jy55oPZGQ%l1aG#Pn_X?gz%XQF)Ga7sMe zk@^F=R}UR)zputbK}*x&-xJKJk2}5>#$SKk$q&C|)g`k+=KfI2(%b+cvd;jtfL428 zU@geV$aS~;d0qP2)5y@!u0N`GRzEc9Q#lB@>v?(kiqTRjA^vT-eV=Jy1P6FF&v@Sx z5d-|Kwg$^l`Y}gw%X7uh@AFVo0V_KQUm$N|J<)uF z*icEJ<{tPuQ<9YhoU8MJvf=0RG0-Ct;fnIR7V_O=Z&3i*m~B!pQd)sfJw}C^$z|S( z$Fb}2^NfbDN#(m{W=jZ&c7PK$AZiCVh3#agOTIHB{$oQ!gM$z_>0U-+S5;P~@9P&A z7k_aw|4C2{ViN0spM5K3@@sY8Tk}o(LNG3s_wqL$k(BVkFF^pTai_kc(966!)!x&g zY^0)+kx^DqazRZWOz1}+Tv<8NQ*i5UYwH2kBj1DHRnUu+lnf*$Ch|9b3@wM>#mZ_D zw!0^zdc3vZJ7L)3Bf6@v8f)Hs??4nPQrvrJtrik^z}tHplPKI#E$Yebca5ySF83B2 z9o;uXS$E755l_vVF7%YM{sUJwXzJ4mQ=gDicF#?SYn03G}_wdj`9`DaKA6iT#6o59Bak(}& zw&OiboUUiTpV{MplZ7bDeb&N~5{KzVr*D;Y(4|BMrh2X+FqpcADjvHf=GNp%hjFJSWt zDnpHFr$&vQ-R;{s{xm0Fj*he5Mia(%W~=UsO11|#g(Ln+le+jJ!-InyPBTMmigJzx z$56meBC}$kdm{T;$cTy!5-(R!^^;ckRItQc&Pf%5=?~8u&~>1L2_eZ9TfTe#rRO0h zhCd{oZwLnKM!aW9Uwtt#kBN(^F5ifaH#+L!^OvM^thk>g@KWbW6}&g@kJ|uUO44M# zk_qpwv9$?c;pnrB>9UN$*APFA-)re4WwKrn`AlttgZQGng6eNC_awl8+7(j+#EEEI 
z-M20u063+0$_?5X-VcHU1No7WkrB{g)=<~XF?W`JJ32IQkByD>J}Wu@7f=5xGj;h# z&UGOZQ+U_>8&VDb6JRg+YCnte}SK^jH?fZWx56coe^wb}L57w!ijHB9}6zDGxiZ{N}ciHmjr z>(g%>o0xc?B9X%3)>-sllm!pC&3-WW9OPx^Lcj*_JTWm8R4OGpmo`1HL85O{sBxl* zfA`^Q{}L23!>{ACz&tIl92gx`Reu{D7P{h+dCbDfT5Qo%Q1l7KfB6O3$<)}`jQpRo z$c=zV1%*|68JUnO4I?p>S0j}X{>jY-p8p>f;LBWf^+BG9FFjMw92bieSLE`-;ZQsK zGiW!!(gi#vkgb%K#s(Y3;XyKq%3nZk%#;A%`uzOs$hD9_ScuJ6l=T`uFsj@$6+=P6 z4+#h>FLzRL)_9+aVfH#}nfTGZ2a3V>g_ge?zrnIp7(j)+#)nm_!e1r{byk&&s2BO- z<$AmOGCVy*ujY?pD>QmvQgAIvIJ3NxRaa9Xymbp5`C5ucCW}>vW@2b4rf5f1u1EVv z8ny^m{QXWYBP74Lc%>;Q?a?mtJr-;qQWdBwGBEIesi&@49lVtW(T~^#vleqpbSASxwYhapdmRC|;g9ZVq z2VCQG6%Hi*o=X3@;fobjr-Eb5=+tKCO_JpA^AFkrWM-iMdEfHRJzNU1-XqfqWiNn> zo}2`OS1yYGISo1D{%^+x4x-|)CQ)jxuYiM#eTT#M1S&{+Qe+l+~NjsKvtBfL_7}OMH@Z7g32&G zH2u7Jz*;i<(H@pn0%_0_S)BLj*5@5__K|8o+I);<^wXv~MZxdM6D%T*YaS?%e3siA zw3iRgWngvtw(xKJwKobF&i15nEu1ZpZ=K?2Q8sQFX{p~>U2d}WuUZ0&5W8s~e!n#< zga&+oUdUHhtPvpJDK}>ueL^^cGgFFLSlgn=@PTcJkB5cR;=OsWRATxBN@U+DL;fq3 z!vmVya{c30hg()c#ONxc{UxzYl3d){8 z)eRobj6so!Ai0Su6)90Kz*#Q&-WJW!O`aD&K0!SfB4^aH zbUEk>GDd(pLFOQauBvjGa@|}ZY4W<9hb$a<`KL>V(pCZRtiKK^C*4+$pjCtN%)ct< zmLI+uC;)+erC3W);5DsM?aX|dQX2MgK=?pdz1cvpx@M(B!FQ6HM;hDu+i)_|qe3yY zDWVS@IkY#FWOD($Y4dOd&>rD&zD_K>-*}&$$y(H6uE#PKBHiSsXXqd;aRkttjGx;z ztUOR02bc?55o^#jbf^haKk|IQOdly|4FxwqkwE*MJ5=&ly~F0F7eRpsI?jhWP&I3_ zTpr0@RJX&q_4lZCcO}2zftz2+zTPymN5oB832A(%iRM<9gNRN5I19hrBTAHy5jZQm%o%in-;*)N_X+VS?LwWnXR^QE|}5Z733#E z|EHn;)@Y&eT4L&Wk#4p1?EF>zyYA8*t`8EFUsT70EF`zC*j7PJ;rw3n53V$^oNT#~ zhYlMZUa5vvF8jlI@^hkW1e_evR012e@dcb9&mbU{ruX-B zKaZftu^DiW|L8yv;JGD8^m zaImly$wVi+r`-NT`TTELND*-p;1wB!qRFk0L*F%RC~`pq$bjln9akjykQ%RVX6B+d znNOItt@Im|6Ttvf3JnFLt%k#7T@p|(WfW7lZ%@CCH>!nB?=oA>L>h!bN8x#gUMK5* zgv}SPFdK%J0CU6-0&16^cQzZlGZx z(aTIjh&80D3Y?2;s2R=88JVAS_2r4`w!mi?IN?vj!W|550v{U)$patp&|gbEOzfes z@|BVTRyj!2FB|ZIG|Sr-BvevWOKn~l8v(XH*jdUlyaRki<3|=ZFKazLd7Ib%{{B$4 z0B=&rGI@y2D!X#MCTv_`8-ym2KOzMcV)O(SmIs!$)@mx6l3WwOov1#*pTUOR6)MfI zU%$?63+|{oEd+tXqL06zm;?HZhKy~$vLAvtLz2;BnmN&w{tE4)iKFa;g9F3dws(ZT zf$szqYY1;G@Qt*z(&8j+?d?fPNC1QKW_jL*SBN23bSpQf>bc7NRcAu-( z(vni|Gcot}uEoYCAFfHNi|gaw4O4v5onKHqJUXVTq6x5qp03=k?oJwd76d@J*x_M8 zK|x-JoXZ9gK;ecB7z)jSf!DyyPB~v!QccYXsD6>sr|O0FvtG_?lg0F>(;f!2^?BKq zjfv93L&{mo_aIRL%DH1X@L^S1NSKBA21~Zo|Bl{wQ*+|74>XEmU-J0Mc@W z`JX<#dp}2M3pG$jAsO)}FmN*#^bq$0wN;W%Oj=i0K_gL5-9RllDK!)g+~^leXbQni zspR<5KNRCR6c3yO{8Wx|mxp$+?^xbnlzf|#%yjSZysHm(B_{n%a)JII zSB2yIIAWk% z4j}B{f|dCi*lYY`S}4dq>d-;-0aL83euF9S^c}4*s6qhoaki=t@ufhWI+)u4D@=N1 z%hJZ?L1F&lCYX4>9K{fJf`Bh3L!@*F7+Z~J-`5tPg?pE03smm4-S#63V1Ued(^^rMQ#0fLB5==`Ld&v6s2^WpFIV-x_slU z^DhZw)yT<`ara%IlmrQRz6UyR51@KH(ugV;@U`jZ;64bQWD%@iYI0n#0t;y0dn3|! 
zR{+D9Hz~Tar_aG@w%>ZI^~+pZDL(b>+qd69TLBPQlm@hie)tKI7-$QppsIc=ub+#T zbB>9NqfH6rXliS|L#-=Zy_49rk=R|q_?Ae*s z$jF$=z4!eX{y$d5e9fl7W!ehBJ#*Z<6l?L-dM4Zz^CRI1lOrtp!RcnXA^fZyN!7b?Ydj$zoav z1Ea0Q7#KVXz1$+h=FhHvyu+GP`)clrwtYD=<2c_5>7Vg7S3Yr&efnmXS_f-kYG80| zWKBg*pWt<|%Q`-6+}5Ou|AI4s|8yO)uI`nn=NM$T5%w>Jjs_uQS+~6Z@c({1sd}Q9 znIR}L5^+15i&1M_8+iEe2JH6-_4ZQt&*6Q|i9t^0(3BMRSRTJ3Zk^G(Q;4XjsfGBm zf8-1Aw`muRgz5s+7oomewRY*|^Tt9GJYtBFJe$Y~<67ki1 z`tDFB3hN7qI4=E+c(Y`JKx0Yn_*d(CYgrSuHNffCyx@Jyz@*q_7Y!H##YaOtLb<+ zGpuVpq4vj`8l&%}4tV2S0pergrU%79QUg*T-#yv(Ewm!wT>~K+FLKg5Es)*tTI0dq zPP5bIZlR7PE31Nn0xrg`(O4HdXvm?Iz6O-?$9PsrJ)l+AY)o{&Z~~NYKzX#=_X~Qa zPKF^!^U_G_wuM;e@78%P5sKXXy6{h2{54=1V^qq{7}PFHEo^PWqax$uk<%dqlLQt;lo&NF5^kd2Fplm+6%u+7D)9W0vBrm{gukf_fRTjLSJpTy#Xu%7Afha zg-t&FU!hO{-$mAn&Lh7*@4P$Oo~XDHm)MV}T6}f|$s!ME1xv2@XGgYw32=e$3@mIS z?8I^Dh4pe&M6%pGJ=XqR^n~y>&W@ze{1uU6;X@omtp~;MPzP z4vs+*U7YY07LS1L)tgaQ{Lr>Ej8MtRm91*mg=&BfKr;w!fzQ9wv>E3z9YB_}Y*duV14Bby z{Y(J6>48+FvtSK8Z{tIUiwZD2>yekxJLk)ougFs>DYUQkxxD$3|BBtBxG^guZPen^ z(%49-^o9J+DyT>($*G|c`@D&QjVdK2R)&EPme}RKR6w)!M4xf%h6CDkn?B3TG+VP& z+vW-^m*9P|g9j=o!a}@RK6D~K^jmz4wDg>IJ>yF?YfDOZQ5p==>{?Pz=8T7`HtO}U zP(sleEBLhQmERB{fBO!EZ@U<0ZY9L%Mui)eJg{JSf9sus5fK`4I82=q*1+Fwv5w^U z%Ap!OO_JmJF>10BjQSX}V!@(k&HBDul3-FL19y#fadI{ve#)x%w>Z550?j0L&0A^? zzD>>LRrRH>J?7Nyk=od058eyTN!#?WeU8&}YOMQoEV#(GZ{Hfpq2dh9OVXQnrp5B) zp1uL$XJ05BxO=+|G6YC`Tg45Y{&InNV1wxog)D$6gHPpS18_BQ|FiYoZCeyHc0h)j z{biEnV0*;jUqInPH5~*ovSj$#3|1vUdJ0fWXB(~7fV7q;DXb~@w;m@~%Ntd+@e4Ru z=1Yr?6BHU6ORc-+SKy(?y3)&`T=7t+{AqLWJMItiSWhjWp~Uo0pASHmRu9*S!+}a) zhiYoN^s&LcI6Xh%G0on#c^MeZW{(~|Y7+l8)1SzZQ=fZKl1Ga4BnUbzk8J&l!rgOz zA4detM&pIpflu{^F)9dj%|kYB!m3X|CEDB3!$=qkS>JMhBJKVACP+y}0C!{A)V9D#l zcP2*6R*_o!)0kT;@6?8X%2QM1CiW*3LMJH6 zqH%8&@{s^HeUAu4h8Y{bSyn%S|1t z69ARUQ=sZ^NcldmU+YaTa^I{+wjESeP##OaIGDzazNI#V04~VAFXFy*@QEykVGC9Y zhl{Zj*XEdk5^u7w?amQ&SY1Eay*MyXDh2UWin*d<7LwkSoOGIg;qBkYKpa1gPCWvK zpNCU!)~)Byys=nb{B4l{(&9jKiXHj5F|F){;*TGaN)$68cPxMad;<`bG6&mGTiLc0 zgV$(ad<3KJRAK1jnGB5`UlAJEvd3*+hDU_mv9dR}H{WG@G-TF9zT=!8$IE$-xey?vQ2Z7@Jwk8n>gX98JwD*! zH(y}y#nGN=vzaw-$ck?KdGOaKS==q1&f|IAYFI61qG144L|1XY8i@woZy`~`F04*z ztsm1*b}2Rl>hnQzT0PYNkN#xi=5*@j{OMw#Y4!eWf@Um7Yy15wK}zB$*s_vZKMWM> z=Z`GB74(Y3@(J-@8W49SbS7Sp4a{WzUH->WG)@h4@nPeaT%_hrnx>LTJ#DHapYVG% zEDwSfCV73L`yPYm``B1msE;-Ad4wRx23$RR>Wrv^J39i7A}-0UK%O(1F5Ui(UIDYx zpk_m|fi^5$t|_hMHs?fjR@TB+Ks(^5kb%L$GS4PGVA#Suf_~#nbNB>cB_>-ErJZ!P zKcvR+IBewW3^QqHjn~?36)FGq+njcJOk_!p{58L&?HK=oP2i?pnIe%S$L9r`KJkr` z{t98?sR|(#v9ixN!r_E*Z>3d zeDn)tZZLLY&51NOJNFcyd~sEleL3tw_CnSl38+O*4+ z^~h+G-yOL?JPgcUtn*`m!lJZvBOt*<$8FRL)RE86ccX+3zePmgQc`a4+WkC!l6Kp9 zU0+hYGB)7+-8z8?nA5bzN|>&FwjvE%w{0vYz^MUTV8FLy%VS z?EY~x@=T?@r}Jw`Q4tHreJSOgi{yS77{Jiw7G_Q7tu%DTEHEij;|ZE|ESxy+V)&ci z(NTMGv2CcGZGLJbJay^0?Rzw$g994^;%}t>G2uPo%MH_xR%Sc{gRn4kp#utrw4wtdlA|@|_i(zHr-`n9s_AmHElo{?n`>ckPB@%e~Q?$;g~+&2&i{n{TAF z2y^GJ1NZ(gR~(n0^|=%#$QxGgb-SO--m^5qc*6V5xs!~@jg-smisF?uLzKyl8psD3 z@wBS-Mr!Po%u|OK&JyUc#I!i=YRMW9|NHIG3SX4MciwWUonjl(4WViJ7{!#@7`J;N zaIfBNZEw~HkojtW_Zbj7@0C}mclqbJ$3HgK_Gvv}Zga_laS_O0Z5vTvr;sc4 zbzG4@OFs=6KH#oSsbwhRl8+cYh%;<(V7|-KlwUjXw!ORCONWi<;`au(sj_I)2hE1+ z;+5@~%PEO* zlq5IzX&!~y7;^#|N~_~=^B0s*8T&t8t~%J*c#Ag~t!p}PH$Ivspv3dMK~kVvpgJ^! z{+duOTrj)oiSl2aAn|$81Ggdm9Q}LsFOMgOe=G)GHZ5uE4P+O^w zIzC2!_0|0Qx2%r0Lo6f}pqyzE;u?T?;FEf2JolNSz_R}bVW=!avzI6ZqRD#tedNcL z()aM#^ypdb#z*}feJ-D{mfYN2&vT2}4CJBH44um`)9bI#Zj+No9Z-5g=hITEbwcHP z7Iiyt!D5PI+oMtZ-;eAKm6t1>jM2p$9i*H()_s(xS8hq=ingX93OhO2Dae0|N6q<| zE#9cbLkL(GqAmX1>URY5MG=;BNCq3@w^?_^+(&|Yg+BffEYaQ(1;Gc8VR*o5HYc~? 
zyB`u=eoM>PpW4De7=C)V#^F;gipVlQV1tbM-_AbPf^-a@Z1y|@P?P3HDHbx^})x3H91Yh&9XDi+LV}c zXh>3%77ejxmUH47h8=IqCwRok)zNye(wxtJ2o)VbJe{-SZIk8Mm3t?>Q!0Z>9p^^p ze7v2s)i#3>449iXMbyGkak4oYq!#w(*5=kJF^MT3QpGTYc6Yp|2k$d(f!mFs)5ZF} z9$cE4hydB*x^m6nQR$)G%`nNee$Fdm8a)FJI8sVr@9o%JdYE61kJ;t*8{mpK8M4GY z>h3rhAAI%@C0(JApkBjC^`DJx zG00en=(-Fl;X84sMJwhJY>XG0h39r_7Arkf%n_v6BR+;i$++V&1?}w&`?kI@@RA0f zCibsC_ilKWGI2QMn3-{t6>q1Gn)hdhEFY)IqxP~C{Qc;)6@pe0+94J?_zb zPOc%XOIcrA6P%cO=O#GrGV-iq)=P23oI!-v@^@vOGg+~Wn>|eC)A^V%z8W)D)?Nd> z@sx&_OYp#OI4GI?PNm}a_Fh@8%C?mwwu~aEa};(y{-=pc%dFm9`Na?&7Z?^DOhP(h%Hmf(?JPDk>8-wX{m3t9wkz{Y*8#z9m ztnK8|c(6Ly+Ypsy>8|d)pNAGJo6Xk3&%|(d`ES zOw{f4V5h9CTwcNNgOT>C{`gJA@yRRA52MG}t@eyvEUaGW$odo0ll_A_Y7&nXLv4t@ ztKxF%NBd=Xcr71R4{pbeYp?5CRPc)@S~w0n;Ol!K`O4~_@03yxb$ow^u^^0RIAi0< z=l8FchA}EFy??A>;P6^N-y8RTH;@rqOvCN{ySf86NYKhke`Ir$Y;k{ zc2poKQqL7i0>=;&I!I{Q+A3a(2uERHkP_{LqoYD>kIXsLW#=WU7N~zxU*t%>=cU0G z+X*}v9i2PKTSY-W@ynR*B_B`MWDYa$pM_cwT=_p$Fm-lz4!#((A9+K&wDb)vBpMwf zP1xx|b@V*?)61nv1?0$QBK^Y0lYxPe`VnEi;-0s3Tgd9_d^3d0y?SoLT%4OWwi}y< z=Fe@4iXpR_yqrvmaA1z_zJuFleo2;>RouJa7>@p$%0MXgpP3owo!HK^4b)v6>?i$M zc-9}RKmUYIT<}xUf^{?{itOWv$I7}Yv?digckhmp-3hb!l`j@;H-nCTb{-OKJJFb> zGc2va*QJ(r6C)r){nOm6KzM&{@4WND`ZF6Fe#?_a2^2Ad#z(N@+t1X~$96WjZE{;r zUdvFH%v~*c5eEa*P*ZGFyHTol`3r~N)p2iIVl;U@A$fDA+V|k{L$H)2+&(2QL^0ZW zIwIX2sx=QhFV4`fUOmoXK5wxpVy$p;Oy;rN)4QDcyS=_Q$e%+@j4Fl|pvZUe%g$l7 zJs`1>-M{l$1Ow|!c zOpjrMV<%|WLk;p8Lhvn%f$+&mh5XQDP{g&EcS%$?i4{^0JN$WGG4+dthI-wgB6eJR zfPfn6(su&FDYwn-4X$V7GZO5E%kely1ecu)j-%+Xz&xVmIBqKz0>1b&&P1xM7EMY~ z4~shIC29A?@78>F?LXSAEi9b(+=UYz!e1z`tS4Zm2>{V|tfIk9fvn7mlLwRk5W_!| z@5SA}v258!9J7YwHpwQ5mADUhE{{tIeDd+-Fp(~^rFgEA?Qmg=Bo|CX6@Bd+_H$x% zY=+l`clb(P{Cac$4W>Wp%`_HpcK!RD-DufuW6W*i#~lENNwuN|i5G}$Xbg;vMR_Ih zThIKKkO_bQu-W1mEL)pjy=smP@R{Oe6`&|yZRao-XIm2KpDH)8KUEawvz^Wnp@#{Z z;)T!1m`8;th>nOD2%#Y!%D_lckbkPv;6NVUi|QZI!@|k(#ynmmT&oC-OYXBW(-U{R zY4^H?74m)XUFRZfy0WtJzBj1u$H*d&4MaOX4+CH6Cd3#3K6)%Chl8a4XanlD%}le5 zgapV>(lHBZ6{!2VGLbRM;XrU+u-5?kRK2L;`W&<3E_sVuCk6nI#cSQ7j2+so4exJIIiJ z`E}gePDOi?M!MreEgXstp_CX&VL}`QV7lg!mxqQEkpq4m`xlScmF^LK4PnjzzGuOU>T7|UKiz_X2d^9wiQ#68!sOnImqijwL1B+wma#z{^Z+{MHC|k zN8RjP%tBsY!Nor@(ML8iyRH1=F27o=cR@BODvjS@cj(3vB-+jHUu_I1DPb2_&Eqx8 zghbkK@w*HUJ9Y-OG$e0No)i>>mesdn$6L?ok8=pzWqrhX;;X%wQ^+@BKjU|F8v#;x z8vdizpBV9WP zKO21r0=}*e)tTT$kB&wCx%Tee+e$gm@w(;@3HgD$J!4kCdd&H;a&>Fs3U}s3`gbKX zua6hSRHa)%zDyW_5&UP|${{%w#lt5An{nFjhwj#=3HFNiIMV^(VCH$5cmaPI@74%J{ zdH1)6oi_64k;k4<+|Dld=SU;(h(s!n( z?bl$l78b1ZqPXR^_05b^krkavAeP%jt|GauMFzCDpK4X||c9)s%${y}xfVY7%uZ!)jl zc<$&l=DeCYE8>5$|Iy8U3PCPRyu-uM`Ru0uQ3_!pP`UoV_R+dy0p&|XUHC{EbKKKT zx;qT35*$u=dzg5ux7>GneiK{ED=28P8T^Hc-Se9STE*vH5ExT;X>;6(0Hq~1y*jpg zM;nQ6zY{52ECag3;zTEXyCT``Rij5f@=dU3 z53Yeq`XLj)Gs9bkxa&g)U43JM&n}s`BU0x0-c{z|M0-y(S?4j2OAHTjwaF)JO?NsS z>^Fznz>3E2a)273*y6O{y)*

+ ![Image title](assets/images/logo.png){ width="300" } +
+ +
+

Generate text with LLMs

+

Robust prompting & (guided) text generation

+ [:fontawesome-solid-bolt: Get started](get_started.md){ .md-button .md-button--primary } + [:fontawesome-solid-code-pull-request: Contribute](https://github.com/outlines-dev/outlines){ .md-button } + +
+```python +from enum import Enum +from pydantic import BaseModel, constr + +import outlines.models as models +import outlines.text.generate as generate + + +class Armor(str, Enum): + leather = "leather" + chainmail = "chainmail" + plate = "plate" + + +class Character(BaseModel): + name: constr(max_length=10) + age: int + armor: Armor + strength: int + + +model = models.transformers("mistralai/Mistral-7B-v0.1", device="cuda") +generator = generate.json(model, Character, max_tokens=100) +sequence = generator("Give me a character description") +``` +
+ +
Star + +
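A minimal sketch of how the guided output from the landing-page example above could be consumed; it assumes `sequence` holds the raw JSON string returned by the generator and reuses the `Character` model defined there (the assertion is only illustrative):

```python
import json

# The output is constrained to be valid JSON that matches the schema,
# so it can be parsed without any post-processing.
data = json.loads(sequence)
assert set(data) == {"name", "age", "armor", "strength"}

# Round-trip back into the pydantic model defined above.
character = Character(**data)
```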
diff --git a/docs/overrides/home.html b/docs/overrides/home.html deleted file mode 100644 index 4e32efee..00000000 --- a/docs/overrides/home.html +++ /dev/null @@ -1,202 +0,0 @@ -{% extends "main.html" %} -{% block tabs %} -{{ super() }} - - - -
-
-
-
- -
-
-

Outlines

-

Use Large Language Models and Symbolic methods to generate text machines understand.

- - Get started - - - Go to GitHub - -
-
-
-
- -{% endblock %} -{% block content %}{% endblock %} -{% block footer %}{% endblock %} diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 380cc358..d98248e9 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -11,3 +11,35 @@ code > * { border-radius: 1rem; } + +.index-pre-code { + margin: 50px; + width: 60%; + left: 50%; +} + +.index-pre-code pre>code { + text-align: left; +} + +h1.title { + color: #FFFFFF; + margin: 0px 0px 5px; +} + +h2.subtitle { + margin: 5px 0px 25px; +} + +.md-typeset { + line-height: 21px; + font-weight: 400; +} + +.md-typeset h1 { + font-weight: bold; +} + +.md-typeset h2 { + font-weight: bold; +} From ebde68210d6023668dfbbf12cf52f721d4e74e98 Mon Sep 17 00:00:00 2001 From: Ivan Herreros Date: Thu, 23 Nov 2023 10:49:33 +0100 Subject: [PATCH 309/734] Track token count and add system prompt --- outlines/models/openai.py | 86 +++++++++++++++++++++++++++++++-------- pyproject.toml | 2 +- 2 files changed, 70 insertions(+), 18 deletions(-) diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 7d7ac61c..52058103 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -79,6 +79,8 @@ def __init__( model_name: str, api_key: Optional[str] = None, max_retries: int = 6, + timeout: Optional[float] = None, + system_prompt: Optional[str] = None, config: Optional[OpenAIConfig] = None, ): """Create an `OpenAI` instance. @@ -93,6 +95,10 @@ def __init__( `openai.api_key`. max_retries The maximum number of retries when calls to the API fail. + timeout + Duration after which the request times out. + system_prompt + The content of the system message that precedes the user's prompt. config An instance of `OpenAIConfig`. Can be useful to specify some parameters that cannot be set by calling this class' methods. @@ -120,7 +126,16 @@ def __init__( else: self.config = OpenAIConfig(model=model_name) - self.client = openai.AsyncOpenAI(api_key=api_key, max_retries=max_retries) + self.client = openai.AsyncOpenAI( + api_key=api_key, max_retries=max_retries, timeout=timeout + ) + self.system_prompt = system_prompt + + # We count the total number of prompt and generated tokens as returned + # by the OpenAI API, summed over all the requests performed with this + # model instance. 
+ self.prompt_tokens = 0 + self.completion_tokens = 0 def __call__( self, @@ -158,7 +173,13 @@ def __call__( ) ) if "gpt-" in self.config.model: - return generate_chat(prompt, self.client, config) + response, usage = generate_chat( + prompt, self.system_prompt, self.client, config + ) + self.prompt_tokens += usage["prompt_tokens"] + self.completion_tokens += usage["completion_tokens"] + + return response def generate_choice( self, prompt: str, choices: List[str], max_tokens: Optional[int] = None @@ -210,7 +231,13 @@ def generate_choice( break config = replace(config, logit_bias=mask, max_tokens=max_tokens_left) - response = generate_chat(prompt, self.client, config) + + response, usage = generate_chat( + prompt, self.system_prompt, self.client, config + ) + self.completion_tokens += usage["completion_tokens"] + self.prompt_tokens += usage["prompt_tokens"] + encoded_response = tokenizer.encode(response) if encoded_response in encoded_choices_left: @@ -255,22 +282,46 @@ def __repr__(self): @cache(ignore="client") -@functools.partial(outlines.vectorize, signature="(),(),()->(s)") +@functools.partial(outlines.vectorize, signature="(),(),(),()->(s),()") async def generate_chat( - prompt: str, client: "AsyncOpenAI", config: OpenAIConfig -) -> np.ndarray: + prompt: str, + system_prompt: Union[str, None], + client: "AsyncOpenAI", + config: OpenAIConfig, +) -> Tuple[np.ndarray, Dict]: + """Call OpenAI's Chat Completion API. + + Parameters + ---------- + prompt + The prompt we use to start the generation. Passed to the model + with the "user" role. + system_prompt + The system prompt, passed to the model with the "system" role + before the prompt. + client + The API client + config + An `OpenAIConfig` instance. + + Returns + ------- + A tuple that contains the model's response(s) and usage statistics. + + """ + system_message = ( + [{"role": "system", "content": system_prompt}] if system_prompt else [] + ) + user_message = [{"role": "user", "content": prompt}] + responses = await client.chat.completions.create( - messages=[{"role": "user", "content": prompt}], **asdict(config) # type: ignore + messages=system_message + user_message, + **asdict(config), # type: ignore ) - if config.n == 1: - results = np.array([responses.choices[0].message.content]) - else: - results = np.array( - [responses.choices[i].message.content for i in range(config.n)] - ) + results = np.array([responses.choices[i].message.content for i in range(config.n)]) - return results + return results, responses.usage.model_dump() openai = OpenAI @@ -292,8 +343,8 @@ def find_response_choices_intersection( choices. Say the response is of the form `[1, 2, 3, 4, 5]` and we have the choices - `[[1, 2], [1, 2, 3], [6, 7, 8]` then the function will return `[1, 2]` as the - intersection, and `[1, 2, 3]` as the choice that is left. + `[[1, 2], [1, 2, 3], [6, 7, 8]` then the function will return `[1, 2, 3]` as the + intersection, and `[[]]` as the list of choices left. Parameters ---------- @@ -305,7 +356,8 @@ def find_response_choices_intersection( Returns ------- A tuple that contains the longest intersection between the response and the - different choices, and the choices which start with this intersection. + different choices, and the choices which start with this intersection, with the + intersection removed. 
""" max_len_prefix = 0 diff --git a/pyproject.toml b/pyproject.toml index 87493cb0..48f3e9dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ module = [ "jinja2", "joblib.*", "jsonschema.*", - "openai", + "openai.*", "nest_asyncio", "numpy.*", "perscache.*", From fcfc48b2cb5a391764d20925e6b3a1dd7d37c55a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 17:10:42 +0100 Subject: [PATCH 310/734] Update the OpenAI integration's documentation --- docs/reference/openai_text_generation.md | 94 ++++++++++++++++++------ outlines/models/openai.py | 2 +- 2 files changed, 74 insertions(+), 22 deletions(-) diff --git a/docs/reference/openai_text_generation.md b/docs/reference/openai_text_generation.md index 5845545f..5eb1c8c1 100644 --- a/docs/reference/openai_text_generation.md +++ b/docs/reference/openai_text_generation.md @@ -1,55 +1,89 @@ # Generate text with the OpenAI API -Outlines is focused on 🔓 models, but includes an OpenAI integration nevertheless. You can instantiate a model very simply by calling the [outlines.models.openai][] function, with either a chat or non chat model: +Outlines supports models available via the OpenAI Chat API, e.g. ChatGPT and GPT-4. The following models can be used with Outlines: ```python from outlines import models -model = models.openai("text-davinci-003") -model = models.openai("gpt4") +model = models.openai("gpt-3.5-turbo") +model = models.openai("gpt-4") print(type(model)) -# OpenAIAPI +# OpenAI ``` -!!! note +It is possible to pass a system message to the model when initializing it: + +```python +from outlines import models + +model = models.openai("gpt-4", system_prompt="You are a useful assistant") +``` + +This message will be used for every subsequent use of the model: + +## Usage - It is currently not possible to pass a system message to the model. If that is something you need, please [open an Issue](https://github.com/outlines-dev/outlines/issues) or, better, [submit a Pull Request](https://github.com/outlines-dev/outlines/pulls). +### Call the model -The OpenAI integration supports the following features: +OpenAI models can be directly called with a prompt: -- The ability to stop the generation when a specified sequence is found [🔗](#stop-when-a-sequence-is-found) -- The ability to choose between different choices [🔗](#multiple-choices) -- Vectorization, i.e. the ability to pass an array of prompts and execute all requests concurrently [🔗](#vectorized-calls) +```python +from outlines import models + +model = models.openai("gpt-3.5-turbo") +result = model("Say something", temperature=0, samples=2) +``` + +!!! warning -## Stop when a sequence is found + This syntax will soon be deprecated and one will be able to generate text with OpenAI models with the same syntax used to generate text with Open Source models. + +### Stop when a sequence is found The OpenAI API tends to be chatty and it can be useful to stop the generation once a given sequence has been found, instead of paying for the extra tokens and needing to post-process the output. For instance if you only to generate a single sentence: ```python from outlines import models -model = models.openai("text-davinci-003") +model = models.openai("gpt-4") response = model("Write a sentence", stop_at=['.']) ``` -## Multiple choices +### Choose between multiple choices -It can be difficult to deal with a classification problem with the OpenAI API. 
However well you prompt the model, chances are you are going to have to post-process the output anyway. Sometimes the model will even make up choices. Outlines allows you to *guarantee* that the output of the model will be within a set of choices:

```python
from outlines import models

-prompt = """
-Review: The OpenAI API is very limited. It does not allow me to do guided generation properly.
-Question: What is the overall sentiment of this review?
-Answer:
-"""
+model = models.openai("gpt-3.5-turbo")
+result = model.generate_choice("Red or blue?", ["red", "blue"])
+```

-model = models.openai("text-davinci-003")
-response = model(prompt, is_in=['Positive', 'Negative'])
+!!! warning
+
+    This syntax will soon be deprecated and one will be able to generate text with OpenAI models with the same syntax used to generate text with Open Source models.
+
+## Monitoring API use
+
+It is important to be able to track your API usage when working with OpenAI's API. The number of prompt tokens and completion tokens is directly accessible via the model instance:
+
+```python
+from outlines import models
+
+model = models.openai("gpt-4")
+
+print(model.prompt_tokens)
+# 0
+
+print(model.completion_tokens)
+# 0
+```
+
+These numbers are updated every time you call the model.


## Vectorized calls

A unique feature of Outlines is that calls to the OpenAI API are *vectorized* (in the [NumPy sense](https://numpy.org/doc/stable/reference/generated/numpy.vectorize.html) of the word). In plain English this means that you can call an OpenAI model with an array of prompts of arbitrary shape and it will return an array of answers. All calls are executed concurrently, which means this takes roughly the same time as calling the model with a single prompt:

@@ -165,3 +199,21 @@ You may find this useful, e.g., to implement [Tree of Thoughts](https://arxiv.or

!!! note

    Outlines provides an `@outlines.vectorize` decorator that you can use on any `async` Python function. This can be useful for instance when you call a remote API within your workflow.
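The note above describes `@outlines.vectorize` without showing it in use. A minimal sketch, assuming the decorator can wrap a plain `async` function and be called synchronously with array inputs, as the vectorized `generate_chat` call in this patch is; `mock_api_call` is a hypothetical stand-in for a remote request:

```python
import asyncio

import numpy as np
import outlines


@outlines.vectorize
async def mock_api_call(prompt):
    # Stand-in for a remote API request; the decorated function is
    # broadcast over the input array and the calls run concurrently.
    await asyncio.sleep(0.1)
    return prompt + " -> done"


prompts = np.array([["a", "b"], ["c", "d"]])
results = mock_api_call(prompts)  # same (2, 2) shape as `prompts`
```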
+
+
+## Advanced usage
+
+It is possible to specify the values for `seed`, `presence_penalty`, `frequency_penalty`, `top_p` by passing an instance of `OpenAIConfig` when initializing the model:
+
+```python
+from outlines.models.openai import OpenAIConfig
+from outlines import models
+
+config = OpenAIConfig(
+    presence_penalty=1.,
+    frequency_penalty=1.,
+    top_p=.95,
+    seed=0,
+)
+model = models.openai("gpt-4", config=config)
+```
diff --git a/outlines/models/openai.py b/outlines/models/openai.py
index 52058103..45509a9f 100644
--- a/outlines/models/openai.py
+++ b/outlines/models/openai.py
@@ -57,7 +57,7 @@ class OpenAIConfig:

     """

-    model: str
+    model: str = ""
     frequency_penalty: float = 0
     logit_bias: Dict[int, int] = field(default_factory=dict)
     max_tokens: Optional[int] = None

From 2095808b9a5f02ae9f06b013eb955f5a40710770 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Wed, 6 Dec 2023 19:09:02 +0100
Subject: [PATCH 311/734] Change line spacing and index code width

---
 docs/stylesheets/extra.css | 12 ++----------
 mkdocs.yml                 |  2 +-
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
index d98248e9..bbb46b81 100644
--- a/docs/stylesheets/extra.css
+++ b/docs/stylesheets/extra.css
@@ -4,17 +4,9 @@
   --md-text-font-family: "Inter";
 }

-.language-python.highlight > * {
-  border-radius: 1rem;
-}
-
-code > * {
-  border-radius: 1rem;
-}
-
 .index-pre-code {
   margin: 50px;
-  width: 60%;
+  width: 700px;
   left: 50%;
 }

@@ -32,7 +24,7 @@ h2.subtitle {
 }

 .md-typeset {
-  line-height: 21px;
+  line-height: 24px;
   font-weight: 400;
 }

diff --git a/mkdocs.yml b/mkdocs.yml
index 7119a03e..cb70a60a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -71,7 +71,7 @@ markdown_extensions:
       line_spans: __span
       pygments_lang_class: true
       noclasses: True
-      pygments_style: nord
+      pygments_style: nord-darker
   - pymdownx.superfences
   - pymdownx.tabbed:
       alternate_style: true

From c0512dc6aa94dccda5778c034a18a3dc7952d12f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 7 Dec 2023 09:40:20 +0100
Subject: [PATCH 312/734] Fix the width of the code example on index

---
 docs/index.md              | 2 +-
 docs/stylesheets/extra.css | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/index.md b/docs/index.md
index c70f11e7..73e598af 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -40,7 +40,7 @@ class Character(BaseModel):
     strength: int


-model = models.transformers("mistralai/Mistral-7B-v0.1", device="cuda")
+model = models.transformers("mistralai/Mistral-7B-v0.1")
 generator = generate.json(model, Character, max_tokens=100)
 sequence = generator("Give me a character description")
 ```

diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
index bbb46b81..99671fd8 100644
--- a/docs/stylesheets/extra.css
+++ b/docs/stylesheets/extra.css
@@ -6,7 +6,7 @@

 .index-pre-code {
   margin: 50px;
-  width: 700px;
+  max-width: 700px;
   left: 50%;
 }

From 64232df7af3f0941f9ceb86b3b3bc146a6c7bc14 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Thu, 7 Dec 2023 13:22:54 +0100
Subject: [PATCH 313/734] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 65f0b5e9..e1a37aba 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
 [![Contributors][contributors-badge]][contributors]
 [![Twitter][twitter-badge]][twitter]

-*Generate text that machines understand.*
+*Robust (guided) text generation.*

 [Install](#installation) •
 [Guided generation](#guided-generation) •
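Patches 309 and 310 above introduce per-model token counters, a `system_prompt` argument, and an `OpenAIConfig` whose `model` field now defaults to an empty string. A minimal sketch combining these features, assuming an OpenAI API key is configured in the environment and that the model name passed to `models.openai` fills in the empty `model` field (the prompt strings are illustrative):

```python
from outlines import models
from outlines.models.openai import OpenAIConfig

# `model` can be left unset here; the name passed to `models.openai`
# is expected to fill it in.
config = OpenAIConfig(frequency_penalty=1., top_p=.95, seed=0)

model = models.openai(
    "gpt-4",
    system_prompt="You are a useful assistant",
    config=config,
)

answer = model("What is the capital of France?")

# The counters accumulate over every request made with this instance.
print(model.prompt_tokens, model.completion_tokens)
```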
From 1fbd6696cd51356206da79c9a9e665f3afce473e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 16:17:36 +0100 Subject: [PATCH 314/734] Update the `get_started` section of the documentation --- docs/cookbook/index.md | 1 - docs/feedback.md | 14 +++ docs/get_started.md | 99 +++++++------------ docs/index.md | 3 - docs/licence.md | 213 +++++++++++++++++++++++++++++++++++++++++ mkdocs.yml | 8 +- 6 files changed, 263 insertions(+), 75 deletions(-) delete mode 100644 docs/cookbook/index.md create mode 100644 docs/feedback.md create mode 100644 docs/licence.md diff --git a/docs/cookbook/index.md b/docs/cookbook/index.md deleted file mode 100644 index 741ecbe7..00000000 --- a/docs/cookbook/index.md +++ /dev/null @@ -1 +0,0 @@ -# Cookbook diff --git a/docs/feedback.md b/docs/feedback.md new file mode 100644 index 00000000..e689af3c --- /dev/null +++ b/docs/feedback.md @@ -0,0 +1,14 @@ +--- +icon: material/comment-quote-outline +--- + +# Feedback + +We highly value the insights of our users, and we would love to hear from you. If you are using Outlines for your projects and would like to share your experience with us, let's connect: + +- What are you building with it? +- What do you like about it? +- What challenges are you facing? +- What do you think could be improved? + +To schedule an appointment follow [this link](https://cal.com/dottxt/outlines). This is exclusively intended to share your experience, please go on [Discord](https://discord.gg/UppQmhEpe8) or [GitHub](https://github.com/outlines-dev/outlines/discussions) for support. diff --git a/docs/get_started.md b/docs/get_started.md index 5d8c5644..75e87751 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -1,97 +1,62 @@ --- title: Get Started +icon: material/human-greeting --- - -
- ![Outlines logo](assets/images/logo.png){ width="300" } -
- - -
- -

Generate text that machines understand.

- - - - -
+
-
-#
-## :sparkles: Features
-
-- :material-keyboard: Prompting utilities
-- :material-regex: Regex-guided generation
-- :material-code-json: JSON-guided generation
-- :material-dice-multiple-outline: Multiple sequence sampling methods
-- :material-open-source-initiative: Integration with several open source libraries
-## :floppy_disk: Install
+# Getting started
-
+## 1. Installation
+
+Outlines is available on PyPI:

```bash
pip install outlines
```

-??? info "Using OpenAI and Transformers"
+!!! info "Model integrations"

-    Outlines :wavy_dash: does not install the `openai` or `transformers` libraries by default. You will have to install these libraries manually. To use `transformers` models you will also need to install the `datasets` library.
+    The following model integrations are available. To use them you must install the required dependencies:

-## :eyes: Sneak Peek
+    - `openai` for OpenAI models;
+    - `transformers` for Hugging Face models;
+    - `autoawq` for AWQ models;
+    - `auto-gptq` for GPTQ models.

-=== "Code"
-    ```python
-    from enum import Enum
-    from pydantic import BaseModel, constr
+## 2. Hello, World

-    import outlines.models as models
-    import outlines.text.generate as generate
+A very simple Outlines program looks like:

-    class Weapon(str, Enum):
-        sword = "sword"
-        axe = "axe"
-        mace = "mace"
-        spear = "spear"
-        bow = "bow"
-        crossbow = "crossbow"
+=== "Code"

-    class Armor(str, Enum):
-        leather = "leather"
-        chainmail = "chainmail"
-        plate = "plate"
+    ```python
+    import outlines

-    class Character(BaseModel):
-        name: constr(max_length=20)
-        age: int
-        armor: Armor
-        weapon: Weapon
-        strength: int
+    model = outlines.models.transformers("gpt2")
+    generator = outlines.generate.format(model, int)
+    generator("2+2=")
+    ```

-    model = models.transformers("gpt2")
-    generator = generate.json(model, Character)
-    sequence = generator("Create a character description for a role playing game in JSON")
+=== "Output"

+    ```bash
+    4
+    ```

-    print(sequence)
-    ```
-=== "Output"
+The program goes through the following steps:

-    ```json
-    {
-        "name": "Anonymous Tokens",
-        "age": 7,
-        "armor": "plate",
-        "weapon": "mace",
-        "strength": 4171
-    }
-    ```
+1. Initialize the model using the `transformers` library. Weights are loaded in memory;
+2. Initialize the generator. `outlines.generate.format` constrains the output of the model
+   to be a valid Python data type.
+3. Call the generator with a prompt.

+## 3. Going further
+
+If you need more inspiration you can take a look at the [Examples](examples/index.md). If you have any questions or requests for documentation please reach out to us on [GitHub](https://github.com/outlines-dev/outlines/discussions), [Twitter](https://twitter.com/remilouf) or [Discord](https://discord.gg/UppQmhEpe8).

-## Acknowledgements
+## 4. Acknowledgements
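The three-step Hello, World walkthrough above generalizes beyond `int`. A minimal sketch, assuming `outlines.generate.format` accepts other basic Python types in the same way (the prompt string is illustrative):

```python
import outlines

model = outlines.models.transformers("gpt2")

# Constrain the model's output to parse as a float instead of an int.
generator = outlines.generate.format(model, float)
answer = generator("What is 2.5 + 2.5? Answer:")
```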
diff --git a/docs/index.md b/docs/index.md
index 73e598af..1fec7c7c 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -45,7 +45,4 @@ generator = generate.json(model, Character, max_tokens=100)
 sequence = generator("Give me a character description")
 ```
-
-Star
-
diff --git a/docs/licence.md b/docs/licence.md
new file mode 100644
index 00000000..e2ba20f8
--- /dev/null
+++ b/docs/licence.md
@@ -0,0 +1,213 @@
+---
+title: Licence
+icon: material/scale-balance
+---
+
+# Licence
+
+Outlines is licensed under the Apache 2.0 licence. Please follow its terms when re-using part of Outlines' codebase.
+
+```
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023- The Outlines developers + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` diff --git a/mkdocs.yml b/mkdocs.yml index cb70a60a..da19ea49 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -100,10 +100,10 @@ plugins: nav: - Home: index.md - - Get Started: - - get_started.md - - Cookbook: - - cookbook/index.md + - Getting Started: + - Getting Started: get_started.md + - Feedback: feedback.md + - Licence: licence.md - Examples: - examples/index.md - Synthetic dating Profile: examples/dating_profiles.md From 68b71ae810e0d6815a83df525da6d707cd4e971a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 16:17:52 +0100 Subject: [PATCH 315/734] Add authors and date to licence --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 261eeb9e..cac50cdf 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2023- The Outlines developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 4edf7768dc77d68b91b27f0a7d7b088eda730668 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 16:40:23 +0100 Subject: [PATCH 316/734] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index e1a37aba..60155cca 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ [![Pypi][pypi-badge]][pypi] [![Contributors][contributors-badge]][contributors] +[![Discord][discord-badge]][discord] [![Twitter][twitter-badge]][twitter] *Robust (guided) text generation.* @@ -456,6 +457,8 @@ Outlines is open-source and licensed under the [Apache License 2.0](LICENSE). 
[contributors]: https://github.com/outlines-dev/outlines/graphs/contributors [contributors-badge]: https://img.shields.io/github/contributors/outlines-dev/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 [twitter]: https://twitter.com/dottxtai +[discord]: https://discord.gg/1182316225284554793 +[discord-badge]: https://img.shields.io/discord/1072170173785723041?color=81A1C1&logo=discord&logoColor=white&style=flat-square [twitter-badge]: https://img.shields.io/twitter/follow/dottxtai?style=social [pypi]: https://pypi.org/project/outlines/ [pypi-badge]: https://img.shields.io/pypi/v/outlines?color=ECEFF4&logo=python&logoColor=white&style=flat-square From 93fd91abf9120f5b34f1efdabda162ca4da89304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 16:44:17 +0100 Subject: [PATCH 317/734] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 60155cca..7bb26382 100644 --- a/README.md +++ b/README.md @@ -457,8 +457,8 @@ Outlines is open-source and licensed under the [Apache License 2.0](LICENSE). [contributors]: https://github.com/outlines-dev/outlines/graphs/contributors [contributors-badge]: https://img.shields.io/github/contributors/outlines-dev/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 [twitter]: https://twitter.com/dottxtai -[discord]: https://discord.gg/1182316225284554793 -[discord-badge]: https://img.shields.io/discord/1072170173785723041?color=81A1C1&logo=discord&logoColor=white&style=flat-square +[discord]: https://discord.gg/BSSVv3Pf +[discord-badge]: https://img.shields.io/discord/1182316225284554793?color=81A1C1&logo=discord&logoColor=white&style=flat-square [twitter-badge]: https://img.shields.io/twitter/follow/dottxtai?style=social [pypi]: https://pypi.org/project/outlines/ [pypi-badge]: https://img.shields.io/pypi/v/outlines?color=ECEFF4&logo=python&logoColor=white&style=flat-square From 430ee4df0b39f1adeaed033a2a509946002bcf44 Mon Sep 17 00:00:00 2001 From: "Brandon T. 
Willard" Date: Thu, 7 Dec 2023 15:25:13 -0600 Subject: [PATCH 318/734] Remove unused FSM code --- outlines/text/fsm.py | 132 +---------------------------------------- tests/text/test_fsm.py | 120 ++----------------------------------- 2 files changed, 7 insertions(+), 245 deletions(-) diff --git a/outlines/text/fsm.py b/outlines/text/fsm.py index f78f33e4..a96fa8e2 100644 --- a/outlines/text/fsm.py +++ b/outlines/text/fsm.py @@ -1,12 +1,10 @@ from collections import namedtuple from functools import lru_cache -from itertools import chain -from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Sequence, Set, Tuple +from typing import TYPE_CHECKING, Dict, Generator, List, Sequence, Set, Tuple import numba import numpy as np from interegular.fsm import FSM, Alphabet, OblivionError, anything_else -from joblib import Parallel, delayed from numba.typed.typedobjectutils import _nonoptional if TYPE_CHECKING: @@ -149,17 +147,6 @@ def create_fsm_info( ], ) -spec = [ - numba.int64, - numba.types.Set(numba.int64), - numba.types.DictType(numba.types.UniTuple(numba.int64, 2), numba.int64), - numba.types.DictType(numba.int64, numba.types.ListType(numba.int64)), - numba.optional(numba.int64), - numba.types.DictType(numba.types.string, numba.int64), -] - -FSMInfoNumbaType = numba.types.NamedTuple(spec, FSMInfo) - def make_deterministic_fsm(fsm: FSM) -> Tuple[BetterFSM, Dict[int, int]]: """Construct an equivalent FSM with deterministic state labels.""" @@ -314,123 +301,6 @@ def walk_fsm( return accepted_states -# TODO FIXME: Can't cache this due to https://github.com/numba/numba/issues/9177 -@numba.njit(nogil=True) -def find_partial_matches( - fsm_info: FSMInfo, - input_string: str, - full_match: bool = True, -) -> Generator[Tuple[int, List[int]], None, None]: - """Find the states in the finite state machine `fsm_info` that accept `input_string`. - - This will consider all possible states in the finite state machine (FSM) - that accept the beginning of `input_string` as starting points, unless a - specific `start_state` is provided. - - Parameters - ---------- - fsm_info - The finite state machine. - input_string - The string for which we generate partial matches. - full_match - Matches must cover the entire string. - - Returns - ------- - A set of tuples corresponding to each valid starting state in the FSM. The - first element of each tuple contains an integer indicating the position in - `input_string` at which the FSM stopped. The second element is the tuple - of states visited during execution of the FSM plus the next, unvisited - transition state. 
- - """ - - if len(input_string) == 0: - return - - trans_key = fsm_info.alphabet_symbol_mapping.get( - input_string[0], fsm_info.alphabet_anything_value - ) - - for state in fsm_info.trans_key_to_states.get( - trans_key, numba.typed.List.empty_list(numba.int64) # type: ignore - ): - path = _walk_fsm( - fsm_info.transitions, - fsm_info.alphabet_symbol_mapping, - fsm_info.alphabet_anything_value, - fsm_info.initial, - fsm_info.finals, - input_string, - state, - full_match=full_match, - ) - if path: - path.insert(0, state) - res = (len(path) - 2, path) - yield res - - -@numba.njit(nogil=True, cache=True) -def process_token_string( - fsm_info: FSMInfo, - token: str, - token_idx: int, - final_state_string: Optional[str] = None, -) -> Set[Tuple[int, int]]: - res = set() - vocab_string_len = len(token) - - for end_idx, state_seq in find_partial_matches(fsm_info, token, full_match=False): - if end_idx is not None and end_idx < vocab_string_len - 1: - continue - - res.add((state_seq[0], token_idx)) - - if token == final_state_string: - # Allow transitions to EOS from all terminals FSM states - for state in fsm_info.finals: - res.add((state, token_idx)) - - return res - - -def create_fsm_index( - fsm_info: FSMInfo, - vocabulary: Dict[str, int], - final_state_string: Optional[str] = None, - n_jobs=-1, -) -> Dict[int, Set[int]]: - """Construct a map from FSM states to subsets of `vocabulary`. - - The subsets of `vocabulary` consist of elements that are accepted by--or - transition to--the corresponding partial parse states. - - Parameters - ---------- - fsm - The finite-state machine. - vocabulary - The vocabulary composed of token strings mapped to token IDs. - final_state_string - A string from `vocabulary` that is to be added to all the final states - in the FSM (e.g. ``""``). 
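Concretely, the mapping this function produced can be illustrated with the regex and vocabulary from the `test_create_fsm_index` test deleted further down; the dictionary literal below is the expected result asserted there:

```python
# FSM index for the regex "0|[1-9][0-9]*" over a toy vocabulary
# (token string -> token id), as asserted in the removed test:
vocabulary = {"blah": 0, "1a": 1, "2": 2, "0": 3}
states_to_token_subsets = {0: {2, 3}, 2: {2, 3}}
# From the initial FSM state (0), only "2" (id 2) and "0" (id 3) can
# start a valid integer; "1a" is excluded because "a" breaks the match.
```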
- """ - - results = Parallel(backend="threading", n_jobs=n_jobs, return_as="generator")( - delayed(process_token_string)(fsm_info, token, token_idx, final_state_string) - for token, token_idx in vocabulary.items() - ) - - states_to_token_subsets: Dict[int, Set[int]] = {} - - for fsm_state, token_idx in chain.from_iterable(results): - states_to_token_subsets.setdefault(fsm_state, set()).add(token_idx) - - return states_to_token_subsets - - def fsm_union( fsms: Sequence[FSM], ) -> Tuple[FSM, Dict[int, Tuple[Set[Tuple[int, int]], Set[int], Dict[int, Set[int]]]]]: diff --git a/tests/text/test_fsm.py b/tests/text/test_fsm.py index 10c18eec..7091fd0b 100644 --- a/tests/text/test_fsm.py +++ b/tests/text/test_fsm.py @@ -5,10 +5,8 @@ from outlines.models.transformers import TransformerTokenizer from outlines.text.fsm import ( _walk_fsm, - create_fsm_index, create_fsm_index_end_to_end, create_fsm_index_tokenizer, - find_partial_matches, fsm_union, get_sub_fsms_from_seq, make_deterministic_fsm, @@ -84,112 +82,6 @@ def test_walk_fsm(function): assert res == tuple() -def test_partial_match(): - name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") - name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) - assert name_fsm.initial == 0 - - name_fsm = name_fsm.fsm_info - - def_pattern = interegular.parse_pattern("def") - def_fsm, _ = make_deterministic_fsm(def_pattern.to_fsm().reduce()) - assert def_fsm.initial == 0 - - def_fsm = def_fsm.fsm_info - - def to_python(res): - return {(x, tuple(y)) for x, y in res} - - res = to_python(find_partial_matches(def_fsm, "def")) - assert res == {(2, (0, 1, 2, 3))} - res = to_python(find_partial_matches(def_fsm, "de", full_match=False)) - assert res == {(1, (0, 1, 2))} - res = to_python(find_partial_matches(def_fsm, "d", full_match=False)) - assert res == {(0, (0, 1))} - res = to_python(find_partial_matches(def_fsm, "")) - assert res == set() - res = to_python(find_partial_matches(def_fsm, "df")) - assert res == set() - res = to_python(find_partial_matches(def_fsm, "ef", full_match=False)) - assert res == {(1, (1, 2, 3))} - res = to_python(find_partial_matches(def_fsm, "e", full_match=False)) - assert res == {(0, (1, 2))} - res = to_python(find_partial_matches(def_fsm, "f", full_match=False)) - assert res == {(0, (2, 3))} - res = to_python(find_partial_matches(def_fsm, "ef foo", full_match=False)) - assert res == {(1, (1, 2, 3))} - - # This string has a `DEF` token in it, but should ultimately not lex one - res = to_python(find_partial_matches(def_fsm, "defb", full_match=False)) - assert res == {(2, (0, 1, 2, 3))} - - # `NAME` can have multiple start states for this input - res = to_python(find_partial_matches(name_fsm, "d", full_match=False)) - assert res == {(0, (0, 1)), (0, (1, 1))} - # Not this case - res = to_python(find_partial_matches(name_fsm, "1d")) - assert res == {(1, (1, 1, 1))} - - res = to_python(find_partial_matches(name_fsm, "blah")) - assert res == { - (3, (0, 1, 1, 1, 1)), - (3, (1, 1, 1, 1, 1)), - } - - float_pattern = interegular.parse_pattern( - r"([+-]?((0|[1-9]+)([.][0-9]*)?)|([.][0-9]+))" - ) - float_fsm, _ = make_deterministic_fsm(float_pattern.to_fsm().reduce()) - assert 5 in float_fsm.finals - assert 2 not in float_fsm.finals - - float_fsm = float_fsm.fsm_info - - res = to_python(find_partial_matches(float_fsm, ".", full_match=False)) - assert res == {(0, (3, 5)), (0, (4, 5)), (0, (0, 2))} - - joins_fsm, _ = make_deterministic_fsm( - interegular.parse_pattern(r"(JOIN LEFT|JOIN)").to_fsm().reduce() - ) - - joins_fsm = 
joins_fsm.fsm_info - - res = to_python(find_partial_matches(joins_fsm, "JOIN BLAH", full_match=False)) - assert res == {(3, (0, 1, 2, 3, 4))} - - res = to_python(find_partial_matches(joins_fsm, "JOIN L", full_match=False)) - assert res == {(5, (0, 1, 2, 3, 4, 5, 6))} - - res = to_python(find_partial_matches(joins_fsm, "JOI", full_match=False)) - assert res == {(2, (0, 1, 2, 3))} - - regex_pattern = interegular.parse_pattern("0|[1-9][2-9]*") - regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) - - # State `1` has no transitions - assert not regex_fsm.map[1] - - res = to_python(find_partial_matches(regex_fsm.fsm_info, "0", numba.int64(1))) - assert res == {(0, (0, 1))} - - -def test_create_fsm_index(): - regex_str = "0|[1-9][0-9]*" - - regex_pattern = interegular.parse_pattern(regex_str) - regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) - - vocabulary = {"blah": 0, "1a": 1, "2": 2, "0": 3, "": 4} - - res = create_fsm_index(regex_fsm.fsm_info, vocabulary) - - assert res == {0: {2, 3}, 2: {2, 3}} - - res = create_fsm_index(regex_fsm.fsm_info, vocabulary, "") - - assert res == {0: {2, 3}, 1: {4}, 2: {2, 3, 4}} - - def test_get_sub_fsms_from_seq(): name_pattern = interegular.parse_pattern(r"[^\W\d]\w*") name_fsm, _ = make_deterministic_fsm(name_pattern.to_fsm().reduce()) @@ -329,18 +221,18 @@ def test_get_sub_fsms_from_seq(): ] fsm, fsms_to_trans_finals = fsm_union(join_fsms) - ((_, state_seq),) = find_partial_matches(fsm.fsm_info, "OI", full_match=False) - + # Matching "OI" + state_seq = [1, 2, 3] res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(0, True, False), (1, True, False)] - ((_, state_seq),) = find_partial_matches(fsm.fsm_info, "N", full_match=False) - + # Matching "N" + state_seq = [3, 4] res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(0, False, True), (1, True, False)] - ((_, state_seq),) = find_partial_matches(fsm.fsm_info, " ", full_match=False) - + # Matching " " + state_seq = [4, 5] res = list(get_sub_fsms_from_seq(state_seq, fsms_to_trans_finals)) assert res == [(1, True, False)] From bdc0f5fe8d2c2db6dd05268ec180c669c793835c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 16 Nov 2023 14:12:53 +0100 Subject: [PATCH 319/734] Move `prompts.py` to root folder --- docs/api/prompts.md | 2 +- examples/babyagi.py | 8 +++--- examples/dating_profile.py | 3 +- examples/math_generate_code.py | 4 +-- examples/meta_prompting.py | 16 +++++------ examples/pick_odd_one_out.py | 4 +-- examples/react.py | 6 ++-- examples/self_consistency.py | 4 +-- outlines/__init__.py | 2 +- outlines/{text => }/prompts.py | 0 outlines/text/__init__.py | 1 - tests/{text => }/test_prompts.py | 47 ++++++++++++++++---------------- 12 files changed, 49 insertions(+), 48 deletions(-) rename outlines/{text => }/prompts.py (100%) rename tests/{text => }/test_prompts.py (86%) diff --git a/docs/api/prompts.md b/docs/api/prompts.md index f9899400..9d28f838 100644 --- a/docs/api/prompts.md +++ b/docs/api/prompts.md @@ -1 +1 @@ -::: outlines.text.prompts +::: outlines.prompts diff --git a/examples/babyagi.py b/examples/babyagi.py index fd4b8b3d..406e41d7 100644 --- a/examples/babyagi.py +++ b/examples/babyagi.py @@ -7,8 +7,8 @@ from collections import deque from typing import Deque, List +import outlines import outlines.models as models -import outlines.text as text model = models.openai("gpt-3.5-turbo") @@ -18,7 +18,7 @@ ################# -@text.prompt +@outlines.prompt def 
perform_task_ppt(objective: str, task: str): """You are an AI who performs one task based on the following objective: {{objective}}. @@ -33,7 +33,7 @@ def perform_task_ppt(objective: str, task: str): ##################### -@text.prompt +@outlines.prompt def create_tasks_ppt( objective: str, previous_task: str, result: str, task_list: List[str] ): @@ -69,7 +69,7 @@ def create_tasks_fmt(result: str) -> List[str]: ######################## -@text.prompt +@outlines.prompt def prioritize_tasks_ppt(objective: str, task_names: List[str], next_task_id: int): """You are a task prioritization AI tasked with cleaning the formatting of \ and reprioritizing the following tasks: {{task_names}}. diff --git a/examples/dating_profile.py b/examples/dating_profile.py index 228f1399..7bf2a886 100644 --- a/examples/dating_profile.py +++ b/examples/dating_profile.py @@ -5,6 +5,7 @@ import transformers from pydantic import BaseModel, conlist +import outlines import outlines.models as models import outlines.text as text @@ -41,7 +42,7 @@ class Example: profile: DatingProfile -@text.prompt +@outlines.prompt def dating_profile_prompt(description: str, examples: list[Example]): """ You are a world-renowned matchmaker who understands the modern dating market. Your job is to generate dating app profiles for male clients interested in women based on a provided description. The profiles should be authentic, show off their strengths, and maximize their likelihood of getting matches on dating apps. diff --git a/examples/math_generate_code.py b/examples/math_generate_code.py index df141818..246004d8 100644 --- a/examples/math_generate_code.py +++ b/examples/math_generate_code.py @@ -1,6 +1,6 @@ """Example from https://dust.tt/spolu/a/d12ac33169""" +import outlines import outlines.models as models -import outlines.text as text examples = [ {"question": "What is 37593 * 67?", "code": "37593 * 67"}, @@ -17,7 +17,7 @@ question = "Carla is downloading a 200 GB file. She can download 2 GB/minute, but 40% of the way through the download, the download fails. Then Carla has to restart the download from the beginning. How load did it take her to download the file in minutes?" -@text.prompt +@outlines.prompt def answer_with_code_prompt(question, examples): """ {% for example in examples %} diff --git a/examples/meta_prompting.py b/examples/meta_prompting.py index 80519167..a9525fbd 100644 --- a/examples/meta_prompting.py +++ b/examples/meta_prompting.py @@ -11,12 +11,12 @@ """ import argparse +import outlines import outlines.models as models -import outlines.text as text def split_into_steps(question, model_name: str): - @text.prompt + @outlines.prompt def solve(question): """{{question}} Rephrase : : as a true or false statement, identify an Object, relationship and subject @@ -37,14 +37,14 @@ def solve(question): def fill_in_the_blanks(question, model_name: str): - @text.prompt + @outlines.prompt def determine_goal(question): """{{question}} In order to solve this problem, we will analyze each of the options and determine """ - @text.prompt + @outlines.prompt def solve(memory): """{{memory}}. Let's begin.""" @@ -60,7 +60,7 @@ def solve(memory): def ask_an_expert(question, model_name: str): - @text.prompt + @outlines.prompt def find_expert(question): """ {{question}} @@ -78,7 +78,7 @@ def find_expert(question): on the screen: " """ - @text.prompt + @outlines.prompt def get_answer(question, expert, memory): """ {{memory}}". 
@@ -99,14 +99,14 @@ def get_answer(question, expert, memory): def ask_an_expert_simple(question, model_name: str): - @text.prompt + @outlines.prompt def find_expert(question): """ Q: {{question}} A: A good person to answer this question would be """ - @text.prompt + @outlines.prompt def get_answer(expert, memory): """ {{memory}}. diff --git a/examples/pick_odd_one_out.py b/examples/pick_odd_one_out.py index 676c7e56..513228cb 100644 --- a/examples/pick_odd_one_out.py +++ b/examples/pick_odd_one_out.py @@ -9,11 +9,11 @@ arXiv preprint arXiv:2212.06094. """ +import outlines import outlines.models as models -import outlines.text as text -@text.prompt +@outlines.prompt def build_ooo_prompt(options): """ Pick the odd word out: skirt, dress, pen, jacket. diff --git a/examples/react.py b/examples/react.py index 2a4a5262..0f684042 100644 --- a/examples/react.py +++ b/examples/react.py @@ -12,11 +12,11 @@ """ import requests # type: ignore +import outlines import outlines.models as models -import outlines.text as text -@text.prompt +@outlines.prompt def build_reAct_prompt(question): """What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? Tho 1: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado ... @@ -30,7 +30,7 @@ def build_reAct_prompt(question): """ -@text.prompt +@outlines.prompt def add_mode(i, mode, result, prompt): """{{ prompt }} {{ mode }} {{ i }}: {{ result }} diff --git a/examples/self_consistency.py b/examples/self_consistency.py index 396c1a45..e3cbe64a 100644 --- a/examples/self_consistency.py +++ b/examples/self_consistency.py @@ -2,8 +2,8 @@ import numpy as np +import outlines import outlines.models as models -import outlines.text as text examples = [ { @@ -43,7 +43,7 @@ question = "When I was 6 my sister was half my age. Now I’m 70 how old is my sister?" 
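The decorator being imported from the package root here is otherwise unchanged; as a minimal sketch of its behaviour (mirroring `tests/test_prompts.py` below), calling a decorated function renders its docstring as a Jinja2 template:

```python
import outlines

@outlines.prompt
def greet(name):
    """Hello {{name}}!"""

assert greet("world") == "Hello world!"
```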
-@text.prompt +@outlines.prompt def few_shots(question, examples): """ {% for example in examples %} diff --git a/outlines/__init__.py b/outlines/__init__.py index a5082287..0bf1a38e 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,7 +1,7 @@ """Outlines is a Generative Model Programming Framework.""" from outlines.base import vectorize from outlines.caching import clear_cache, disable_cache, get_cache -from outlines.text import prompt +from outlines.prompts import prompt __all__ = [ "clear_cache", diff --git a/outlines/text/prompts.py b/outlines/prompts.py similarity index 100% rename from outlines/text/prompts.py rename to outlines/prompts.py diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py index b1ae976c..a1189ca0 100644 --- a/outlines/text/__init__.py +++ b/outlines/text/__init__.py @@ -1,2 +1 @@ from .generate import continuation -from .prompts import prompt, render diff --git a/tests/text/test_prompts.py b/tests/test_prompts.py similarity index 86% rename from tests/text/test_prompts.py rename to tests/test_prompts.py index d3f3698e..65eeb202 100644 --- a/tests/text/test_prompts.py +++ b/tests/test_prompts.py @@ -3,42 +3,43 @@ import pytest from pydantic import BaseModel, Field -import outlines.text as text +import outlines +from outlines.prompts import render def test_render(): tpl = """ A test string""" - assert text.render(tpl) == "A test string" + assert render(tpl) == "A test string" tpl = """ A test string """ - assert text.render(tpl) == "A test string" + assert render(tpl) == "A test string" tpl = """ A test Another test """ - assert text.render(tpl) == "A test\nAnother test" + assert render(tpl) == "A test\nAnother test" tpl = """A test Another test """ - assert text.render(tpl) == "A test\nAnother test" + assert render(tpl) == "A test\nAnother test" tpl = """ A test line An indented line """ - assert text.render(tpl) == "A test line\n An indented line" + assert render(tpl) == "A test line\n An indented line" tpl = """ A test line An indented line """ - assert text.render(tpl) == "A test line\n An indented line\n" + assert render(tpl) == "A test line\n An indented line\n" def test_render_escaped_linebreak(): @@ -47,7 +48,7 @@ def test_render_escaped_linebreak(): that we break \ in several lines """ - assert text.render(tpl) == "A long test that we break in several lines" + assert render(tpl) == "A long test that we break in several lines" tpl = """ Break in \ @@ -58,7 +59,7 @@ def test_render_escaped_linebreak(): Goes back to normal """ assert ( - text.render(tpl) + render(tpl) == "Break in several lines But respect the indentation\n on line breaks.\nAnd after everything Goes back to normal" ) @@ -70,7 +71,7 @@ def test_render_jinja(): # Notice the newline after the end of the loop examples = ["one", "two"] - prompt = text.render( + prompt = render( """ {% for e in examples %} Example: {{e}} @@ -81,7 +82,7 @@ def test_render_jinja(): # We can remove the newline by cloing with -%} examples = ["one", "two"] - prompt = text.render( + prompt = render( """ {% for e in examples %} Example: {{e}} @@ -100,12 +101,12 @@ def test_render_jinja(): final """ - assert text.render(tpl, is_true=True) == "true\nfinal" - assert text.render(tpl, is_true=False) == "final" + assert render(tpl, is_true=True) == "true\nfinal" + assert render(tpl, is_true=False) == "final" def test_prompt_basic(): - @text.prompt + @outlines.prompt def test_tpl(variable): """{{variable}} test""" @@ -121,7 +122,7 @@ def test_tpl(variable): p = test_tpl(variable="test") assert p == 
"test test" - @text.prompt + @outlines.prompt def test_single_quote_tpl(variable): "${variable} test" @@ -130,7 +131,7 @@ def test_single_quote_tpl(variable): def test_prompt_kwargs(): - @text.prompt + @outlines.prompt def test_kwarg_tpl(var, other_var="other"): """{{var}} and {{other_var}}""" @@ -150,13 +151,13 @@ def test_kwarg_tpl(var, other_var="other"): def test_no_prompt(): with pytest.raises(TypeError, match="template"): - @text.prompt + @outlines.prompt def test_empty(variable): pass with pytest.raises(TypeError, match="template"): - @text.prompt + @outlines.prompt def test_only_code(variable): return variable @@ -172,7 +173,7 @@ def with_description(): """ pass - @text.prompt + @outlines.prompt def name_description_ppt(fn): """ {{fn|name}}: {{fn|description}} @@ -187,7 +188,7 @@ def name_description_ppt(fn): def with_signature(one: int, two: List[str], three: float = 1.0): pass - @text.prompt + @outlines.prompt def name_signature_ppt(fn): """ {{fn|name}}: {{fn|signature}} @@ -199,7 +200,7 @@ def name_signature_ppt(fn): def test_function_call(one, two=2): return one + two - @text.prompt + @outlines.prompt def source_ppt(fn): """ {{fn|source}} @@ -214,7 +215,7 @@ class SimpleResponse(BaseModel): one: str = Field(description="a description") two: str - @text.prompt + @outlines.prompt def source_ppt(model): "{{model | schema }}" @@ -245,7 +246,7 @@ class ConvolutedResponse(BaseModel): def test_prompt_dict_response(): response = {"one": "a description", "two": ""} - @text.prompt + @outlines.prompt def source_ppt(model): "{{model | schema }}" From 894563c4076a2af45f4015f52f2ce1a87907724d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 16 Nov 2023 14:17:50 +0100 Subject: [PATCH 320/734] Rename `sample.py` to `samplers.py` --- docs/api/sample.md | 1 - docs/api/samplers.md | 1 + mkdocs.yml | 2 +- outlines/text/generate/continuation.py | 2 +- outlines/text/generate/regex.py | 2 +- outlines/text/generate/{sample.py => samplers.py} | 0 outlines/text/generate/sequence.py | 6 +++--- tests/text/generate/{test_sample.py => test_samplers.py} | 6 +++++- 8 files changed, 12 insertions(+), 8 deletions(-) delete mode 100644 docs/api/sample.md create mode 100644 docs/api/samplers.md rename outlines/text/generate/{sample.py => samplers.py} (100%) rename tests/text/generate/{test_sample.py => test_samplers.py} (95%) diff --git a/docs/api/sample.md b/docs/api/sample.md deleted file mode 100644 index 1f962ea1..00000000 --- a/docs/api/sample.md +++ /dev/null @@ -1 +0,0 @@ -::: outlines.text.generate.sample diff --git a/docs/api/samplers.md b/docs/api/samplers.md new file mode 100644 index 00000000..b5ccd47c --- /dev/null +++ b/docs/api/samplers.md @@ -0,0 +1 @@ +::: outlines.text.generate.samplers diff --git a/mkdocs.yml b/mkdocs.yml index da19ea49..0e84ff20 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -127,5 +127,5 @@ nav: - api/fsm.md - api/parsing.md - api/regex.md - - api/sample.md + - api/samplers.md - api/continuation.md diff --git a/outlines/text/generate/continuation.py b/outlines/text/generate/continuation.py index 8f70ccf9..d55acc49 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/text/generate/continuation.py @@ -5,7 +5,7 @@ from outlines.text.generate.sequence import Sequence if TYPE_CHECKING: - from outlines.text.generate.sample import Sampler + from outlines.text.generate.samplers import Sampler class Continuation(Sequence): diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 63b33d34..d8ade413 100644 --- 
a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -12,7 +12,7 @@ from outlines.text.types import python_types_to_regex if TYPE_CHECKING: - from outlines.text.generate.sample import Sampler + from outlines.text.generate.samplers import Sampler class Regex(Continuation): diff --git a/outlines/text/generate/sample.py b/outlines/text/generate/samplers.py similarity index 100% rename from outlines/text/generate/sample.py rename to outlines/text/generate/samplers.py diff --git a/outlines/text/generate/sequence.py b/outlines/text/generate/sequence.py index e66cafa0..499595b9 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/text/generate/sequence.py @@ -6,8 +6,8 @@ from outlines.models import OpenAI if TYPE_CHECKING: - from outlines.models.transformers import KVCacheType, Transformer - from outlines.text.generate.sample import Sampler +from outlines.models.transformers import KVCacheType, Transformer +from outlines.text.generate.sample import Sampler class Sequence: @@ -46,7 +46,7 @@ def __init__( model.tokenizer.pad_token_id, device=model.device ) if sampler is None: - from outlines.text.generate.sample import multinomial + from outlines.text.generate.samplers import multinomial self.sampler = multinomial else: diff --git a/tests/text/generate/test_sample.py b/tests/text/generate/test_samplers.py similarity index 95% rename from tests/text/generate/test_sample.py rename to tests/text/generate/test_samplers.py index 884bb4a3..5e1543fc 100644 --- a/tests/text/generate/test_sample.py +++ b/tests/text/generate/test_samplers.py @@ -2,7 +2,11 @@ import torch -from outlines.text.generate.sample import greedy, multinomial, vectorized_random_choice +from outlines.text.generate.samplers import ( + greedy, + multinomial, + vectorized_random_choice, +) def test_greedy(): From da9269548d431f5cc287e3345f925eb817f49fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 16 Nov 2023 15:01:26 +0100 Subject: [PATCH 321/734] Separate indexing from generation. 
/text -> /generate and flatten --- docs/api/continuation.md | 2 +- docs/api/fsm.md | 2 +- docs/api/json_schema.md | 2 +- docs/api/parsing.md | 2 +- docs/api/regex.md | 2 +- docs/api/samplers.md | 2 +- examples/dating_profile.py | 5 +- examples/parsing.py | 2 +- outlines/{text => }/generate/__init__.py | 0 outlines/{text => }/generate/continuation.py | 4 +- outlines/generate/generator.py | 136 +++++++++++++++++ outlines/{text => }/generate/regex.py | 13 +- outlines/{text => }/generate/samplers.py | 0 outlines/{text => }/generate/sequence.py | 6 +- {tests/text => outlines/index}/__init__.py | 0 outlines/{text => index}/fsm.py | 0 outlines/{text => index}/json_schema.py | 0 outlines/{text => index}/parsing.py | 2 +- outlines/{text => index}/types.py | 0 outlines/text/__init__.py | 1 - tests/generate/__init__.py | 0 .../{text => }/generate/test_continuation.py | 2 +- tests/generate/test_generator.py | 139 ++++++++++++++++++ .../generate/test_integration_transfomers.py | 4 +- tests/{text => }/generate/test_regex.py | 6 +- tests/{text => }/generate/test_samplers.py | 6 +- tests/{text => }/generate/test_sequence.py | 2 +- tests/{text => index}/partial_python.lark | 0 tests/{text => index}/test_fsm.py | 6 +- tests/{text => index}/test_json_schema.py | 2 +- tests/{text => index}/test_parsing.py | 8 +- tests/{text => index}/test_types.py | 2 +- 32 files changed, 315 insertions(+), 43 deletions(-) rename outlines/{text => }/generate/__init__.py (100%) rename outlines/{text => }/generate/continuation.py (97%) create mode 100644 outlines/generate/generator.py rename outlines/{text => }/generate/regex.py (97%) rename outlines/{text => }/generate/samplers.py (100%) rename outlines/{text => }/generate/sequence.py (97%) rename {tests/text => outlines/index}/__init__.py (100%) rename outlines/{text => index}/fsm.py (100%) rename outlines/{text => index}/json_schema.py (100%) rename outlines/{text => index}/parsing.py (99%) rename outlines/{text => index}/types.py (100%) delete mode 100644 outlines/text/__init__.py create mode 100644 tests/generate/__init__.py rename tests/{text => }/generate/test_continuation.py (97%) create mode 100644 tests/generate/test_generator.py rename tests/{text => }/generate/test_integration_transfomers.py (99%) rename tests/{text => }/generate/test_regex.py (97%) rename tests/{text => }/generate/test_samplers.py (95%) rename tests/{text => }/generate/test_sequence.py (99%) rename tests/{text => index}/partial_python.lark (100%) rename tests/{text => index}/test_fsm.py (99%) rename tests/{text => index}/test_json_schema.py (99%) rename tests/{text => index}/test_parsing.py (97%) rename tests/{text => index}/test_types.py (93%) diff --git a/docs/api/continuation.md b/docs/api/continuation.md index e8790a2a..66b9a719 100644 --- a/docs/api/continuation.md +++ b/docs/api/continuation.md @@ -1 +1 @@ -::: outlines.text.generate.continuation +::: outlines.generate.continuation diff --git a/docs/api/fsm.md b/docs/api/fsm.md index 7c8f543a..a7cc19f5 100644 --- a/docs/api/fsm.md +++ b/docs/api/fsm.md @@ -1 +1 @@ -::: outlines.text.fsm +::: outlines.index.fsm diff --git a/docs/api/json_schema.md b/docs/api/json_schema.md index 78272c9a..575a85e8 100644 --- a/docs/api/json_schema.md +++ b/docs/api/json_schema.md @@ -1 +1 @@ -::: outlines.text.json_schema +::: outlines.index.json_schema diff --git a/docs/api/parsing.md b/docs/api/parsing.md index 75efa846..a5ed59f2 100644 --- a/docs/api/parsing.md +++ b/docs/api/parsing.md @@ -1 +1 @@ -::: outlines.text.parsing +::: outlines.index.parsing diff 
--git a/docs/api/regex.md b/docs/api/regex.md index cdfb9abd..5ef91db4 100644 --- a/docs/api/regex.md +++ b/docs/api/regex.md @@ -1 +1 @@ -::: outlines.text.generate.regex +::: outlines.generate.regex diff --git a/docs/api/samplers.md b/docs/api/samplers.md index b5ccd47c..c125e9a6 100644 --- a/docs/api/samplers.md +++ b/docs/api/samplers.md @@ -1 +1 @@ -::: outlines.text.generate.samplers +::: outlines.generate.samplers diff --git a/examples/dating_profile.py b/examples/dating_profile.py index 7bf2a886..acc00126 100644 --- a/examples/dating_profile.py +++ b/examples/dating_profile.py @@ -6,8 +6,7 @@ from pydantic import BaseModel, conlist import outlines -import outlines.models as models -import outlines.text as text +from outlines import models class QuestionChoice(str, Enum): @@ -122,7 +121,7 @@ def dating_profile_prompt(description: str, examples: list[Example]): new_description = "I'm a laid-back lawyer who spends a lot of his free-time gaming. I work in a corporate office, but ended up here after the start-up I cofounded got acquired, so still play ping pong with my cool coworkers every day. I have a bar at home where I make cocktails, which is great for entertaining friends. I secretly like to wear suits and get a new one tailored every few months. I also like weddings because I get to wear those suits, and it's a good excuse for a date. I watch the latest series because I'm paying, with my hard-earned money, for every streaming service." prompt = dating_profile_prompt(description=new_description, examples=samples) -profile = text.generate.json(model, DatingProfile)(prompt) # type: ignore +profile = outlines.generate.json(model, DatingProfile)(prompt) # type: ignore print(profile) # Sample generated profiles diff --git a/examples/parsing.py b/examples/parsing.py index dbee9ab9..0f403182 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -14,7 +14,7 @@ set_seed, ) -from outlines.text.parsing import PartialLark, PartialPythonIndenter +from outlines.index.parsing import PartialLark, PartialPythonIndenter revision = None checkpoint = "Salesforce/codegen-350M-mono" diff --git a/outlines/text/generate/__init__.py b/outlines/generate/__init__.py similarity index 100% rename from outlines/text/generate/__init__.py rename to outlines/generate/__init__.py diff --git a/outlines/text/generate/continuation.py b/outlines/generate/continuation.py similarity index 97% rename from outlines/text/generate/continuation.py rename to outlines/generate/continuation.py index d55acc49..a3576794 100644 --- a/outlines/text/generate/continuation.py +++ b/outlines/generate/continuation.py @@ -2,10 +2,10 @@ import torch -from outlines.text.generate.sequence import Sequence +from outlines.generate.sequence import Sequence if TYPE_CHECKING: - from outlines.text.generate.samplers import Sampler + from outlines.generate.samplers import Sampler class Continuation(Sequence): diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py new file mode 100644 index 00000000..1a6dfde4 --- /dev/null +++ b/outlines/generate/generator.py @@ -0,0 +1,136 @@ +import math +from dataclasses import dataclass +from typing import TYPE_CHECKING, Generator, List, Optional + +import torch + +if TYPE_CHECKING: + from outlines.generate.samplers import Sampler + + +@dataclass +class GenerationState: + token_ids: torch.Tensor + attention_masks: torch.Tensor + kv_cache: Optional[torch.Tensor] = None + + +def process(generator: Generator, index, state: GenerationState): + """This generator drives the text generation 
process by + walking through the FSM.""" + next(generator) + + fsm_states = [0 for _ in range(state.token_ids.shape[0])] + while True: + logits_mask = get_next_instructions(index, fsm_states) + + next_token_ids, kv_cache = generator.send((state, logits_mask)) + + token_ids = update_token_ids(state.token_ids, next_token_ids) + attention_masks = update_attention_masks(state.attention_masks) + state = GenerationState(token_ids, attention_masks, kv_cache) + + fsm_states = get_next_fsm_states(index, fsm_states, next_token_ids) + is_finished = is_generation_finished(index, fsm_states) + if is_finished: + yield token_ids, next_token_ids + return + + yield state + + +def get_next_fsm_states( + index, fsm_states: List[int], next_token_ids: torch.Tensor +) -> List[int]: + return [ + index.next_state(fsm_state, token_id) + for fsm_state, token_id in zip(fsm_states, next_token_ids) + ] + + +def get_next_instructions(index, fsm_states: List[int]) -> torch.Tensor: + return [index.next_instruction(state) for state in fsm_states] + + +def is_generation_finished(index, fsm_states: List[int]) -> bool: + return all([index.is_finished(state) for state in fsm_states]) + + +def update_token_ids( + token_ids: torch.Tensor, next_token_ids: torch.Tensor +) -> torch.Tensor: + return torch.concatenate([token_ids, next_token_ids], dim=1 - 1) + + +def update_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: + return torch.concatenate( + [ + attention_masks, + torch.ones( + attention_masks.shape[:-1] + (1,), device=attention_masks.device + ), + ], + axis=-1, + ) + + +def token_generator(model, sampler: "Sampler", samples: int, rng: torch.Generator): + """Generator that yields a token every time it is called. + + This process is designed to be steered by another supervising + process that supplies the current sequence and the indices + of the tokens to mask before sampling. + + Parameters + ---------- + model + A model that takes a sequence of tokens as an input and + returns a probability distribution over the next tokens. + sampler + A function that samples tokens from a probability + distribution over the next tokens. + + Yields + ------ + A tensor with the sampled tokens. + + """ + while True: + (token_ids, attention_masks, kv_cache), logits_mask = yield + + try: + logits, new_kv_cache = model(token_ids, attention_masks, kv_cache) + except IndexError: # Exceeding the context length + return + + biased_logits = bias_logits(logits, logits_mask) + next_token_ids = sampler(biased_logits, samples, rng) + + yield next_token_ids, new_kv_cache + + +def bias_logits( + logits: torch.Tensor, + ids_to_mask: List, +) -> torch.Tensor: + """Mask the logits. + + The function iterates over a nested list where each list corresponds to the + indices that need to be masked for each row in the array. + + Parameters + ---------- + logits + Two dimensional tensor that contains the next-token probability + distribution. + ids_to_mask + The ids to mask in each dimension. + + Returns + ------- + A view of the original logits tensor where some values are masked. 
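As a quick illustration of the masking contract (the tensor values mirror the `test_bias_logits` cases in `tests/generate/test_generator.py`, added by this same commit):

```python
import math
import torch

# bias_logits sets the listed token ids of each row to -inf:
logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
masked = bias_logits(logits, [[1, 3]])
assert torch.equal(masked, torch.tensor([[1.0, -math.inf, 3.0, -math.inf]]))
```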
+ + """ + for i, ids in enumerate(ids_to_mask): + logits[i, ids] = -math.inf + return logits diff --git a/outlines/text/generate/regex.py b/outlines/generate/regex.py similarity index 97% rename from outlines/text/generate/regex.py rename to outlines/generate/regex.py index d8ade413..bf995aa6 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/generate/regex.py @@ -6,13 +6,16 @@ import torch from pydantic import BaseModel -from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm -from outlines.text.generate.continuation import Continuation -from outlines.text.json_schema import build_regex_from_object, get_schema_from_signature -from outlines.text.types import python_types_to_regex +from outlines.generate.continuation import Continuation +from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm +from outlines.index.json_schema import ( + build_regex_from_object, + get_schema_from_signature, +) +from outlines.index.types import python_types_to_regex if TYPE_CHECKING: - from outlines.text.generate.samplers import Sampler + from outlines.generate.samplers import Sampler class Regex(Continuation): diff --git a/outlines/text/generate/samplers.py b/outlines/generate/samplers.py similarity index 100% rename from outlines/text/generate/samplers.py rename to outlines/generate/samplers.py diff --git a/outlines/text/generate/sequence.py b/outlines/generate/sequence.py similarity index 97% rename from outlines/text/generate/sequence.py rename to outlines/generate/sequence.py index 499595b9..b444d0b4 100644 --- a/outlines/text/generate/sequence.py +++ b/outlines/generate/sequence.py @@ -6,8 +6,8 @@ from outlines.models import OpenAI if TYPE_CHECKING: -from outlines.models.transformers import KVCacheType, Transformer -from outlines.text.generate.sample import Sampler + from outlines.generate.samplers import Sampler + from outlines.models.transformers import KVCacheType, Transformer class Sequence: @@ -46,7 +46,7 @@ def __init__( model.tokenizer.pad_token_id, device=model.device ) if sampler is None: - from outlines.text.generate.samplers import multinomial + from outlines.generate.samplers import multinomial self.sampler = multinomial else: diff --git a/tests/text/__init__.py b/outlines/index/__init__.py similarity index 100% rename from tests/text/__init__.py rename to outlines/index/__init__.py diff --git a/outlines/text/fsm.py b/outlines/index/fsm.py similarity index 100% rename from outlines/text/fsm.py rename to outlines/index/fsm.py diff --git a/outlines/text/json_schema.py b/outlines/index/json_schema.py similarity index 100% rename from outlines/text/json_schema.py rename to outlines/index/json_schema.py diff --git a/outlines/text/parsing.py b/outlines/index/parsing.py similarity index 99% rename from outlines/text/parsing.py rename to outlines/index/parsing.py index fc34fff7..7e1c333a 100644 --- a/outlines/text/parsing.py +++ b/outlines/index/parsing.py @@ -35,7 +35,7 @@ from lark.parsers.lalr_interactive_parser import InteractiveParser from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser -from outlines.text.fsm import ( +from outlines.index.fsm import ( fsm_union, get_sub_fsms_from_seq, make_deterministic_fsm, diff --git a/outlines/text/types.py b/outlines/index/types.py similarity index 100% rename from outlines/text/types.py rename to outlines/index/types.py diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py deleted file mode 100644 index a1189ca0..00000000 --- a/outlines/text/__init__.py 
+++ /dev/null @@ -1 +0,0 @@ -from .generate import continuation diff --git a/tests/generate/__init__.py b/tests/generate/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/text/generate/test_continuation.py b/tests/generate/test_continuation.py similarity index 97% rename from tests/text/generate/test_continuation.py rename to tests/generate/test_continuation.py index 944ae6df..d6194d7e 100644 --- a/tests/text/generate/test_continuation.py +++ b/tests/generate/test_continuation.py @@ -1,6 +1,6 @@ import torch -from outlines.text.generate.continuation import Continuation, continuation +from outlines.generate.continuation import Continuation, continuation class Tokenizer: diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py new file mode 100644 index 00000000..b9e79982 --- /dev/null +++ b/tests/generate/test_generator.py @@ -0,0 +1,139 @@ +import math + +import pytest +import torch + +from outlines.generate.generator import bias_logits, token_generator + + +def test_generator_error(): + def model(*_): + raise IndexError + + def sampler(): + return None + + generator = token_generator(model, sampler, 1, None) + next(generator) + with pytest.raises(StopIteration): + generator.send(((None, None, None), None)) + + +@pytest.mark.parametrize( + "logits,indices_to_mask,expected", + [ + ( + torch.tensor([[1, 2, 3, 4]], dtype=torch.float), + [[]], + torch.tensor([[1, 2, 3, 4]], dtype=torch.float), + ), + ( + torch.tensor([[1, 2, 3, 4]], dtype=torch.float), + [[1]], + torch.tensor([[1, -math.inf, 3, 4]], dtype=torch.float), + ), + ( + torch.tensor([[1, 2, 3, 4]], dtype=torch.float), + [[1, 3]], + torch.tensor([[1, -math.inf, 3, -math.inf]], dtype=torch.float), + ), + ( + torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float), + [[0], [2]], + torch.tensor([[-math.inf, 2, 3], [4, 5, -math.inf]], dtype=torch.float), + ), + ( + torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float), + [[1], [0, 2]], + torch.tensor( + [[1, -math.inf, 3], [-math.inf, 5, -math.inf]], dtype=torch.float + ), + ), + ], +) +def test_bias_logits(logits, indices_to_mask, expected): + masked_logits = bias_logits(logits, indices_to_mask) + assert torch.equal(masked_logits, expected) + + +def test_generator_1d(): + def model(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits) + + # 1D, no bias + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[]])) + assert result == 3 + + # 1D, bias one + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[3]])) + assert result == 2 + + # 1D, bias two + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[2, 3]])) + assert result == 1 + + +def test_generator_2d(): + def model(*_): + return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, dim=1) + + # 2D, no bias + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[]])) + assert torch.equal(result, torch.tensor([3, 3])) + + # 2D, bias one each + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[3], [3]])) + assert torch.equal(result, torch.tensor([2, 2])) + + # 2D, 
bias one + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[3], []])) + assert torch.equal(result, torch.tensor([2, 3])) + + # 2D, bias different number + generator = token_generator(model, sampler, 1, None) + next(generator) + result, _ = generator.send(((None, None, None), [[3], [2, 3]])) + assert torch.equal(result, torch.tensor([2, 1])) + + +@pytest.mark.xfail +def get_next_fsm_states(): + raise NotImplementedError + + +@pytest.mark.xfail +def get_next_instructions(): + raise NotImplementedError + + +@pytest.mark.xfail +def is_generation_finished(): + raise NotImplementedError + + +@pytest.mark.xfail +def update_token_ids(): + raise NotImplementedError + + +@pytest.mark.xfail +def update_attention_masks(): + raise NotImplementedError diff --git a/tests/text/generate/test_integration_transfomers.py b/tests/generate/test_integration_transfomers.py similarity index 99% rename from tests/text/generate/test_integration_transfomers.py rename to tests/generate/test_integration_transfomers.py index 04bd0d46..8de6008d 100644 --- a/tests/text/generate/test_integration_transfomers.py +++ b/tests/generate/test_integration_transfomers.py @@ -7,10 +7,10 @@ import torch from pydantic import BaseModel, constr +import outlines.generate as generate import outlines.models as models -import outlines.text.generate as generate +from outlines.index.fsm import reduced_vocabulary from outlines.models.transformers import TransformerTokenizer -from outlines.text.fsm import reduced_vocabulary def test_transformers_integration_continuation(): diff --git a/tests/text/generate/test_regex.py b/tests/generate/test_regex.py similarity index 97% rename from tests/text/generate/test_regex.py rename to tests/generate/test_regex.py index 642e383d..ede00d68 100644 --- a/tests/text/generate/test_regex.py +++ b/tests/generate/test_regex.py @@ -4,9 +4,9 @@ import pytest import torch -import outlines.text.generate as generate -from outlines.text.fsm import create_fsm_index_tokenizer, make_deterministic_fsm -from outlines.text.generate.regex import Regex +import outlines.generate as generate +from outlines.generate.regex import Regex +from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm class Tokenizer: diff --git a/tests/text/generate/test_samplers.py b/tests/generate/test_samplers.py similarity index 95% rename from tests/text/generate/test_samplers.py rename to tests/generate/test_samplers.py index 5e1543fc..5928a1ae 100644 --- a/tests/text/generate/test_samplers.py +++ b/tests/generate/test_samplers.py @@ -2,11 +2,7 @@ import torch -from outlines.text.generate.samplers import ( - greedy, - multinomial, - vectorized_random_choice, -) +from outlines.generate.samplers import greedy, multinomial, vectorized_random_choice def test_greedy(): diff --git a/tests/text/generate/test_sequence.py b/tests/generate/test_sequence.py similarity index 99% rename from tests/text/generate/test_sequence.py rename to tests/generate/test_sequence.py index e5ede8c5..08c27f1f 100644 --- a/tests/text/generate/test_sequence.py +++ b/tests/generate/test_sequence.py @@ -5,9 +5,9 @@ import pytest import torch +from outlines.generate.sequence import Sequence from outlines.models import OpenAI from outlines.models.tokenizer import Tokenizer -from outlines.text.generate.sequence import Sequence def test_openai_error(): diff --git a/tests/text/partial_python.lark b/tests/index/partial_python.lark similarity index 100% rename from 
tests/text/partial_python.lark rename to tests/index/partial_python.lark diff --git a/tests/text/test_fsm.py b/tests/index/test_fsm.py similarity index 99% rename from tests/text/test_fsm.py rename to tests/index/test_fsm.py index 7091fd0b..db4aadac 100644 --- a/tests/text/test_fsm.py +++ b/tests/index/test_fsm.py @@ -2,8 +2,7 @@ import numba import pytest -from outlines.models.transformers import TransformerTokenizer -from outlines.text.fsm import ( +from outlines.index.fsm import ( _walk_fsm, create_fsm_index_end_to_end, create_fsm_index_tokenizer, @@ -12,6 +11,7 @@ make_deterministic_fsm, walk_fsm, ) +from outlines.models.transformers import TransformerTokenizer def walk_fsm_numba( @@ -321,7 +321,7 @@ def test_json_index_performance(): from pydantic import BaseModel, constr import outlines.models as models - from outlines.text.generate.regex import Regex, build_regex_from_object + from outlines.generate.regex import Regex, build_regex_from_object class Weapon(str, Enum): sword = "sword" diff --git a/tests/text/test_json_schema.py b/tests/index/test_json_schema.py similarity index 99% rename from tests/text/test_json_schema.py rename to tests/index/test_json_schema.py index a0af780e..a4be6a42 100644 --- a/tests/text/test_json_schema.py +++ b/tests/index/test_json_schema.py @@ -5,7 +5,7 @@ import pytest from pydantic import BaseModel, constr -from outlines.text.json_schema import ( +from outlines.index.json_schema import ( BOOLEAN, INTEGER, NULL, diff --git a/tests/text/test_parsing.py b/tests/index/test_parsing.py similarity index 97% rename from tests/text/test_parsing.py rename to tests/index/test_parsing.py index 8c7dff92..f3ec1a9a 100644 --- a/tests/text/test_parsing.py +++ b/tests/index/test_parsing.py @@ -4,14 +4,14 @@ from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken -from outlines.text.parsing import PartialLark, PartialPythonIndenter +from outlines.index.parsing import PartialLark, PartialPythonIndenter def test_partial_parsing(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["text"], + ["index"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", @@ -123,7 +123,7 @@ def test_partial_parsing(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["text"], + ["index"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", @@ -160,7 +160,7 @@ def test_sequential_parse_example(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["text"], + ["index"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", diff --git a/tests/text/test_types.py b/tests/index/test_types.py similarity index 93% rename from tests/text/test_types.py rename to tests/index/test_types.py index d70d5bd7..606b70e6 100644 --- a/tests/text/test_types.py +++ b/tests/index/test_types.py @@ -2,7 +2,7 @@ import pytest -from outlines.text.types import ( +from outlines.index.types import ( BOOLEAN, DATE, DATETIME, From e7d238441c3b28f313e47e1117b1d27cb946cda3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 16 Nov 2023 17:03:02 +0100 Subject: [PATCH 322/734] Add `Index` type --- outlines/generate/generator.py | 3 ++- outlines/index/index.py | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 outlines/index/index.py diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index 1a6dfde4..f76e449a 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -6,6 +6,7 @@ 
if TYPE_CHECKING: from outlines.generate.samplers import Sampler + from outlines.index.index import Index @dataclass @@ -15,7 +16,7 @@ class GenerationState: kv_cache: Optional[torch.Tensor] = None -def process(generator: Generator, index, state: GenerationState): +def process(generator: Generator, index: "Index", state: GenerationState): """This generator drives the text generation process by walking through the FSM.""" next(generator) diff --git a/outlines/index/index.py b/outlines/index/index.py new file mode 100644 index 00000000..89e49393 --- /dev/null +++ b/outlines/index/index.py @@ -0,0 +1,11 @@ +from typing import Callable, NamedTuple, NewType + +import torch + +State = NewType("State", int) + + +class Index(NamedTuple): + next_instruction: Callable[[State], torch.Tensor] + next_state: Callable[[State, torch.Tensor], State] + is_final: Callable[[State], bool] From 5f8c94b18fd5bfbf72dbc22cda03f5e6ff60392b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 15 Nov 2023 12:15:37 +0100 Subject: [PATCH 323/734] Add generator that samples the next tokens --- outlines/generate/generator.py | 297 +++++++++++++++---- outlines/generate/text.py | 10 + outlines/index/index.py | 34 ++- tests/generate/test_generator.py | 479 +++++++++++++++++++++++++------ 4 files changed, 667 insertions(+), 153 deletions(-) create mode 100644 outlines/generate/text.py diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index f76e449a..1c5feb25 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -1,115 +1,294 @@ +import dataclasses import math -from dataclasses import dataclass -from typing import TYPE_CHECKING, Generator, List, Optional +from typing import TYPE_CHECKING, Callable, List, Optional, Union import torch +from outlines.index.index import FSMState + if TYPE_CHECKING: from outlines.generate.samplers import Sampler - from outlines.index.index import Index + from outlines.index.index import FSM -@dataclass +@dataclasses.dataclass(frozen=True) class GenerationState: token_ids: torch.Tensor attention_masks: torch.Tensor kv_cache: Optional[torch.Tensor] = None -def process(generator: Generator, index: "Index", state: GenerationState): - """This generator drives the text generation process by - walking through the FSM.""" - next(generator) +class SequenceGenerator: + def __init__(self, fsm, model, sampler, device): + self.generate_token = token_generator(model, sampler) + self.fsm = fsm + self.tokenizer = model.tokenizer + self.device = device + + def init_generation_state( + self, + prompt: Union[str, List[str]], + kv_cache: Optional[torch.Tensor] = None, + rng: Optional[torch.Generator] = None, + ): + """Initialize the generation state. + + This method is responsible for encoding the prompt, moving token ids + to the device and initializing the random number generator. + + Parameters + ---------- + prompt + The prompt on which the generation is conditioned. + rng + The state of the random number generator. + + Returns + ------- + A `GenerationState` object. 
+ + """ + token_ids, attention_masks = self.tokenizer.encode(prompt) + token_ids = token_ids.to(self.device) + attention_masks = attention_masks.to(self.device) + + return GenerationState(token_ids, attention_masks, kv_cache) + + def __call__( + self, + prompt, + kv_cache: Optional[torch.tensor] = None, + rng: Optional[torch.Generator] = None, + ): + sequence_generator = self.stream(prompt, rng) + *_, last = sequence_generator + return last + + def stream( + self, + prompt: str, + kv_cache: Optional[torch.tensor] = None, + rng: Optional[torch.Generator] = None, + ): + if rng is None: + rng = torch.Generator(device=self.device) + rng.seed() + + init_state = self.init_generation_state(prompt, kv_cache, rng) + + num_sequences = init_state.token_ids.shape[0] + init_fsm_states = [FSMState(0) for _ in range(num_sequences)] + + return sequence_generator( + self.generate_token, self.fsm, init_state, init_fsm_states, rng + ) + + +def sequence_generator( + token_generator: Callable, + fsm: "FSM", + init_state: GenerationState, + fsm_states: List[FSMState], + rng: torch.Generator, +): + """Generates sequences of tokens. + + Parameters + ---------- + token_generator + A callable that generate a new token given the current generation state + and logits biases. + fsm + The finite-state machine that drives the text generation. + init_state + The initial generation state for the batches. + fsm_states + The initial states of the finite-state machine for each sequence in the batch. + + Yields + ------ + A new generation state. - fsm_states = [0 for _ in range(state.token_ids.shape[0])] + """ + state = init_state while True: - logits_mask = get_next_instructions(index, fsm_states) + logits_masks = get_next_instructions(fsm, fsm_states) - next_token_ids, kv_cache = generator.send((state, logits_mask)) + next_token_ids, kv_cache = token_generator( + **dataclasses.asdict(state), + rng=rng, + logits_masks=logits_masks, + ) token_ids = update_token_ids(state.token_ids, next_token_ids) - attention_masks = update_attention_masks(state.attention_masks) + attention_masks = expand_attention_masks(state.attention_masks) state = GenerationState(token_ids, attention_masks, kv_cache) - fsm_states = get_next_fsm_states(index, fsm_states, next_token_ids) - is_finished = is_generation_finished(index, fsm_states) + fsm_states = get_next_fsm_states(fsm, fsm_states, next_token_ids) + is_finished = is_generation_finished(fsm, fsm_states) if is_finished: - yield token_ids, next_token_ids + yield state return yield state +def token_generator(model, sampler: "Sampler") -> Callable: + """Generate one token at a time. + + This process is designed to be steered by another supervising + process that supplies the current sequence and the indices + of the tokens to mask before sampling. + + Parameters + ---------- + model + A model that takes a sequence of tokens as an input and + returns a probability distribution over the next tokens. + sampler + A function that samples tokens from a probability + distribution over the next tokens. + + Returns + ------- + A tensor with the sampled tokens. + + """ + + def generate( + token_ids, + attention_masks, + kv_cache, + logits_masks, + rng: torch.Generator, + ) -> Union[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + try: + logits, new_kv_cache = model(token_ids, attention_masks, kv_cache) + except IndexError: # Exceeding the context length + raise IndexError( + "The input length exceeds the context length of the model." 
+ ) + + biased_logits = bias_logits(logits, logits_masks) + next_token_ids = sampler(biased_logits, 1, rng) + + return next_token_ids, new_kv_cache + + return generate + + def get_next_fsm_states( - index, fsm_states: List[int], next_token_ids: torch.Tensor -) -> List[int]: + fsm: "FSM", fsm_states: List[FSMState], next_token_ids: torch.Tensor +) -> List[FSMState]: + """ + + Parameters + ---------- + fsm + The finite-state machine used to monitor this batch. + fsm_states + The FSM states corresponding to each sequence in the batch. + next_token_ids + The tokens that were just generated. + + Returns + ------- + The updated FSM state of each sequence in the batch. + + """ return [ - index.next_state(fsm_state, token_id) + fsm.next_state(fsm_state, token_id) for fsm_state, token_id in zip(fsm_states, next_token_ids) ] -def get_next_instructions(index, fsm_states: List[int]) -> torch.Tensor: - return [index.next_instruction(state) for state in fsm_states] +def get_next_instructions(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: + """Get the new instructions for each sequence from the finite-state machine. + Parameters + ---------- + fsm + The finite-state machine used to monitor this batch. + fsm_states + The FSM states corresponding to each sequence in the batch. -def is_generation_finished(index, fsm_states: List[int]) -> bool: - return all([index.is_finished(state) for state in fsm_states]) + Returns + ------- + A nested list that contains the ids of the logits to bias. + """ + return [fsm.next_instruction(state) for state in fsm_states] -def update_token_ids( - token_ids: torch.Tensor, next_token_ids: torch.Tensor -) -> torch.Tensor: - return torch.concatenate([token_ids, next_token_ids], dim=-1) +def is_generation_finished(fsm: "FSM", fsm_states: List[FSMState]) -> bool: + """Determine if the generation is finished. -def update_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: - return torch.concatenate( - [ - attention_masks, - torch.ones( - attention_masks.shape[:-1] + (1,), device=attention_masks.device - ), - ], - axis=-1, - ) + A generation is considered finished if the FSM of every sequence in the + batch is in a final state. + A better solution is to return finished sequences as soon as their FSM + is in a final state. -def token_generator(model, sampler: "Sampler", samples: int, rng: torch.Generator): - """Generator that yields a token every time it is called. + Parameters + ---------- + fsm + The finite-state machine used to monitor this batch. + fsm_states + The FSM states corresponding to each sequence in the batch. + + Returns + ------- + Whether all sequences are finished sampling. + + """ + return all([fsm.is_final_state(state) for state in fsm_states]) - This process is designed to be steered by another supervising - process that supplies the current sequence and the indices - of the tokens to mask before sampling. + +def update_token_ids( + token_ids: torch.Tensor, next_token_ids: torch.Tensor +) -> torch.Tensor: + """Append the sampled tokens to the running sequence of tokens. Parameters ---------- - model - A model that takes a sequence of tokens as an input and - returns a probability distribution over the next tokens. - sampler - A function that samples tokens from a probability - distribution over the next tokens. + token_ids + The current token sequences. + next_token_ids + The tokens that were just generated and that we need to append + to the existing sequences. - Yields - ------ - A tensor with the sampled tokens.
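# A tensor-only walk through one generation step as the helpers above
# implement it: bias the logits by masking forbidden ids, sample (argmax
# as a stand-in sampler), append the token, and grow the attention mask.
import math

import torch

logits = torch.tensor([[0.0, 1.0, 2.0, 3.0]])
ids_to_mask = [[2, 3]]  # forbid token ids 2 and 3 for this sequence

biased = logits.clone()
for i, ids in enumerate(ids_to_mask):
    biased[i, ids] = -math.inf

next_token_ids = torch.argmax(biased, dim=-1, keepdim=True)
assert next_token_ids.item() == 1

token_ids = torch.tensor([[7, 8]])
token_ids = torch.concatenate([token_ids, next_token_ids], dim=-1)
attention_masks = torch.ones((1, 2))
attention_masks = torch.concatenate([attention_masks, torch.ones((1, 1))], axis=-1)
assert token_ids.shape == attention_masks.shape == (1, 3)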
+ Returns + ------- + A new sequence of token ids that contains the tokens that were + just generated. """ - while True: - (token_ids, attention_masks, kv_cache), logits_mask = yield + return torch.concatenate([token_ids, next_token_ids], dim=-1) - try: - logits, new_kv_cache = model(token_ids, attention_masks, kv_cache) - except IndexError: # Exceeding the context length - return - biased_logits = bias_logits(logits, logits_mask) - next_token_ids = sampler(biased_logits, samples, rng) +def expand_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: + """Expand the attention masks. + + Parameters + ---------- + attention_masks + The attention masks for each sequence in the batch. - yield next_token_ids, new_kv_cache + Returns + ------- + The attention masks padded with 1s. + + """ + return torch.concatenate( + [ + attention_masks, + torch.ones( + attention_masks.shape[:-1] + (1,), device=attention_masks.device + ), + ], + axis=-1, + ) +@torch.inference_mode() def bias_logits( logits: torch.Tensor, ids_to_mask: List, diff --git a/outlines/generate/text.py b/outlines/generate/text.py new file mode 100644 index 00000000..38bc8d53 --- /dev/null +++ b/outlines/generate/text.py @@ -0,0 +1,10 @@ +class text: + def __init__(self): + pass + + def __call__(self, prompt): + pass + + def __iter__(self): + # This is something + pass diff --git a/outlines/index/index.py b/outlines/index/index.py index 89e49393..aa251ed8 100644 --- a/outlines/index/index.py +++ b/outlines/index/index.py @@ -1,11 +1,33 @@ -from typing import Callable, NamedTuple, NewType +from dataclasses import dataclass +from typing import NewType, Protocol, Union import torch -State = NewType("State", int) +FSMState = NewType("FSMState", int) -class Index(NamedTuple): - next_instruction: Callable[[State], torch.Tensor] - next_state: Callable[[State, torch.Tensor], State] - is_final: Callable[[State], bool] +@dataclass(frozen=True) +class GenerateInstruction: + logits_mask: str + temperature: float + top_k: int + top_p: int + + +@dataclass(frozen=True) +class FillInstruction: + token_ids: int + + +FSMInstruction = Union[GenerateInstruction, FillInstruction] + + +class FSM(Protocol): + def next_instruction(self, state: FSMState) -> FSMInstruction: + ... + + def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + ... + + def is_final_state(self, state: FSMState) -> bool: + ... 
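# A minimal, hypothetical object that satisfies the `FSM` protocol above.
# Like the mock FSMs used in the tests that follow, `next_instruction`
# returns the list of token ids to mask (empty: every token is allowed).


class SingleStepFSM:
    def next_instruction(self, state: int) -> list:
        return []

    def next_state(self, state: int, token_id: int) -> int:
        return 1  # one transition, regardless of the sampled token

    def is_final_state(self, state: int) -> bool:
        return state == 1


fsm = SingleStepFSM()
assert not fsm.is_final_state(0)
assert fsm.is_final_state(fsm.next_state(0, token_id=42))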
diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index b9e79982..56b0ae49 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -1,9 +1,278 @@ import math +from typing import Generator import pytest import torch -from outlines.generate.generator import bias_logits, token_generator +from outlines.generate.generator import ( + GenerationState, + SequenceGenerator, + bias_logits, + expand_attention_masks, + get_next_fsm_states, + get_next_instructions, + is_generation_finished, + sequence_generator, + token_generator, + update_token_ids, +) +from outlines.index.index import FSMState + + +def test_sequence_generator_class(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 0 + + def next_instruction(self, _): + return [] + + def is_final_state(self, _): + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True) + + # Stream + generator = SequenceGenerator(MockFSM(), MockModel(), sampler, "cpu") + assert generator.device == "cpu" + assert isinstance(generator.tokenizer, MockTokenizer) + assert isinstance(generator.fsm, MockFSM) + assert callable(generator.generate_token) + + result = generator.init_generation_state("test") + assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3]])) + assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1]])) + assert result.kv_cache is None + + sequence = generator.stream("test") + assert isinstance(sequence, Generator) + + state = next(sequence) + assert torch.equal(state.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) + assert torch.equal(state.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + + with pytest.raises(StopIteration): + state = next(sequence) + + # Call + generator = SequenceGenerator(MockFSM(), MockModel(), sampler, "cpu") + result = generator("test") + assert torch.equal(state.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) + assert torch.equal(state.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + + +def test_sequence_generator_1d_single_iteration(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 0 + + def next_instruction(self, _): + return [] + + def is_final_state(self, _): + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True) + + init_state = GenerationState( + torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None + ) + init_fsm_states = [0] + generate = token_generator(MockModel(), sampler) + sequence = sequence_generator( + generate, MockFSM(), init_state, init_fsm_states, torch.Generator() + ) + result = next(sequence) + + assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) + assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + + with pytest.raises(StopIteration): + next(sequence) + + +def test_sequence_generator_1d_several_iterations(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 
FSMState(state + 1) + + def next_instruction(self, _): + return [] + + def is_final_state(self, state): + if state < 2: + return False + else: + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True) + + init_state = GenerationState( + torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None + ) + init_fsm_states = [0] + generate = token_generator(MockModel(), sampler) + sequence = sequence_generator( + generate, MockFSM(), init_state, init_fsm_states, torch.Generator() + ) + + result = next(sequence) + assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) + assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + + result = next(sequence) + assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3, 3]])) + assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1, 1]])) + + with pytest.raises(StopIteration): + next(sequence) + + +def test_sequence_generator_2d_single_iteration(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 0 + + def next_instruction(self, _): + return [] + + def is_final_state(self, _): + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), torch.tensor( + [[1, 1, 1, 1], [1, 1, 1, 1]] + ) + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3], [4, 5, 7, 6]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True, dim=-1) + + init_state = GenerationState( + torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), + torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]]), + None, + ) + init_fsm_states = [0, 0] + generate = token_generator(MockModel(), sampler) + sequence = sequence_generator( + generate, MockFSM(), init_state, init_fsm_states, torch.Generator() + ) + + result = next(sequence) + assert torch.equal( + result.token_ids, torch.tensor([[0, 1, 2, 3, 3], [4, 5, 6, 7, 2]]) + ) + assert torch.equal( + result.attention_masks, torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]) + ) + + with pytest.raises(StopIteration): + next(sequence) + + +def test_sequence_generator_2d_several_iterations(): + class MockFSM: + def next_state(self, state, next_token_ids): + return FSMState(state + 1) + + def next_instruction(self, _): + return [] + + def is_final_state(self, state): + if state < 2: + return False + else: + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), torch.tensor( + [[1, 1, 1, 1], [1, 1, 1, 1]] + ) + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3], [4, 5, 7, 6]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True, dim=-1) + + init_state = GenerationState( + torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), + torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]]), + None, + ) + init_fsm_states = [0, 0] + generate = token_generator(MockModel(), sampler) + sequence = sequence_generator( + generate, MockFSM(), init_state, init_fsm_states, torch.Generator() + ) + + result = next(sequence) + assert 
torch.equal( + result.token_ids, torch.tensor([[0, 1, 2, 3, 3], [4, 5, 6, 7, 2]]) + ) + assert torch.equal( + result.attention_masks, torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]) + ) + + result = next(sequence) + assert torch.equal( + result.token_ids, torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 7, 2, 2]]) + ) + assert torch.equal( + result.attention_masks, torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]) + ) + + with pytest.raises(StopIteration): + next(sequence) def test_generator_error(): @@ -13,10 +282,127 @@ def model(*_): def sampler(): return None - generator = token_generator(model, sampler, 1, None) - next(generator) - with pytest.raises(StopIteration): - generator.send(((None, None, None), None)) + generator = token_generator(model, sampler) + with pytest.raises(IndexError, match="The input length"): + generator(None, None, None, None, None) + + +@pytest.mark.parametrize( + "logits_biases,expected_result", + [ + ([[]], [[3]]), + ([[3]], [[2]]), + ([[2, 3]], [[1]]), + ], +) +def test_generator_1d(logits_biases, expected_result): + def model(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True) + + generator = token_generator(model, sampler) + result, _ = generator(None, None, None, logits_biases, None) + assert torch.equal(result, torch.tensor(expected_result)) + + +@pytest.mark.parametrize( + "logits_biases,expected_result", + [ + ([[]], [[3], [3]]), + ([[3], [3]], [[2], [2]]), + ([[3], []], [[2], [3]]), + ([[2, 3], [3]], [[1], [2]]), + ], +) +def test_generator_2d(logits_biases, expected_result): + def model(*_): + return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, dim=1, keepdims=True) + + generator = token_generator(model, sampler) + result, _ = generator(None, None, None, logits_biases, None) + assert torch.equal(result, torch.tensor(expected_result)) + + +def test_get_next_fsm_states(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 0 + + result = get_next_fsm_states(MockFSM(), [0], torch.tensor([[0]])) + assert result == [0] + + result = get_next_fsm_states(MockFSM(), [0, 0], torch.tensor([[0], [0]])) + assert result == [0, 0] + + +def test_get_next_instructions(): + class MockFSM: + def next_instruction(self, _): + return [1, 2, 3, 4] + + result = get_next_instructions(MockFSM(), [0]) + assert result == [[1, 2, 3, 4]] + + result = get_next_instructions(MockFSM(), [0, 1]) + assert result == [[1, 2, 3, 4], [1, 2, 3, 4]] + + +def test_is_generation_finished(): + class MockFSMFinished: + def is_final_state(self, _): + return True + + result = is_generation_finished(MockFSMFinished(), [1, 1]) + assert result is True + + class MockFSMNotFinished: + def is_final_state(self, state): + if state == 0: + return False + else: + return True + + result = is_generation_finished(MockFSMNotFinished(), [0, 1]) + assert result is False + + +@pytest.mark.parametrize( + "token_ids,next_token_ids,expected_result", + [ + (torch.tensor([[1]]), torch.tensor([[2]]), torch.tensor([[1, 2]])), + ( + torch.tensor([[1], [1]]), + torch.tensor([[2], [3]]), + torch.tensor([[1, 2], [1, 3]]), + ), + ], +) +def test_update_token_ids(token_ids, next_token_ids, expected_result): + result = update_token_ids(token_ids, next_token_ids) + assert torch.equal(result, expected_result) + + +@pytest.mark.parametrize( + "attention_masks,expected_result", + [ + ( + torch.tensor([[1, 1]], 
dtype=torch.float), + torch.tensor([[1, 1, 1]], dtype=torch.float), + ), + ( + torch.tensor([[1, 1], [1, 1]], dtype=torch.float), + torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=torch.float), + ), + ], +) +def test_expand_attention_masks(attention_masks, expected_result): + result = expand_attention_masks(attention_masks) + assert torch.equal(result, expected_result) @pytest.mark.parametrize( @@ -54,86 +440,3 @@ def sampler(): def test_bias_logits(logits, indices_to_mask, expected): masked_logits = bias_logits(logits, indices_to_mask) assert torch.equal(masked_logits, expected) - - -def test_generator_1d(): - def model(*_): - return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None - - def sampler(biased_logits, *_): - return torch.argmax(biased_logits) - - # 1D, no bias - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[]])) - assert result == 3 - - # 1D, bias one - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[3]])) - assert result == 2 - - # 1D, bias two - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[2, 3]])) - assert result == 1 - - -def test_generator_2d(): - def model(*_): - return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float), None - - def sampler(biased_logits, *_): - return torch.argmax(biased_logits, dim=1) - - # 2D, no bias - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[]])) - assert torch.equal(result, torch.tensor([3, 3])) - - # 2D, bias one each - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[3], [3]])) - assert torch.equal(result, torch.tensor([2, 2])) - - # 2D, bias one - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[3], []])) - assert torch.equal(result, torch.tensor([2, 3])) - - # 2D, bias different number - generator = token_generator(model, sampler, 1, None) - next(generator) - result, _ = generator.send(((None, None, None), [[3], [2, 3]])) - assert torch.equal(result, torch.tensor([2, 1])) - - -@pytest.mark.xfail -def get_next_fsm_states(): - raise NotImplementedError - - -@pytest.mark.xfail -def get_next_instructions(): - raise NotImplementedError - - -@pytest.mark.xfail -def is_generation_finished(): - raise NotImplementedError - - -@pytest.mark.xfail -def update_token_ids(): - raise NotImplementedError - - -@pytest.mark.xfail -def update_attention_masks(): - raise NotImplementedError From 0c67ab5457dc6998cc2815c97101cfb5d458f3b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 22 Nov 2023 15:13:57 +0100 Subject: [PATCH 324/734] Create FSM that stops generation when token found --- outlines/index/index.py | 47 +++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/outlines/index/index.py b/outlines/index/index.py index aa251ed8..525c164b 100644 --- a/outlines/index/index.py +++ b/outlines/index/index.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import NewType, Protocol, Union +from typing import List, NewType, Optional, Protocol import torch @@ -8,22 +8,11 @@ @dataclass(frozen=True) class GenerateInstruction: - logits_mask: str - temperature: float - top_k: int - top_p: int - - -@dataclass(frozen=True) 
-class FillInstruction: - token_ids: int - - -FSMInstruction = Union[GenerateInstruction, FillInstruction] + tokens_to_mask: List[int] class FSM(Protocol): - def next_instruction(self, state: FSMState) -> FSMInstruction: + def next_instruction(self, state: FSMState) -> GenerateInstruction: ... def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: @@ -31,3 +20,33 @@ def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: def is_final_state(self, state: FSMState) -> bool: ... + + +class StopAtTokenFSM: + def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): + self.stop_token_id = stop_token_id + self.max_tokens = max_tokens + self.num_tokens_generated = 0 + + def next_instructions(self, _: FSMState) -> GenerateInstruction: + return GenerateInstruction([]) + + def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + self.num_tokens_generated += 1 + + if token_id == self.stop_token_id: + return FSMState(1) + else: + return FSMState(0) + + def is_final_state(self, state: FSMState) -> bool: + # Stop if the maximum number of tokens has been generated + # regardless of whether the stop token id has been found. + if self.max_tokens is not None: + if self.num_tokens_generated == self.max_tokens: + return True + + if state == 1: + return True + else: + return False From 9c77ce175f859cc1dcc3aa5442256a504b2deca6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 22 Nov 2023 15:39:39 +0100 Subject: [PATCH 325/734] Create Regex FSM --- outlines/index/index.py | 90 +++++++++++++++++++++++++++++++++++---- tests/index/test_index.py | 50 ++++++++++++++++++++++ 2 files changed, 131 insertions(+), 9 deletions(-) create mode 100644 tests/index/test_index.py diff --git a/outlines/index/index.py b/outlines/index/index.py index 525c164b..042bf2f4 100644 --- a/outlines/index/index.py +++ b/outlines/index/index.py @@ -1,18 +1,18 @@ -from dataclasses import dataclass -from typing import List, NewType, Optional, Protocol +from typing import TYPE_CHECKING, List, NewType, Optional, Protocol +import interegular import torch -FSMState = NewType("FSMState", int) +from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm +if TYPE_CHECKING: + from outlines.models.tokenizer import Tokenizer -@dataclass(frozen=True) -class GenerateInstruction: - tokens_to_mask: List[int] +FSMState = NewType("FSMState", int) class FSM(Protocol): - def next_instruction(self, state: FSMState) -> GenerateInstruction: + def next_instruction(self, state: FSMState) -> torch.Tensor: ... 
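# A hand walk through the `StopAtTokenFSM` added in the previous patch,
# assuming the module layout at this point in the series, that token id 1
# is the EOS token, and that at most 3 tokens may be generated.
from outlines.index.index import FSMState, StopAtTokenFSM

fsm = StopAtTokenFSM(stop_token_id=1, max_tokens=3)

state = FSMState(0)
state = fsm.next_state(state, 7)  # a regular token keeps the FSM in state 0
assert not fsm.is_final_state(state)
state = fsm.next_state(state, 1)  # the stop token moves the FSM to state 1
assert fsm.is_final_state(state)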
def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: @@ -28,8 +28,8 @@ def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): self.max_tokens = max_tokens self.num_tokens_generated = 0 - def next_instructions(self, _: FSMState) -> GenerateInstruction: - return GenerateInstruction([]) + def next_instruction(self, _: FSMState) -> List[int]: + return [] def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: self.num_tokens_generated += 1 @@ -50,3 +50,75 @@ def is_final_state(self, state: FSMState) -> bool: return True else: return False + + +class RegexFSM: + def __init__( + self, + regex_string: str, + tokenizer: "Tokenizer", + max_tokens: Optional[int] = None, + ): + regex_pattern = interegular.parse_pattern(regex_string) + regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) + ( + self.states_to_token_maps, + self.empty_token_ids, + ) = create_fsm_index_tokenizer(regex_fsm, tokenizer) + + if not any( + regex_fsm.finals.intersection(v.values()) + for v in self.states_to_token_maps.values() + ): + raise ValueError( + "The vocabulary does not allow us to build a sequence that matches the input regex" + ) + + self.final_states = regex_fsm.finals | { + -1 + } # Include the EOS token in final states + self.max_tokens = max_tokens + self.num_tokens_generated = 0 + self.vocabulary = tokenizer.vocabulary.values() + self.end_token = tokenizer.eos_token_id + + def next_instruction(self, state: FSMState) -> List[int]: + next_tokens_to_end_states = self.states_to_token_maps.get(state) + + if next_tokens_to_end_states is None: + # If there are no transitions from the current state, + # then we must've been in a final state of the FSM. + # We produce EOS tokens from here on. + authorized_tokens = [self.end_token] + else: + authorized_tokens = list(next_tokens_to_end_states.keys()) + + forbidden_tokens = [ + token for token in self.vocabulary if token not in authorized_tokens + ] + + return list(forbidden_tokens) + + def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + self.num_tokens_generated += 1 + + if token_id == self.end_token: + return FSMState(-1) + + last_token_to_end_state = self.states_to_token_maps[state] + next_state = last_token_to_end_state.get(int(token_id)) + if next_state is None: + next_state = -1 + + return FSMState(next_state) + + def is_final_state(self, state: FSMState) -> bool: + # Stop if the maximum number of tokens has been generated + # regardless of whether the stop token id has been found. 
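# The first step of `RegexFSM.__init__`, in isolation: compile the regex
# to a deterministic finite automaton with `interegular` before any
# tokenizer-specific indexing happens. A sketch of the library calls used
# above, independent of the rest of the class.
import interegular

regex_pattern = interegular.parse_pattern("[1-9]")
regex_fsm = regex_pattern.to_fsm().reduce()

assert regex_fsm.accepts("5")
assert not regex_fsm.accepts("a")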
+ if self.max_tokens is not None: + if self.num_tokens_generated == self.max_tokens: + return True + elif state in self.final_states: + return True + + return False diff --git a/tests/index/test_index.py b/tests/index/test_index.py new file mode 100644 index 00000000..39f76212 --- /dev/null +++ b/tests/index/test_index.py @@ -0,0 +1,50 @@ +import pytest + +from outlines.index.index import RegexFSM, StopAtTokenFSM + + +def test_stop_at_token(): + fsm = StopAtTokenFSM(1) + + assert fsm.next_instruction(0) == [] + assert fsm.next_state(0, 10) == 0 + assert fsm.next_state(0, 1) == 1 + assert fsm.is_final_state(0) is False + assert fsm.is_final_state(1) is True + + +def test_regex_vocabulary_error(): + class MockTokenizer: + vocabulary = {"a": 1} + special_tokens = {"eos"} + + def convert_token_to_string(self, token): + return token + + regex_str = "[1-9]" + + with pytest.raises(ValueError, match="The vocabulary"): + RegexFSM(regex_str, MockTokenizer()) + + +def test_regex(): + class MockTokenizer: + vocabulary = {"1": 1, "a": 2, "eos": 3} + special_tokens = {"eos"} + eos_token_id = 3 + + def convert_token_to_string(self, token): + return token + + regex_str = "[1-9]" + tokenizer = MockTokenizer() + fsm = RegexFSM(regex_str, tokenizer) + + assert fsm.states_to_token_maps == {0: {1: 1}} + assert fsm.next_instruction(state=0) == [2, 3] + assert fsm.next_state(state=0, token_id=1) == 1 + assert fsm.next_state(state=0, token_id=tokenizer.eos_token_id) == -1 + + assert fsm.is_final_state(1) is True + assert fsm.is_final_state(0) is False + assert fsm.is_final_state(-1) is True From ed649c43cca9833fc992d17bb26525c72dabee72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 23 Nov 2023 09:06:31 +0100 Subject: [PATCH 326/734] Add user interface for text generation --- outlines/generate/api.py | 70 ++++++++++ outlines/generate/generator.py | 101 ++++++++++----- outlines/index/index.py | 15 +-- outlines/models/transformers.py | 11 +- pyproject.toml | 2 +- tests/generate/test_generator.py | 120 +++++++++++------- .../generate/test_integration_transfomers.py | 118 +++++++++++++---- tests/index/test_fsm.py | 11 +- tests/models/test_transformers.py | 12 +- 9 files changed, 338 insertions(+), 122 deletions(-) create mode 100644 outlines/generate/api.py diff --git a/outlines/generate/api.py b/outlines/generate/api.py new file mode 100644 index 00000000..7ff45363 --- /dev/null +++ b/outlines/generate/api.py @@ -0,0 +1,70 @@ +import json as pyjson +from typing import Callable, List, Optional, Union + +from pydantic import BaseModel + +from outlines.generate.generator import SequenceGenerator +from outlines.generate.samplers import Sampler, multinomial +from outlines.index.index import RegexFSM, StopAtTokenFSM +from outlines.index.json_schema import ( + build_regex_from_object, + get_schema_from_signature, +) +from outlines.index.types import python_types_to_regex + + +def text(model, max_tokens: Optional[int] = None, *, sampler: Sampler = multinomial): + eos_token = model.tokenizer.eos_token_id + fsm = StopAtTokenFSM(eos_token, max_tokens) + + device = model.device + generator = SequenceGenerator(fsm, model, sampler, device) + + return generator + + +def regex( + model, + regex_str: str, + max_tokens: Optional[int] = None, + sampler: Sampler = multinomial, +): + fsm = RegexFSM(regex_str, model.tokenizer, max_tokens) + + device = model.device + generator = SequenceGenerator(fsm, model, sampler, device) + + return generator + + +def format( + model, python_type, max_tokens: Optional[int] = 
None, sampler: Sampler = multinomial +): + regex_str = python_types_to_regex(python_type) + return regex(model, regex_str, max_tokens, sampler) + + +def choice( + model, + choices: List[str], + max_tokens: Optional[int] = None, + sampler: Sampler = multinomial, +): + regex_str = r"(" + r"|".join(choices) + r")" + return regex(model, regex_str, max_tokens, sampler) + + +def json( + model, + schema_object: Union[str, object, Callable], + max_tokens: Optional[int] = None, + sampler: Sampler = multinomial, +): + if isinstance(schema_object, type(BaseModel)): + schema = pyjson.dumps(schema_object.model_json_schema()) + elif callable(schema_object): + schema = pyjson.dumps(get_schema_from_signature(schema_object)) + + regex_str = build_regex_from_object(schema) + + return regex(model, regex_str, max_tokens, sampler) diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index 1c5feb25..df319d26 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -1,6 +1,6 @@ import dataclasses import math -from typing import TYPE_CHECKING, Callable, List, Optional, Union +from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, Union import torch @@ -14,8 +14,8 @@ @dataclasses.dataclass(frozen=True) class GenerationState: token_ids: torch.Tensor - attention_masks: torch.Tensor - kv_cache: Optional[torch.Tensor] = None + kv_cache: torch.Tensor + logits: torch.Tensor class SequenceGenerator: @@ -29,7 +29,6 @@ def init_generation_state( self, prompt: Union[str, List[str]], kv_cache: Optional[torch.Tensor] = None, - rng: Optional[torch.Generator] = None, ): """Initialize the generation state. @@ -52,45 +51,62 @@ def init_generation_state( token_ids = token_ids.to(self.device) attention_masks = attention_masks.to(self.device) - return GenerationState(token_ids, attention_masks, kv_cache) + return token_ids, attention_masks, kv_cache def __call__( self, prompt, kv_cache: Optional[torch.tensor] = None, rng: Optional[torch.Generator] = None, - ): - sequence_generator = self.stream(prompt, rng) - *_, last = sequence_generator - return last + ) -> Union[str, List[str]]: + sequence_generator = self.stream(prompt, kv_cache, rng) + tokens = [token for token in sequence_generator] + sequences = ["".join(sequence) for sequence in list(zip(*tokens))] + return sequences if len(sequences) > 1 else sequences[0] def stream( self, prompt: str, kv_cache: Optional[torch.tensor] = None, rng: Optional[torch.Generator] = None, - ): + ) -> Iterator[Union[List[str], str]]: if rng is None: rng = torch.Generator(device=self.device) rng.seed() - init_state = self.init_generation_state(prompt, kv_cache, rng) + init_state = self.init_generation_state(prompt, kv_cache) + + token_ids = init_state[1] + num_sequences = token_ids.shape[0] - num_sequences = init_state.token_ids.shape[0] init_fsm_states = [FSMState(0) for _ in range(num_sequences)] - return sequence_generator( + states = sequence_generator( self.generate_token, self.fsm, init_state, init_fsm_states, rng ) + def token_generator() -> Iterator[Union[List[str], str]]: + while True: + try: + sequence = next(states) + except StopIteration: + return + + next_token_ids = sequence.token_ids[:, -1] + next_tokens = self.tokenizer.decode(next_token_ids) + + yield next_tokens + + return token_generator() + def sequence_generator( token_generator: Callable, fsm: "FSM", - init_state: GenerationState, + init_state: Tuple, fsm_states: List[FSMState], rng: torch.Generator, -): +) -> Iterator[GenerationState]: """Generates 
sequences of tokens. Parameters @@ -107,32 +123,35 @@ def sequence_generator( Yields ------ - A new generation state. + A new sequence. """ - state = init_state + token_ids, attention_masks, kv_cache = init_state while True: - logits_masks = get_next_instructions(fsm, fsm_states) + logits_masks = get_next_instruction(fsm, fsm_states) - next_token_ids, kv_cache = token_generator( - **dataclasses.asdict(state), + next_token_ids, kv_cache, logits = token_generator( + token_ids, + attention_masks, + kv_cache, rng=rng, logits_masks=logits_masks, ) - token_ids = update_token_ids(state.token_ids, next_token_ids) - attention_masks = expand_attention_masks(state.attention_masks) - state = GenerationState(token_ids, attention_masks, kv_cache) + token_ids = update_token_ids(token_ids, next_token_ids) + attention_masks = expand_attention_masks(attention_masks) fsm_states = get_next_fsm_states(fsm, fsm_states, next_token_ids) is_finished = is_generation_finished(fsm, fsm_states) + if is_finished: - yield state + yield GenerationState(token_ids, kv_cache, logits) return - yield state + yield GenerationState(token_ids, kv_cache, logits) +@torch.inference_mode def token_generator(model, sampler: "Sampler") -> Callable: """Generate one token at a time. @@ -151,7 +170,9 @@ def token_generator(model, sampler: "Sampler") -> Callable: Returns ------- - A tensor with the sampled tokens. + A tuple that contains a tensor with the sampled tokens, a tensor with + the K-V cache for the sequence and the tensor that contains the next-token + logits that were returned by the model. """ @@ -172,7 +193,7 @@ def generate( biased_logits = bias_logits(logits, logits_masks) next_token_ids = sampler(biased_logits, 1, rng) - return next_token_ids, new_kv_cache + return next_token_ids, new_kv_cache, biased_logits return generate @@ -195,12 +216,12 @@ def get_next_fsm_states( """ return [ - fsm.next_state(fsm_state, token_id) + fsm.next_state(fsm_state, int(token_id[0])) for fsm_state, token_id in zip(fsm_states, next_token_ids) ] -def get_next_instructions(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: +def get_next_instruction(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: """Get the new instructions for each sequence from the finite-state machine. Parameters @@ -288,7 +309,29 @@ def expand_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: ) +<<<<<<< HEAD @torch.inference_mode() +======= +def update_logprobs(logprobs, next_token_ids, next_token_logits): + """Update the sequences' total logprob. + + Parameters + ---------- + logprobs + The current log-probabilities for each sequence. + next_token_ids + The token ids that were just sampled + next_token_logits + The logits returned by the model. 
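# A worked example of the log-probability update implemented just below:
# take the log-softmax of the next-token logits, gather the entry of each
# sampled token, and add it to the running per-sequence total.
import torch

logits = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
next_token_ids = torch.tensor([[1], [0]])
logprobs = torch.zeros(2)

next_token_logprobs = torch.nn.LogSoftmax(dim=-1)(logits)
new_logprobs = next_token_logprobs[range(2), next_token_ids.flatten()]
assert (logprobs + new_logprobs).shape == (2,)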
+ + """ + next_token_logprobs = torch.nn.LogSoftmax(dim=-1)(next_token_logits) + new_logprobs = next_token_logprobs[ + range(next_token_ids.shape[0]), next_token_ids.flatten() + ] + return logprobs + new_logprobs + +@torch.inference_mode def bias_logits( logits: torch.Tensor, ids_to_mask: List, diff --git a/outlines/index/index.py b/outlines/index/index.py index 042bf2f4..e3ba1a84 100644 --- a/outlines/index/index.py +++ b/outlines/index/index.py @@ -1,7 +1,6 @@ from typing import TYPE_CHECKING, List, NewType, Optional, Protocol import interegular -import torch from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm @@ -12,17 +11,17 @@ class FSM(Protocol): - def next_instruction(self, state: FSMState) -> torch.Tensor: + def next_instruction(self, state: FSMState) -> List[int]: ... - def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + def next_state(self, state: FSMState, token_id: int) -> FSMState: ... def is_final_state(self, state: FSMState) -> bool: ... -class StopAtTokenFSM: +class StopAtTokenFSM(FSM): def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): self.stop_token_id = stop_token_id self.max_tokens = max_tokens @@ -31,7 +30,7 @@ def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): def next_instruction(self, _: FSMState) -> List[int]: return [] - def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + def next_state(self, state: FSMState, token_id: int) -> FSMState: self.num_tokens_generated += 1 if token_id == self.stop_token_id: @@ -52,7 +51,7 @@ def is_final_state(self, state: FSMState) -> bool: return False -class RegexFSM: +class RegexFSM(FSM): def __init__( self, regex_string: str, @@ -99,14 +98,14 @@ def next_instruction(self, state: FSMState) -> List[int]: return list(forbidden_tokens) - def next_state(self, state: FSMState, token_id: torch.Tensor) -> FSMState: + def next_state(self, state: FSMState, token_id: int) -> FSMState: self.num_tokens_generated += 1 if token_id == self.end_token: return FSMState(-1) last_token_to_end_state = self.states_to_token_maps[state] - next_state = last_token_to_end_state.get(int(token_id)) + next_state = last_token_to_end_state.get(token_id) if next_state is None: next_state = -1 diff --git a/outlines/models/transformers.py b/outlines/models/transformers.py index 240acbba..9e333bba 100644 --- a/outlines/models/transformers.py +++ b/outlines/models/transformers.py @@ -67,6 +67,7 @@ def __init__( self.model = model self.tokenizer = tokenizer + @torch.inference_mode def forward( self, input_ids: torch.LongTensor, @@ -103,9 +104,8 @@ def forward( output_hidden_states=False, past_key_values=past_key_values, ) - next_token_logits = output.logits[..., -1, :] - return next_token_logits, output.past_key_values + return output.logits, output.past_key_values def __call__( self, @@ -113,7 +113,10 @@ def __call__( attention_mask: torch.LongTensor, past_key_values: Optional[Tuple] = None, ) -> torch.FloatTensor: - return self.forward(input_ids, attention_mask, past_key_values)[0] + logits, kv_cache = self.forward(input_ids, attention_mask, past_key_values) + next_token_logits = logits[..., -1, :] + + return next_token_logits, kv_cache class TransformerTokenizer(Tokenizer): @@ -151,7 +154,7 @@ def encode( return output["input_ids"], output["attention_mask"] def decode(self, token_ids: torch.LongTensor) -> List[str]: - text = self.tokenizer.batch_decode(token_ids) + text = self.tokenizer.batch_decode(token_ids, skip_special_tokens=True) return 
text def convert_token_to_string(self, token: str) -> str: diff --git a/pyproject.toml b/pyproject.toml index 48f3e9dd..60e91f67 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,7 +98,7 @@ module = [ "perscache.*", "PIL", "PIL.Image", - "pydantic", + "pydantic.*", "pytest", "referencing.*", "scipy.*", diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index 56b0ae49..92acd799 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -5,12 +5,11 @@ import torch from outlines.generate.generator import ( - GenerationState, SequenceGenerator, bias_logits, expand_attention_masks, get_next_fsm_states, - get_next_instructions, + get_next_instruction, is_generation_finished, sequence_generator, token_generator, @@ -34,6 +33,9 @@ class MockTokenizer: def encode(self, _): return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + def decode(self, _): + return "x" + class MockModel: def __init__(self): self.tokenizer = MockTokenizer() @@ -52,25 +54,23 @@ def sampler(biased_logits, *_): assert callable(generator.generate_token) result = generator.init_generation_state("test") - assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3]])) - assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1]])) - assert result.kv_cache is None + token_ids, attention_masks, kv_cache = result + assert torch.equal(token_ids, torch.tensor([[0, 1, 2, 3]])) + assert torch.equal(attention_masks, torch.tensor([[1, 1, 1, 1]])) + assert kv_cache is None sequence = generator.stream("test") assert isinstance(sequence, Generator) - state = next(sequence) - assert torch.equal(state.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) - assert torch.equal(state.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + next(sequence) with pytest.raises(StopIteration): - state = next(sequence) + next(sequence) # Call generator = SequenceGenerator(MockFSM(), MockModel(), sampler, "cpu") result = generator("test") - assert torch.equal(state.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) - assert torch.equal(state.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + assert result == "x" def test_sequence_generator_1d_single_iteration(): @@ -88,6 +88,9 @@ class MockTokenizer: def encode(self, _): return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + def decode(self, x): + return x + class MockModel: def __init__(self): self.tokenizer = MockTokenizer() @@ -98,9 +101,7 @@ def __call__(*_): def sampler(biased_logits, *_): return torch.argmax(biased_logits, keepdims=True) - init_state = GenerationState( - torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None - ) + init_state = (torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None) init_fsm_states = [0] generate = token_generator(MockModel(), sampler) sequence = sequence_generator( @@ -109,7 +110,7 @@ def sampler(biased_logits, *_): result = next(sequence) assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) - assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + assert torch.equal(result.logits, torch.tensor([[0, 1, 2, 3]])) with pytest.raises(StopIteration): next(sequence) @@ -133,6 +134,9 @@ class MockTokenizer: def encode(self, _): return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + def decode(self, x): + return x + class MockModel: def __init__(self): self.tokenizer = MockTokenizer() @@ -143,9 +147,7 @@ def __call__(*_): def sampler(biased_logits, *_): return torch.argmax(biased_logits, keepdims=True) - init_state = 
GenerationState( - torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None - ) + init_state = (torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]), None) init_fsm_states = [0] generate = token_generator(MockModel(), sampler) sequence = sequence_generator( @@ -154,11 +156,11 @@ def sampler(biased_logits, *_): result = next(sequence) assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3]])) - assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1]])) + assert torch.equal(result.logits, torch.tensor([[0, 1, 2, 3]])) result = next(sequence) assert torch.equal(result.token_ids, torch.tensor([[0, 1, 2, 3, 3, 3]])) - assert torch.equal(result.attention_masks, torch.tensor([[1, 1, 1, 1, 1, 1]])) + assert torch.equal(result.logits, torch.tensor([[0, 1, 2, 3]])) with pytest.raises(StopIteration): next(sequence) @@ -181,6 +183,9 @@ def encode(self, _): [[1, 1, 1, 1], [1, 1, 1, 1]] ) + def decode(self, x): + return x + class MockModel: def __init__(self): self.tokenizer = MockTokenizer() @@ -191,7 +196,7 @@ def __call__(*_): def sampler(biased_logits, *_): return torch.argmax(biased_logits, keepdims=True, dim=-1) - init_state = GenerationState( + init_state = ( torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]]), None, @@ -207,7 +212,7 @@ def sampler(biased_logits, *_): result.token_ids, torch.tensor([[0, 1, 2, 3, 3], [4, 5, 6, 7, 2]]) ) assert torch.equal( - result.attention_masks, torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]) + result.logits, torch.tensor([[0, 1, 2, 3], [4, 5, 7, 6]], dtype=torch.float) ) with pytest.raises(StopIteration): @@ -234,6 +239,9 @@ def encode(self, _): [[1, 1, 1, 1], [1, 1, 1, 1]] ) + def decode(self, x): + return x + class MockModel: def __init__(self): self.tokenizer = MockTokenizer() @@ -244,7 +252,7 @@ def __call__(*_): def sampler(biased_logits, *_): return torch.argmax(biased_logits, keepdims=True, dim=-1) - init_state = GenerationState( + init_state = ( torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]), torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]]), None, @@ -260,7 +268,7 @@ def sampler(biased_logits, *_): result.token_ids, torch.tensor([[0, 1, 2, 3, 3], [4, 5, 6, 7, 2]]) ) assert torch.equal( - result.attention_masks, torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]) + result.logits, torch.tensor([[0, 1, 2, 3], [4, 5, 7, 6]], dtype=torch.float) ) result = next(sequence) @@ -268,7 +276,7 @@ def sampler(biased_logits, *_): result.token_ids, torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 7, 2, 2]]) ) assert torch.equal( - result.attention_masks, torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]) + result.logits, torch.tensor([[0, 1, 2, 3], [4, 5, 7, 6]], dtype=torch.float) ) with pytest.raises(StopIteration): @@ -288,44 +296,64 @@ def sampler(): @pytest.mark.parametrize( - "logits_biases,expected_result", + "logits_biases,expected_result,expected_biased_logits", [ - ([[]], [[3]]), - ([[3]], [[2]]), - ([[2, 3]], [[1]]), + ([[]], [[3]], [[0, 1, 2, 3]]), + ([[3]], [[2]], [[0, 1, 2, -math.inf]]), + ([[2, 3]], [[1]], [[0, 1, -math.inf, -math.inf]]), ], ) -def test_generator_1d(logits_biases, expected_result): - def model(*_): - return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None +def test_generator_1d(logits_biases, expected_result, expected_biased_logits): + class MockTokenizer: + def decode(self, _): + return "x" + + class MockModel: + tokenizer = MockTokenizer() + + def __call__(self, *_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None def sampler(biased_logits, *_): 
return torch.argmax(biased_logits, keepdims=True) - generator = token_generator(model, sampler) - result, _ = generator(None, None, None, logits_biases, None) + generator = token_generator(MockModel(), sampler) + result, _, biased_logits = generator(None, None, None, logits_biases, None) assert torch.equal(result, torch.tensor(expected_result)) + assert torch.equal(biased_logits, torch.tensor(expected_biased_logits)) @pytest.mark.parametrize( - "logits_biases,expected_result", + "logits_biases,expected_result,expected_biased_logits", [ - ([[]], [[3], [3]]), - ([[3], [3]], [[2], [2]]), - ([[3], []], [[2], [3]]), - ([[2, 3], [3]], [[1], [2]]), + ([[]], [[3], [3]], [[0, 1, 2, 3], [4, 5, 6, 7]]), + ([[3], [3]], [[2], [2]], [[0, 1, 2, -math.inf], [4, 5, 6, -math.inf]]), + ([[3], []], [[2], [3]], [[0, 1, 2, -math.inf], [4, 5, 6, 7]]), + ( + [[2, 3], [3]], + [[1], [2]], + [[0, 1, -math.inf, -math.inf], [4, 5, 6, -math.inf]], + ), ], ) -def test_generator_2d(logits_biases, expected_result): - def model(*_): - return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float), None +def test_generator_2d(logits_biases, expected_result, expected_biased_logits): + class MockTokenizer: + def decode(self, _): + return "x" + + class MockModel: + tokenizer = MockTokenizer() + + def __call__(self, *_): + return torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float), None def sampler(biased_logits, *_): return torch.argmax(biased_logits, dim=1, keepdims=True) - generator = token_generator(model, sampler) - result, _ = generator(None, None, None, logits_biases, None) + generator = token_generator(MockModel(), sampler) + result, _, biased_logits = generator(None, None, None, logits_biases, None) assert torch.equal(result, torch.tensor(expected_result)) + assert torch.equal(biased_logits, torch.tensor(expected_biased_logits)) def test_get_next_fsm_states(): @@ -345,10 +373,10 @@ class MockFSM: def next_instruction(self, _): return [1, 2, 3, 4] - result = get_next_instructions(MockFSM(), [0]) + result = get_next_instruction(MockFSM(), [0]) assert result == [[1, 2, 3, 4]] - result = get_next_instructions(MockFSM(), [0, 1]) + result = get_next_instruction(MockFSM(), [0, 1]) assert result == [[1, 2, 3, 4], [1, 2, 3, 4]] diff --git a/tests/generate/test_integration_transfomers.py b/tests/generate/test_integration_transfomers.py index 8de6008d..f2102434 100644 --- a/tests/generate/test_integration_transfomers.py +++ b/tests/generate/test_integration_transfomers.py @@ -1,4 +1,5 @@ import datetime +import json import re from enum import Enum from typing import List, Union @@ -13,41 +14,69 @@ from outlines.models.transformers import TransformerTokenizer -def test_transformers_integration_continuation(): +def test_transformers_integration_text(): rng = torch.Generator() rng.manual_seed(10000) # Choosen so is generated model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") - sequence = generate.continuation(model)("Write a short sentence ", rng=rng) + sequence = generate.text(model)("Write a short sentence ", rng=rng) assert isinstance(sequence, str) assert model.tokenizer.eos_token not in sequence - sequence = generate.continuation(model, max_tokens=10)( - "Write a short sentence ", rng=rng - ) + sequence = generate.text(model, max_tokens=10)("Write a short sentence ", rng=rng) assert isinstance(sequence, str) prompts = ["Write a short sentence ", "And another one "] - sequence = generate.continuation(model, max_tokens=10)(prompts, rng=rng) + 
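# A sketch of the user-facing API assembled in `outlines/generate/api.py`,
# using the same tiny test model as these tests (downloading it requires
# network access).
import outlines.generate as generate
import outlines.models as models

model = models.transformers(
    "hf-internal-testing/tiny-random-GPTJForCausalLM", device="cpu"
)

# `choice` compiles the options into the regex `(True|False)` and
# constrains decoding with the resulting FSM.
answer = generate.choice(model, ["True", "False"])("2 + 2 == 4? ")
assert answer in ("True", "False")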
sequence = generate.text(model, max_tokens=10)(prompts, rng=rng) assert isinstance(sequence, list) assert len(sequence) == 2 assert isinstance(sequence[0], str) + +def test_transformers_integration_streaming(): + rng = torch.Generator() + rng.manual_seed(10000) # Choosen so is generated + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + sequence = generate.text(model, max_tokens=10).stream("Write a short sentence ", rng=rng) + + token = next(sequence) + assert isinstance(token, list) + assert isinstance(token[0], str) + + remaining = "".join([token[0] for token in sequence]) + assert isinstance(remaining, str) + + sequence = generate.text(model, max_tokens=10).stream(["Prompt1", "Prompt2"], rng=rng) + tokens = next(sequence) + assert isinstance(tokens, list) + assert isinstance(tokens[0], str) + assert isinstance(tokens[1], str) + + +@pytest.mark.xfail(reason="not implemented") +def test_transformers_integration_text_stop(): + rng = torch.Generator() + rng.manual_seed(10000) # Choosen so is generated + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") prompt = "Write a short sentence " - sequence = generate.continuation(model, stop="a")(prompt, rng=rng) + sequence = generate.text(model, stop="a")(prompt, rng=rng) assert sequence[len(prompt) :].find("a") == -1 -@pytest.mark.xfail -def test_transformers_integration_continuation_array_samples(): +@pytest.mark.xfail(reason="not implemented") +def test_transformers_integration_text_array_samples(): rng = torch.Generator() rng.manual_seed(0) model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") prompts = ["Write a short sentence", "And another one"] - _ = generate.continuation(model, max_tokens=10)(prompts, rng=rng, samples=3) + _ = generate.text(model, max_tokens=10)(prompts, rng=rng, samples=3) def test_transformers_various_regexes(): @@ -64,6 +93,17 @@ def test_transformers_various_regexes(): sequence = generator(prompt, rng=rng) assert re.fullmatch(regex_str, sequence) is not None + +def test_transformers_various_regexes_prompt_list(): + rng = torch.Generator() + rng.manual_seed(0) + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompt = "Write an email address" + regex_str = r"([a-z]{10})@([a-z]{5})\.([a-z]{3})" + generator = generate.regex(model, regex_str) + # Two prompts sequence = generator([prompt, prompt], rng=rng) assert re.fullmatch(regex_str, sequence[0]) is not None @@ -79,7 +119,7 @@ def test_transformers_integration_integer(): prompt = "Write a short sentence" sequence = generate.format(model, int, max_tokens=10)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != "" int(sequence) @@ -106,7 +146,7 @@ def test_transformers_integration_float(): prompt = "Write a short sentence" sequence = generate.format(model, float, max_tokens=10)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != "" float(sequence) @@ -119,7 +159,7 @@ def test_transformers_integration_bool(): prompt = "Is this True or False?" sequence = generate.format(model, bool, max_tokens=10)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != "" bool(sequence) @@ -132,7 +172,7 @@ def test_transformers_integration_date(): prompt = "What day is it today?" 
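# `generate.format` maps a Python type to a regex-constrained generator;
# a sketch with `int`, under the same tiny-test-model assumption as above.
import outlines.generate as generate
import outlines.models as models

model = models.transformers(
    "hf-internal-testing/tiny-random-GPTJForCausalLM", device="cpu"
)
answer = generate.format(model, int, max_tokens=10)("How many days in a week? ")
int(answer)  # the constrained output is expected to parse as an integer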
sequence = generate.format(model, datetime.date, max_tokens=10)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != "" datetime.datetime.strptime(sequence, "%Y-%m-%d") @@ -145,7 +185,7 @@ def test_transformers_integration_time(): prompt = "What time is it?" sequence = generate.format(model, datetime.time, max_tokens=10)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != "" datetime.datetime.strptime(sequence, "%H:%M:%S") @@ -158,7 +198,7 @@ def test_transformers_integration_datetime(): prompt = "What time is it?" sequence = generate.format(model, datetime.datetime, max_tokens=20)(prompt, rng=rng) - assert sequence[0] != 0 + assert sequence != 0 datetime.datetime.strptime(sequence, "%Y-%m-%d %H:%M:%S") @@ -195,13 +235,33 @@ class Spam(BaseModel): rng = torch.Generator() rng.manual_seed(0) # make sure that `bar` is not an int - result = generate.json(model, Spam, max_tokens=1000)(prompt, rng=rng) + result = generate.json(model, Spam, max_tokens=500)(prompt, rng=rng) + result = Spam.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.foo, int) assert isinstance(result.bar, float) assert isinstance(result.spam, str) assert isinstance(result.fuzz, bool) - assert len(result.spam) == 10 + assert len(result.spam) <= 10 + + +def test_transformers_json_batch(): + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + prompts = ["Output some JSON ", "Output more JSON"] + + class Spam(BaseModel): + foo: int + bar: float + spam: constr(max_length=10) + fuzz: bool + + rng = torch.Generator() + rng.manual_seed(0) # make sure that `bar` is not an int + + result = generate.json(model, Spam, max_tokens=500)(prompts, rng=rng) + assert isinstance(result[0], BaseModel) + assert isinstance(result[1], BaseModel) def test_transformers_json_str_enum(): @@ -222,6 +282,7 @@ class User(BaseModel): name: Name result = generate.json(model, User)(prompt, rng=rng) + result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert result.name in ["John", "Marc", "Michel"] @@ -243,6 +304,7 @@ class User(BaseModel): user_id: Id result = generate.json(model, User)(prompt, rng=rng) + result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert result.user_id in [1, 2] @@ -261,6 +323,7 @@ class User(BaseModel): rng.manual_seed(0) result = generate.json(model, User)(prompt, rng=rng) + result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert isinstance(result.value, list) @@ -268,6 +331,7 @@ class User(BaseModel): assert isinstance(value, float) or isinstance(value, int) +@pytest.mark.xfail(reason="The implementation of `anyOf` is incorrect") def test_transformers_json_union(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") @@ -280,12 +344,13 @@ class Spam(BaseModel): rng = torch.Generator() rng.manual_seed(4) - sequence = generate.json(model, Spam, max_tokens=100)(prompt, rng=rng) - assert isinstance(sequence, BaseModel) + result = generate.json(model, Spam, max_tokens=100)(prompt, rng=rng) + result = Spam.parse_raw(result) + assert isinstance(result, BaseModel) assert ( - isinstance(sequence.bar, int) - or isinstance(sequence.bar, float) - or isinstance(sequence.bar, str) + isinstance(result.bar, int) + or isinstance(result.bar, float) + or isinstance(result.bar, str) ) @@ -301,6 
+366,7 @@ def function(foo: int, bar: List[int]): rng.manual_seed(4) sequence = generate.json(model, function, max_tokens=100)(prompt, rng=rng) + sequence = json.loads(sequence) assert isinstance(sequence, dict) assert isinstance(function(**sequence), int) @@ -319,10 +385,10 @@ def test_transformers_logits_vocab_size(): generator = generate.choice(model, ["True", "False"]) rng = torch.Generator() - rng.manual_seed(4) + rng.manual_seed(101) - masked_logits = generator("blah", rng=rng) - assert masked_logits == "True" + sequence = generator("blah", rng=rng) + assert sequence == "False" def test_transformers_reduced_vocabulary_caching(): diff --git a/tests/index/test_fsm.py b/tests/index/test_fsm.py index db4aadac..b1440e2f 100644 --- a/tests/index/test_fsm.py +++ b/tests/index/test_fsm.py @@ -320,8 +320,7 @@ def test_json_index_performance(): from line_profiler import LineProfiler # type: ignore [import] from pydantic import BaseModel, constr - import outlines.models as models - from outlines.generate.regex import Regex, build_regex_from_object + import outlines class Weapon(str, Enum): sword = "sword" @@ -345,16 +344,16 @@ class Character(BaseModel): # TODO: Add support for conint strength: int # conint(int, ge=0, le=100) - model = models.transformers("gpt2", device="cuda") + model = outlines.models.transformers("gpt2", device="cuda") json_schema = json.dumps(Character.model_json_schema()) def build_regex(): - regex_str = build_regex_from_object(json_schema) - Regex(model, regex_str, 100) + regex_str = outlines.index.json_schema.build_regex_from_object(json_schema) + outlines.generate.regex(model, regex_str) profiler = LineProfiler(create_fsm_index_end_to_end) profiler.add_function(create_fsm_index_tokenizer) - profiler.add_function(Regex.__init__) + profiler.add_function(outlines.index.index.RegexFSM.__init__) profiler.runctx( "build_regex()", diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index f0b9d681..2687d6a8 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -75,16 +75,24 @@ def test_model(): assert model.device.type == "cpu" input_ids = torch.tensor([[0, 1, 2]]) - logits = model(input_ids, torch.ones_like(input_ids)) + logits, kv_cache = model(input_ids, torch.ones_like(input_ids)) assert logits.type() == "torch.FloatTensor" assert logits.ndim == 2 assert logits.shape[0] == 1 + assert len(kv_cache) == model.model.config.n_layer + assert len(kv_cache[0]) == 2 + assert kv_cache[0][0].shape[1] == model.model.config.n_head + assert kv_cache[0][0].shape[2] == 3 # number of tokens input_ids = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) - logits = model(input_ids, torch.ones_like(input_ids)) + logits, kv_cache = model(input_ids, torch.ones_like(input_ids)) assert logits.type() == "torch.FloatTensor" assert logits.ndim == 2 assert logits.shape[0] == 3 + assert len(kv_cache) == model.model.config.n_layer + assert len(kv_cache[0]) == 2 + assert kv_cache[0][0].shape[1] == model.model.config.n_head + assert kv_cache[0][0].shape[2] == 3 # number of tokens with pytest.raises(AssertionError): input_ids = torch.tensor([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [0, 1, 2]]]) From a068bd4a6e6517d47d50aab5ab1e7e3e3e629c63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 27 Nov 2023 12:04:44 +0100 Subject: [PATCH 327/734] Remove old text generation logic --- outlines/generate/__init__.py | 3 +- outlines/generate/continuation.py | 123 --------- outlines/generate/regex.py | 411 ---------------------------- 
 outlines/generate/sequence.py       | 245 -----------------
 outlines/generate/text.py           |  10 -
 tests/generate/test_continuation.py |  94 -------
 tests/generate/test_regex.py        | 211 --------------
 tests/generate/test_sequence.py     | 404 ---------------------------
 8 files changed, 1 insertion(+), 1500 deletions(-)
 delete mode 100644 outlines/generate/continuation.py
 delete mode 100644 outlines/generate/regex.py
 delete mode 100644 outlines/generate/sequence.py
 delete mode 100644 outlines/generate/text.py
 delete mode 100644 tests/generate/test_continuation.py
 delete mode 100644 tests/generate/test_regex.py
 delete mode 100644 tests/generate/test_sequence.py

diff --git a/outlines/generate/__init__.py b/outlines/generate/__init__.py
index 9895d5c1..d3b8766a 100644
--- a/outlines/generate/__init__.py
+++ b/outlines/generate/__init__.py
@@ -1,2 +1 @@
-from .continuation import continuation
-from .regex import choice, format, json, regex
+from .api import choice, format, json, regex, text

diff --git a/outlines/generate/continuation.py b/outlines/generate/continuation.py
deleted file mode 100644
index a3576794..00000000
--- a/outlines/generate/continuation.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from typing import TYPE_CHECKING, List, Optional, Union
-
-import torch
-
-from outlines.generate.sequence import Sequence
-
-if TYPE_CHECKING:
-    from outlines.generate.samplers import Sampler
-
-
-class Continuation(Sequence):
-    """Represents a completion generation model.
-
-    `Continuation` instances are unconstrained generation models that stop when
-    an EOS token has been found or when the maximum number of tokens has been
-    reached.
-
-    >>> import outlines.text as text
-    >>> sequence = text.generate.continuation(model)("Say something")
-
-    """
-
-    def __init__(
-        self,
-        model,
-        max_tokens: Optional[int] = None,
-        sampler: Optional["Sampler"] = None,
-        stop: Union[str, List[str]] = [],
-    ):
-        super().__init__(model, max_tokens, sampler)
-        self.eos_token_id = torch.tensor(
-            [self.model.tokenizer.eos_token_id], device=self.device
-        )
-
-        if isinstance(stop, str):
-            stop = [stop]
-
-        self.stop_sequences = stop
-
-    def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor:
-        """Determine whether the sequences reached maximum length or end with
-        an EOS token.
-
-        We only need to look for the EOS token in the last element rather than
-        in the whole sequence. Indeed, (1) EOS is a single token (2)
-        `Sequence`'s `__call__` method only passes the `token_ids` of the
-        sequences that haven't been marked as finished already.
-
-        Parameters
-        ----------
-        token_ids
-            The input sequences.
-
-        """
-
-        contains_eos = token_ids[:, -1] == self.model.tokenizer.eos_token_id
-
-        if self.stop_sequences:
-            sequences = self.model.tokenizer.decode(token_ids)
-            contains_stop_sequence = []
-            for sequence in sequences:
-                contains_stop_sequence.append(
-                    any(stop_str in sequence for stop_str in self.stop_sequences)
-                )
-            contains_stop_sequence = torch.tensor(
-                contains_stop_sequence, dtype=torch.bool, device=self.model.device
-            )
-
-            return torch.logical_or(contains_eos, contains_stop_sequence)
-        else:
-            return contains_eos
-
-    def postprocess_completions(self, completions: List[str]) -> List[str]:
-        """Remove the EOS token from the completion.
-
-        Sequences in `stop` take precedence over EOS. For instance, if
-        `stop=["\n"]` and the generated sequence is `One\nTwo`,
-        `Continuation.postprocess_completions` will return `One`.
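A concrete illustration of the stop-string trimming described above, using hypothetical completions and assuming `stop=["\n"]`:

    completions = ["One\nTwo", "Three"]
    stop_sequences = ["\n"]

    trimmed = []
    for completion in completions:
        for stop_str in stop_sequences:
            idx = completion.rfind(stop_str)
            if idx > 0:  # truncate the completion at the stop string
                completion = completion[:idx]
        trimmed.append(completion)

    print(trimmed)  # ['One', 'Three']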
- - """ - completions_without_eos = [ - completion.replace(self.model.tokenizer.eos_token, "") - for completion in completions - ] - - completions_without_stop = [] - for completion in completions_without_eos: - for stop_str in self.stop_sequences: - idx = completion.rfind(stop_str) # ignore the prompt - if idx > 0: - completion = completion[:idx] - - completions_without_stop.append(completion) - - return completions_without_stop - - -def continuation( - model, - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - stop: Union[str, List[str]] = [], -): - """Generate text sequences. - - Parameters - ---------- - model - The language model to use to compute the next-token logits. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - stop - A string or list of strings which, when generated, stops - the generation for this sequence. - - """ - return Continuation(model, max_tokens, sampler, stop) diff --git a/outlines/generate/regex.py b/outlines/generate/regex.py deleted file mode 100644 index bf995aa6..00000000 --- a/outlines/generate/regex.py +++ /dev/null @@ -1,411 +0,0 @@ -import json as pyjson -import math -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Tuple, Union - -import interegular -import torch -from pydantic import BaseModel - -from outlines.generate.continuation import Continuation -from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm -from outlines.index.json_schema import ( - build_regex_from_object, - get_schema_from_signature, -) -from outlines.index.types import python_types_to_regex - -if TYPE_CHECKING: - from outlines.generate.samplers import Sampler - - -class Regex(Continuation): - """Represents a regex-based generation model. - - `Regex` instances are constrained generation models that only generate - sequences matching a given regex. - - >>> import outlines.text as text - >>> generator = text.generate.regex(model, "(0|[1-9][0-9]+)") - - Sequences can then be generated from a prompt as follows: - - >>> sequence_1 = generator("Return an integer between 0 and 10") - >>> sequence_2 = generator("Rate the movie "Hackers" on a scale from 0 to 10") - - .. note: - Reuse instances of these guided generators (e.g. `generator` from the - above example) whenever possible, because constructing them has more - overhead than generating token sequences from them. - - """ - - def __init__( - self, - model, - regex_string: str, - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - stop: Union[str, List[str]] = [], - allow_empty_tokens: bool = True, - initial_state: Optional[int] = None, - final_states: Optional[Set[int]] = None, - states_to_token_maps: Optional[Dict[int, Dict[int, int]]] = None, - empty_token_ids: Optional[Set[int]] = None, - format_fn: Callable[[str], Union[BaseModel, dict, str]] = lambda x: x, - ): - """ - - Parameters - ---------- - model - The instance of the model used to generate next-token probabilities. - regex_string - The regex with which the token sampling process is guided/constrained. - max_tokens - The maximum number of tokens to be sampled. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. 
-        stop
-            Optional stopping string(s).
-        allow_empty_tokens
-            Allow sampling of tokens corresponding to empty strings.
-        states_to_token_maps
-            Pre-computed map of FSM start states to maps between token ids and their
-            corresponding FSM end states.
-        empty_token_ids
-            Pre-computed set of token ids for tokens that are empty strings.
-        format_fn
-            The function to apply to the generated JSON.
-
-        """
-        super().__init__(model, max_tokens, sampler, stop)
-
-        if (
-            states_to_token_maps is None
-            or empty_token_ids is None
-            or initial_state is None
-            or final_states is None
-        ):
-            regex_pattern = interegular.parse_pattern(regex_string)
-            regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce())
-
-            (
-                self.states_to_token_maps,
-                self.empty_token_ids,
-            ) = create_fsm_index_tokenizer(regex_fsm, model.tokenizer)
-            self.initial_state = regex_fsm.initial
-            self.final_states = regex_fsm.finals
-        else:
-            self.initial_state = initial_state
-            self.final_states = final_states
-            self.states_to_token_maps = states_to_token_maps
-            self.empty_token_ids = empty_token_ids
-
-        # Check whether a terminal path (from the initial state of the FSM to
-        # one of its terminal states) exists, raise an exception otherwise.
-        if not any(
-            self.final_states.intersection(v.values())
-            for v in self.states_to_token_maps.values()
-        ):
-            raise ValueError(
-                "The vocabulary does not allow us to build a sequence that matches the input regex"
-            )
-
-        # When an EOS is observed, the last FSM state becomes `-1`.
-        self.last_fsm_states: List[int] = []
-        self.mask_cache: Dict[Tuple[int, int], torch.LongTensor] = {}
-        self.regex_string = regex_string
-        self.allow_empty_tokens = allow_empty_tokens
-        self.format_fn = format_fn
-
-    def create_proposal(
-        self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor
-    ) -> torch.DoubleTensor:
-        """Modify the next-token logits so that only valid tokens can be generated.
-
-        Parameters
-        ----------
-        generated_token_ids
-            The token ids generated so far.
-        logits
-            The next-token logits.
-
-        """
-
-        assert generated_token_ids.ndim == 2
-
-        if len(self.last_fsm_states) == 0:
-            self.last_fsm_states = [self.initial_state for _ in range(logits.shape[0])]
-
-        masks = []
-
-        for i, (token_seq, last_state) in enumerate(
-            zip(
-                generated_token_ids,
-                self.last_fsm_states,
-            )
-        ):
-            if token_seq.shape[0] > 0:
-                # Get the last token that was sampled
-                last_token = int(token_seq[-1])
-
-                if last_token in self.empty_token_ids:
-                    # An empty token was sampled, so the FSM state hasn't changed
-                    next_state = last_state
-                    next_token_ids = list(self.states_to_token_maps[last_state].keys())
-
-                elif last_token != self.model.tokenizer.eos_token_id:
-                    # If we previously ended with an EOS, we shouldn't be
-                    # getting/sampling any more non-EOS tokens.
-                    assert last_state > -1
-
-                    last_token_to_end_state = self.states_to_token_maps[last_state]
-
-                    next_state = last_token_to_end_state[last_token]
-
-                    next_tokens_to_end_states = self.states_to_token_maps.get(
-                        next_state
-                    )
-
-                    if next_tokens_to_end_states is None:
-                        # If there are no transitions from the current state,
-                        # then we must've been in a final state of the FSM.
-                        # We produce EOS tokens from here on.
-                        assert next_state in self.final_states
-                        next_state = -1
-                        next_token_ids = [self.model.tokenizer.eos_token_id]
-                    else:
-                        next_token_ids = list(next_tokens_to_end_states.keys())
-                else:
-                    # Since we already have an EOS, only sample EOS tokens from
-                    # here on.
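The masking logic above is the heart of FSM-guided decoding: each FSM state maps to the token ids that keep the partial sequence inside the regex language, and every other logit is pushed to minus infinity. A self-contained toy sketch of the idea, with a made-up state map and vocabulary rather than a real index:

    import math

    import torch

    # Toy FSM index: state -> {allowed token id -> next state}. A real map
    # is built from the regex and the tokenizer's vocabulary.
    states_to_token_maps = {0: {1: 1, 2: 0}, 1: {2: 0}}
    eos_token_id = 3
    vocab_size = 4

    def mask_logits(logits: torch.Tensor, state: int) -> torch.Tensor:
        transitions = states_to_token_maps.get(state)
        if transitions is None:
            # No outgoing transitions: we were in a final state, force EOS.
            allowed = [eos_token_id]
        else:
            allowed = list(transitions.keys())
        mask = torch.full((vocab_size,), -math.inf)
        mask[allowed] = 0.0
        return logits + mask

    print(mask_logits(torch.zeros(vocab_size), 0))
    # tensor([-inf, 0., 0., -inf]) -> only tokens 1 and 2 can be sampled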
- next_state = -1 - next_token_ids = [self.model.tokenizer.eos_token_id] - else: - # There weren't any previous tokens, so we can't update the state - next_state = last_state - next_token_ids = list(self.states_to_token_maps[last_state].keys()) - - mask = self._get_mask_for_state( - next_state, logits.shape[-1], next_token_ids - ) - masks.append(mask) - self.last_fsm_states[i] = next_state - - mask = torch.concatenate(masks, dim=0) - - return logits + mask - - def _get_mask_for_state( - self, state: int, size: int, next_token_ids: List[int] - ) -> torch.LongTensor: - mask = self.mask_cache.get((state, size)) - - if mask is None: - mask = torch.full( - (size,), - -math.inf, - device=self.device, - ) - - if self.allow_empty_tokens: - token_ids = list(self.empty_token_ids) + next_token_ids - else: - token_ids = next_token_ids - - mask[token_ids] = 0 - mask = mask.unsqueeze(0) - self.mask_cache[(state, size)] = mask - - return mask - - def postprocess_completions(self, completions: List[str]): - self.last_fsm_states.clear() - results: List[str] = super().postprocess_completions(completions) - return [self.format_fn(result) for result in results] - - -def regex( - model, - regex_string: str, - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - allow_empty_tokens: bool = True, -): - """Generate text sequences that match the input regex. - - .. note: - Reuse instances of these guided generators whenever possible, - because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. - - Parameters - ---------- - model - The language model to use to compute the next-token logits. - regex_string - The regular expression that generated expressions must match. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - allow_empty_tokens - Allow sampling of tokens corresponding to empty strings. - - """ - return Regex( - model, - regex_string, - max_tokens, - sampler=sampler, - allow_empty_tokens=allow_empty_tokens, - ) - - -def format( - model, - python_type, - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - allow_empty_tokens: bool = True, -): - """Generate integers. - - The regex used to constrain the generation optionally matches plus or minus - signs and forbids leading zeros (even if the `int` function in Python allows - them). - - .. note: - Reuse instances of these guided generators whenever possible, - because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. - - Parameters - ---------- - model - The language model to use to compute the next-token logits. - python_type - The format in which the output is expected, defined as a Python type. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - allow_empty_tokens - Allow sampling of tokens corresponding to empty strings. 
- - """ - regex_str = python_types_to_regex(python_type) - return Regex( - model, - regex_str, - max_tokens, - sampler=sampler, - allow_empty_tokens=allow_empty_tokens, - ) - - -def choice( - model, - choices: List[str], - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - allow_empty_tokens: bool = True, -): - """Choose between different sequences. - - .. note: - Reuse instances of these guided generators whenever possible, - because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. - - Parameters - ---------- - model - The language model to use to compute the next-token logits. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - allow_empty_tokens - Allow sampling of tokens corresponding to empty strings. - """ - regex_str = r"(" + r"|".join(choices) + r")" - return Regex( - model, - regex_str, - max_tokens, - sampler=sampler, - allow_empty_tokens=allow_empty_tokens, - ) - - -def json( - model, - schema_object: Union[str, BaseModel, Callable], - max_tokens: Optional[int] = None, - *, - sampler: Optional["Sampler"] = None, - allow_empty_tokens: bool = True, -) -> Union[dict, BaseModel]: - """Generate a text sequence that follows a JSON schema or Pydantic model. - - .. note: - Reuse instances of these guided generators whenever possible, - because constructing them has more overhead than generating - token sequences from them. See the docstring for `Regex`. - - Parameters - --------- - model - The language model to use to compute the next-token logits. - schema - The JSON schema, Pydantic model or function (signature) that guides the - generation. - max_tokens - The maximum number of tokens to generate. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - allow_empty_tokens - Allow sampling of tokens corresponding to empty strings. - - """ - if isinstance(schema_object, type(BaseModel)): - schema = pyjson.dumps(schema_object.model_json_schema()) - format_fn = lambda x: schema_object.model_validate(pyjson.loads(x)) - elif callable(schema_object): - schema = pyjson.dumps(get_schema_from_signature(schema_object)) - # TODO: Convert string fields to their respective types - format_fn = lambda x: pyjson.loads(x) - else: - schema = schema_object - format_fn = lambda x: pyjson.loads(x) - - regex_str = build_regex_from_object(schema) - - return Regex( - model, - regex_str, - max_tokens, - sampler=sampler, - allow_empty_tokens=allow_empty_tokens, - format_fn=format_fn, - ) diff --git a/outlines/generate/sequence.py b/outlines/generate/sequence.py deleted file mode 100644 index b444d0b4..00000000 --- a/outlines/generate/sequence.py +++ /dev/null @@ -1,245 +0,0 @@ -import math -from typing import TYPE_CHECKING, List, Optional, Tuple, Union - -import torch - -from outlines.models import OpenAI - -if TYPE_CHECKING: - from outlines.generate.samplers import Sampler - from outlines.models.transformers import KVCacheType, Transformer - - -class Sequence: - """Represents a sequence generation method.""" - - def __init__( - self, - model: "Transformer", - max_tokens: Optional[int] = None, - sampler: Optional["Sampler"] = None, - ): - """Create a `Sequence` instance. 
- - Parameters - ---------- - model - The instance of the model used to generate next-token probabilities. - max_tokens - The maximum number of tokens that will be generated if no termination - condition is met. - sampler - The function used to draw samples. Defaults to - `outlines.text.generate.sample.multinomial`. See - `outlines.text.generate.sample.Sampler` for the expected form of - such functions. - - """ - if isinstance(model, OpenAI): - raise TypeError("Cannot use guided generation with the OpenAI API.") - - self.model = model - self.device = model.device - self.max_tokens = max_tokens - - self.pad_token_id = torch.tensor( - model.tokenizer.pad_token_id, device=model.device - ) - if sampler is None: - from outlines.generate.samplers import multinomial - - self.sampler = multinomial - else: - self.sampler = sampler - - def create_proposal( - self, generated_token_ids: torch.LongTensor, logits: torch.DoubleTensor - ) -> torch.DoubleTensor: - """Create a new proposal from the next-token logits.""" - return logits - - def is_finished(self, token_ids: torch.LongTensor) -> torch.BoolTensor: - """Determine whether we should stop the generation.""" - raise NotImplementedError( - "`Sequence.is_finished` must be implemented by subclasses." - ) - - def postprocess_completions(self, completions: List[str]) -> List[str]: - return completions - - def step( - self, - rng: torch.Generator, - num_prompt_tokens: int, - token_ids: torch.LongTensor, - attention_mask: torch.LongTensor, - samples: int = 1, - past_key_values: Optional["KVCacheType"] = None, - ) -> Tuple[torch.LongTensor, torch.FloatTensor, Optional["KVCacheType"]]: - """Generate one or several tokens that complete the input sequence. - - The sampling step consists in using a model to generate next-token - logits and then sample `samples`-many new tokens from a categorical - distribution parametrized by these logits. - - Parameters - ---------- - rng - Random number Generator instance. - num_prompt_tokens - The number of tokens in the prompt. - token_ids - The token sequences. It has dimensions ``(n_seqs, n)`` for - some sequence length ``n <= num_prompt_tokens``. - samples - The number of continuations to sample from the next-token probability - distribution. - - Returns - ------- - A tuple with an array of shape ``(samples, n_seqs, 1)`` - that contains the completed sequences (i.e. input token IDs and - generated token IDs) and an array of shape - ``(samples, n_seqs, vocab_size)`` that contains the next token - probabilities. - - """ - probs, past_key_values = self.model.forward( - token_ids, attention_mask, past_key_values - ) - probs = self.create_proposal(token_ids[:, num_prompt_tokens:], probs) - - assert probs.shape[:-1] == token_ids.shape[:-1] - - next_token_ids = self.sampler(probs, samples, rng).unsqueeze(-1) - - probs = torch.broadcast_to(probs, (samples,) + probs.shape) - - return next_token_ids, probs, past_key_values - - def expand_attention_mask( - self, attention_mask: torch.LongTensor - ) -> torch.LongTensor: - """Expand the attention mask after the last completion. - - Parameters - ---------- - attention_mask - An attention mask with shape ``(n_seqs, attention_mask_len)``. - - Returns - ------- - A new attention mask with shape ``(n_seqs, attention_mask_len + 1)``. 
- - """ - attention_mask = torch.concatenate( - [ - attention_mask, - torch.ones(attention_mask.shape[:-1] + (1,), device=self.device), - ], - axis=-1, - ) - return attention_mask - - @torch.inference_mode() - def __call__( - self, - prompt: Union[str, List[str]], - samples: int = 1, - rng: Optional[torch.Generator] = None, - ) -> Union[str, List[str]]: - """Generate a new sequence given a prompt. - - Parameters - ---------- - prompt - The input prompt. - samples - The number of samples to generate for each prompt. - - Returns - ------- - The full sequence that contains the prompts and the generated string. - - """ - - token_ids, attention_mask = self.model.tokenizer.encode(prompt) - - token_ids = token_ids.squeeze(0) - attention_mask = attention_mask.squeeze(0) - - token_ids = token_ids.to(self.device) - attention_mask = attention_mask.to(self.device) - - if rng is None: - rng = torch.Generator(device=self.device) - rng.seed() - - orig_batch_shape = token_ids.shape[:-1] - num_prompt_tokens = token_ids.shape[-1] - - token_ids = torch.broadcast_to(token_ids, (samples,) + token_ids.shape) - attention_mask = torch.broadcast_to( - attention_mask, (samples,) + attention_mask.shape - ) - - # We flatten the original batch and sample dimensions so that the - # resulting shape we work in is simply `(num_of_sequences, tokens)` - batch_size = samples * math.prod(orig_batch_shape) - token_ids = token_ids.reshape((batch_size, num_prompt_tokens)) - attention_mask = attention_mask.reshape((batch_size, num_prompt_tokens)) - - is_finished = torch.zeros(batch_size, dtype=torch.bool, device=self.device) - unfinished_past_key_values = None - - while True: - num_generated_tokens = token_ids.shape[-1] - num_prompt_tokens - if torch.all(is_finished) or num_generated_tokens == self.max_tokens: - break - - is_not_finished = ~is_finished - - # Draw samples only for the sequences that aren't finished - unfinished_token_ids = token_ids[is_not_finished] - unfinished_attention_mask = attention_mask[is_not_finished] - unfinished_next_token_ids, _, past_key_values = self.step( - rng, - num_prompt_tokens, - unfinished_token_ids, - unfinished_attention_mask, - past_key_values=unfinished_past_key_values, - ) - unfinished_next_token_ids = unfinished_next_token_ids.squeeze(0) - - # Create an array for the next tokens of every sequence, including - # the finished ones (but pad them) - next_token_ids = torch.full( - (batch_size, 1), self.pad_token_id, device=self.device - ) - next_token_ids[is_not_finished] = unfinished_next_token_ids - - # TODO: Terminate if the sampled sequence is larger than the - # context size of the model? 
- token_ids = torch.concatenate([token_ids, next_token_ids], axis=-1) - - attention_mask = self.expand_attention_mask(attention_mask) - - local_is_finished = self.is_finished( - token_ids[is_not_finished][:, num_prompt_tokens:] - ).flatten() - - is_finished[is_not_finished] = local_is_finished - - if past_key_values: - unfinished_past_key_values = tuple( - tuple(vv[~local_is_finished.to(vv.device)] for vv in v) - for v in past_key_values - ) - - result = self.model.tokenizer.decode(token_ids[:, num_prompt_tokens:]) - result = self.postprocess_completions(result) - - if len(result) == 1: - return result[0] - - return result diff --git a/outlines/generate/text.py b/outlines/generate/text.py deleted file mode 100644 index 38bc8d53..00000000 --- a/outlines/generate/text.py +++ /dev/null @@ -1,10 +0,0 @@ -class text: - def __init__(self): - pass - - def __call__(self, prompt): - pass - - def __iter__(self): - # This is something - pass diff --git a/tests/generate/test_continuation.py b/tests/generate/test_continuation.py deleted file mode 100644 index d6194d7e..00000000 --- a/tests/generate/test_continuation.py +++ /dev/null @@ -1,94 +0,0 @@ -import torch - -from outlines.generate.continuation import Continuation, continuation - - -class Tokenizer: - eos_token = "" - eos_token_id = 0 - pad_token_id = -1 - - def decode(self, token_ids): - return ["Test"] * token_ids.shape[0] - - -class Model: - tokenizer = Tokenizer() - device = "cpu" - - -def test_continuation_eos_is_finished(): - model = continuation(Model()) - assert isinstance(model, Continuation) - - token_ids = torch.tensor([[3, 2]]) - result = model.is_finished(token_ids) - assert torch.equal(result, torch.tensor([False])) - - token_ids = torch.tensor([[3, 2, 0]]) - result = model.is_finished(token_ids) - assert torch.equal(result, torch.tensor([True])) - - token_ids = torch.tensor([[3, 2, 1], [3, 2, 0]]) - result = model.is_finished(token_ids) - assert torch.equal(result, torch.tensor([False, True])) - - token_ids = torch.tensor([[3, 2, 1, 0], [3, 2, 0, -1]]) - result = model.is_finished(token_ids) - assert torch.equal(result, torch.tensor([True, False])) - - -def test_continuation_postprocess(): - model = continuation(Model()) - result = model.postprocess_completions(["Here"]) - assert len(result) == 1 - assert result[0] == "Here" - - -def test_continuation_stop_is_finished(): - tokenizer = Tokenizer() - tokenizer.decode = lambda x: ["finished \n", "not_finished"] - model = Model() - model.tokenizer = tokenizer - - model = continuation(model, stop=["\n"]) - - token_ids = torch.tensor([[2, 3], [2, 3]]) - result = model.is_finished(token_ids) - assert torch.equal(result, torch.tensor([True, False])) - - -def test_continuation_stop_postprocess(): - model = Continuation(Model(), stop="\n") - result = model.postprocess_completions(["Stop\n"]) - assert len(result) == 1 - assert result[0] == "Stop" - - model = Continuation(Model(), stop=["\n", ","]) - result = model.postprocess_completions(["Stop"]) - assert len(result) == 1 - assert result[0] == "Stop" - - result = model.postprocess_completions(["Stop\n"]) - assert len(result) == 1 - assert result[0] == "Stop" - - result = model.postprocess_completions(["Stop\naaa"]) - assert len(result) == 1 - assert result[0] == "Stop" - - result = model.postprocess_completions(["Stop,aa\naaa"]) - assert len(result) == 1 - assert result[0] == "Stop" - - result = model.postprocess_completions(["Stop\naa,a"]) - assert len(result) == 1 - assert result[0] == "Stop" - - result = 
model.postprocess_completions(["Stop\n", "Nonstop"]) - assert len(result) == 2 - assert result == ["Stop", "Nonstop"] - - result = model.postprocess_completions(["StopHere\nNoHere"]) - assert len(result) == 1 - assert result[0] == "StopHere" diff --git a/tests/generate/test_regex.py b/tests/generate/test_regex.py deleted file mode 100644 index ede00d68..00000000 --- a/tests/generate/test_regex.py +++ /dev/null @@ -1,211 +0,0 @@ -import math - -import interegular -import pytest -import torch - -import outlines.generate as generate -from outlines.generate.regex import Regex -from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm - - -class Tokenizer: - eos_token = "" - pad_token = None - eos_token_id = 0 - pad_token_id = -1 - vocabulary = {"": 0, "-": 1, "1": 2, "0.": 3, "431": 4, "a": 5, "A": 6} - tokens = list(vocabulary.keys()) - special_tokens = {""} - - def encode(self, tokens): - if not isinstance(tokens, (tuple, list)): - tokens = [tokens] - - return [self.vocabulary[token] for token in tokens] - - def decode(self, token_ids): - decoded = [] - for i in range(token_ids.shape[0]): - decoded.append("".join([self.tokens[idx] for idx in token_ids[i]])) - - return decoded - - def convert_token_to_string(self, token): - return token - - -class TokenizerWithEmpty(Tokenizer): - vocabulary = {"": 0, "-": 1, "1": 2, "0.": 3, "431": 4, "a": 5, "A": 6, "": 7} - tokens = list(vocabulary.keys()) - - -class Model: - tokenizer = Tokenizer() - device = "cpu" - - -class ModelWithEmpty: - tokenizer = TokenizerWithEmpty() - device = "cpu" - - -@pytest.mark.parametrize( - "regex_string, valid_first_token, proposal", - [ - ( - r"[A-Z]+", - 6, - [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0], - ), - ( - r"[a-z]+", - 5, - [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, -math.inf], - ), - ( - r"(a|A)", - 6, - [-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, 1.0], - ), - (r"\d+", 2, [-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]), - (r"\d+\.", 3, [-math.inf, -math.inf, 1.0, 1.0, 1.0, -math.inf, -math.inf]), - ], -) -def test_regex_proposal(regex_string, valid_first_token, proposal): - model = Model() - generator = generate.regex(model, regex_string) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[]]), logits) - assert torch.equal(result.squeeze(), torch.tensor(proposal)) - assert result.squeeze()[0] == -math.inf - - # The EOS token can be generated once the FSM is in an accept state - result = generator.create_proposal(torch.tensor([[valid_first_token]]), logits) - assert result.squeeze()[0] == 1 - - -def test_regex_no_valid_transition(): - model = Model() - with pytest.raises(ValueError, match="The vocabulary does not allow"): - generate.regex(model, "aw") - - -@pytest.mark.parametrize( - "input_ids, proposal", - [ - ([[]], [[-math.inf, 1.0, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), - ([[1]], [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), - ([[4]], [[1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]]), - ( - [[4], [2]], - [ - [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], - [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], - ], - ), - ( - [[4, 0], [1, 2]], - [ - [1.0, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf], - [1.0, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf], - ], - ), - ], -) -def test_integer_proposal(input_ids, proposal): - model = Model() - generator = 
generate.format(model, int) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor(input_ids), logits) - assert torch.equal( - result, - torch.tensor(proposal), - ) - - -def test_choice_proposal(): - model = Model() - generator = generate.choice(model, ["1", "431a", "431A-"]) - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[]]), logits) - assert torch.equal( - result, - torch.tensor( - [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]] - ), - ) - - result = generator.create_proposal(torch.tensor([[4]]), logits) - assert torch.equal( - result, - torch.tensor( - [[-math.inf, -math.inf, -math.inf, -math.inf, -math.inf, 1.0, 1.0]] - ), - ) - - result = generator.create_proposal(torch.tensor([[4, 6]]), logits) - assert torch.equal( - result, - torch.tensor( - [[-math.inf, 1.0, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf]] - ), - ) - - -@pytest.mark.parametrize( - "input_ids, proposal, with_empty", - [ - ([[]], [[-math.inf, 1.0, 1.0, 1.0, 1.0, -math.inf, -math.inf]], False), - ( - [[3]], - [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]], - True, - ), - ], -) -def test_float_proposal(input_ids, proposal, with_empty): - model = Model() - generator = generate.format(model, float, allow_empty_tokens=with_empty) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor(input_ids), logits) - assert torch.equal( - result, - torch.tensor(proposal), - ) - - -def test_Regex_precomputed(): - model = Model() - choices = ["1", "431a", "431A-"] - regex_str = r"(" + r"|".join(choices) + r")" - - regex_pattern = interegular.parse_pattern(regex_str) - regex_fsm, _ = make_deterministic_fsm(regex_pattern.to_fsm().reduce()) - - ( - states_to_token_maps, - empty_token_ids, - ) = create_fsm_index_tokenizer(regex_fsm, model.tokenizer) - - generator = Regex( - model, - regex_str, - max_tokens=100, - initial_state=regex_fsm.initial, - final_states=regex_fsm.finals, - states_to_token_maps=states_to_token_maps, - empty_token_ids=empty_token_ids, - ) - - logits = torch.ones(len(model.tokenizer.vocabulary)) - result = generator.create_proposal(torch.tensor([[]]), logits) - assert torch.equal( - result, - torch.tensor( - [[-math.inf, -math.inf, 1.0, -math.inf, 1.0, -math.inf, -math.inf]] - ), - ) diff --git a/tests/generate/test_sequence.py b/tests/generate/test_sequence.py deleted file mode 100644 index 08c27f1f..00000000 --- a/tests/generate/test_sequence.py +++ /dev/null @@ -1,404 +0,0 @@ -import math -from typing import Dict, List, Union - -import numpy as np -import pytest -import torch - -from outlines.generate.sequence import Sequence -from outlines.models import OpenAI -from outlines.models.tokenizer import Tokenizer - - -def test_openai_error(): - class Mock(OpenAI): - def __init__(self): - pass - - model = Mock() - with pytest.raises(TypeError): - Sequence(model) - - -class MockModel: - def __init__(self, tokenizer, logits): - self.tokenizer = tokenizer - self.logits = logits - self.iteration_idx = 0 - self.device = "cpu" - - def forward(self, input_ids, *_): - import math - - batch_shape = input_ids.shape[:-1] - vocab_shape = (self.logits.shape[-1],) - shaped_logits = torch.tile( - self.logits[self.iteration_idx], (math.prod(batch_shape), 1) - ) - self.iteration_idx += 1 - - return shaped_logits.reshape(batch_shape + vocab_shape), None - - def __call__(self, input_ids, *_): - return self.forward(input_ids)[0] - 
- -class MockTokenizer(Tokenizer): - def __init__(self, vocabulary: Dict[str, int]): - self.vocabulary = vocabulary - self.id_to_str = {v: k for k, v in vocabulary.items()} if vocabulary else {} - self.pad_token_id = -1 - self.id_to_str[self.pad_token_id] = "" - - def encode(self, prompts: Union[str, List[str]]): - if isinstance(prompts, str): - prompts = [prompts] - - token_ids = torch.tensor([[self.vocabulary[prompt]] for prompt in prompts]) - attention_mask = torch.ones_like(token_ids) - - return token_ids, attention_mask - - def decode(self, token_ids): - ndims = np.ndim(token_ids) - - assert 0 < ndims <= 2 - - if ndims == 1: - token_ids = [token_ids] - - res = ["".join(self.id_to_str[int(idx)] for idx in seq) for seq in token_ids] - - return res if ndims > 1 else res[0] - - def convert_token_to_string(self, token: str) -> str: - return token - - def __hash__(self): - return id(self) - - -def test_sequence_error(): - with pytest.raises(NotImplementedError, match="must be implemented"): - sequence = Sequence(MockModel(MockTokenizer(None), None)) - sequence.is_finished(torch.tensor([1])) - - -class ModelStep: - """Mock model to test `Sequence.step`""" - - def __init__(self, tokenizer, logits): - self.device = "cpu" - self.logits = logits - self.tokenizer = tokenizer - - def forward(self, input_ids, *_): - """Call the model. - - We first repeat the logits `num_sequences` times, and then - reshape the resulting array to match the batch size. - - """ - import math - - batch_shape = input_ids.shape[:-1] - vocab_shape = (self.logits.shape[-1],) - shaped_logits = torch.tile(self.logits, (math.prod(batch_shape), 1)) - return shaped_logits.reshape(batch_shape + vocab_shape), None - - def __call__(self, input_ids, *_): - return self.forward(input_ids)[0] - - -def test_sequence_step(): - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 1, -math.inf, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - - input_ids = torch.tensor([[1, 2]]) - token_ids, probs, _ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) - assert torch.equal(token_ids, torch.tensor([[[1]]])) - assert probs.shape == (1, 1, 4) - - -def test_sequence_step_batch(): - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - - input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, probs, _ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) - assert torch.equal(token_ids, torch.tensor([[[1], [2]]])) - assert probs.shape == (1, 2, 4) - - -def test_sequence_step_sample(): - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - input_ids = torch.tensor([[1, 2]]) - token_ids, probs, _ = sequence.step( - rng, 2, input_ids, torch.ones((1, 2)), samples=3 - ) - assert torch.equal(token_ids, torch.tensor([[[1]], [[2]], [[1]]])) - assert probs.shape == (3, 1, 4) - - -def test_sequence_step_sample_batch(): - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, probs, _ = sequence.step( - rng, 3, input_ids, torch.ones((2, 3)), 
samples=3 - ) - assert torch.equal( - token_ids, - torch.tensor( - [ - [[1], [2]], - [[1], [1]], - [[1], [2]], - ] - ), - ) - assert probs.shape == (3, 2, 4) - - -def test_sequence_step_loop(): - """Make sure that we can feed `step`'s output back as an input.""" - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - input_ids = torch.tensor([[1, 2]]) - token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((1, 2))) - token_ids, probs, _ = sequence.step( - rng, 2, token_ids.squeeze(0), torch.ones((1, 3)) - ) - assert torch.equal(token_ids, torch.tensor([[[2]]])) - assert probs.shape == (1, 1, 4) - - input_ids = torch.tensor([[1, 2], [3, 4]]) - token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((2, 2))) - token_ids, probs, _ = sequence.step( - rng, 2, token_ids.squeeze(0), torch.ones((2, 3)) - ) - assert torch.equal(token_ids, torch.tensor([[[1], [2]]])) - assert probs.shape == (1, 2, 4) - - # The number of samples becomes the batch size at the next iteration. - input_ids = torch.tensor([[1, 2]]) - token_ids, *_ = sequence.step(rng, 2, input_ids, torch.ones((1, 2)), samples=3) - token_ids, probs, _ = sequence.step( - rng, 2, token_ids.squeeze(1), torch.ones((3, 3)) - ) - assert torch.equal(token_ids, torch.tensor([[[2], [1], [1]]])) - assert probs.shape == (1, 3, 4) - - -def test_sequence_step_loop_general(): - rng = torch.Generator() - rng.manual_seed(0) - - logits = torch.tensor([-math.inf, 0.5, 0.5, -math.inf], dtype=torch.double) - model = ModelStep(MockTokenizer(None), logits) - - sequence = Sequence(model) - input_ids = torch.tensor([[1, 2, 1], [3, 4, 1]]) - token_ids, *_ = sequence.step(rng, 3, input_ids, torch.ones((1, 3)), samples=3) - result, *_ = sequence.step(rng, 3, token_ids, torch.ones((3, 4))) - assert result.shape == (1, 3, 2, 1) - assert torch.equal( - result.squeeze(0), - torch.tensor( - [ - [[1], [2]], - [[1], [2]], - [[1], [1]], - ] - ), - ) - - -def test_call_single_prompt(): - class FinishAfterTwo(Sequence): - def __init__(self, model): - super().__init__(model) - self.iteration_idx = 0 - - def is_finished(self, token_ids): - """Finish generating the sequence after two iterations""" - if self.iteration_idx == 0: - self.iteration_idx += 1 - return torch.tensor([False]) - else: - return torch.tensor([True]) - - tokenizer = MockTokenizer({"Test": 0, "a": 1, "b": 2}) - model = MockModel( - tokenizer, - torch.tensor([[1.0, -math.inf, -math.inf], [-math.inf, 1.0, -math.inf]]), - ) - sequence = FinishAfterTwo(model) - - result = sequence("Test") - assert result == "Testa" - - -def test_call_prompt_list(): - class FinishAfterThree(Sequence): - def __init__(self, model): - super().__init__(model) - self.iteration_idx = 0 - - def is_finished(self, token_ids): - """Finish generating the first sequence after two iteration and the - second one after two iterations. 
- - """ - if self.iteration_idx == 0: - self.iteration_idx += 1 - return torch.tensor([False, False, False]) - elif self.iteration_idx == 1: - self.iteration_idx += 1 - return torch.tensor([True, False, True]) - else: - return torch.tensor([True]) # We only consider the unfinished sequences - - tokenizer = MockTokenizer( - {"Test1": 0, "Test2": 1, "a": 2, "b": 3, "c": 4, "Test3": 5} - ) - model = MockModel( - tokenizer, - torch.tensor( - [ - [-math.inf, -math.inf, 1.0, -math.inf, -math.inf, -math.inf], - [-math.inf, -math.inf, -math.inf, 1.0, -math.inf, -math.inf], - [-math.inf, -math.inf, -math.inf, -math.inf, 1.0, -math.inf], - ] - ), - ) - sequence = FinishAfterThree(model) - - result = sequence(["Test1", "Test2", "Test3"]) - assert result == ["ab", "abc", "ab"] - - -def test_call_single_prompt_samples(): - class FinishAfterTwo(Sequence): - def __init__(self, model): - super().__init__(model) - self.iteration_idx = 0 - - def is_finished(self, token_ids): - if self.iteration_idx == 0: - self.iteration_idx += 1 - return torch.tensor([False, False, False]) - else: - return torch.tensor([True, True, True]) - - tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 2, "Test": 4}) - model = MockModel( - tokenizer, - torch.tensor( - [ - [1, -math.inf, -math.inf, -math.inf], - [-math.inf, 1, -math.inf, -math.inf], - ], - dtype=torch.double, - ), - ) - sequence = FinishAfterTwo(model) - result = sequence("Test", samples=3) - assert result == ["ab", "ab", "ab"] - - class FinishAfterOne(Sequence): - def __init__(self, model): - super().__init__(model) - - def is_finished(self, token_ids): - return torch.tensor([True, True, True]) - - tokenizer = MockTokenizer({"a": 0, "b": 1, "c": 3, "Test": 4}) - model = MockModel( - tokenizer, - torch.tensor( - [ - [1, -math.inf, -math.inf, -math.inf], - [-math.inf, 1, -math.inf, -math.inf], - ], - dtype=torch.double, - ), - ) - sequence = FinishAfterOne(model) - result = sequence("Test", samples=3) - assert result == ["a", "a", "a"] - - -def test_call_prompt_list_samples(): - class FinishAfterThree(Sequence): - def __init__(self, model): - super().__init__(model) - self.iteration_idx = 0 - - def is_finished(self, token_ids): - if self.iteration_idx == 0: - self.iteration_idx += 1 - batch_shape = token_ids.shape[:-1] - return torch.zeros(batch_shape, dtype=torch.bool) - elif self.iteration_idx == 1: - self.iteration_idx += 1 - return torch.tensor( - [[True, False, True], [True, False, True], [True, False, True]] - ) - else: - return torch.tensor([True, True, True]) - - tokenizer = MockTokenizer( - {"a": 0, "b": 1, "c": 2, "Test1": 3, "Test2": 4, "Test3": 5} - ) - model = MockModel( - tokenizer, - torch.tensor( - [ - [1, -math.inf, -math.inf, -math.inf, -math.inf, -math.inf], - [-math.inf, 1, -math.inf, -math.inf, -math.inf, -math.inf], - [-math.inf, -math.inf, 1, -math.inf, -math.inf, -math.inf], - ], - dtype=torch.double, - ), - ) - sequence = FinishAfterThree(model) - - result = sequence(["Test1", "Test2", "Test3"], samples=3) - assert result == [ - "ab", - "abc", - "ab", - "ab", - "abc", - "ab", - "ab", - "abc", - "ab", - ] From 3618c657c94b0816c1f1e70278e4da8ff0e48540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 30 Nov 2023 13:24:56 +0100 Subject: [PATCH 328/734] Use `torch.multinomial` instead of custom sampler --- outlines/generate/samplers.py | 42 +-------------------------------- tests/generate/test_samplers.py | 40 ++++--------------------------- 2 files changed, 5 insertions(+), 77 deletions(-) diff --git 
a/outlines/generate/samplers.py b/outlines/generate/samplers.py index a91e1f09..5249722f 100644 --- a/outlines/generate/samplers.py +++ b/outlines/generate/samplers.py @@ -67,45 +67,5 @@ def multinomial( """ probs = torch.nn.functional.softmax(logits, dim=-1) - # next_token_ids = torch.multinomial(probs, num_samples=samples, generator=rng) - next_token_ids = vectorized_random_choice(rng, probs, samples) + next_token_ids = torch.multinomial(probs, num_samples=samples, generator=rng) return next_token_ids - - -def vectorized_random_choice( - rng: torch.Generator, - p: torch.FloatTensor, - samples: int = 1, -): - """Vectorized implementation of `np.random.choice`. - - `np.random.choice` does not support arrays of probability. This implements - the equivalent of this function where the `p` argument can be a matrix. - - Note - ---- - `torch.searchsorted` may be more efficient, but it is not implemented for - every backend, for instance MPS. - - Parameters - ---------- - rng - Torch random number Generator instance - p - An array of probability of shape ``(num_probability_vectors, num_items)`` - that must sum to 1. - samples - The number of samples to take for each probability vector. - - Returns - ------- - An array of shape ``(num_samples, batch_size)`` - - """ - cumsum = torch.unsqueeze(p.cumsum(axis=-1), 0) - rand = torch.rand( - (samples,) + p.shape[:-1] + (1,), generator=rng, device=rng.device - ) - idx = (cumsum < rand).sum(axis=-1) - - return idx diff --git a/tests/generate/test_samplers.py b/tests/generate/test_samplers.py index 5928a1ae..9d76256e 100644 --- a/tests/generate/test_samplers.py +++ b/tests/generate/test_samplers.py @@ -2,7 +2,7 @@ import torch -from outlines.generate.samplers import greedy, multinomial, vectorized_random_choice +from outlines.generate.samplers import greedy, multinomial def test_greedy(): @@ -30,43 +30,11 @@ def test_multinomial(): assert next_token_ids.equal(torch.tensor([[2]])) next_token_ids = multinomial(logits, 2, rng) - assert next_token_ids.equal(torch.tensor([[2], [1]])) + assert next_token_ids.equal(torch.tensor([[2, 1]])) logits = torch.tensor([[10.0, 0.0, 9.0], [-math.inf, 4.0, 5.0]]) next_token_ids = multinomial(logits, 1, rng) - assert next_token_ids.equal(torch.tensor([[0, 2]])) + assert next_token_ids.equal(torch.tensor([[0], [1]])) next_token_ids = multinomial(logits, 2, rng) - assert next_token_ids.equal(torch.tensor([[0, 1], [2, 2]])) - - -def test_vectorized_random_choice(): - rng = torch.Generator() - rng.manual_seed(0) - - probs = torch.tensor([[1, 0, 0, 0]]) - sample = vectorized_random_choice(rng, probs) - assert sample.shape == (1, 1) - assert torch.equal(sample, torch.zeros((1, 1))) - - probs = torch.tensor([[1, 0, 0, 0]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 1) - assert torch.equal(sample, torch.zeros((3, 1))) - - probs = torch.tile(torch.tensor([[1, 0, 0, 0]]), (2, 1)) - sample = vectorized_random_choice(rng, probs) - assert sample.shape == (1, 2) - assert torch.equal(sample, torch.zeros((1, 2))) - - probs = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 2) - assert torch.equal(sample, torch.tensor([[0, 1], [0, 1], [0, 1]])) - - probs = torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0]], [[0, 0, 1, 0], [0, 0, 0, 1]]]) - sample = vectorized_random_choice(rng, probs, samples=3) - assert sample.shape == (3, 2, 2) - assert torch.equal( - sample, torch.tensor([[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[0, 1], [2, 
3]]]) - ) + assert next_token_ids.equal(torch.tensor([[2, 0], [2, 1]])) From 8c1e9a83c925ba9c78e59579f5ac3eeeb3b0675c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 30 Nov 2023 16:39:10 +0100 Subject: [PATCH 329/734] Fix `datetime.time` regex --- outlines/index/types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outlines/index/types.py b/outlines/index/types.py index d33b213d..93b59dd2 100644 --- a/outlines/index/types.py +++ b/outlines/index/types.py @@ -5,7 +5,7 @@ BOOLEAN = "(True|False)" FLOAT = rf"{INTEGER}(\.[0-9]+)?([eE][+-][0-9]+)?" DATE = r"(\d{4})-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])" -TIME = r"([0-1][1-9]|2[0-4]):([0-5][0-9]):([0-5][0-9])" +TIME = r"([0-1][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])" DATETIME = rf"({DATE})(\s)({TIME})" From deb50cbab0a4b095b81f3521e2bebac8e7df399a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 30 Nov 2023 17:27:47 +0100 Subject: [PATCH 330/734] Add aliases and deprecation warnings for old API --- outlines/__init__.py | 2 + outlines/text/__init__.py | 0 outlines/text/generate/__init__.py | 1 + outlines/text/generate/api.py | 82 +++++++++++++++++++ .../generate/test_integration_transfomers.py | 29 +++++++ 5 files changed, 114 insertions(+) create mode 100644 outlines/text/__init__.py create mode 100644 outlines/text/generate/__init__.py create mode 100644 outlines/text/generate/api.py diff --git a/outlines/__init__.py b/outlines/__init__.py index 0bf1a38e..d211706d 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,4 +1,6 @@ """Outlines is a Generative Model Programming Framework.""" +import outlines.generate +import outlines.text.generate from outlines.base import vectorize from outlines.caching import clear_cache, disable_cache, get_cache from outlines.prompts import prompt diff --git a/outlines/text/__init__.py b/outlines/text/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/outlines/text/generate/__init__.py b/outlines/text/generate/__init__.py new file mode 100644 index 00000000..951c2f5b --- /dev/null +++ b/outlines/text/generate/__init__.py @@ -0,0 +1 @@ +from .api import choice, continuation, format, json, regex diff --git a/outlines/text/generate/api.py b/outlines/text/generate/api.py new file mode 100644 index 00000000..5d82c2e0 --- /dev/null +++ b/outlines/text/generate/api.py @@ -0,0 +1,82 @@ +import warnings +from typing import Callable, List, Optional, Union + +import outlines +from outlines.generate.samplers import Sampler, multinomial + + +def json( + model, + schema_object: Union[str, object, Callable], + max_tokens: Optional[int] = None, + *, + sampler: Sampler = multinomial, +): + warnings.warn( + "`outlines.text.generate.json` is deprecated, please use `outlines.generate.json` instead. " + "The old import path will be removed in Outlines v0.1.0.", + DeprecationWarning, + ) + return outlines.generate.json(model, schema_object, max_tokens, sampler=sampler) + + +def regex( + model, + regex_str: str, + max_tokens: Optional[int] = None, + *, + sampler: Sampler = multinomial, +): + warnings.warn( + "`outlines.text.generate.regex` is deprecated, please use `outlines.generate.regex` instead. 
" + "The old import path will be removed in Outlines v0.1.0.", + DeprecationWarning, + ) + return outlines.generate.regex(model, regex_str, max_tokens, sampler=sampler) + + +def format( + model, python_type, max_tokens: Optional[int] = None, sampler: Sampler = multinomial +): + warnings.warn( + "`outlines.text.generate.format` is deprecated, please use `outlines.generate.format` instead. " + "The old import path will be removed in Outlines v0.1.0.", + DeprecationWarning, + ) + return outlines.generate.format(model, python_type, max_tokens, sampler=sampler) + + +def continuation( + model, + max_tokens: Optional[int] = None, + *, + sampler: Sampler = multinomial, + stop: Optional[Union[str, List[str]]] = None, +): + warnings.warn( + "`outlines.text.generate.continuation` is deprecated, please use `outlines.generate.text` instead. " + "The old import path will be removed in Outlines v0.1.0.", + DeprecationWarning, + ) + if stop is not None: + raise NotImplementedError( + "The `stop` keyword is unavailable in the updated API. Please open an issue " + " at https://github.com/outlines-dev/outlines/issues if you need it implemented." + ) + + return outlines.generate.text(model, max_tokens, sampler=sampler) + + +def choice( + model, + choices: List[str], + max_tokens: Optional[int] = None, + *, + sampler: Sampler = multinomial, +): + warnings.warn( + "`outlines.text.generate.choice` is deprecated, please use `outlines.generate.choice` instead. " + "The old import path will be removed in Outlines v0.1.0.", + DeprecationWarning, + ) + return outlines.generate.choice(model, choices, max_tokens, sampler) diff --git a/tests/generate/test_integration_transfomers.py b/tests/generate/test_integration_transfomers.py index f2102434..020f3d87 100644 --- a/tests/generate/test_integration_transfomers.py +++ b/tests/generate/test_integration_transfomers.py @@ -14,6 +14,35 @@ from outlines.models.transformers import TransformerTokenizer +def test_deprecation(): + import outlines + + model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" + model = models.transformers(model_name, device="cpu") + + with pytest.warns(DeprecationWarning): + outlines.text.generate.continuation(model, max_tokens=10) + + with pytest.raises(NotImplementedError): + outlines.text.generate.continuation(model, max_tokens=10, stop="string") + + with pytest.warns(DeprecationWarning): + outlines.text.generate.choice(model, ["A", "B"], max_tokens=10) + + with pytest.warns(DeprecationWarning): + outlines.text.generate.regex(model, "[0-9]", max_tokens=10) + + with pytest.warns(DeprecationWarning): + outlines.text.generate.format(model, int, max_tokens=10) + + with pytest.warns(DeprecationWarning): + + def function(a: int): + pass + + outlines.text.generate.json(model, function, max_tokens=10) + + def test_transformers_integration_text(): rng = torch.Generator() rng.manual_seed(10000) # Choosen so is generated From e8bf6d25ea7f15b08c366baec4cba55f9ed23855 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 30 Nov 2023 17:48:35 +0100 Subject: [PATCH 331/734] Convert JSON output to pydantic model or dictionary --- outlines/generate/api.py | 15 ++++++++++++--- outlines/generate/generator.py | 7 ++++++- tests/generate/test_integration_transfomers.py | 7 ------- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/outlines/generate/api.py b/outlines/generate/api.py index 7ff45363..5ab1bb2b 100644 --- a/outlines/generate/api.py +++ b/outlines/generate/api.py @@ -62,9 +62,18 @@ def json( ): if 
isinstance(schema_object, type(BaseModel)): schema = pyjson.dumps(schema_object.model_json_schema()) + regex_str = build_regex_from_object(schema) + generator = regex(model, regex_str, max_tokens, sampler) + generator.format_sequence = lambda x: schema_object.parse_raw(x) elif callable(schema_object): schema = pyjson.dumps(get_schema_from_signature(schema_object)) + regex_str = build_regex_from_object(schema) + generator = regex(model, regex_str, max_tokens, sampler) + generator.format_sequence = lambda x: pyjson.loads(x) + elif isinstance(schema_object, str): + schema = schema_object + regex_str = build_regex_from_object(schema) + generator = regex(model, regex_str, max_tokens, sampler) + generator.format_sequence = lambda x: pyjson.loads(x) - regex_str = build_regex_from_object(schema) - - return regex(model, regex_str, max_tokens, sampler) + return generator diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index df319d26..fa3e6bf0 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -53,6 +53,9 @@ def init_generation_state( return token_ids, attention_masks, kv_cache + def format_sequence(self, sequence): + return sequence + def __call__( self, prompt, @@ -61,7 +64,9 @@ def __call__( ) -> Union[str, List[str]]: sequence_generator = self.stream(prompt, kv_cache, rng) tokens = [token for token in sequence_generator] - sequences = ["".join(sequence) for sequence in list(zip(*tokens))] + sequences = [ + self.format_sequence("".join(sequence)) for sequence in list(zip(*tokens)) + ] return sequences if len(sequences) > 1 else sequences[0] def stream( diff --git a/tests/generate/test_integration_transfomers.py b/tests/generate/test_integration_transfomers.py index 020f3d87..79d40771 100644 --- a/tests/generate/test_integration_transfomers.py +++ b/tests/generate/test_integration_transfomers.py @@ -1,5 +1,4 @@ import datetime -import json import re from enum import Enum from typing import List, Union @@ -265,7 +264,6 @@ class Spam(BaseModel): rng.manual_seed(0) # make sure that `bar` is not an int result = generate.json(model, Spam, max_tokens=500)(prompt, rng=rng) - result = Spam.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.foo, int) assert isinstance(result.bar, float) @@ -311,7 +309,6 @@ class User(BaseModel): name: Name result = generate.json(model, User)(prompt, rng=rng) - result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert result.name in ["John", "Marc", "Michel"] @@ -333,7 +330,6 @@ class User(BaseModel): user_id: Id result = generate.json(model, User)(prompt, rng=rng) - result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert result.user_id in [1, 2] @@ -352,7 +348,6 @@ class User(BaseModel): rng.manual_seed(0) result = generate.json(model, User)(prompt, rng=rng) - result = User.parse_raw(result) assert isinstance(result, BaseModel) assert isinstance(result.user_id, int) assert isinstance(result.value, list) @@ -374,7 +369,6 @@ class Spam(BaseModel): rng.manual_seed(4) result = generate.json(model, Spam, max_tokens=100)(prompt, rng=rng) - result = Spam.parse_raw(result) assert isinstance(result, BaseModel) assert ( isinstance(result.bar, int) @@ -395,7 +389,6 @@ def function(foo: int, bar: List[int]): rng.manual_seed(4) sequence = generate.json(model, function, max_tokens=100)(prompt, rng=rng) - sequence = json.loads(sequence) assert isinstance(sequence, dict) assert 
isinstance(function(**sequence), int) From 173c65669cb770abdae00640d586b34bbaaf92aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 1 Dec 2023 11:12:33 +0100 Subject: [PATCH 332/734] Move `index` to `fsm` --- examples/parsing.py | 2 +- outlines/{index => fsm}/__init__.py | 0 outlines/{index/index.py => fsm/fsm.py} | 29 +++++++++++++++++-- outlines/{index => fsm}/json_schema.py | 0 outlines/{index => fsm}/parsing.py | 2 +- outlines/{index/fsm.py => fsm/regex.py} | 0 outlines/{index => fsm}/types.py | 0 outlines/generate/api.py | 9 ++---- outlines/generate/generator.py | 5 ++-- tests/{index => fsm}/partial_python.lark | 0 .../{index/test_index.py => fsm/test_fsm.py} | 2 +- tests/{index => fsm}/test_json_schema.py | 2 +- tests/{index => fsm}/test_parsing.py | 8 ++--- .../{index/test_fsm.py => fsm/test_regex.py} | 2 +- tests/{index => fsm}/test_types.py | 2 +- tests/generate/test_generator.py | 2 +- .../generate/test_integration_transfomers.py | 10 +++++-- 17 files changed, 50 insertions(+), 25 deletions(-) rename outlines/{index => fsm}/__init__.py (100%) rename outlines/{index/index.py => fsm/fsm.py} (82%) rename outlines/{index => fsm}/json_schema.py (100%) rename outlines/{index => fsm}/parsing.py (99%) rename outlines/{index/fsm.py => fsm/regex.py} (100%) rename outlines/{index => fsm}/types.py (100%) rename tests/{index => fsm}/partial_python.lark (100%) rename tests/{index/test_index.py => fsm/test_fsm.py} (95%) rename tests/{index => fsm}/test_json_schema.py (99%) rename tests/{index => fsm}/test_parsing.py (97%) rename tests/{index/test_fsm.py => fsm/test_regex.py} (99%) rename tests/{index => fsm}/test_types.py (93%) diff --git a/examples/parsing.py b/examples/parsing.py index 0f403182..a10da4eb 100644 --- a/examples/parsing.py +++ b/examples/parsing.py @@ -14,7 +14,7 @@ set_seed, ) -from outlines.index.parsing import PartialLark, PartialPythonIndenter +from outlines.fsm.parsing import PartialLark, PartialPythonIndenter revision = None checkpoint = "Salesforce/codegen-350M-mono" diff --git a/outlines/index/__init__.py b/outlines/fsm/__init__.py similarity index 100% rename from outlines/index/__init__.py rename to outlines/fsm/__init__.py diff --git a/outlines/index/index.py b/outlines/fsm/fsm.py similarity index 82% rename from outlines/index/index.py rename to outlines/fsm/fsm.py index e3ba1a84..75091da4 100644 --- a/outlines/index/index.py +++ b/outlines/fsm/fsm.py @@ -2,7 +2,7 @@ import interegular -from outlines.index.fsm import create_fsm_index_tokenizer, make_deterministic_fsm +from outlines.fsm.regex import create_fsm_index_tokenizer, make_deterministic_fsm if TYPE_CHECKING: from outlines.models.tokenizer import Tokenizer @@ -27,8 +27,30 @@ def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): self.max_tokens = max_tokens self.num_tokens_generated = 0 - def next_instruction(self, _: FSMState) -> List[int]: - return [] + def next_instruction(self, state: FSMState) -> List[int]: + """Generate a list of forbidden tokens for the next step. + + When in the initial state we allow every token to be generated. + In the final state the only allowed token is `stop_token_id`. + + Parameters + ---------- + state + The current state of the FSM + + Returns + ------- + A list that contains the tokens to mask. 
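The tokens-to-mask contract described in this docstring is easiest to see on a toy vocabulary: every id the FSM returns is driven to `-inf` before sampling, which zeroes its probability. A minimal standalone sketch, not the library's own masking code:

```python
import math

import torch

# Toy logits over a 4-token vocabulary; the FSM forbids ids 2 and 3.
logits = torch.tensor([1.0, 2.0, 3.0, 4.0])
tokens_to_mask = [2, 3]
logits[tokens_to_mask] = -math.inf

# After the softmax the masked tokens can never be sampled.
probs = torch.softmax(logits, dim=-1)
assert probs[2] == 0.0 and probs[3] == 0.0
```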
+ + """ + if state == 0: + return [] + else: + return [ + token_id + for token_id in self.vocabulary + if token_id != self.stop_token_id + ] def next_state(self, state: FSMState, token_id: int) -> FSMState: self.num_tokens_generated += 1 @@ -52,6 +74,7 @@ def is_final_state(self, state: FSMState) -> bool: class RegexFSM(FSM): + """FSM to generate text that is in the language of a regular expression.""" def __init__( self, regex_string: str, diff --git a/outlines/index/json_schema.py b/outlines/fsm/json_schema.py similarity index 100% rename from outlines/index/json_schema.py rename to outlines/fsm/json_schema.py diff --git a/outlines/index/parsing.py b/outlines/fsm/parsing.py similarity index 99% rename from outlines/index/parsing.py rename to outlines/fsm/parsing.py index 7e1c333a..9ebc2af5 100644 --- a/outlines/index/parsing.py +++ b/outlines/fsm/parsing.py @@ -35,7 +35,7 @@ from lark.parsers.lalr_interactive_parser import InteractiveParser from lark.parsers.lalr_parser import LALR_Parser, ParseConf, ParserState, _Parser -from outlines.index.fsm import ( +from outlines.fsm.regex import ( fsm_union, get_sub_fsms_from_seq, make_deterministic_fsm, diff --git a/outlines/index/fsm.py b/outlines/fsm/regex.py similarity index 100% rename from outlines/index/fsm.py rename to outlines/fsm/regex.py diff --git a/outlines/index/types.py b/outlines/fsm/types.py similarity index 100% rename from outlines/index/types.py rename to outlines/fsm/types.py diff --git a/outlines/generate/api.py b/outlines/generate/api.py index 5ab1bb2b..22083cf3 100644 --- a/outlines/generate/api.py +++ b/outlines/generate/api.py @@ -3,14 +3,11 @@ from pydantic import BaseModel +from outlines.fsm.fsm import RegexFSM, StopAtTokenFSM +from outlines.fsm.json_schema import build_regex_from_object, get_schema_from_signature +from outlines.fsm.types import python_types_to_regex from outlines.generate.generator import SequenceGenerator from outlines.generate.samplers import Sampler, multinomial -from outlines.index.index import RegexFSM, StopAtTokenFSM -from outlines.index.json_schema import ( - build_regex_from_object, - get_schema_from_signature, -) -from outlines.index.types import python_types_to_regex def text(model, max_tokens: Optional[int] = None, *, sampler: Sampler = multinomial): diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index fa3e6bf0..d707c5ae 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -4,11 +4,11 @@ import torch -from outlines.index.index import FSMState +from outlines.fsm.fsm import FSMState if TYPE_CHECKING: + from outlines.fsm.fsm import FSM from outlines.generate.samplers import Sampler - from outlines.index.index import FSM @dataclasses.dataclass(frozen=True) @@ -336,6 +336,7 @@ def update_logprobs(logprobs, next_token_ids, next_token_logits): ] return logprobs + new_logprobs + @torch.inference_mode def bias_logits( logits: torch.Tensor, diff --git a/tests/index/partial_python.lark b/tests/fsm/partial_python.lark similarity index 100% rename from tests/index/partial_python.lark rename to tests/fsm/partial_python.lark diff --git a/tests/index/test_index.py b/tests/fsm/test_fsm.py similarity index 95% rename from tests/index/test_index.py rename to tests/fsm/test_fsm.py index 39f76212..366fa1e1 100644 --- a/tests/index/test_index.py +++ b/tests/fsm/test_fsm.py @@ -1,6 +1,6 @@ import pytest -from outlines.index.index import RegexFSM, StopAtTokenFSM +from outlines.fsm.fsm import RegexFSM, StopAtTokenFSM def test_stop_at_token(): diff --git 
a/tests/index/test_json_schema.py b/tests/fsm/test_json_schema.py similarity index 99% rename from tests/index/test_json_schema.py rename to tests/fsm/test_json_schema.py index a4be6a42..454d120c 100644 --- a/tests/index/test_json_schema.py +++ b/tests/fsm/test_json_schema.py @@ -5,7 +5,7 @@ import pytest from pydantic import BaseModel, constr -from outlines.index.json_schema import ( +from outlines.fsm.json_schema import ( BOOLEAN, INTEGER, NULL, diff --git a/tests/index/test_parsing.py b/tests/fsm/test_parsing.py similarity index 97% rename from tests/index/test_parsing.py rename to tests/fsm/test_parsing.py index f3ec1a9a..9edd99be 100644 --- a/tests/index/test_parsing.py +++ b/tests/fsm/test_parsing.py @@ -4,14 +4,14 @@ from lark.indenter import DedentError from lark.lexer import UnexpectedCharacters, UnexpectedToken -from outlines.index.parsing import PartialLark, PartialPythonIndenter +from outlines.fsm.parsing import PartialLark, PartialPythonIndenter def test_partial_parsing(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["index"], + ["fsm"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", @@ -123,7 +123,7 @@ def test_partial_parsing(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["index"], + ["fsm"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", @@ -160,7 +160,7 @@ def test_sequential_parse_example(): lp = PartialLark.open_from_package( "tests", "partial_python.lark", - ["index"], + ["fsm"], parser="lalr", postlex=PartialPythonIndenter(), start="file_input", diff --git a/tests/index/test_fsm.py b/tests/fsm/test_regex.py similarity index 99% rename from tests/index/test_fsm.py rename to tests/fsm/test_regex.py index b1440e2f..2d2dbd99 100644 --- a/tests/index/test_fsm.py +++ b/tests/fsm/test_regex.py @@ -2,7 +2,7 @@ import numba import pytest -from outlines.index.fsm import ( +from outlines.fsm.regex import ( _walk_fsm, create_fsm_index_end_to_end, create_fsm_index_tokenizer, diff --git a/tests/index/test_types.py b/tests/fsm/test_types.py similarity index 93% rename from tests/index/test_types.py rename to tests/fsm/test_types.py index 606b70e6..cee586fa 100644 --- a/tests/index/test_types.py +++ b/tests/fsm/test_types.py @@ -2,7 +2,7 @@ import pytest -from outlines.index.types import ( +from outlines.fsm.types import ( BOOLEAN, DATE, DATETIME, diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index 92acd799..d250765a 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -4,6 +4,7 @@ import pytest import torch +from outlines.fsm.fsm import FSMState from outlines.generate.generator import ( SequenceGenerator, bias_logits, @@ -15,7 +16,6 @@ token_generator, update_token_ids, ) -from outlines.index.index import FSMState def test_sequence_generator_class(): diff --git a/tests/generate/test_integration_transfomers.py b/tests/generate/test_integration_transfomers.py index 79d40771..94d66841 100644 --- a/tests/generate/test_integration_transfomers.py +++ b/tests/generate/test_integration_transfomers.py @@ -9,7 +9,7 @@ import outlines.generate as generate import outlines.models as models -from outlines.index.fsm import reduced_vocabulary +from outlines.fsm.regex import reduced_vocabulary from outlines.models.transformers import TransformerTokenizer @@ -68,7 +68,9 @@ def test_transformers_integration_streaming(): model_name = "hf-internal-testing/tiny-random-GPTJForCausalLM" model = models.transformers(model_name, device="cpu") - 
sequence = generate.text(model, max_tokens=10).stream("Write a short sentence ", rng=rng) + sequence = generate.text(model, max_tokens=10).stream( + "Write a short sentence ", rng=rng + ) token = next(sequence) assert isinstance(token, list) @@ -77,7 +79,9 @@ def test_transformers_integration_streaming(): remaining = "".join([token[0] for token in sequence]) assert isinstance(remaining, str) - sequence = generate.text(model, max_tokens=10).stream(["Prompt1", "Prompt2"], rng=rng) + sequence = generate.text(model, max_tokens=10).stream( + ["Prompt1", "Prompt2"], rng=rng + ) tokens = next(sequence) assert isinstance(tokens, list) assert isinstance(tokens[0], str) From 4e456d959500a857f1e62e0fea4ced2f27fa23a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 1 Dec 2023 11:18:01 +0100 Subject: [PATCH 333/734] Update the documentation --- docs/api/continuation.md | 1 - docs/api/fsm.md | 2 +- docs/api/json_schema.md | 2 +- docs/api/parsing.md | 2 +- mkdocs.yml | 3 ++- 5 files changed, 5 insertions(+), 5 deletions(-) delete mode 100644 docs/api/continuation.md diff --git a/docs/api/continuation.md b/docs/api/continuation.md deleted file mode 100644 index 66b9a719..00000000 --- a/docs/api/continuation.md +++ /dev/null @@ -1 +0,0 @@ -::: outlines.generate.continuation diff --git a/docs/api/fsm.md b/docs/api/fsm.md index a7cc19f5..0f6a1ab0 100644 --- a/docs/api/fsm.md +++ b/docs/api/fsm.md @@ -1 +1 @@ -::: outlines.index.fsm +::: outlines.fsm.fsm diff --git a/docs/api/json_schema.md b/docs/api/json_schema.md index 575a85e8..471cb3a8 100644 --- a/docs/api/json_schema.md +++ b/docs/api/json_schema.md @@ -1 +1 @@ -::: outlines.index.json_schema +::: outlines.fsm.json_schema diff --git a/docs/api/parsing.md b/docs/api/parsing.md index a5ed59f2..e9662999 100644 --- a/docs/api/parsing.md +++ b/docs/api/parsing.md @@ -1 +1 @@ -::: outlines.index.parsing +::: outlines.fsm.parsing diff --git a/mkdocs.yml b/mkdocs.yml index 0e84ff20..d1c1873f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -125,7 +125,8 @@ nav: - api/prompts.md - api/json_schema.md - api/fsm.md + - api/regex.md - api/parsing.md - api/regex.md - api/samplers.md - - api/continuation.md + - api/text.md From dac4f313da3f79c468b8950112f4a4802b19b245 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Fri, 1 Dec 2023 12:45:17 +0100 Subject: [PATCH 334/734] Make `max_token` change FSM state --- outlines/fsm/fsm.py | 104 ++++++++++++++++++++++++++++++++------- outlines/generate/api.py | 2 +- tests/fsm/test_fsm.py | 11 +++-- 3 files changed, 96 insertions(+), 21 deletions(-) diff --git a/outlines/fsm/fsm.py b/outlines/fsm/fsm.py index 75091da4..3d87d9cc 100644 --- a/outlines/fsm/fsm.py +++ b/outlines/fsm/fsm.py @@ -22,10 +22,24 @@ def is_final_state(self, state: FSMState) -> bool: class StopAtTokenFSM(FSM): - def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None): + """FSM to generate text until a specified token id is generated or + a specified number of tokens has been generated. + + Text is usually produced until the EOS token is generated by the + model. + + """ + + def __init__( + self, + tokenizer: "Tokenizer", + stop_token_id: int, + max_tokens: Optional[int] = None, + ): self.stop_token_id = stop_token_id self.max_tokens = max_tokens self.num_tokens_generated = 0 + self.vocabulary = tokenizer.vocabulary.values() def next_instruction(self, state: FSMState) -> List[int]: """Generate a list of forbidden tokens for the next step. 
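The behavioral change this patch introduces — exhausting `max_tokens` now moves the FSM into its final state instead of being checked separately — can be sketched with a toy two-state machine (state 0 generates, state 1 is final; the class and names are illustrative, not the library's):

```python
from typing import Optional


class ToyStopFSM:
    def __init__(self, stop_token_id: int, max_tokens: Optional[int] = None):
        self.stop_token_id = stop_token_id
        self.max_tokens = max_tokens
        self.num_tokens_generated = 0

    def next_state(self, state: int, token_id: int) -> int:
        self.num_tokens_generated += 1
        budget_spent = (
            self.max_tokens is not None
            and self.num_tokens_generated >= self.max_tokens
        )
        if budget_spent or token_id == self.stop_token_id:
            return 1  # final state: generation stops
        return 0  # keep generating


fsm = ToyStopFSM(stop_token_id=2, max_tokens=3)
assert fsm.next_state(0, 1) == 0  # ordinary token: keep generating
assert fsm.next_state(0, 2) == 1  # stop token: move to the final state
```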
@@ -53,19 +67,37 @@ def next_instruction(self, state: FSMState) -> List[int]: ] def next_state(self, state: FSMState, token_id: int) -> FSMState: + """Update the state of the FSM. + + The FSM stays in the initial state `0` unless the specified stop token + has been generated or the maximum number of tokens has been reached. In + which case the FSM moves to the final state `1`. + + Parameters + ---------- + state + The current state of the FSM. + token_id + The id of the token that was just generated. + + Returns + ------- + The new state of the FSM. + + """ self.num_tokens_generated += 1 + if self.max_tokens is not None: + if self.num_tokens_generated >= self.max_tokens: + return FSMState(1) + if token_id == self.stop_token_id: return FSMState(1) - else: - return FSMState(0) + + return FSMState(0) def is_final_state(self, state: FSMState) -> bool: - # Stop if the maximum number of tokens has been generated - # regardless of whether the stop token id has been found. - if self.max_tokens is not None: - if self.num_tokens_generated == self.max_tokens: - return True + """Determine whether the current state of the FSM is a final state.""" if state == 1: return True @@ -75,6 +107,7 @@ def is_final_state(self, state: FSMState) -> bool: class RegexFSM(FSM): """FSM to generate text that is in the language of a regular expression.""" + def __init__( self, regex_string: str, @@ -105,12 +138,31 @@ def __init__( self.end_token = tokenizer.eos_token_id def next_instruction(self, state: FSMState) -> List[int]: + """Generate a list of forbidden tokens for the next step. + + The initialization of the FSM builds an index which maps FSM states to a + map from authorized tokens to the state in which the FSM needs to move + if said token is generated. Therefore the authorized tokens at the + current state are the keys of the map returned by the value of the index + for current state. + + If the current state is not contained in the end this means that we are + in a final state of the FSM. We only authorize EOS tokens in the final + state. + + Parameters + ---------- + state + The current state of the FSM + + Returns + ------- + A list that contains the tokens to mask. + + """ next_tokens_to_end_states = self.states_to_token_maps.get(state) if next_tokens_to_end_states is None: - # If there are no transitions from the current state, - # then we must've been in a final state of the FSM. - # We produce EOS tokens from here on. authorized_tokens = [self.end_token] else: authorized_tokens = list(next_tokens_to_end_states.keys()) @@ -122,8 +174,29 @@ def next_instruction(self, state: FSMState) -> List[int]: return list(forbidden_tokens) def next_state(self, state: FSMState, token_id: int) -> FSMState: + """Update the state of the FSM. + + We use the index to determine to which state the FSM should transition + given the token that was just generated. + + Parameters + ---------- + state + The current state of the FSM. + token_id + The id of the token that was just generated. + + Returns + ------- + The new state of the FSM. + + """ self.num_tokens_generated += 1 + if self.max_tokens is not None: + if self.num_tokens_generated == self.max_tokens: + return FSMState(-1) + if token_id == self.end_token: return FSMState(-1) @@ -135,12 +208,9 @@ def next_state(self, state: FSMState, token_id: int) -> FSMState: return FSMState(next_state) def is_final_state(self, state: FSMState) -> bool: - # Stop if the maximum number of tokens has been generated - # regardless of whether the stop token id has been found. 
- if self.max_tokens is not None: - if self.num_tokens_generated == self.max_tokens: - return True - elif state in self.final_states: + """Determine whether the current state of the FSM is a final state.""" + + if state in self.final_states: return True return False diff --git a/outlines/generate/api.py b/outlines/generate/api.py index 22083cf3..5032f95f 100644 --- a/outlines/generate/api.py +++ b/outlines/generate/api.py @@ -12,7 +12,7 @@ def text(model, max_tokens: Optional[int] = None, *, sampler: Sampler = multinomial): eos_token = model.tokenizer.eos_token_id - fsm = StopAtTokenFSM(eos_token, max_tokens) + fsm = StopAtTokenFSM(model.tokenizer, eos_token, max_tokens) device = model.device generator = SequenceGenerator(fsm, model, sampler, device) diff --git a/tests/fsm/test_fsm.py b/tests/fsm/test_fsm.py index 366fa1e1..71e333b5 100644 --- a/tests/fsm/test_fsm.py +++ b/tests/fsm/test_fsm.py @@ -4,11 +4,16 @@ def test_stop_at_token(): - fsm = StopAtTokenFSM(1) + class MockTokenizer: + vocabulary = {"a": 1, "eos": 2} + special_tokens = {"eos"} + + fsm = StopAtTokenFSM(MockTokenizer(), 2) assert fsm.next_instruction(0) == [] - assert fsm.next_state(0, 10) == 0 - assert fsm.next_state(0, 1) == 1 + assert fsm.next_instruction(1) == [1] + assert fsm.next_state(0, 2) == 1 + assert fsm.next_state(0, 1) == 0 assert fsm.is_final_state(0) is False assert fsm.is_final_state(1) is True From 6bb8e32b29976fb65f5e51e145b02695225e3b61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 12:13:35 +0100 Subject: [PATCH 335/734] Add `torch.inference_mode` decorator --- outlines/generate/generator.py | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index d707c5ae..73db979f 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -156,7 +156,6 @@ def sequence_generator( yield GenerationState(token_ids, kv_cache, logits) -@torch.inference_mode def token_generator(model, sampler: "Sampler") -> Callable: """Generate one token at a time. @@ -181,6 +180,7 @@ def token_generator(model, sampler: "Sampler") -> Callable: """ + @torch.inference_mode() def generate( token_ids, attention_masks, @@ -268,6 +268,7 @@ def is_generation_finished(fsm: "FSM", fsm_states: List[FSMState]) -> bool: return all([fsm.is_final_state(state) for state in fsm_states]) +@torch.inference_mode() def update_token_ids( token_ids: torch.Tensor, next_token_ids: torch.Tensor ) -> torch.Tensor: @@ -290,6 +291,7 @@ def update_token_ids( return torch.concatenate([token_ids, next_token_ids], dim=-1) +@torch.inference_mode() def expand_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: """Expand the attention masks. @@ -314,30 +316,7 @@ def expand_attention_masks(attention_masks: torch.Tensor) -> torch.Tensor: ) -<<<<<<< HEAD @torch.inference_mode() -======= -def update_logprobs(logprobs, next_token_ids, next_token_logits): - """Update the sequences' total logprob. - - Parameters - ---------- - logprobs - The current log-probabilities for each sequence. - next_token_ids - The token ids that were just sampled - next_token_logits - The logits returned by the model. 
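The `torch.inference_mode()` decorator this patch applies to the token-generation helpers disables autograd recording entirely, which is slightly cheaper than `torch.no_grad()` for a pure decoding loop. A standalone sketch of its effect:

```python
import torch


@torch.inference_mode()
def decode_step(weights: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    # No autograd graph is recorded inside an inference_mode region.
    return x @ weights


w = torch.randn(4, 2, requires_grad=True)
out = decode_step(w, torch.randn(1, 4))
assert not out.requires_grad  # outputs are plain inference tensors
```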
- - """ - next_token_logprobs = torch.nn.LogSoftmax(dim=-1)(next_token_logits) - new_logprobs = next_token_logprobs[ - range(next_token_ids.shape[0]), next_token_ids.flatten() - ] - return logprobs + new_logprobs - - -@torch.inference_mode def bias_logits( logits: torch.Tensor, ids_to_mask: List, From ceb2dc4eb950603314910fc37cb0f6b15caee6ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 12:35:09 +0100 Subject: [PATCH 336/734] Rename `next_instruction` to `forbidden_logits` --- outlines/fsm/fsm.py | 28 ++++++++++++---------------- outlines/generate/generator.py | 6 +++--- tests/fsm/test_fsm.py | 6 +++--- tests/generate/test_generator.py | 20 ++++++++++---------- 4 files changed, 28 insertions(+), 32 deletions(-) diff --git a/outlines/fsm/fsm.py b/outlines/fsm/fsm.py index 3d87d9cc..bc1c5150 100644 --- a/outlines/fsm/fsm.py +++ b/outlines/fsm/fsm.py @@ -11,7 +11,7 @@ class FSM(Protocol): - def next_instruction(self, state: FSMState) -> List[int]: + def forbidden_token_ids(self, state: FSMState) -> List[int]: ... def next_state(self, state: FSMState, token_id: int) -> FSMState: @@ -40,8 +40,9 @@ def __init__( self.max_tokens = max_tokens self.num_tokens_generated = 0 self.vocabulary = tokenizer.vocabulary.values() + self.final_states = {1} - def next_instruction(self, state: FSMState) -> List[int]: + def forbidden_token_ids(self, state: FSMState) -> List[int]: """Generate a list of forbidden tokens for the next step. When in the initial state we allow every token to be generated. @@ -98,11 +99,7 @@ def next_state(self, state: FSMState, token_id: int) -> FSMState: def is_final_state(self, state: FSMState) -> bool: """Determine whether the current state of the FSM is a final state.""" - - if state == 1: - return True - else: - return False + return state in self.final_states class RegexFSM(FSM): @@ -121,6 +118,9 @@ def __init__( self.empty_token_ids, ) = create_fsm_index_tokenizer(regex_fsm, tokenizer) + # We make sure that it is possible to generate strings in the language + # of the regular expression with the tokens present in the model's + # vocabulary. if not any( regex_fsm.finals.intersection(v.values()) for v in self.states_to_token_maps.values() @@ -135,9 +135,9 @@ def __init__( self.max_tokens = max_tokens self.num_tokens_generated = 0 self.vocabulary = tokenizer.vocabulary.values() - self.end_token = tokenizer.eos_token_id + self.end_token_id = tokenizer.eos_token_id - def next_instruction(self, state: FSMState) -> List[int]: + def forbidden_token_ids(self, state: FSMState) -> List[int]: """Generate a list of forbidden tokens for the next step. 
The initialization of the FSM builds an index which maps FSM states to a @@ -163,7 +163,7 @@ def next_instruction(self, state: FSMState) -> List[int]: next_tokens_to_end_states = self.states_to_token_maps.get(state) if next_tokens_to_end_states is None: - authorized_tokens = [self.end_token] + authorized_tokens = [self.end_token_id] else: authorized_tokens = list(next_tokens_to_end_states.keys()) @@ -197,7 +197,7 @@ def next_state(self, state: FSMState, token_id: int) -> FSMState: if self.num_tokens_generated == self.max_tokens: return FSMState(-1) - if token_id == self.end_token: + if token_id == self.end_token_id: return FSMState(-1) last_token_to_end_state = self.states_to_token_maps[state] @@ -209,8 +209,4 @@ def next_state(self, state: FSMState, token_id: int) -> FSMState: def is_final_state(self, state: FSMState) -> bool: """Determine whether the current state of the FSM is a final state.""" - - if state in self.final_states: - return True - - return False + return state in self.final_states diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index 73db979f..f6bb315d 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -133,7 +133,7 @@ def sequence_generator( """ token_ids, attention_masks, kv_cache = init_state while True: - logits_masks = get_next_instruction(fsm, fsm_states) + logits_masks = get_logits_masks(fsm, fsm_states) next_token_ids, kv_cache, logits = token_generator( token_ids, @@ -226,7 +226,7 @@ def get_next_fsm_states( ] -def get_next_instruction(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: +def get_logits_masks(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: """Get the new instructions for each sequence from the finite-state machine. Parameters @@ -241,7 +241,7 @@ def get_next_instruction(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor A nested list that contains the ids of the logits to bias. 
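The index lookup these docstrings describe amounts to walking a transition table. A sketch on a hypothetical index for the regex "ab", in the `states_to_token_maps` layout of `state -> {token_id: next_state}` (the token ids are made up):

```python
# Hypothetical index for the regex "ab".
states_to_token_maps = {0: {10: 1}, 1: {11: 2}}
final_states = {2}

state = 0
for token_id in (10, 11):  # the model emits the tokens for "a" then "b"
    state = states_to_token_maps[state][token_id]

assert state in final_states  # the generated string matches the regex
```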
""" - return [fsm.next_instruction(state) for state in fsm_states] + return [fsm.forbidden_token_ids(state) for state in fsm_states] def is_generation_finished(fsm: "FSM", fsm_states: List[FSMState]) -> bool: diff --git a/tests/fsm/test_fsm.py b/tests/fsm/test_fsm.py index 71e333b5..f2db973b 100644 --- a/tests/fsm/test_fsm.py +++ b/tests/fsm/test_fsm.py @@ -10,8 +10,8 @@ class MockTokenizer: fsm = StopAtTokenFSM(MockTokenizer(), 2) - assert fsm.next_instruction(0) == [] - assert fsm.next_instruction(1) == [1] + assert fsm.forbidden_token_ids(0) == [] + assert fsm.forbidden_token_ids(1) == [1] assert fsm.next_state(0, 2) == 1 assert fsm.next_state(0, 1) == 0 assert fsm.is_final_state(0) is False @@ -46,7 +46,7 @@ def convert_token_to_string(self, token): fsm = RegexFSM(regex_str, tokenizer) assert fsm.states_to_token_maps == {0: {1: 1}} - assert fsm.next_instruction(state=0) == [2, 3] + assert fsm.forbidden_token_ids(state=0) == [2, 3] assert fsm.next_state(state=0, token_id=1) == 1 assert fsm.next_state(state=0, token_id=tokenizer.eos_token_id) == -1 diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index d250765a..4f7fac1a 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -9,8 +9,8 @@ SequenceGenerator, bias_logits, expand_attention_masks, + get_logits_masks, get_next_fsm_states, - get_next_instruction, is_generation_finished, sequence_generator, token_generator, @@ -23,7 +23,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [] def is_final_state(self, _): @@ -78,7 +78,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [] def is_final_state(self, _): @@ -121,7 +121,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return FSMState(state + 1) - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [] def is_final_state(self, state): @@ -171,7 +171,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [] def is_final_state(self, _): @@ -224,7 +224,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return FSMState(state + 1) - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [] def is_final_state(self, state): @@ -368,15 +368,15 @@ def next_state(self, state, next_token_ids): assert result == [0, 0] -def test_get_next_instructions(): +def test_get_forbidden_token_idss(): class MockFSM: - def next_instruction(self, _): + def forbidden_token_ids(self, _): return [1, 2, 3, 4] - result = get_next_instruction(MockFSM(), [0]) + result = get_logits_masks(MockFSM(), [0]) assert result == [[1, 2, 3, 4]] - result = get_next_instruction(MockFSM(), [0, 1]) + result = get_logits_masks(MockFSM(), [0, 1]) assert result == [[1, 2, 3, 4], [1, 2, 3, 4]] From 15440c59f6137070b9ee1fb3e1349665cd0c5de2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 12:40:07 +0100 Subject: [PATCH 337/734] Move init of generator state outside of `SequenceGeneration` --- outlines/generate/generator.py | 61 +++++++++++++++++--------------- tests/generate/test_generator.py | 42 ++++++++++++++++++---- 2 files changed, 68 insertions(+), 35 deletions(-) diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index f6bb315d..7272306a 100644 --- 
a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -9,6 +9,7 @@ if TYPE_CHECKING: from outlines.fsm.fsm import FSM from outlines.generate.samplers import Sampler + from outlines.models.tokenizer import Tokenizer @dataclasses.dataclass(frozen=True) @@ -25,34 +26,6 @@ def __init__(self, fsm, model, sampler, device): self.tokenizer = model.tokenizer self.device = device - def init_generation_state( - self, - prompt: Union[str, List[str]], - kv_cache: Optional[torch.Tensor] = None, - ): - """Initialize the generation state. - - This method is responsible for encoding the prompt, moving token ids - to the device and initializing the random number generator. - - Parameters - ---------- - prompt - The prompt on which the generation is conditioned. - rng - The state of the random number generator. - - Returns - ------- - A `GenerationState` object. - - """ - token_ids, attention_masks = self.tokenizer.encode(prompt) - token_ids = token_ids.to(self.device) - attention_masks = attention_masks.to(self.device) - - return token_ids, attention_masks, kv_cache - def format_sequence(self, sequence): return sequence @@ -79,7 +52,7 @@ def stream( rng = torch.Generator(device=self.device) rng.seed() - init_state = self.init_generation_state(prompt, kv_cache) + init_state = init_generator_state(self.tokenizer, self.device, prompt, kv_cache) token_ids = init_state[1] num_sequences = token_ids.shape[0] @@ -105,6 +78,36 @@ def token_generator() -> Iterator[Union[List[str], str]]: return token_generator() +def init_generator_state( + tokenizer: "Tokenizer", + device: str, + prompt: Union[str, List[str]], + kv_cache: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Initialize the generation state. + + This method is responsible for encoding the prompt, moving token ids + to the device and initializing the random number generator. + + Parameters + ---------- + prompt + The prompt on which the generation is conditioned. + rng + The state of the random number generator. + + Returns + ------- + A `GenerationState` object. 
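A standalone sketch of this initialization step, with a stand-in tokenizer that returns `(token_ids, attention_masks)` the way the Outlines tokenizer does (`ToyTokenizer` is illustrative, not part of the library):

```python
import torch


class ToyTokenizer:
    def encode(self, prompts):
        ids = torch.tensor([[0, 1, 2]] * len(prompts))
        return ids, torch.ones_like(ids)


def init_state(tokenizer, device, prompts, kv_cache=None):
    # Encode the prompts and move the tensors to the target device.
    token_ids, attention_masks = tokenizer.encode(prompts)
    return token_ids.to(device), attention_masks.to(device), kv_cache


token_ids, masks, cache = init_state(ToyTokenizer(), "cpu", ["Hi", "Yo"])
assert token_ids.shape == (2, 3) and masks.shape == (2, 3) and cache is None
```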
+ + """ + token_ids, attention_masks = tokenizer.encode(prompt) + token_ids = token_ids.to(device) + attention_masks = attention_masks.to(device) + + return token_ids, attention_masks, kv_cache + + def sequence_generator( token_generator: Callable, fsm: "FSM", diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index 4f7fac1a..4da823ba 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -11,6 +11,7 @@ expand_attention_masks, get_logits_masks, get_next_fsm_states, + init_generator_state, is_generation_finished, sequence_generator, token_generator, @@ -53,12 +54,6 @@ def sampler(biased_logits, *_): assert isinstance(generator.fsm, MockFSM) assert callable(generator.generate_token) - result = generator.init_generation_state("test") - token_ids, attention_masks, kv_cache = result - assert torch.equal(token_ids, torch.tensor([[0, 1, 2, 3]])) - assert torch.equal(attention_masks, torch.tensor([[1, 1, 1, 1]])) - assert kv_cache is None - sequence = generator.stream("test") assert isinstance(sequence, Generator) @@ -73,6 +68,41 @@ def sampler(biased_logits, *_): assert result == "x" +def test_init_sequence_generator(): + class MockFSM: + def next_state(self, state, next_token_ids): + return 0 + + def forbidden_token_ids(self, _): + return [] + + def is_final_state(self, _): + return True + + class MockTokenizer: + def encode(self, _): + return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) + + def decode(self, _): + return "x" + + class MockModel: + def __init__(self): + self.tokenizer = MockTokenizer() + + def __call__(*_): + return torch.tensor([[0, 1, 2, 3]], dtype=torch.float), None + + def sampler(biased_logits, *_): + return torch.argmax(biased_logits, keepdims=True) + + result = init_generator_state(MockTokenizer(), "cpu", "") + token_ids, attention_masks, kv_cache = result + assert torch.equal(token_ids, torch.tensor([[0, 1, 2, 3]])) + assert torch.equal(attention_masks, torch.tensor([[1, 1, 1, 1]])) + assert kv_cache is None + + def test_sequence_generator_1d_single_iteration(): class MockFSM: def next_state(self, state, next_token_ids): From 18de09a30a1c29125a663bdc14c7fb5a0e36547e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 12:47:37 +0100 Subject: [PATCH 338/734] Move `SequenceGenerator` to `api.py` --- outlines/generate/api.py | 70 ++++++++++++++++++++++++++++++-- outlines/generate/generator.py | 59 --------------------------- tests/generate/test_generator.py | 2 +- 3 files changed, 68 insertions(+), 63 deletions(-) diff --git a/outlines/generate/api.py b/outlines/generate/api.py index 5032f95f..f6c74776 100644 --- a/outlines/generate/api.py +++ b/outlines/generate/api.py @@ -1,15 +1,79 @@ import json as pyjson -from typing import Callable, List, Optional, Union +from typing import Callable, Iterator, List, Optional, Union +import torch from pydantic import BaseModel -from outlines.fsm.fsm import RegexFSM, StopAtTokenFSM +from outlines.fsm.fsm import FSMState, RegexFSM, StopAtTokenFSM from outlines.fsm.json_schema import build_regex_from_object, get_schema_from_signature from outlines.fsm.types import python_types_to_regex -from outlines.generate.generator import SequenceGenerator +from outlines.generate.generator import ( + init_generator_state, + sequence_generator, + token_generator, +) from outlines.generate.samplers import Sampler, multinomial +class SequenceGenerator: + def __init__(self, fsm, model, sampler, device): + self.generate_token = token_generator(model, 
sampler) + self.fsm = fsm + self.tokenizer = model.tokenizer + self.device = device + + def format_sequence(self, sequence): + return sequence + + def __call__( + self, + prompt, + kv_cache: Optional[torch.tensor] = None, + rng: Optional[torch.Generator] = None, + ) -> Union[str, List[str]]: + sequence_generator = self.stream(prompt, kv_cache, rng) + tokens = [token for token in sequence_generator] + sequences = [ + self.format_sequence("".join(sequence)) for sequence in list(zip(*tokens)) + ] + return sequences if len(sequences) > 1 else sequences[0] + + def stream( + self, + prompt: str, + kv_cache: Optional[torch.tensor] = None, + rng: Optional[torch.Generator] = None, + ) -> Iterator[Union[List[str], str]]: + if rng is None: + rng = torch.Generator(device=self.device) + rng.seed() + + init_state = init_generator_state(self.tokenizer, self.device, prompt, kv_cache) + + token_ids = init_state[1] + num_sequences = token_ids.shape[0] + + init_fsm_states = [FSMState(0) for _ in range(num_sequences)] + + states = sequence_generator( + self.generate_token, self.fsm, init_state, init_fsm_states, rng + ) + + def token_generator() -> Iterator[Union[List[str], str]]: + while True: + try: + sequence = next(states) + except StopIteration: + return + + next_token_ids = sequence.token_ids[:, -1] + next_tokens = self.tokenizer.decode(next_token_ids) + + yield next_tokens + + return token_generator() + + def text(model, max_tokens: Optional[int] = None, *, sampler: Sampler = multinomial): eos_token = model.tokenizer.eos_token_id fsm = StopAtTokenFSM(model.tokenizer, eos_token, max_tokens) diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index 7272306a..d4fce2af 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -19,65 +19,6 @@ class GenerationState: logits: torch.Tensor -class SequenceGenerator: - def __init__(self, fsm, model, sampler, device): - self.generate_token = token_generator(model, sampler) - self.fsm = fsm - self.tokenizer = model.tokenizer - self.device = device - - def format_sequence(self, sequence): - return sequence - - def __call__( - self, - prompt, - kv_cache: Optional[torch.tensor] = None, - rng: Optional[torch.Generator] = None, - ) -> Union[str, List[str]]: - sequence_generator = self.stream(prompt, kv_cache, rng) - tokens = [token for token in sequence_generator] - sequences = [ - self.format_sequence("".join(sequence)) for sequence in list(zip(*tokens)) - ] - return sequences if len(sequences) > 1 else sequences[0] - - def stream( - self, - prompt: str, - kv_cache: Optional[torch.tensor] = None, - rng: Optional[torch.Generator] = None, - ) -> Iterator[Union[List[str], str]]: - if rng is None: - rng = torch.Generator(device=self.device) - rng.seed() - - init_state = init_generator_state(self.tokenizer, self.device, prompt, kv_cache) - - token_ids = init_state[1] - num_sequences = token_ids.shape[0] - - init_fsm_states = [FSMState(0) for _ in range(num_sequences)] - - states = sequence_generator( - self.generate_token, self.fsm, init_state, init_fsm_states, rng - ) - - def token_generator() -> Iterator[Union[List[str], str]]: - while True: - try: - sequence = next(states) - except StopIteration: - return - - next_token_ids = sequence.token_ids[:, -1] - next_tokens = self.tokenizer.decode(next_token_ids) - - yield next_tokens - - return token_generator() - - def init_generator_state( tokenizer: "Tokenizer", device: str, diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index 
4da823ba..6d80d245 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -5,8 +5,8 @@ import torch from outlines.fsm.fsm import FSMState +from outlines.generate.api import SequenceGenerator from outlines.generate.generator import ( - SequenceGenerator, bias_logits, expand_attention_masks, get_logits_masks, From e9282c49b0357eeb266e149fc57dcc99661fa69c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 12:50:24 +0100 Subject: [PATCH 339/734] Return FSM states with the sequence generator --- outlines/generate/generator.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index d4fce2af..097cd4d2 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -17,6 +17,7 @@ class GenerationState: token_ids: torch.Tensor kv_cache: torch.Tensor logits: torch.Tensor + fsm_states: List[FSMState] def init_generator_state( @@ -94,10 +95,10 @@ def sequence_generator( is_finished = is_generation_finished(fsm, fsm_states) if is_finished: - yield GenerationState(token_ids, kv_cache, logits) + yield GenerationState(token_ids, kv_cache, logits, fsm_states) return - yield GenerationState(token_ids, kv_cache, logits) + yield GenerationState(token_ids, kv_cache, logits, fsm_states) def token_generator(model, sampler: "Sampler") -> Callable: From 2d7346f44d383bd083ce5cf8b19358fc2ed1a69b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 6 Dec 2023 16:32:48 +0100 Subject: [PATCH 340/734] Make FSM return allowed tokens --- outlines/fsm/fsm.py | 28 +++++--------- outlines/generate/generator.py | 31 +++++++++------- tests/fsm/test_fsm.py | 6 +-- tests/generate/test_generator.py | 64 +++++++++++++++++--------------- 4 files changed, 63 insertions(+), 66 deletions(-) diff --git a/outlines/fsm/fsm.py b/outlines/fsm/fsm.py index bc1c5150..0cd91df5 100644 --- a/outlines/fsm/fsm.py +++ b/outlines/fsm/fsm.py @@ -11,7 +11,7 @@ class FSM(Protocol): - def forbidden_token_ids(self, state: FSMState) -> List[int]: + def allowed_token_ids(self, state: FSMState) -> List[int]: ... def next_state(self, state: FSMState, token_id: int) -> FSMState: @@ -42,8 +42,8 @@ def __init__( self.vocabulary = tokenizer.vocabulary.values() self.final_states = {1} - def forbidden_token_ids(self, state: FSMState) -> List[int]: - """Generate a list of forbidden tokens for the next step. + def allowed_token_ids(self, state: FSMState) -> List[int]: + """Generate a list of allowed tokens for the next step. When in the initial state we allow every token to be generated. In the final state the only allowed token is `stop_token_id`. @@ -59,13 +59,9 @@ def forbidden_token_ids(self, state: FSMState) -> List[int]: """ if state == 0: - return [] + return list(self.vocabulary) else: - return [ - token_id - for token_id in self.vocabulary - if token_id != self.stop_token_id - ] + return [self.stop_token_id] def next_state(self, state: FSMState, token_id: int) -> FSMState: """Update the state of the FSM. @@ -137,8 +133,8 @@ def __init__( self.vocabulary = tokenizer.vocabulary.values() self.end_token_id = tokenizer.eos_token_id - def forbidden_token_ids(self, state: FSMState) -> List[int]: - """Generate a list of forbidden tokens for the next step. + def allowed_token_ids(self, state: FSMState) -> List[int]: + """Generate a list of allowed tokens for the next step. 
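Returning *allowed* rather than forbidden token ids is the cheaper contract for constrained decoding: a single regex step typically permits a handful of ids out of a vocabulary of tens of thousands, so the allowed list is far smaller than its complement. A toy comparison:

```python
# Assumed sizes are illustrative: two allowed ids in a 50k vocabulary.
vocabulary = range(50_000)
allowed = {17, 923}
forbidden = [t for t in vocabulary if t not in allowed]

assert len(allowed) + len(forbidden) == 50_000
assert len(allowed) < len(forbidden)  # the allowed list is the small one
```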
The initialization of the FSM builds an index which maps FSM states to a map from authorized tokens to the state in which the FSM needs to move @@ -163,15 +159,9 @@ def forbidden_token_ids(self, state: FSMState) -> List[int]: next_tokens_to_end_states = self.states_to_token_maps.get(state) if next_tokens_to_end_states is None: - authorized_tokens = [self.end_token_id] + return [self.end_token_id] else: - authorized_tokens = list(next_tokens_to_end_states.keys()) - - forbidden_tokens = [ - token for token in self.vocabulary if token not in authorized_tokens - ] - - return list(forbidden_tokens) + return list(next_tokens_to_end_states.keys()) def next_state(self, state: FSMState, token_id: int) -> FSMState: """Update the state of the FSM. diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py index 097cd4d2..4ee3992a 100644 --- a/outlines/generate/generator.py +++ b/outlines/generate/generator.py @@ -78,14 +78,14 @@ def sequence_generator( """ token_ids, attention_masks, kv_cache = init_state while True: - logits_masks = get_logits_masks(fsm, fsm_states) + allowed_tokens = get_allowed_tokens(fsm, fsm_states) - next_token_ids, kv_cache, logits = token_generator( + next_token_ids, kv_cache, logits, _ = token_generator( token_ids, attention_masks, kv_cache, rng=rng, - logits_masks=logits_masks, + allowed_tokens=allowed_tokens, ) token_ids = update_token_ids(token_ids, next_token_ids) @@ -127,10 +127,10 @@ def token_generator(model, sampler: "Sampler") -> Callable: @torch.inference_mode() def generate( - token_ids, - attention_masks, - kv_cache, - logits_masks, + token_ids: torch.Tensor, + attention_masks: torch.Tensor, + kv_cache: torch.Tensor, + allowed_tokens: List[List[int]], rng: torch.Generator, ) -> Union[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: try: @@ -140,10 +140,10 @@ def generate( "The input length exceeds the context length of the model." ) - biased_logits = bias_logits(logits, logits_masks) + biased_logits = bias_logits(logits, allowed_tokens) next_token_ids = sampler(biased_logits, 1, rng) - return next_token_ids, new_kv_cache, biased_logits + return next_token_ids, new_kv_cache, logits, biased_logits return generate @@ -171,7 +171,7 @@ def get_next_fsm_states( ] -def get_logits_masks(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: +def get_allowed_tokens(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: """Get the new instructions for each sequence from the finite-state machine. Parameters @@ -183,10 +183,10 @@ def get_logits_masks(fsm: "FSM", fsm_states: List[FSMState]) -> torch.Tensor: Returns ------- - A nested list that contains the ids of the logits to bias. + A nested list that contains the ids of the logits to keep. """ - return [fsm.forbidden_token_ids(state) for state in fsm_states] + return [fsm.allowed_token_ids(state) for state in fsm_states] def is_generation_finished(fsm: "FSM", fsm_states: List[FSMState]) -> bool: @@ -284,6 +284,9 @@ def bias_logits( A view of the original logits tensor where some values are masked. 
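On toy values, the additive-mask biasing this docstring describes works as follows — build a `-inf` mask, zero the allowed positions, add it to each logits row (a standalone sketch, independent of the library code):

```python
import math

import torch

logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
allowed = [[0, 2]]  # per-sequence allowed token ids

biased = torch.empty_like(logits)
for i, ids in enumerate(allowed):
    mask = torch.full((logits.shape[-1],), -math.inf)
    mask[ids] = 0  # allowed positions keep their original logits
    biased[i] = logits[i] + mask

assert biased[0].tolist() == [1.0, -math.inf, 3.0, -math.inf]
```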
""" + biased_logits = torch.empty(logits.shape) for i, ids in enumerate(ids_to_mask): - logits[i, ids] = -math.inf - return logits + mask = torch.full((logits.shape[-1],), -math.inf, device=logits.device) + mask[ids] = 0 + biased_logits[i] = logits[i] + mask + return biased_logits diff --git a/tests/fsm/test_fsm.py b/tests/fsm/test_fsm.py index f2db973b..19b11daa 100644 --- a/tests/fsm/test_fsm.py +++ b/tests/fsm/test_fsm.py @@ -10,8 +10,8 @@ class MockTokenizer: fsm = StopAtTokenFSM(MockTokenizer(), 2) - assert fsm.forbidden_token_ids(0) == [] - assert fsm.forbidden_token_ids(1) == [1] + assert fsm.allowed_token_ids(0) == [1, 2] + assert fsm.allowed_token_ids(1) == [2] assert fsm.next_state(0, 2) == 1 assert fsm.next_state(0, 1) == 0 assert fsm.is_final_state(0) is False @@ -46,7 +46,7 @@ def convert_token_to_string(self, token): fsm = RegexFSM(regex_str, tokenizer) assert fsm.states_to_token_maps == {0: {1: 1}} - assert fsm.forbidden_token_ids(state=0) == [2, 3] + assert fsm.allowed_token_ids(state=0) == [1] assert fsm.next_state(state=0, token_id=1) == 1 assert fsm.next_state(state=0, token_id=tokenizer.eos_token_id) == -1 diff --git a/tests/generate/test_generator.py b/tests/generate/test_generator.py index 6d80d245..1e313ff2 100644 --- a/tests/generate/test_generator.py +++ b/tests/generate/test_generator.py @@ -9,7 +9,7 @@ from outlines.generate.generator import ( bias_logits, expand_attention_masks, - get_logits_masks, + get_allowed_tokens, get_next_fsm_states, init_generator_state, is_generation_finished, @@ -24,7 +24,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def forbidden_token_ids(self, _): + def allowed_token_ids(self, _): return [] def is_final_state(self, _): @@ -35,7 +35,7 @@ def encode(self, _): return torch.tensor([[0, 1, 2, 3]]), torch.tensor([[1, 1, 1, 1]]) def decode(self, _): - return "x" + return ["testx"] class MockModel: def __init__(self): @@ -73,7 +73,7 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def forbidden_token_ids(self, _): + def allowed_token_ids(self, _): return [] def is_final_state(self, _): @@ -108,8 +108,8 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def forbidden_token_ids(self, _): - return [] + def allowed_token_ids(self, _): + return [0, 1, 2, 3] def is_final_state(self, _): return True @@ -151,8 +151,8 @@ class MockFSM: def next_state(self, state, next_token_ids): return FSMState(state + 1) - def forbidden_token_ids(self, _): - return [] + def allowed_token_ids(self, _): + return [0, 1, 2, 3] def is_final_state(self, state): if state < 2: @@ -201,8 +201,8 @@ class MockFSM: def next_state(self, state, next_token_ids): return 0 - def forbidden_token_ids(self, _): - return [] + def allowed_token_ids(self, _): + return [0, 1, 2, 3] def is_final_state(self, _): return True @@ -254,8 +254,8 @@ class MockFSM: def next_state(self, state, next_token_ids): return FSMState(state + 1) - def forbidden_token_ids(self, _): - return [] + def allowed_token_ids(self, _): + return [0, 1, 2, 3] def is_final_state(self, state): if state < 2: @@ -328,9 +328,9 @@ def sampler(): @pytest.mark.parametrize( "logits_biases,expected_result,expected_biased_logits", [ - ([[]], [[3]], [[0, 1, 2, 3]]), - ([[3]], [[2]], [[0, 1, 2, -math.inf]]), - ([[2, 3]], [[1]], [[0, 1, -math.inf, -math.inf]]), + ([[0, 1, 2, 3]], [[3]], [[0, 1, 2, 3]]), + ([[0, 1, 2]], [[2]], [[0, 1, 2, -math.inf]]), + ([[0, 1]], [[1]], [[0, 1, -math.inf, -math.inf]]), ], ) def test_generator_1d(logits_biases, 
expected_result, expected_biased_logits): @@ -348,7 +348,7 @@ def sampler(biased_logits, *_): return torch.argmax(biased_logits, keepdims=True) generator = token_generator(MockModel(), sampler) - result, _, biased_logits = generator(None, None, None, logits_biases, None) + result, _, _, biased_logits = generator(None, None, None, logits_biases, None) assert torch.equal(result, torch.tensor(expected_result)) assert torch.equal(biased_logits, torch.tensor(expected_biased_logits)) @@ -356,11 +356,15 @@ def sampler(biased_logits, *_): @pytest.mark.parametrize( "logits_biases,expected_result,expected_biased_logits", [ - ([[]], [[3], [3]], [[0, 1, 2, 3], [4, 5, 6, 7]]), - ([[3], [3]], [[2], [2]], [[0, 1, 2, -math.inf], [4, 5, 6, -math.inf]]), - ([[3], []], [[2], [3]], [[0, 1, 2, -math.inf], [4, 5, 6, 7]]), + ([[0, 1, 2, 3], [0, 1, 2, 3]], [[3], [3]], [[0, 1, 2, 3], [4, 5, 6, 7]]), + ( + [[0, 1, 2], [0, 1, 2]], + [[2], [2]], + [[0, 1, 2, -math.inf], [4, 5, 6, -math.inf]], + ), + ([[0, 1, 2], [0, 1, 2, 3]], [[2], [3]], [[0, 1, 2, -math.inf], [4, 5, 6, 7]]), ( - [[2, 3], [3]], + [[0, 1], [0, 1, 2]], [[1], [2]], [[0, 1, -math.inf, -math.inf], [4, 5, 6, -math.inf]], ), @@ -381,7 +385,7 @@ def sampler(biased_logits, *_): return torch.argmax(biased_logits, dim=1, keepdims=True) generator = token_generator(MockModel(), sampler) - result, _, biased_logits = generator(None, None, None, logits_biases, None) + result, _, _, biased_logits = generator(None, None, None, logits_biases, None) assert torch.equal(result, torch.tensor(expected_result)) assert torch.equal(biased_logits, torch.tensor(expected_biased_logits)) @@ -398,15 +402,15 @@ def next_state(self, state, next_token_ids): assert result == [0, 0] -def test_get_forbidden_token_idss(): +def test_get_allowed_token_idss(): class MockFSM: - def forbidden_token_ids(self, _): + def allowed_token_ids(self, _): return [1, 2, 3, 4] - result = get_logits_masks(MockFSM(), [0]) + result = get_allowed_tokens(MockFSM(), [0]) assert result == [[1, 2, 3, 4]] - result = get_logits_masks(MockFSM(), [0, 1]) + result = get_allowed_tokens(MockFSM(), [0, 1]) assert result == [[1, 2, 3, 4], [1, 2, 3, 4]] @@ -468,27 +472,27 @@ def test_expand_attention_masks(attention_masks, expected_result): [ ( torch.tensor([[1, 2, 3, 4]], dtype=torch.float), - [[]], + [[0, 1, 2, 3]], torch.tensor([[1, 2, 3, 4]], dtype=torch.float), ), ( torch.tensor([[1, 2, 3, 4]], dtype=torch.float), - [[1]], + [[0, 2, 3]], torch.tensor([[1, -math.inf, 3, 4]], dtype=torch.float), ), ( torch.tensor([[1, 2, 3, 4]], dtype=torch.float), - [[1, 3]], + [[0, 2]], torch.tensor([[1, -math.inf, 3, -math.inf]], dtype=torch.float), ), ( torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float), - [[0], [2]], + [[1, 2], [0, 1]], torch.tensor([[-math.inf, 2, 3], [4, 5, -math.inf]], dtype=torch.float), ), ( torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float), - [[1], [0, 2]], + [[0, 2], [1]], torch.tensor( [[1, -math.inf, 3], [-math.inf, 5, -math.inf]], dtype=torch.float ), From de21766fbd4c4c9a59532be38d2516785a08c6a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 10:47:32 +0100 Subject: [PATCH 341/734] Make `stream` output tokens with whitespaces --- outlines/generate/api.py | 112 ++++++++++++++++++++++++++++++++++----- 1 file changed, 100 insertions(+), 12 deletions(-) diff --git a/outlines/generate/api.py b/outlines/generate/api.py index f6c74776..7130bf70 100644 --- a/outlines/generate/api.py +++ b/outlines/generate/api.py @@ -27,28 +27,106 @@ def format_sequence(self, 
sequence): def __call__( self, - prompt, - kv_cache: Optional[torch.tensor] = None, + prompts: Union[str, List[str]], rng: Optional[torch.Generator] = None, + kv_cache: Optional[torch.tensor] = None, ) -> Union[str, List[str]]: - sequence_generator = self.stream(prompt, kv_cache, rng) - tokens = [token for token in sequence_generator] - sequences = [ - self.format_sequence("".join(sequence)) for sequence in list(zip(*tokens)) + """Generate the full text sequence. + + Since `SequenceGenerator.stream` calls the tokenizer at every step this + method loops over the generator returned by `sequence_generator` itself + so the tokenizer is called only once after all token ids have been + generated. + + Parameters + ---------- + prompts + A string or list of strings that are passed to the model before + generating the first token. + kv_cache + A tensor containing the past key-value cache. It can be for instance + used when we are interleaving prompting and model calls. Defaults to + `None`. + rng + The random number generator. Defaults to a non-seeded `torch.Generator` + instance. + + Returns + ------- + A string or list of strings that contain the generated text. + + """ + + if isinstance(prompts, str): + prompts = [prompts] + + prompt_lengths = [len(prompt) for prompt in prompts] + + if rng is None: + rng = torch.Generator(device=self.device) + rng.seed() + + init_state = init_generator_state( + self.tokenizer, self.device, prompts, kv_cache + ) + num_sequences = len(prompts) + init_fsm_states = [FSMState(0) for _ in range(num_sequences)] + + states = sequence_generator( + self.generate_token, self.fsm, init_state, init_fsm_states, rng + ) + + while True: + try: + last_state = next(states) + except StopIteration: + break + + sequences = self.tokenizer.decode(last_state.token_ids) + generated = [ + sequence[length:] for sequence, length in zip(sequences, prompt_lengths) ] - return sequences if len(sequences) > 1 else sequences[0] + formatted = [self.format_sequence(sequence) for sequence in generated] + + return formatted if len(formatted) > 1 else formatted[0] def stream( self, - prompt: str, - kv_cache: Optional[torch.tensor] = None, + prompts: Union[str, List[str]], rng: Optional[torch.Generator] = None, + kv_cache: Optional[torch.tensor] = None, ) -> Iterator[Union[List[str], str]]: + """Generate the text sequence one token at a time. + + Since `Tokenizer.decode` strips the whitespaces from the tokens we have no + choice but to decode the generated token ids at each step and compare the + current decoded strings to the previously decoded strings. + + Parameters + ---------- + prompts + A string or list of strings that are passed to the model before + generating the first token. + kv_cache + A tensor containing the past key-value cache. It can be for instance + used when we are interleaving prompting and model calls. Defaults to + `None`. + rng + The random number generator. Defaults to a non-seeded `torch.Generator` + instance. + + Returns + ------- + A string or list of strings that contain the generated text. 
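The diff-based re-decoding that `stream` performs — decode the full generated prefix at every step and emit only the new suffix, so whitespace dropped by per-token decoding is preserved — in a standalone sketch with made-up decoder output:

```python
# Hypothetical decoded prefixes after each generation step.
decoded_per_step = ["Hello", "Hello world", "Hello world!"]

previously = ""
chunks = []
for decoded in decoded_per_step:
    chunks.append(decoded[len(previously):])  # only the newly added text
    previously = decoded

assert chunks == ["Hello", " world", "!"]
```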
+ + """ if rng is None: rng = torch.Generator(device=self.device) rng.seed() - init_state = init_generator_state(self.tokenizer, self.device, prompt, kv_cache) + init_state = init_generator_state( + self.tokenizer, self.device, prompts, kv_cache + ) token_ids = init_state[1] num_sequences = token_ids.shape[0] @@ -60,14 +138,24 @@ def stream( ) def token_generator() -> Iterator[Union[List[str], str]]: + previously_generated_sequences = ["" for _ in range(num_sequences)] + num_generated = 0 while True: try: sequence = next(states) + num_generated += 1 except StopIteration: return - next_token_ids = sequence.token_ids[:, -1] - next_tokens = self.tokenizer.decode(next_token_ids) + generated_token_ids = sequence.token_ids[:, -num_generated:] + generated_sequences = self.tokenizer.decode(generated_token_ids) + next_tokens = [ + token[len(sequence) :] + for token, sequence in zip( + generated_sequences, previously_generated_sequences + ) + ] + previously_generated_sequences = generated_sequences yield next_tokens From 5ff1ac794f4c34147be831861120bbc47e03e845 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 11:11:37 +0100 Subject: [PATCH 342/734] Bump Pytorch version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 60e91f67..c75c6158 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ dependencies = [ "pydantic>=2.0", "scipy", "tenacity", - "torch", + "torch>=2.1", "numba", "joblib", "referencing", From 3bb295e9b8218ce028f9fc7ac3312518f5f2680e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Thu, 7 Dec 2023 11:38:29 +0100 Subject: [PATCH 343/734] Update the documentation --- README.md | 80 +++++++++++-------------------- docs/examples/chain_of_density.md | 10 ++-- docs/examples/dating_profiles.md | 9 ++-- outlines/__init__.py | 1 + outlines/models/openai.py | 4 +- 5 files changed, 40 insertions(+), 64 deletions(-) diff --git a/README.md b/README.md index 7bb26382..fb66a89a 100644 --- a/README.md +++ b/README.md @@ -83,34 +83,21 @@ is to ensure that there is a well-defined interface between their output and user-defined code. **Outlines** provides ways to control the generation of language models to make their output more predictable. -### Early stopping - -You can stop the generation after a given sequence has been found: - -``` python -import outlines.text.generate as generate -import outlines.models as models - -model = models.transformers("gpt2") -answer = generate.continuation(model, stop=["."])("Tell me a one-sentence joke.") -``` - ### Multiple choices You can reduce the completion to a choice between multiple possibilities: ``` python -import outlines.text.generate as generate -import outlines.models as models +import outlines -model = models.transformers("gpt2") +model = outlines.models.transformers("gpt2") prompt = """You are a sentiment-labelling assistant. Is the following review positive or negative? Review: This restaurant is just awesome! 
""" -answer = generate.choice(model, ["Positive", "Negative"])(prompt) +answer = outlines.generate.choice(model, ["Positive", "Negative"])(prompt) ``` ### Type constraint @@ -119,16 +106,15 @@ You can instruct the model to only return integers or floats: ``` python -import outlines.text.generate as generate -import outlines.models as models +import outlines -model = models.transformers("gpt2") +model = outlines.models.transformers("gpt2") prompt = "1+1=" -answer = generate.integer(model)(prompt) +answer = outlines.generate.format(model, int)(prompt) prompt = "sqrt(2)=" -answer = generate.float(model)(prompt) +answer = outlines.generate.format(model, float)(prompt) ``` ### Efficient regex-guided generation @@ -138,15 +124,13 @@ Outlines also comes with fast regex-guided generation. In fact, the `choice`, hood: ``` python -import outlines.models as models -import outlines.text.generate as generate +import outlines - -model = models.transformers("gpt2-medium") +model = outlines.models.transformers("gpt2-medium") prompt = "Is 1+1=2? " -unguided = generate.continuation(model, max_tokens=30)(prompt) -guided = generate.regex(model, r"\s*([Yy]es|[Nn]o|[Nn]ever|[Aa]lways)", max_tokens=30)( +unguided = outlines.generate.continuation(model, max_tokens=30)(prompt) +guided = outlines.generate.regex(model, r"\s*([Yy]es|[Nn]o|[Nn]ever|[Aa]lways)", max_tokens=30)( prompt ) @@ -162,15 +146,13 @@ print(guided) ``` ``` python -import outlines.models as models -import outlines.text.generate as generate - +import outlines -model = models.transformers("gpt2-medium") +model = outlines.models.transformers("gpt2-medium") prompt = "What is the IP address of the Google DNS servers? " unguided = generate.continuation(model, max_tokens=30)(prompt) -guided = generate.regex( +guided = outlines.generate.regex( model, r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)", max_tokens=30, @@ -199,9 +181,7 @@ Outlines 〰 allows to guide the generation process so the output is *guaranteed from enum import Enum from pydantic import BaseModel, constr -import outlines.models as models -import outlines.text.generate as generate - +import outlines import torch @@ -228,10 +208,10 @@ class Character(BaseModel): strength: int -model = models.transformers("gpt2", device="cuda") +model = outlines.models.transformers("gpt2", device="cuda") # Construct guided sequence generator -generator = generate.json(model, Character, max_tokens=100) +generator = outlines.generate.json(model, Character, max_tokens=100) # Draw a sample rng = torch.Generator(device="cuda") @@ -269,14 +249,14 @@ The method works with union types, optional types, arrays, nested schemas, etc. Outlines can infer the structure of the output from the signature of a function. The result is a dictionary, and can be passed directly to the function using the usual dictionary expansion syntax `**`: ```python -from outlines import models -from outlines import text +import outlines + def add(a: int, b: int): return a + b -model = models.transformers("mistralai/Mistral-7B") -generator = text.generate.json(model, add) +model = outlines.models.transformers("mistralai/Mistral-7B") +generator = outlines.generate.json(model, add) result = generator("Return two integers named a and b respectively. 
a is odd and b even.") print(add(**result)) @@ -300,9 +280,7 @@ Template functions require no superfluous abstraction, they use the Jinja2 templating engine to help build complex prompts in a concise manner: ``` python -import outlines.text as text -import outlines.models as models - +import outlines examples = [ ("The food was digusting", "Negative"), @@ -311,7 +289,7 @@ examples = [ ("The waiter was rude", "Negative") ] -@text.prompt +@outlines.prompt def labelling(to_label, examples): """You are a sentiment-labelling assistant. @@ -321,9 +299,9 @@ def labelling(to_label, examples): {{ to_label }} // """ -model = models.transformers("gpt2") +model = outlines.models.transformers("gpt2") prompt = labelling("Just awesome", examples) -answer = text.generate.continuation(model, max_tokens=100)(prompt) +answer = outlines.generate.continuation(model, max_tokens=100)(prompt) ``` ### Tools @@ -337,7 +315,7 @@ extract the function's name, description, signature and source: ``` python from typing import Callable, List -import outlines.text as text +import outlines def google_search(query: str): @@ -350,7 +328,7 @@ def wikipedia_search(query: str): pass -@text.prompt +@outlines.prompt def my_commands(tools: List[Callable]): """AVAILABLE COMMANDS: @@ -374,7 +352,7 @@ extract the expected response's schema: ``` python from pydantic import BaseModel, Field -import outlines.text as text +import outlines class Joke(BaseModel): @@ -384,7 +362,7 @@ class Joke(BaseModel): ) -@text.prompt +@outlines.prompt def joke_ppt(response_model): """Tell a joke and explain why the joke is funny. diff --git a/docs/examples/chain_of_density.md b/docs/examples/chain_of_density.md index 3eb5f4a1..0ee36ad4 100644 --- a/docs/examples/chain_of_density.md +++ b/docs/examples/chain_of_density.md @@ -29,9 +29,9 @@ The prompt also asks the model to return a list of JSON objects that contain the We can now implement the prompt provided in the paper: ```python -from outlines import text +import outlines -@text.prompt +@outlines.prompt def chain_of_density(article): """Article: {{ article }} @@ -86,12 +86,10 @@ class Summaries(BaseModel): We now generate the prompt by passing the article we want to summarize to the template. We load a quantized version of Mistral-7B using the AutoAWQ library, and then use JSON-guided generation to generate the summaries: ```python -from outlines import models - -model = models.awq("TheBloke/Mistral-7B-OpenOrca-AWQ") +model = outlines.models.awq("TheBloke/Mistral-7B-OpenOrca-AWQ") prompt = chain_of_density(article) -result = text.generate.json(model, Summaries)(prompt) +result = outlines.generate.json(model, Summaries)(prompt) ``` We can now check the results: diff --git a/docs/examples/dating_profiles.md b/docs/examples/dating_profiles.md index 3b365431..894a0a1f 100644 --- a/docs/examples/dating_profiles.md +++ b/docs/examples/dating_profiles.md @@ -10,8 +10,7 @@ import torch import transformers from pydantic import BaseModel, conlist, constr -import outlines.models as models -import outlines.text as text +import outlines ``` ## Defining the profile with Pydantic @@ -59,7 +58,7 @@ We will use Outlines' prompt templating abilities to generate the prompt for us. 
```python -@text.prompt +@outlines.prompt def dating_profile_prompt(description: str, examples: list[Example]): """ You are a world-renowned matchmaker who understands the modern dating @@ -136,7 +135,7 @@ config = transformers.AutoConfig.from_pretrained( "mosaicml/mpt-7b-8k-instruct", trust_remote_code=True ) config.init_device = "meta" -model = models.transformers( +model = outlines.models.transformers( model_name="mosaicml/mpt-7b-8k-instruct", device="cuda", model_kwargs={ @@ -163,7 +162,7 @@ it's a good excuse for a date. I watch the latest series because I'm paying, with my hard-earned money, for every streaming service.""" prompt = dating_profile_prompt(new_description, samples) -profile = text.generate.json(model, DatingProfile)(prompt) +profile = outlines.generate.json(model, DatingProfile)(prompt) parsed_profile = DatingProfile.model_validate_json(profile) ``` diff --git a/outlines/__init__.py b/outlines/__init__.py index d211706d..7c8414af 100644 --- a/outlines/__init__.py +++ b/outlines/__init__.py @@ -1,5 +1,6 @@ """Outlines is a Generative Model Programming Framework.""" import outlines.generate +import outlines.models import outlines.text.generate from outlines.base import vectorize from outlines.caching import clear_cache, disable_cache, get_cache diff --git a/outlines/models/openai.py b/outlines/models/openai.py index 45509a9f..47e83345 100644 --- a/outlines/models/openai.py +++ b/outlines/models/openai.py @@ -8,7 +8,7 @@ import numpy as np -import outlines +from outlines.base import vectorize from outlines.caching import cache __all__ = ["OpenAI", "openai"] @@ -282,7 +282,7 @@ def __repr__(self): @cache(ignore="client") -@functools.partial(outlines.vectorize, signature="(),(),(),()->(s),()") +@functools.partial(vectorize, signature="(),(),()->(s)") async def generate_chat( prompt: str, system_prompt: Union[str, None], From fd5af1a4f72c306a485d5672983764b39acfaa52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Mon, 4 Dec 2023 23:55:25 +0100 Subject: [PATCH 344/734] Add Mamba models --- README.md | 9 +++--- docs/get_started.md | 3 +- outlines/models/__init__.py | 1 + outlines/models/mamba.py | 58 +++++++++++++++++++++++++++++++++++++ pyproject.toml | 1 + 5 files changed, 67 insertions(+), 5 deletions(-) create mode 100644 outlines/models/mamba.py diff --git a/README.md b/README.md index fb66a89a..b397a724 100644 --- a/README.md +++ b/README.md @@ -56,10 +56,11 @@ via the next-token logits. It can be used with API-based models as well. ## Available models -- Transformers -- AutoGPTQ -- AutoAWQ -- OpenAI API +- [Transformers](https://github.com/huggingface/transformers) +- [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) +- [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) +- [OpenAI API](https://github.com/openai/openai-python) +- [Mamba](https://github.com/state-spaces/mamba) Outlines 〰 has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][twitter] to stay up to date! diff --git a/docs/get_started.md b/docs/get_started.md index 75e87751..9ead561f 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -21,7 +21,8 @@ pip install outlines - `openai` for OpenAI models; - `transformers` for Hugging Face models; - `autoawq` for AWQ models; - - `auto-gptq` for GPTQ models. + - `auto-gptq` for GPTQ models; + - `mamba_ssm` for Mamba models. ## 2. 
Hello, World

diff --git a/outlines/models/__init__.py b/outlines/models/__init__.py
index 857c3261..6ecb7b6f 100644
--- a/outlines/models/__init__.py
+++ b/outlines/models/__init__.py
@@ -7,5 +7,6 @@
 """
 from .awq import awq
 from .gptq import gptq
+from .mamba import Mamba, mamba
 from .openai import OpenAI, openai
 from .transformers import Transformer, transformers
diff --git a/outlines/models/mamba.py b/outlines/models/mamba.py
new file mode 100644
index 00000000..ea0fcc15
--- /dev/null
+++ b/outlines/models/mamba.py
@@ -0,0 +1,58 @@
+from typing import TYPE_CHECKING, Optional
+
+import torch
+
+from .transformers import TransformerTokenizer
+
+if TYPE_CHECKING:
+    from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
+    from transformers import PreTrainedTokenizer
+
+
+TOKENIZER_MODEL = "EleutherAI/gpt-neox-20b"
+
+
+class Mamba:
+    """Represent a `mamba` model."""
+
+    def __init__(
+        self, model: "MambaLMHeadModel", tokenizer: "PreTrainedTokenizer", device
+    ):
+        self.device = device
+        self.model = model
+        self.tokenizer = tokenizer
+
+    def forward(self, input_ids: torch.LongTensor, *_):
+        """Compute a forward pass through the mamba model."""
+
+        output = self.model(input_ids)
+        next_token_logits = output.logits[..., -1, :]
+        return next_token_logits, None
+
+    def __call__(self, input_ids: torch.LongTensor, *_) -> torch.FloatTensor:
+        return self.forward(input_ids)
+
+
+def mamba(
+    model_name: str,
+    device: Optional[str] = None,
+    model_kwargs: dict = {},
+    tokenizer_kwargs: dict = {},
+):
+    try:
+        from mamba_ssm import MambaLMHeadModel
+    except ImportError:
+        raise ImportError(
+            "The `mamba_ssm` library needs to be installed in order to use Mamba models."
+        )
+
+    if not torch.cuda.is_available():
+        raise NotImplementedError("Mamba models can only run on GPU.")
+    else:
+        if device is None:
+            device = "cuda"
+
+    model = MambaLMHeadModel.from_pretrained(model_name, device=device)
+    tokenizer = TransformerTokenizer(TOKENIZER_MODEL, **tokenizer_kwargs)
+
+    return Mamba(model, tokenizer, device)
diff --git a/pyproject.toml b/pyproject.toml
index c75c6158..f060321f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -93,6 +93,7 @@ module = [
     "joblib.*",
     "jsonschema.*",
     "openai.*",
+    "mamba_ssm.*",
     "nest_asyncio",
     "numpy.*",
     "perscache.*",

From e0b3d879ccd1b33b45c7d596f662d70c8253a435 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Fri, 8 Dec 2023 12:17:14 +0100
Subject: [PATCH 345/734] Place `biased_logits` on device

---
 outlines/generate/generator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/outlines/generate/generator.py b/outlines/generate/generator.py
index 4ee3992a..7a5522f8 100644
--- a/outlines/generate/generator.py
+++ b/outlines/generate/generator.py
@@ -284,7 +284,7 @@ def bias_logits(
-    A view of the original logits tensor where some values are masked.
+    A new tensor of logits where forbidden token ids are masked, on the same device as `logits`.

""" - biased_logits = torch.empty(logits.shape) + biased_logits = torch.empty(logits.shape, device=logits.device) for i, ids in enumerate(ids_to_mask): mask = torch.full((logits.shape[-1],), -math.inf, device=logits.device) mask[ids] = 0 From 5df56a55fe55e72e1eab4692c85efbc464f89f27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Tue, 28 Nov 2023 11:16:08 +0100 Subject: [PATCH 346/734] Add models playing chess example --- docs/examples/index.md | 1 + docs/examples/models_playing_chess.md | 80 +++++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 82 insertions(+) create mode 100644 docs/examples/models_playing_chess.md diff --git a/docs/examples/index.md b/docs/examples/index.md index 91682c61..c9a303d5 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -2,3 +2,4 @@ - [Dating Profile](dating_profiles.md): Build dating profiles from descriptions using prompt templating and JSON-guided generation. - [Chain Of Density](chain_of_density.md): Summarize documents using chain of density prompting and JSON-guided generation. +- [Playing Chess](models_playing_chess.md): Make Mistral-7B play chess against itself using regex-guided generation. diff --git a/docs/examples/models_playing_chess.md b/docs/examples/models_playing_chess.md new file mode 100644 index 00000000..2a1e21bf --- /dev/null +++ b/docs/examples/models_playing_chess.md @@ -0,0 +1,80 @@ +# Large language models playing chess + +In this example we will make a quantized version of Mistral-7B play chess against itself. On its own the model easily generates invalid move, so we will give it a little help. At each step we will generate a regex that only matches valid move, and use it to help the model only generating valid moves. + +## The chessboard + +The game will be played on a standard checkboard. We will use the `chess` [library](https://github.com/niklasf/python-chess) to track the opponents' moves, and check that the moves are valid. + +```python +import chess + +board = chess.Board("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1") +``` + +## The opponents + +Mistral-7B quantized will be playing against itself: + +```python +from outlines import models + +board_state = models.transformers("TheBloke/Mistral-7B-OpenOrca-AWQ", device="cuda") +``` + +## A little help for the language model + +To make sure Mistral-7B generates valid chess moves we will use Outline's regex-guided generation. We define a function that takes the current state of the board and returns a regex that matches all possible legal moves: + +```python +import re + +def legal_moves_regex(board): + """Build a regex that only matches valid moves.""" + legal_moves = list(board.legal_moves) + legal_modes_str = [board.san(move) for move in legal_moves] + legal_modes_str = [re.sub(r"[+#]", "", move) for move in legal_modes_str] + regex_pattern = "|".join(re.escape(move) for move in legal_modes_str) + regex_pattern = f"{regex_pattern}" + return regex_pattern +``` + +## Prompting the language model + +The prompt corresponds to the current state of the board, so we start with: + +```python +prompt = "Score: 1-0 WhiteElo: 1600 BlackElo: 1600 Timecontrol: 1800+0 Moves: 1." +``` + +We update the prompt at each step so it reflects the state of the board after the previous move. + +## Let's play! 
+
+
+```python
+from outlines import generate
+
+
+turn_number = 0
+while not board.is_game_over():
+    regex_pattern = legal_moves_regex(board)
+    guided = generate.regex(model, regex_pattern)(board_state)
+    move = board.parse_san(guided)
+
+    if turn_number % 2 == 0:  # It's White's turn
+        board_state += board.san(move) + " "
+    else:
+        # After Black's move, append the number of the next full move
+        board_state += board.san(move) + " " + str(turn_number // 2 + 2) + "."
+
+    turn_number += 1
+
+    board.push(move)
+
+    print(board_state)
+```
+
+It turns out Mistral-7B (quantized) is not very good at playing chess: the game systematically ends because of the threefold repetition rule.
+
+
+*This example was originally authored by @903124S in [this gist](https://gist.github.com/903124/cfbefa24da95e2316e0d5e8ef8ed360d).*
diff --git a/mkdocs.yml b/mkdocs.yml
index d1c1873f..70211707 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -108,6 +108,7 @@ nav:
     - examples/index.md
     - Synthetic dating Profile: examples/dating_profiles.md
     - Chain of density prompting: examples/chain_of_density.md
+    - Playing chess: examples/models_playing_chess.md
   - Reference:
     - reference/index.md
     - Prompting: reference/prompting.md

From ac3ae2f08846f453c7c5ac04a300854f039ac87a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Sun, 10 Dec 2023 22:33:57 +0100
Subject: [PATCH 347/734] Do not parse output of `generate.JSON` in README (#425)

---
 README.md | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/README.md b/README.md
index b397a724..9c94bd52 100644
--- a/README.md
+++ b/README.md
@@ -237,10 +237,6 @@ print(sequence)
 #   "weapon": "sword",
 #   "strength": 0
 # }
-
-parsed = Character.model_validate_json(sequence)
-print(parsed)
-# name='piggyback' age=23 armor= weapon= strength=0

From be92549dd89f2e491b42261bf42a22a9c1057dbc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 11 Dec 2023 09:30:39 +0100
Subject: [PATCH 348/734] Add grid cards in `Get Started`

---
 docs/get_started.md | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/docs/get_started.md b/docs/get_started.md
index 9ead561f..c17a79ec 100644
--- a/docs/get_started.md
+++ b/docs/get_started.md
@@ -5,6 +5,42 @@ icon: material/human-greeting

 # Getting started

+<div class="grid cards" markdown>
+
+- :material-chat-processing-outline:{ .lg .middle } __Powerful Prompt Templating__
+
+    ---
+
+    Better manage your prompts' complexity with prompt templating
+
+    [:octicons-arrow-right-24: Learn more](prompting/index.md)
+
+
+- :material-regex:{ .lg .middle } __Make LLMs follow a Regex__
+
+    ---
+
+    Generate text that parses correctly 100% of the time
+
+    [:octicons-arrow-right-24: Guide LLMs](reference/regex.md)
+
+- :material-code-json:{ .lg .middle } __Make LLMs generate valid JSON__
+
+    ---
+
+    No more invalid JSON outputs, 100% guaranteed
+
+    [:octicons-arrow-right-24: Generate JSON](reference/json.md)
+
+- :material-keyboard-outline:{ .lg .middle } __Rich text generation primitives__
+
+    ---
+
+    Multiple choice, dynamic stopping with OpenAI and Open Source models
+
+    [:octicons-arrow-right-24: Generate text](reference/index.md)
+
+
+</div>
+
 ## 1. Installation

 Outlines is available on PyPi:

From 10a2ca25505859082e05314e78c0c9ac060eba8c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 11 Dec 2023 09:34:50 +0100
Subject: [PATCH 349/734] Add Discord link to index

---
 docs/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/index.md b/docs/index.md
index 1fec7c7c..c795f4b7 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -16,7 +16,7 @@ hide:

Generate text with LLMs

Robust prompting & (guided) text generation

[:fontawesome-solid-bolt: Get started](get_started.md){ .md-button .md-button--primary } - [:fontawesome-solid-code-pull-request: Contribute](https://github.com/outlines-dev/outlines){ .md-button } + [:fontawesome-brands-discord: Join the Community](https://discord.gg/ZxBxyWmW5n){ .md-button }
```python

From fb405e59bcdeb71c6bbbbf841375d895d478aac2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Louf?=
Date: Mon, 11 Dec 2023 09:30:11 +0100
Subject: [PATCH 350/734] Add prompting reference

---
 docs/prompting/index.md     | 402 ++++++++++++++++++++++++++++++++++++
 docs/reference/prompting.md | 132 ------------
 mkdocs.yml                  |   4 +-
 3 files changed, 404 insertions(+), 134 deletions(-)
 create mode 100644 docs/prompting/index.md
 delete mode 100644 docs/reference/prompting.md

diff --git a/docs/prompting/index.md b/docs/prompting/index.md
new file mode 100644
index 00000000..56db9261
--- /dev/null
+++ b/docs/prompting/index.md
@@ -0,0 +1,402 @@
+# Prompt templating
+
+Outlines provides a powerful domain-specific language to write and manage
+prompts, via what we call *prompt functions*. Prompt functions are Python
+functions that contain a template for the prompt in their docstring, and their
+arguments correspond to the variables used in the prompt. When called, a prompt
+function returns the template rendered with the values of the arguments.
+
+The aim of prompt functions is to solve several recurrent problems with prompting:
+
+1. **Building complex prompts quickly leads to messy code.** This problem has
+   already been solved in the web development community by using templating, so
+   why not use it here?
+2. **Composing prompts is difficult.** Why not just compose functions?
+3. **Separating prompts from code.** Encapsulation in functions allows a clean
+   separation between prompts and code. Moreover, like any function, prompt
+   functions can be imported from other modules.
+
+Outlines uses the [Jinja templating
+engine](https://jinja.palletsprojects.com/en/3.1.x/) to render prompts, which
+makes it easy to compose complex prompts.
+
+!!! warning "Prompt rendering"
+
+    Prompt functions are opinionated when it comes to prompt rendering. These opinions are meant to avoid common prompting errors, but can have unintended consequences if you are doing something unusual. We advise you to always print the prompt before using it. You can also [read the
+    reference](#formatting-conventions) section if you want to know more.
+
+## Your first prompt
+
+The following snippet showcases a very simple prompt. The variables between
+curly brackets `{{ }}` are placeholders for the values of the arguments you
+will pass to the prompt function.
+
+=== "Code"
+
+    ```python
+    import outlines
+
+    @outlines.prompt
+    def greetings(name, question):
+        """Hello, {{ name }}!
+        {{ question }}
+        """
+
+    prompt = greetings("user", "How are you?")
+    print(prompt)
+    ```
+
+=== "Output"
+
+    ```text
+    Hello, user!
+    How are you?
+    ```
+
+If a variable is missing in the function's arguments, Jinja2 will raise an `UndefinedError` exception:
+
+=== "Code"
+
+    ```python
+    import outlines
+
+    @outlines.prompt
+    def greetings(name):
+        """Hello, {{ surname }}!"""
+
+    prompt = greetings("user")
+    ```
+
+=== "Output"
+
+    ```text
+    Traceback (most recent call last):
+      File "<stdin>", line 9, in <module>
+      File "/home/remi/projects/normal/outlines/outlines/prompts.py", line 38, in __call__
+        return render(self.template, **bound_arguments.arguments)
+      File "/home/remi/projects/normal/outlines/outlines/prompts.py", line 213, in render
+        return jinja_template.render(**values)
+      File "/home/remi/micromamba/envs/outlines/lib/python3.9/site-packages/jinja2/environment.py", line 1301, in render
+        self.environment.handle_exception()
+      File "/home/remi/micromamba/envs/outlines/lib/python3.9/site-packages/jinja2/environment.py", line 936, in handle_exception
+        raise rewrite_traceback_stack(source=source)
+      File "<template>", line 1, in top-level template code
+    jinja2.exceptions.UndefinedError: 'surname' is undefined
+    ```

diff --git a/mkdocs.yml b/mkdocs.yml index 4cab4067..24904c4d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -106,7 +106,8 @@ nav: - cookbook/index.md - Examples: - examples/index.md - - Dating Profile: examples/dating_profiles.md + - Synthetic dating Profile: examples/dating_profiles.md + - Chain of density prompting: examples/chain_of_density.md - Reference: - reference/index.md - Prompting: reference/prompting.md From 12392f22cf4dbe0459ceb04800a266740f304dab Mon Sep 17 00:00:00 2001 From: bettybas <108075506+bettybas@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:20:40 +0100 Subject: [PATCH 304/734] =?UTF-8?q?Make=20`generate.json()=CB=8B=20work=20?= =?UTF-8?q?with=20json=20schema=20strings?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- outlines/text/generate/regex.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/outlines/text/generate/regex.py b/outlines/text/generate/regex.py index 71271467..63b33d34 100644 --- a/outlines/text/generate/regex.py +++ b/outlines/text/generate/regex.py @@ -393,7 +393,8 @@ def json( # TODO: Convert string fields to their respective types format_fn = lambda x: pyjson.loads(x) else: - format_fn = lambda x: x + schema = schema_object + format_fn = lambda x: pyjson.loads(x) regex_str = build_regex_from_object(schema) From e1c96041c6ba372df560edcbbea37bde876cd8c1 Mon Sep 17 00:00:00 2001 From: bparis Date: Fri, 1 Dec 2023 10:30:28 +0100 Subject: [PATCH 305/734] Fix oneOf implementation of the json schema spec Implements XOR regex using negative lookaheads. --- outlines/text/json_schema.py | 11 ++++++++++- tests/text/test_json_schema.py | 15 ++++++++++++--- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/outlines/text/json_schema.py b/outlines/text/json_schema.py index 4044d225..891f3d9c 100644 --- a/outlines/text/json_schema.py +++ b/outlines/text/json_schema.py @@ -137,7 +137,16 @@ def to_regex(resolver: Resolver, instance: dict): # one of the given subschemas. elif "oneOf" in instance: subregexes = [to_regex(resolver, t) for t in instance["oneOf"]] - return rf"({'|'.join(subregexes)})" + + xor_patterns = [] + # json schema validation ensured there is no overlapping schemas in oneOf + for subregex in subregexes: + other_subregexes = filter(lambda r: r != subregex, subregexes) + other_subregexes_str = "|".join([f"{s}" for s in other_subregexes]) + negative_lookahead = f"(?!.*({other_subregexes_str}))" + xor_patterns.append(f"({subregex}){negative_lookahead}") + + return rf"({'|'.join(xor_patterns)})" # The enum keyword is used to restrict a value to a fixed set of values. It # must be an array with at least one element, where each element is unique. 
diff --git a/tests/text/test_json_schema.py b/tests/text/test_json_schema.py index 38039298..a0af780e 100644 --- a/tests/text/test_json_schema.py +++ b/tests/text/test_json_schema.py @@ -220,10 +220,19 @@ def test_match_number(pattern, does_match): ( { "title": "Foo", - "oneOf": [{"type": "string"}, {"type": "number"}], + "oneOf": [{"type": "string"}, {"type": "number"}, {"type": "boolean"}], }, - rf"({STRING}|{NUMBER})", - [("12.3", True), ('"a"', True), ('1.3"a"', False)], + rf"(({STRING})(?!.*({NUMBER}|{BOOLEAN}))|({NUMBER})(?!.*({STRING}|{BOOLEAN}))|({BOOLEAN})(?!.*({STRING}|{NUMBER})))", + [ + ("12.3", True), + ("true", True), + ('"a"', True), + ("null", False), + ("", False), + ("12true", False), + ('1.3"a"', False), + ('12.3true"a"', False), + ], ), # anyOf ( From 2bd2f1ba251dfa6a43ad3446f8d1de4ace8f4183 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 6 Dec 2023 12:01:47 +0100 Subject: [PATCH 306/734] Use `Inter` instead of `Roboto` typeface --- docs/stylesheets/extra.css | 1 + mkdocs.yml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 7e98a8cc..380cc358 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,6 +1,7 @@ :root > * { --md-code-bg-color: #2E3440; --md-default-bg-color: black; + --md-text-font-family: "Inter"; } .language-python.highlight > * { diff --git a/mkdocs.yml b/mkdocs.yml index 24904c4d..e30afb4b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,5 @@ # Site information -site_name: Outlines +site_name: Outlines 〰️ site_author: The Outlines developers site_description: >- Generate text that machines understand using Large Language Models and @@ -31,7 +31,7 @@ theme: - header.autohide - announce.dismiss font: - text: Roboto + text: Inter code: Source Code Pro # Additional configuration From be92549dd89f2e491b42261bf42a22a9c1057dbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 6 Dec 2023 12:02:26 +0100 Subject: [PATCH 307/734] Expand navigation --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index e30afb4b..7119a03e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -26,7 +26,7 @@ theme: repo: fontawesome/brands/github features: - content.code.copy - - navigation.sticky + - navigation.expand - navigation.tabs - header.autohide - announce.dismiss From 742ebb6ffef9f0360bc33b41a78bc8f46274d369 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Louf?= Date: Wed, 6 Dec 2023 12:02:37 +0100 Subject: [PATCH 308/734] Update the home page --- docs/get_started.md | 2 +- docs/index.md | 49 ++++++++- docs/overrides/home.html | 202 ------------------------------------- docs/stylesheets/extra.css | 32 ++++++ 4 files changed, 81 insertions(+), 204 deletions(-) delete mode 100644 docs/overrides/home.html diff --git a/docs/get_started.md b/docs/get_started.md index c127f1de..5d8c5644 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -89,7 +89,7 @@ pip install outlines "weapon": "mace", "strength": 4171 } - ````` + ``` ## Acknowledgements diff --git a/docs/index.md b/docs/index.md index 6fb100a4..c70f11e7 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,4 +1,51 @@ --- title: Outlines -template: home.html +hide: + - navigation + - toc + - feedback --- + +# + +