diff --git a/test/system/inspect.spec.ts b/test/system/inspect.spec.ts index 59f12cba..49ee1d3e 100644 --- a/test/system/inspect.spec.ts +++ b/test/system/inspect.spec.ts @@ -380,6 +380,25 @@ describe('inspect', () => { expect(result.dependencyGraph.equals(expected)).toBeTruthy(); }); + + it('should return expected dependencies for poetry-optional-dependencies', async () => { + const workspace = 'poetry-app-optional-dependencies'; + testUtils.chdirWorkspaces(workspace); + + const result = await inspect('.', FILENAMES.poetry.lockfile); + + const expected = [ + { + pkg: { + name: 'opentelemetry-distro', + version: '0.35b0', + }, + directDeps: ['opentelemetry-distro'], + }, + ]; + + compareTransitiveLines(result.dependencyGraph, expected); + }); }); it('should return correct target file for poetry project when relative path to poetry lock file is passed', async () => { diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/__init__.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/__init__.py new file mode 100644 index 00000000..0400780e --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/__init__.py @@ -0,0 +1,61 @@ +#!/usr/bin/python + +from . import core +from . import exposition +from . import gc_collector +from . import platform_collector +from . import process_collector +from . import registry +from . import metrics_core +from . import metrics + +__all__ = ['Counter', 'Gauge', 'Summary', 'Histogram', 'Info', 'Enum'] + +CollectorRegistry = registry.CollectorRegistry +REGISTRY = registry.REGISTRY +Metric = metrics_core.Metric +Counter = metrics.Counter +Gauge = metrics.Gauge +Summary = metrics.Summary +Histogram = metrics.Histogram +Info = metrics.Info +Enum = metrics.Enum + +CONTENT_TYPE_LATEST = exposition.CONTENT_TYPE_LATEST +generate_latest = exposition.generate_latest +MetricsHandler = exposition.MetricsHandler +make_wsgi_app = exposition.make_wsgi_app +start_http_server = exposition.start_http_server +start_wsgi_server = exposition.start_wsgi_server +write_to_textfile = exposition.write_to_textfile +push_to_gateway = exposition.push_to_gateway +pushadd_to_gateway = exposition.pushadd_to_gateway +delete_from_gateway = exposition.delete_from_gateway +instance_ip_grouping_key = exposition.instance_ip_grouping_key + +ProcessCollector = process_collector.ProcessCollector +PROCESS_COLLECTOR = process_collector.PROCESS_COLLECTOR + +PlatformCollector = platform_collector.PlatformCollector +PLATFORM_COLLECTOR = platform_collector.PLATFORM_COLLECTOR + +GCCollector = gc_collector.GCCollector +GC_COLLECTOR = gc_collector.GC_COLLECTOR + +if __name__ == '__main__': + c = Counter('cc', 'A counter') + c.inc() + + g = Gauge('gg', 'A gauge') + g.set(17) + + s = Summary('ss', 'A summary', ['a', 'b']) + s.labels('c', 'd').observe(17) + + h = Histogram('hh', 'A histogram') + h.observe(.6) + + start_http_server(8000) + import time + while True: + time.sleep(1) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/bridge/__init__.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/bridge/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/bridge/graphite.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/bridge/graphite.py new file mode 100644 index 
00000000..713d8c09 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/bridge/graphite.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +from __future__ import unicode_literals + +import logging +import re +import socket +import threading +import time +from timeit import default_timer + +from ..registry import REGISTRY + +# Roughly, have to keep to what works as a file name. +# We also remove periods, so labels can be distinguished. + +_INVALID_GRAPHITE_CHARS = re.compile(r"[^a-zA-Z0-9_-]") + + +def _sanitize(s): + return _INVALID_GRAPHITE_CHARS.sub('_', s) + + +class _RegularPush(threading.Thread): + def __init__(self, pusher, interval, prefix): + super(_RegularPush, self).__init__() + self._pusher = pusher + self._interval = interval + self._prefix = prefix + + def run(self): + wait_until = default_timer() + while True: + while True: + now = default_timer() + if now >= wait_until: + # May need to skip some pushes. + while wait_until < now: + wait_until += self._interval + break + # time.sleep can return early. + time.sleep(wait_until - now) + try: + self._pusher.push(prefix=self._prefix) + except IOError: + logging.exception("Push failed") + + +class GraphiteBridge(object): + def __init__(self, address, registry=REGISTRY, timeout_seconds=30, _timer=time.time): + self._address = address + self._registry = registry + self._timeout = timeout_seconds + self._timer = _timer + + def push(self, prefix=''): + now = int(self._timer()) + output = [] + + prefixstr = '' + if prefix: + prefixstr = prefix + '.' + + for metric in self._registry.collect(): + for s in metric.samples: + if s.labels: + labelstr = '.' + '.'.join( + ['{0}.{1}'.format( + _sanitize(k), _sanitize(v)) + for k, v in sorted(s.labels.items())]) + else: + labelstr = '' + output.append('{0}{1}{2} {3} {4}\n'.format( + prefixstr, _sanitize(s.name), labelstr, float(s.value), now)) + + conn = socket.create_connection(self._address, self._timeout) + conn.sendall(''.join(output).encode('ascii')) + conn.close() + + def start(self, interval=60.0, prefix=''): + t = _RegularPush(self, interval, prefix) + t.daemon = True + t.start() diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/context_managers.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/context_managers.py new file mode 100644 index 00000000..2b271a7c --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/context_managers.py @@ -0,0 +1,68 @@ +from __future__ import unicode_literals + +from timeit import default_timer + +from .decorator import decorate + + +class ExceptionCounter(object): + def __init__(self, counter, exception): + self._counter = counter + self._exception = exception + + def __enter__(self): + pass + + def __exit__(self, typ, value, traceback): + if isinstance(value, self._exception): + self._counter.inc() + + def __call__(self, f): + def wrapped(func, *args, **kwargs): + with self: + return func(*args, **kwargs) + + return decorate(f, wrapped) + + +class InprogressTracker(object): + def __init__(self, gauge): + self._gauge = gauge + + def __enter__(self): + self._gauge.inc() + + def __exit__(self, typ, value, traceback): + self._gauge.dec() + + def __call__(self, f): + def wrapped(func, *args, **kwargs): + with self: + return func(*args, **kwargs) + + return decorate(f, wrapped) + + +class Timer(object): + def __init__(self, callback): + self._callback = callback + + def _new_timer(self): 
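+        # A fresh Timer (same callback) per decorated call: each invocation
+        # gets its own _start, keeping the decorator thread-safe and reentrant.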
+ return self.__class__(self._callback) + + def __enter__(self): + self._start = default_timer() + + def __exit__(self, typ, value, traceback): + # Time can go backwards. + duration = max(default_timer() - self._start, 0) + self._callback(duration) + + def __call__(self, f): + def wrapped(func, *args, **kwargs): + # Obtaining new instance of timer every time + # ensures thread safety and reentrancy. + with self._new_timer(): + return func(*args, **kwargs) + + return decorate(f, wrapped) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/core.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/core.py new file mode 100644 index 00000000..521322a7 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/core.py @@ -0,0 +1,35 @@ +from __future__ import unicode_literals + +from .metrics import Counter, Enum, Gauge, Histogram, Info, Summary +from .metrics_core import ( + CounterMetricFamily, GaugeHistogramMetricFamily, GaugeMetricFamily, + HistogramMetricFamily, InfoMetricFamily, Metric, Sample, + StateSetMetricFamily, SummaryMetricFamily, UnknownMetricFamily, + UntypedMetricFamily, +) +from .registry import CollectorRegistry, REGISTRY +from .samples import Exemplar, Sample, Timestamp + +__all__ = ( + 'CollectorRegistry', + 'Counter', + 'CounterMetricFamily', + 'Enum', + 'Exemplar', + 'Gauge', + 'GaugeHistogramMetricFamily', + 'GaugeMetricFamily', + 'Histogram', + 'HistogramMetricFamily', + 'Info', + 'InfoMetricFamily', + 'Metric', + 'REGISTRY', + 'Sample', + 'StateSetMetricFamily', + 'Summary', + 'SummaryMetricFamily', + 'Timestamp', + 'UnknownMetricFamily', + 'UntypedMetricFamily', +) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/decorator.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/decorator.py new file mode 100644 index 00000000..0fc7a221 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/decorator.py @@ -0,0 +1,420 @@ +# ######################### LICENSE ############################ # + +# Copyright (c) 2005-2016, Michele Simionato +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: + +# Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# Redistributions in bytecode form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +""" +Decorator module, see http://pypi.python.org/pypi/decorator +for the documentation. +""" +from __future__ import print_function + +import collections +import inspect +import itertools +import operator +import re +import sys + +__version__ = '4.0.10' + +if sys.version_info >= (3,): + from inspect import getfullargspec + + def get_init(cls): + return cls.__init__ +else: + class getfullargspec(object): + "A quick and dirty replacement for getfullargspec for Python 2.X" + + def __init__(self, f): + self.args, self.varargs, self.varkw, self.defaults = \ + inspect.getargspec(f) + self.kwonlyargs = [] + self.kwonlydefaults = None + + def __iter__(self): + yield self.args + yield self.varargs + yield self.varkw + yield self.defaults + + getargspec = inspect.getargspec + + def get_init(cls): + return cls.__init__.__func__ + +# getargspec has been deprecated in Python 3.5 +ArgSpec = collections.namedtuple( + 'ArgSpec', 'args varargs varkw defaults') + + +def getargspec(f): + """A replacement for inspect.getargspec""" + spec = getfullargspec(f) + return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults) + + +DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(') + + +# basic functionality +class FunctionMaker(object): + """ + An object with the ability to create functions with a given signature. + It has attributes name, doc, module, signature, defaults, dict and + methods update and make. 
+ """ + + # Atomic get-and-increment provided by the GIL + _compile_count = itertools.count() + + def __init__(self, func=None, name=None, signature=None, + defaults=None, doc=None, module=None, funcdict=None): + self.shortsignature = signature + if func: + # func can be a class or a callable, but not an instance method + self.name = func.__name__ + if self.name == '': # small hack for lambda functions + self.name = '_lambda_' + self.doc = func.__doc__ + self.module = func.__module__ + if inspect.isfunction(func): + argspec = getfullargspec(func) + self.annotations = getattr(func, '__annotations__', {}) + for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', + 'kwonlydefaults'): + setattr(self, a, getattr(argspec, a)) + for i, arg in enumerate(self.args): + setattr(self, 'arg%d' % i, arg) + if sys.version_info < (3,): # easy way + self.shortsignature = self.signature = ( + inspect.formatargspec( + formatvalue=lambda val: "", *argspec)[1:-1]) + else: # Python 3 way + allargs = list(self.args) + allshortargs = list(self.args) + if self.varargs: + allargs.append('*' + self.varargs) + allshortargs.append('*' + self.varargs) + elif self.kwonlyargs: + allargs.append('*') # single star syntax + for a in self.kwonlyargs: + allargs.append('%s=None' % a) + allshortargs.append('%s=%s' % (a, a)) + if self.varkw: + allargs.append('**' + self.varkw) + allshortargs.append('**' + self.varkw) + self.signature = ', '.join(allargs) + self.shortsignature = ', '.join(allshortargs) + self.dict = func.__dict__.copy() + # func=None happens when decorating a caller + if name: + self.name = name + if signature is not None: + self.signature = signature + if defaults: + self.defaults = defaults + if doc: + self.doc = doc + if module: + self.module = module + if funcdict: + self.dict = funcdict + # check existence required attributes + assert hasattr(self, 'name') + if not hasattr(self, 'signature'): + raise TypeError('You are decorating a non function: %s' % func) + + def update(self, func, **kw): + "Update the signature of func with the data in self" + func.__name__ = self.name + func.__doc__ = getattr(self, 'doc', None) + func.__dict__ = getattr(self, 'dict', {}) + func.__defaults__ = getattr(self, 'defaults', ()) + func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) + func.__annotations__ = getattr(self, 'annotations', None) + try: + frame = sys._getframe(3) + except AttributeError: # for IronPython and similar implementations + callermodule = '?' + else: + callermodule = frame.f_globals.get('__name__', '?') + func.__module__ = getattr(self, 'module', callermodule) + func.__dict__.update(kw) + + def make(self, src_templ, evaldict=None, addsource=False, **attrs): + "Make a new function from a given template and update the signature" + src = src_templ % vars(self) # expand name and signature + evaldict = evaldict or {} + mo = DEF.match(src) + if mo is None: + raise SyntaxError('not a valid function template\n%s' % src) + name = mo.group(1) # extract the function name + names = set([name] + [arg.strip(' *') for arg in + self.shortsignature.split(',')]) + for n in names: + if n in ('_func_', '_call_'): + raise NameError('%s is overridden in\n%s' % (n, src)) + + if not src.endswith('\n'): # add a newline for old Pythons + src += '\n' + + # Ensure each generated function has a unique filename for profilers + # (such as cProfile) that depend on the tuple of (, + # , ) being unique. 
+        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
+        try:
+            code = compile(src, filename, 'single')
+            exec(code, evaldict)
+        except:
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature and body.
+        evaldict is the evaluation dictionary. If addsource is true an
+        attribute __source__ is added to the result. The attributes attrs
+        are added, if any.
+        """
+        if isinstance(obj, str):  # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip a right parens
+            func = None
+        else:  # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                         evaldict, addsource, **attrs)
+
+
+def decorate(func, caller):
+    """
+    decorate(func, caller) decorates a function using a caller.
+    """
+    evaldict = dict(_call_=caller, _func_=func)
+    fun = FunctionMaker.create(
+        func, "return _call_(_func_, %(shortsignature)s)",
+        evaldict, __wrapped__=func)
+    if hasattr(func, '__qualname__'):
+        fun.__qualname__ = func.__qualname__
+    return fun
+
+
+def decorator(caller, _func=None):
+    """decorator(caller) converts a caller function into a decorator"""
+    if _func is not None:  # return a decorated function
+        # this is obsolete behavior; you should use decorate instead
+        return decorate(_func, caller)
+    # else return a decorator function
+    if inspect.isclass(caller):
+        name = caller.__name__.lower()
+        doc = 'decorator(%s) converts functions/generators into ' \
+              'factories of %s objects' % (caller.__name__, caller.__name__)
+    elif inspect.isfunction(caller):
+        if caller.__name__ == '<lambda>':
+            name = '_lambda_'
+        else:
+            name = caller.__name__
+        doc = caller.__doc__
+    else:  # assume caller is an object with a __call__ method
+        name = caller.__class__.__name__.lower()
+        doc = caller.__call__.__doc__
+    evaldict = dict(_call_=caller, _decorate_=decorate)
+    return FunctionMaker.create(
+        '%s(func)' % name, 'return _decorate_(func, _call_)',
+        evaldict, doc=doc, module=caller.__module__,
+        __wrapped__=caller)
+
+
+# ####################### contextmanager ####################### #
+
+try:  # Python >= 3.2
+    from contextlib import _GeneratorContextManager
+except ImportError:  # Python >= 2.5
+    from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
+class ContextManager(_GeneratorContextManager):
+    def __call__(self, func):
+        """Context manager decorator"""
+        return FunctionMaker.create(
+            func, "with _self_: return _func_(%(shortsignature)s)",
+            dict(_self_=self, _func_=func), __wrapped__=func)
+
+
+init = getfullargspec(_GeneratorContextManager.__init__)
+n_args = len(init.args)
+if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g(*a, **k))
+    ContextManager.__init__ = __init__
+elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
+    pass
+elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g, a, k)
+    ContextManager.__init__ = __init__
+
+contextmanager = decorator(ContextManager)
+
+
+# 
############################ dispatch_on ############################ # + +def append(a, vancestors): + """ + Append ``a`` to the list of the virtual ancestors, unless it is already + included. + """ + add = True + for j, va in enumerate(vancestors): + if issubclass(va, a): + add = False + break + if issubclass(a, va): + vancestors[j] = a + add = False + if add: + vancestors.append(a) + + +# inspired from simplegeneric by P.J. Eby and functools.singledispatch +def dispatch_on(*dispatch_args): + """ + Factory of decorators turning a function into a generic function + dispatching on the given arguments. + """ + assert dispatch_args, 'No dispatch args passed' + dispatch_str = '(%s,)' % ', '.join(dispatch_args) + + def check(arguments, wrong=operator.ne, msg=''): + """Make sure one passes the expected number of arguments""" + if wrong(len(arguments), len(dispatch_args)): + raise TypeError('Expected %d arguments, got %d%s' % + (len(dispatch_args), len(arguments), msg)) + + def gen_func_dec(func): + """Decorator turning a function into a generic function""" + + # first check the dispatch arguments + argset = set(getfullargspec(func).args) + if not set(dispatch_args) <= argset: + raise NameError('Unknown dispatch arguments %s' % dispatch_str) + + typemap = {} + + def vancestors(*types): + """ + Get a list of sets of virtual ancestors for the given types + """ + check(types) + ras = [[] for _ in range(len(dispatch_args))] + for types_ in typemap: + for t, type_, ra in zip(types, types_, ras): + if issubclass(t, type_) and type_ not in t.__mro__: + append(type_, ra) + return [set(ra) for ra in ras] + + def ancestors(*types): + """ + Get a list of virtual MROs, one for each type + """ + check(types) + lists = [] + for t, vas in zip(types, vancestors(*types)): + n_vas = len(vas) + if n_vas > 1: + raise RuntimeError( + 'Ambiguous dispatch for %s: %s' % (t, vas)) + elif n_vas == 1: + va, = vas + mro = type('t', (t, va), {}).__mro__[1:] + else: + mro = t.__mro__ + lists.append(mro[:-1]) # discard t and object + return lists + + def register(*types): + """ + Decorator to register an implementation for the given types + """ + check(types) + + def dec(f): + check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) + typemap[types] = f + return f + return dec + + def dispatch_info(*types): + """ + An utility to introspect the dispatch algorithm + """ + check(types) + lst = [] + for anc in itertools.product(*ancestors(*types)): + lst.append(tuple(a.__name__ for a in anc)) + return lst + + def _dispatch(dispatch_args, *args, **kw): + types = tuple(type(arg) for arg in dispatch_args) + try: # fast path + f = typemap[types] + except KeyError: + pass + else: + return f(*args, **kw) + combinations = itertools.product(*ancestors(*types)) + next(combinations) # the first one has been already tried + for types_ in combinations: + f = typemap.get(types_) + if f is not None: + return f(*args, **kw) + + # else call the default implementation + return func(*args, **kw) + + return FunctionMaker.create( + func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, + dict(_f_=_dispatch), register=register, default=func, + typemap=typemap, vancestors=vancestors, ancestors=ancestors, + dispatch_info=dispatch_info, __wrapped__=func) + + gen_func_dec.__name__ = 'dispatch_on' + dispatch_str + return gen_func_dec diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/exposition.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/exposition.py 
new file mode 100644 index 00000000..22dea5da --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/exposition.py @@ -0,0 +1,365 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +import base64 +from contextlib import closing +import os +import socket +import sys +import threading +from wsgiref.simple_server import make_server, WSGIRequestHandler + +from .openmetrics import exposition as openmetrics +from .registry import REGISTRY +from .utils import floatToGoString + +try: + from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer + from SocketServer import ThreadingMixIn + from urllib2 import build_opener, Request, HTTPHandler + from urllib import quote_plus + from urlparse import parse_qs, urlparse +except ImportError: + # Python 3 + from http.server import BaseHTTPRequestHandler, HTTPServer + from socketserver import ThreadingMixIn + from urllib.request import build_opener, Request, HTTPHandler + from urllib.parse import quote_plus, parse_qs, urlparse + + +CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') +'''Content type of the latest text format''' + +PYTHON26_OR_OLDER = sys.version_info < (2, 7) + + +def make_wsgi_app(registry=REGISTRY): + '''Create a WSGI app which serves the metrics from a registry.''' + def prometheus_app(environ, start_response): + params = parse_qs(environ.get('QUERY_STRING', '')) + r = registry + encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT')) + if 'name[]' in params: + r = r.restricted_registry(params['name[]']) + output = encoder(r) + + status = str('200 OK') + headers = [(str('Content-type'), content_type)] + start_response(status, headers) + return [output] + return prometheus_app + + +class _SilentHandler(WSGIRequestHandler): + """WSGI handler that does not log requests.""" + + def log_message(self, format, *args): + """Log nothing.""" + + +def start_wsgi_server(port, addr='', registry=REGISTRY): + """Starts a WSGI server for prometheus metrics as a daemon thread.""" + app = make_wsgi_app(registry) + httpd = make_server(addr, port, app, handler_class=_SilentHandler) + t = threading.Thread(target=httpd.serve_forever) + t.daemon = True + t.start() + + +def generate_latest(registry=REGISTRY): + '''Returns the metrics from the registry in latest text format as a string.''' + + def sample_line(s): + if s.labels: + labelstr = '{{{0}}}'.format(','.join( + ['{0}="{1}"'.format( + k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) + for k, v in sorted(s.labels.items())])) + else: + labelstr = '' + timestamp = '' + if s.timestamp is not None: + # Convert to milliseconds. + timestamp = ' {0:d}'.format(int(float(s.timestamp) * 1000)) + return '{0}{1} {2}{3}\n'.format( + s.name, labelstr, floatToGoString(s.value), timestamp) + + output = [] + for metric in registry.collect(): + try: + mname = metric.name + mtype = metric.type + # Munging from OpenMetrics into Prometheus format. + if mtype == 'counter': + mname = mname + '_total' + elif mtype == 'info': + mname = mname + '_info' + mtype = 'gauge' + elif mtype == 'stateset': + mtype = 'gauge' + elif mtype == 'gaugehistogram': + # A gauge histogram is really a gauge, + # but this captures the strucutre better. 
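+                # OpenMetrics-only samples (e.g. _gsum/_gcount) are gathered
+                # into om_samples below and re-emitted as gauges at the end.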
+ mtype = 'histogram' + elif mtype == 'unknown': + mtype = 'untyped' + + output.append('# HELP {0} {1}\n'.format( + mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))) + output.append('# TYPE {0} {1}\n'.format(mname, mtype)) + + om_samples = {} + for s in metric.samples: + for suffix in ['_created', '_gsum', '_gcount']: + if s.name == metric.name + suffix: + # OpenMetrics specific sample, put in a gauge at the end. + om_samples.setdefault(suffix, []).append(sample_line(s)) + break + else: + output.append(sample_line(s)) + except Exception as exception: + exception.args = (exception.args or ('',)) + (metric,) + raise + + for suffix, lines in sorted(om_samples.items()): + output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix)) + output.extend(lines) + return ''.join(output).encode('utf-8') + + +def choose_encoder(accept_header): + accept_header = accept_header or '' + for accepted in accept_header.split(','): + if accepted.split(';')[0].strip() == 'application/openmetrics-text': + return (openmetrics.generate_latest, + openmetrics.CONTENT_TYPE_LATEST) + return (generate_latest, CONTENT_TYPE_LATEST) + + +class MetricsHandler(BaseHTTPRequestHandler): + """HTTP handler that gives metrics from ``REGISTRY``.""" + registry = REGISTRY + + def do_GET(self): + registry = self.registry + params = parse_qs(urlparse(self.path).query) + encoder, content_type = choose_encoder(self.headers.get('Accept')) + if 'name[]' in params: + registry = registry.restricted_registry(params['name[]']) + try: + output = encoder(registry) + except: + self.send_error(500, 'error generating metric output') + raise + self.send_response(200) + self.send_header('Content-Type', content_type) + self.end_headers() + self.wfile.write(output) + + def log_message(self, format, *args): + """Log nothing.""" + + @classmethod + def factory(cls, registry): + """Returns a dynamic MetricsHandler class tied + to the passed registry. + """ + # This implementation relies on MetricsHandler.registry + # (defined above and defaulted to REGISTRY). + + # As we have unicode_literals, we need to create a str() + # object for type(). + cls_name = str(cls.__name__) + MyMetricsHandler = type(cls_name, (cls, object), + {"registry": registry}) + return MyMetricsHandler + + +class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): + """Thread per request HTTP server.""" + # Make worker threads "fire and forget". Beginning with Python 3.7 this + # prevents a memory leak because ``ThreadingMixIn`` starts to gather all + # non-daemon threads in a list in order to join on them at server close. + # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the + # same as Python 3.7's ``ThreadingHTTPServer``. + daemon_threads = True + + +def start_http_server(port, addr='', registry=REGISTRY): + """Starts an HTTP server for prometheus metrics as a daemon thread""" + CustomMetricsHandler = MetricsHandler.factory(registry) + httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler) + t = threading.Thread(target=httpd.serve_forever) + t.daemon = True + t.start() + + +def write_to_textfile(path, registry): + '''Write metrics to the given path. + + This is intended for use with the Node exporter textfile collector. + The path must end in .prom for the textfile collector to process it.''' + tmppath = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident) + with open(tmppath, 'wb') as f: + f.write(generate_latest(registry)) + # rename(2) is atomic. 
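+    # Write-then-rename means collectors never read a half-written .prom file.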
+ os.rename(tmppath, path) + + +def default_handler(url, method, timeout, headers, data): + '''Default handler that implements HTTP/HTTPS connections. + + Used by the push_to_gateway functions. Can be re-used by other handlers.''' + def handle(): + request = Request(url, data=data) + request.get_method = lambda: method + for k, v in headers: + request.add_header(k, v) + resp = build_opener(HTTPHandler).open(request, timeout=timeout) + if resp.code >= 400: + raise IOError("error talking to pushgateway: {0} {1}".format( + resp.code, resp.msg)) + + return handle + + +def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None): + '''Handler that implements HTTP/HTTPS connections with Basic Auth. + + Sets auth headers using supplied 'username' and 'password', if set. + Used by the push_to_gateway functions. Can be re-used by other handlers.''' + def handle(): + '''Handler that implements HTTP Basic Auth. + ''' + if username is not None and password is not None: + auth_value = '{0}:{1}'.format(username, password).encode('utf-8') + auth_token = base64.b64encode(auth_value) + auth_header = b'Basic ' + auth_token + headers.append(['Authorization', auth_header]) + default_handler(url, method, timeout, headers, data)() + + return handle + + +def push_to_gateway( + gateway, job, registry, grouping_key=None, timeout=30, + handler=default_handler): + '''Push metrics to the given pushgateway. + + `gateway` the url for your push gateway. Either of the form + 'http://pushgateway.local', or 'pushgateway.local'. + Scheme defaults to 'http' if none is provided + `job` is the job label to be attached to all pushed metrics + `registry` is an instance of CollectorRegistry + `grouping_key` please see the pushgateway documentation for details. + Defaults to None + `timeout` is how long push will attempt to connect before giving up. + Defaults to 30s, can be set to None for no timeout. + `handler` is an optional function which can be provided to perform + requests to the 'gateway'. + Defaults to None, in which case an http or https request + will be carried out by a default handler. + If not None, the argument must be a function which accepts + the following arguments: + url, method, timeout, headers, and content + May be used to implement additional functionality not + supported by the built-in default handler (such as SSL + client certicates, and HTTP authentication mechanisms). + 'url' is the URL for the request, the 'gateway' argument + described earlier will form the basis of this URL. + 'method' is the HTTP method which should be used when + carrying out the request. + 'timeout' requests not successfully completed after this + many seconds should be aborted. If timeout is None, then + the handler should not set a timeout. + 'headers' is a list of ("header-name","header-value") tuples + which must be passed to the pushgateway in the form of HTTP + request headers. + The function should raise an exception (e.g. IOError) on + failure. + 'content' is the data which should be used to form the HTTP + Message Body. + + This overwrites all metrics with the same job and grouping_key. + This uses the PUT HTTP method.''' + _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler) + + +def pushadd_to_gateway( + gateway, job, registry, grouping_key=None, timeout=30, + handler=default_handler): + '''PushAdd metrics to the given pushgateway. + + `gateway` the url for your push gateway. Either of the form + 'http://pushgateway.local', or 'pushgateway.local'. 
+ Scheme defaults to 'http' if none is provided + `job` is the job label to be attached to all pushed metrics + `registry` is an instance of CollectorRegistry + `grouping_key` please see the pushgateway documentation for details. + Defaults to None + `timeout` is how long push will attempt to connect before giving up. + Defaults to 30s, can be set to None for no timeout. + `handler` is an optional function which can be provided to perform + requests to the 'gateway'. + Defaults to None, in which case an http or https request + will be carried out by a default handler. + See the 'prometheus_client.push_to_gateway' documentation + for implementation requirements. + + This replaces metrics with the same name, job and grouping_key. + This uses the POST HTTP method.''' + _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler) + + +def delete_from_gateway( + gateway, job, grouping_key=None, timeout=30, handler=default_handler): + '''Delete metrics from the given pushgateway. + + `gateway` the url for your push gateway. Either of the form + 'http://pushgateway.local', or 'pushgateway.local'. + Scheme defaults to 'http' if none is provided + `job` is the job label to be attached to all pushed metrics + `grouping_key` please see the pushgateway documentation for details. + Defaults to None + `timeout` is how long delete will attempt to connect before giving up. + Defaults to 30s, can be set to None for no timeout. + `handler` is an optional function which can be provided to perform + requests to the 'gateway'. + Defaults to None, in which case an http or https request + will be carried out by a default handler. + See the 'prometheus_client.push_to_gateway' documentation + for implementation requirements. + + This deletes metrics with the given job and grouping_key. 
+ This uses the DELETE HTTP method.''' + _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler) + + +def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler): + gateway_url = urlparse(gateway) + if not gateway_url.scheme or (PYTHON26_OR_OLDER and gateway_url.scheme not in ['http', 'https']): + gateway = 'http://{0}'.format(gateway) + url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job)) + + data = b'' + if method != 'DELETE': + data = generate_latest(registry) + + if grouping_key is None: + grouping_key = {} + url += ''.join( + '/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v))) + for k, v in sorted(grouping_key.items())) + + handler( + url=url, method=method, timeout=timeout, + headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data, + )() + + +def instance_ip_grouping_key(): + '''Grouping key with instance set to the IP Address of this host.''' + with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s: + s.connect(('localhost', 0)) + return {'instance': s.getsockname()[0]} diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/gc_collector.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/gc_collector.py new file mode 100644 index 00000000..de400d98 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/gc_collector.py @@ -0,0 +1,47 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +import gc + +from .metrics_core import CounterMetricFamily +from .registry import REGISTRY + + +class GCCollector(object): + """Collector for Garbage collection statistics.""" + + def __init__(self, registry=REGISTRY): + if not hasattr(gc, 'get_stats'): + return + registry.register(self) + + def collect(self): + collected = CounterMetricFamily( + 'python_gc_objects_collected', + 'Objects collected during gc', + labels=['generation'], + ) + uncollectable = CounterMetricFamily( + 'python_gc_objects_uncollectable', + 'Uncollectable object found during GC', + labels=['generation'], + ) + + collections = CounterMetricFamily( + 'python_gc_collections', + 'Number of times this generation was collected', + labels=['generation'], + ) + + for generation, stat in enumerate(gc.get_stats()): + generation = str(generation) + collected.add_metric([generation], value=stat['collected']) + uncollectable.add_metric([generation], value=stat['uncollectable']) + collections.add_metric([generation], value=stat['collections']) + + return [collected, uncollectable, collections] + + +GC_COLLECTOR = GCCollector() +"""Default GCCollector in default Registry REGISTRY.""" diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics.py new file mode 100644 index 00000000..162e2322 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics.py @@ -0,0 +1,644 @@ +import sys +from threading import Lock +import time +import types + +from . 
import values # retain this import style for testability +from .context_managers import ExceptionCounter, InprogressTracker, Timer +from .metrics_core import ( + Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, + RESERVED_METRIC_LABEL_NAME_RE, +) +from .registry import REGISTRY +from .utils import floatToGoString, INF + +if sys.version_info > (3,): + unicode = str + + +def _build_full_name(metric_type, name, namespace, subsystem, unit): + full_name = '' + if namespace: + full_name += namespace + '_' + if subsystem: + full_name += subsystem + '_' + full_name += name + if unit and not full_name.endswith("_" + unit): + full_name += "_" + unit + if unit and metric_type in ('info', 'stateset'): + raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name) + if metric_type == 'counter' and full_name.endswith('_total'): + full_name = full_name[:-6] # Munge to OpenMetrics. + return full_name + + +def _validate_labelnames(cls, labelnames): + labelnames = tuple(labelnames) + for l in labelnames: + if not METRIC_LABEL_NAME_RE.match(l): + raise ValueError('Invalid label metric name: ' + l) + if RESERVED_METRIC_LABEL_NAME_RE.match(l): + raise ValueError('Reserved label metric name: ' + l) + if l in cls._reserved_labelnames: + raise ValueError('Reserved label metric name: ' + l) + return labelnames + + +class MetricWrapperBase(object): + _type = None + _reserved_labelnames = () + + def _is_observable(self): + # Whether this metric is observable, i.e. + # * a metric without label names and values, or + # * the child of a labelled metric. + return not self._labelnames or (self._labelnames and self._labelvalues) + + def _is_parent(self): + return (self._labelnames and not self._labelvalues) + + def _get_metric(self): + return Metric(self._name, self._documentation, self._type, self._unit) + + def describe(self): + return [self._get_metric()] + + def collect(self): + metric = self._get_metric() + for suffix, labels, value in self._samples(): + metric.add_sample(self._name + suffix, labels, value) + return [metric] + + def __init__(self, + name, + documentation, + labelnames=(), + namespace='', + subsystem='', + unit='', + registry=REGISTRY, + labelvalues=None, + ): + self._name = _build_full_name(self._type, name, namespace, subsystem, unit) + self._labelnames = _validate_labelnames(self, labelnames) + self._labelvalues = tuple(labelvalues or ()) + self._kwargs = {} + self._documentation = documentation + self._unit = unit + + if not METRIC_NAME_RE.match(self._name): + raise ValueError('Invalid metric name: ' + self._name) + + if self._is_parent(): + # Prepare the fields needed for child metrics. + self._lock = Lock() + self._metrics = {} + + if self._is_observable(): + self._metric_init() + + if not self._labelvalues: + # Register the multi-wrapper parent metric, or if a label-less metric, the whole shebang. + if registry: + registry.register(self) + + def labels(self, *labelvalues, **labelkwargs): + '''Return the child for the given labelset. + + All metrics can have labels, allowing grouping of related time series. 
+ Taking a counter as an example: + + from prometheus_client import Counter + + c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint']) + c.labels('get', '/').inc() + c.labels('post', '/submit').inc() + + Labels can also be provided as keyword arguments: + + from prometheus_client import Counter + + c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint']) + c.labels(method='get', endpoint='/').inc() + c.labels(method='post', endpoint='/submit').inc() + + See the best practices on [naming](http://prometheus.io/docs/practices/naming/) + and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels). + ''' + if not self._labelnames: + raise ValueError('No label names were set when constructing %s' % self) + + if self._labelvalues: + raise ValueError('%s already has labels set (%s); can not chain calls to .labels()' % ( + self, + dict(zip(self._labelnames, self._labelvalues)) + )) + + if labelvalues and labelkwargs: + raise ValueError("Can't pass both *args and **kwargs") + + if labelkwargs: + if sorted(labelkwargs) != sorted(self._labelnames): + raise ValueError('Incorrect label names') + labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames) + else: + if len(labelvalues) != len(self._labelnames): + raise ValueError('Incorrect label count') + labelvalues = tuple(unicode(l) for l in labelvalues) + with self._lock: + if labelvalues not in self._metrics: + self._metrics[labelvalues] = self.__class__( + self._name, + documentation=self._documentation, + labelnames=self._labelnames, + unit=self._unit, + labelvalues=labelvalues, + **self._kwargs + ) + return self._metrics[labelvalues] + + def remove(self, *labelvalues): + if not self._labelnames: + raise ValueError('No label names were set when constructing %s' % self) + + '''Remove the given labelset from the metric.''' + if len(labelvalues) != len(self._labelnames): + raise ValueError('Incorrect label count (expected %d, got %s)' % (len(self._labelnames), labelvalues)) + labelvalues = tuple(unicode(l) for l in labelvalues) + with self._lock: + del self._metrics[labelvalues] + + def _samples(self): + if self._is_parent(): + return self._multi_samples() + else: + return self._child_samples() + + def _multi_samples(self): + with self._lock: + metrics = self._metrics.copy() + for labels, metric in metrics.items(): + series_labels = list(zip(self._labelnames, labels)) + for suffix, sample_labels, value in metric._samples(): + yield (suffix, dict(series_labels + list(sample_labels.items())), value) + + def _child_samples(self): # pragma: no cover + raise NotImplementedError('_child_samples() must be implemented by %r' % self) + + def _metric_init(self): # pragma: no cover + """ + Initialize the metric object as a child, i.e. when it has labels (if any) set. + + This is factored as a separate function to allow for deferred initialization. + """ + raise NotImplementedError('_metric_init() must be implemented by %r' % self) + + +class Counter(MetricWrapperBase): + '''A Counter tracks counts of events or running totals. + + Example use cases for Counters: + - Number of requests processed + - Number of items that were inserted into a queue + - Total amount of data that a system has processed + + Counters can only go up (and be reset when the process restarts). If your use case can go down, + you should use a Gauge instead. 
+ + An example for a Counter: + + from prometheus_client import Counter + + c = Counter('my_failures_total', 'Description of counter') + c.inc() # Increment by 1 + c.inc(1.6) # Increment by given value + + There are utilities to count exceptions raised: + + @c.count_exceptions() + def f(): + pass + + with c.count_exceptions(): + pass + + # Count only one type of exception + with c.count_exceptions(ValueError): + pass + ''' + _type = 'counter' + + def _metric_init(self): + self._value = values.ValueClass(self._type, self._name, self._name + '_total', self._labelnames, self._labelvalues) + self._created = time.time() + + def inc(self, amount=1): + '''Increment counter by the given amount.''' + if amount < 0: + raise ValueError('Counters can only be incremented by non-negative amounts.') + self._value.inc(amount) + + def count_exceptions(self, exception=Exception): + '''Count exceptions in a block of code or function. + + Can be used as a function decorator or context manager. + Increments the counter when an exception of the given + type is raised up out of the code. + ''' + return ExceptionCounter(self, exception) + + def _child_samples(self): + return ( + ('_total', {}, self._value.get()), + ('_created', {}, self._created), + ) + + +class Gauge(MetricWrapperBase): + '''Gauge metric, to report instantaneous values. + + Examples of Gauges include: + - Inprogress requests + - Number of items in a queue + - Free memory + - Total memory + - Temperature + + Gauges can go both up and down. + + from prometheus_client import Gauge + + g = Gauge('my_inprogress_requests', 'Description of gauge') + g.inc() # Increment by 1 + g.dec(10) # Decrement by given value + g.set(4.2) # Set to a given value + + There are utilities for common use cases: + + g.set_to_current_time() # Set to current unixtime + + # Increment when entered, decrement when exited. + @g.track_inprogress() + def f(): + pass + + with g.track_inprogress(): + pass + + A Gauge can also take its value from a callback: + + d = Gauge('data_objects', 'Number of objects') + my_dict = {} + d.set_function(lambda: len(my_dict)) + ''' + _type = 'gauge' + _MULTIPROC_MODES = frozenset(('min', 'max', 'livesum', 'liveall', 'all')) + + + def __init__(self, + name, + documentation, + labelnames=(), + namespace='', + subsystem='', + unit='', + registry=REGISTRY, + labelvalues=None, + multiprocess_mode='all', + ): + self._multiprocess_mode = multiprocess_mode + if multiprocess_mode not in self._MULTIPROC_MODES: + raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode) + super(Gauge, self).__init__( + name=name, + documentation=documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + registry=registry, + labelvalues=labelvalues, + ) + self._kwargs['multiprocess_mode'] = self._multiprocess_mode + + def _metric_init(self): + self._value = values.ValueClass( + self._type, self._name, self._name, self._labelnames, self._labelvalues, + multiprocess_mode=self._multiprocess_mode + ) + + def inc(self, amount=1): + '''Increment gauge by the given amount.''' + self._value.inc(amount) + + def dec(self, amount=1): + '''Decrement gauge by the given amount.''' + self._value.inc(-amount) + + def set(self, value): + '''Set gauge to the given value.''' + self._value.set(float(value)) + + def set_to_current_time(self): + '''Set gauge to the current unixtime.''' + self.set(time.time()) + + def track_inprogress(self): + '''Track inprogress blocks of code or functions. 
+ + Can be used as a function decorator or context manager. + Increments the gauge when the code is entered, + and decrements when it is exited. + ''' + return InprogressTracker(self) + + def time(self): + '''Time a block of code or function, and set the duration in seconds. + + Can be used as a function decorator or context manager. + ''' + return Timer(self.set) + + def set_function(self, f): + '''Call the provided function to return the Gauge value. + + The function must return a float, and may be called from + multiple threads. All other methods of the Gauge become NOOPs. + ''' + + def samples(self): + return (('', {}, float(f())),) + + self._child_samples = types.MethodType(samples, self) + + def _child_samples(self): + return (('', {}, self._value.get()),) + + +class Summary(MetricWrapperBase): + '''A Summary tracks the size and number of events. + + Example use cases for Summaries: + - Response latency + - Request size + + Example for a Summary: + + from prometheus_client import Summary + + s = Summary('request_size_bytes', 'Request size (bytes)') + s.observe(512) # Observe 512 (bytes) + + Example for a Summary using time: + + from prometheus_client import Summary + + REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)') + + @REQUEST_TIME.time() + def create_response(request): + """A dummy function""" + time.sleep(1) + + Example for using the same Summary object as a context manager: + + with REQUEST_TIME.time(): + pass # Logic to be timed + ''' + _type = 'summary' + _reserved_labelnames = ['quantile'] + + def _metric_init(self): + self._count = values.ValueClass(self._type, self._name, self._name + '_count', self._labelnames, self._labelvalues) + self._sum = values.ValueClass(self._type, self._name, self._name + '_sum', self._labelnames, self._labelvalues) + self._created = time.time() + + def observe(self, amount): + '''Observe the given amount.''' + self._count.inc(1) + self._sum.inc(amount) + + def time(self): + '''Time a block of code or function, and observe the duration in seconds. + + Can be used as a function decorator or context manager. + ''' + return Timer(self.observe) + + def _child_samples(self): + return ( + ('_count', {}, self._count.get()), + ('_sum', {}, self._sum.get()), + ('_created', {}, self._created)) + + +class Histogram(MetricWrapperBase): + '''A Histogram tracks the size and number of events in buckets. + + You can use Histograms for aggregatable calculation of quantiles. + + Example use cases: + - Response latency + - Request size + + Example for a Histogram: + + from prometheus_client import Histogram + + h = Histogram('request_size_bytes', 'Request size (bytes)') + h.observe(512) # Observe 512 (bytes) + + Example for a Histogram using time: + + from prometheus_client import Histogram + + REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)') + + @REQUEST_TIME.time() + def create_response(request): + """A dummy function""" + time.sleep(1) + + Example of using the same Histogram object as a context manager: + + with REQUEST_TIME.time(): + pass # Logic to be timed + + The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds. + They can be overridden by passing `buckets` keyword argument to `Histogram`. 
+ ''' + _type = 'histogram' + _reserved_labelnames = ['le'] + DEFAULT_BUCKETS = (.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, INF) + + def __init__(self, + name, + documentation, + labelnames=(), + namespace='', + subsystem='', + unit='', + registry=REGISTRY, + labelvalues=None, + buckets=DEFAULT_BUCKETS, + ): + self._prepare_buckets(buckets) + super(Histogram, self).__init__( + name=name, + documentation=documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + registry=registry, + labelvalues=labelvalues, + ) + self._kwargs['buckets'] = buckets + + def _prepare_buckets(self, buckets): + buckets = [float(b) for b in buckets] + if buckets != sorted(buckets): + # This is probably an error on the part of the user, + # so raise rather than sorting for them. + raise ValueError('Buckets not in sorted order') + if buckets and buckets[-1] != INF: + buckets.append(INF) + if len(buckets) < 2: + raise ValueError('Must have at least two buckets') + self._upper_bounds = buckets + + def _metric_init(self): + self._buckets = [] + self._created = time.time() + bucket_labelnames = self._labelnames + ('le',) + self._sum = values.ValueClass(self._type, self._name, self._name + '_sum', self._labelnames, self._labelvalues) + for b in self._upper_bounds: + self._buckets.append(values.ValueClass( + self._type, + self._name, + self._name + '_bucket', + bucket_labelnames, + self._labelvalues + (floatToGoString(b),)) + ) + + def observe(self, amount): + '''Observe the given amount.''' + self._sum.inc(amount) + for i, bound in enumerate(self._upper_bounds): + if amount <= bound: + self._buckets[i].inc(1) + break + + def time(self): + '''Time a block of code or function, and observe the duration in seconds. + + Can be used as a function decorator or context manager. + ''' + return Timer(self.observe) + + def _child_samples(self): + samples = [] + acc = 0 + for i, bound in enumerate(self._upper_bounds): + acc += self._buckets[i].get() + samples.append(('_bucket', {'le': floatToGoString(bound)}, acc)) + samples.append(('_count', {}, acc)) + samples.append(('_sum', {}, self._sum.get())) + samples.append(('_created', {}, self._created)) + return tuple(samples) + + +class Info(MetricWrapperBase): + '''Info metric, key-value pairs. + + Examples of Info include: + - Build information + - Version information + - Potential target metadata + + Example usage: + from prometheus_client import Info + + i = Info('my_build', 'Description of info') + i.info({'version': '1.2.3', 'buildhost': 'foo@bar'}) + + Info metrics do not work in multiprocess mode. + ''' + _type = 'info' + + def _metric_init(self): + self._labelname_set = set(self._labelnames) + self._lock = Lock() + self._value = {} + + def info(self, val): + '''Set info metric.''' + if self._labelname_set.intersection(val.keys()): + raise ValueError('Overlapping labels for Info metric, metric: %s child: %s' % ( + self._labelnames, val)) + with self._lock: + self._value = dict(val) + + def _child_samples(self): + with self._lock: + return (('_info', self._value, 1.0,),) + + +class Enum(MetricWrapperBase): + '''Enum metric, which of a set of states is true. + + Example usage: + from prometheus_client import Enum + + e = Enum('task_state', 'Description of enum', + states=['starting', 'running', 'stopped']) + e.state('running') + + The first listed state will be the default. + Enum metrics do not work in multiprocess mode. 
+ ''' + _type = 'stateset' + + def __init__(self, + name, + documentation, + labelnames=(), + namespace='', + subsystem='', + unit='', + registry=REGISTRY, + labelvalues=None, + states=None, + ): + super(Enum, self).__init__( + name=name, + documentation=documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + registry=registry, + labelvalues=labelvalues, + ) + if name in labelnames: + raise ValueError('Overlapping labels for Enum metric: %s' % (name,)) + if not states: + raise ValueError('No states provided for Enum metric: %s' % (name,)) + self._kwargs['states'] = self._states = states + + def _metric_init(self): + self._value = 0 + self._lock = Lock() + + def state(self, state): + '''Set enum metric state.''' + with self._lock: + self._value = self._states.index(state) + + def _child_samples(self): + with self._lock: + return [ + ('', {self._name: s}, 1 if i == self._value else 0,) + for i, s + in enumerate(self._states) + ] diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics_core.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics_core.py new file mode 100644 index 00000000..fd0989d0 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/metrics_core.py @@ -0,0 +1,325 @@ +import re + +from .samples import Sample + +METRIC_TYPES = ( + 'counter', 'gauge', 'summary', 'histogram', + 'gaugehistogram', 'unknown', 'info', 'stateset', +) +METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$') +METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') +RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$') + + +class Metric(object): + '''A single metric family and its samples. + + This is intended only for internal use by the instrumentation client. + + Custom collectors should use GaugeMetricFamily, CounterMetricFamily + and SummaryMetricFamily instead. + ''' + + def __init__(self, name, documentation, typ, unit=''): + if unit and not name.endswith("_" + unit): + name += "_" + unit + if not METRIC_NAME_RE.match(name): + raise ValueError('Invalid metric name: ' + name) + self.name = name + self.documentation = documentation + self.unit = unit + if typ == 'untyped': + typ = 'unknown' + if typ not in METRIC_TYPES: + raise ValueError('Invalid metric type: ' + typ) + self.type = typ + self.samples = [] + + def add_sample(self, name, labels, value, timestamp=None, exemplar=None): + '''Add a sample to the metric. + + Internal-only, do not use.''' + self.samples.append(Sample(name, labels, value, timestamp, exemplar)) + + def __eq__(self, other): + return (isinstance(other, Metric) and + self.name == other.name and + self.documentation == other.documentation and + self.type == other.type and + self.unit == other.unit and + self.samples == other.samples) + + def __repr__(self): + return "Metric(%s, %s, %s, %s, %s)" % ( + self.name, + self.documentation, + self.type, + self.unit, + self.samples, + ) + + +class UnknownMetricFamily(Metric): + '''A single unknwon metric and its samples. + For use by custom collectors. 
+ ''' + + def __init__(self, name, documentation, value=None, labels=None, unit=''): + Metric.__init__(self, name, documentation, 'unknown', unit) + if labels is not None and value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if value is not None: + self.add_metric([], value) + + def add_metric(self, labels, value, timestamp=None): + '''Add a metric to the metric family. + Args: + labels: A list of label values + value: The value of the metric. + ''' + self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp)) + + +# For backward compatibility. +UntypedMetricFamily = UnknownMetricFamily + + +class CounterMetricFamily(Metric): + '''A single counter and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, value=None, labels=None, created=None, unit=''): + # Glue code for pre-OpenMetrics metrics. + if name.endswith('_total'): + name = name[:-6] + Metric.__init__(self, name, documentation, 'counter', unit) + if labels is not None and value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if value is not None: + self.add_metric([], value, created) + + def add_metric(self, labels, value, created=None, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + value: The value of the metric + created: Optional unix timestamp the child was created at. + ''' + self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp)) + if created is not None: + self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp)) + + +class GaugeMetricFamily(Metric): + '''A single gauge and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, value=None, labels=None, unit=''): + Metric.__init__(self, name, documentation, 'gauge', unit) + if labels is not None and value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if value is not None: + self.add_metric([], value) + + def add_metric(self, labels, value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + value: A float + ''' + self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp)) + + +class SummaryMetricFamily(Metric): + '''A single summary and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None, unit=''): + Metric.__init__(self, name, documentation, 'summary', unit) + if (sum_value is None) != (count_value is None): + raise ValueError('count_value and sum_value must be provided together.') + if labels is not None and count_value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if count_value is not None: + self.add_metric([], count_value, sum_value) + + def add_metric(self, labels, count_value, sum_value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + count_value: The count value of the metric. + sum_value: The sum value of the metric. 
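+
+        Example (values are illustrative):
+
+            s = SummaryMetricFamily('my_summary', 'Documentation',
+                                    labels=['handler'])
+            s.add_metric(['/'], count_value=3, sum_value=1.2)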
+ ''' + self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp)) + self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp)) + + +class HistogramMetricFamily(Metric): + '''A single histogram and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None, unit=''): + Metric.__init__(self, name, documentation, 'histogram', unit) + if (sum_value is None) != (buckets is None): + raise ValueError('buckets and sum_value must be provided together.') + if labels is not None and buckets is not None: + raise ValueError('Can only specify at most one of buckets and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if buckets is not None: + self.add_metric([], buckets, sum_value) + + def add_metric(self, labels, buckets, sum_value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + buckets: A list of lists. + Each inner list can be a pair of bucket name and value, + or a triple of bucket name, value, and exemplar. + The buckets must be sorted, and +Inf present. + sum_value: The sum value of the metric. + ''' + for b in buckets: + bucket, value = b[:2] + exemplar = None + if len(b) == 3: + exemplar = b[2] + self.samples.append(Sample( + self.name + '_bucket', + dict(list(zip(self._labelnames, labels)) + [('le', bucket)]), + value, + timestamp, + exemplar, + )) + # +Inf is last and provides the count value. + self.samples.extend([ + Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp), + Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp), + ]) + + +class GaugeHistogramMetricFamily(Metric): + '''A single gauge histogram and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, buckets=None, gsum_value=None, labels=None, unit=''): + Metric.__init__(self, name, documentation, 'gaugehistogram', unit) + if labels is not None and buckets is not None: + raise ValueError('Can only specify at most one of buckets and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if buckets is not None: + self.add_metric([], buckets, gsum_value) + + def add_metric(self, labels, buckets, gsum_value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + buckets: A list of pairs of bucket names and values. + The buckets must be sorted, and +Inf present. + gsum_value: The sum value of the metric. + ''' + for bucket, value in buckets: + self.samples.append(Sample( + self.name + '_bucket', + dict(list(zip(self._labelnames, labels)) + [('le', bucket)]), + value, timestamp)) + # +Inf is last and provides the count value. + self.samples.extend([ + Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp), + Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp), + ]) + + +class InfoMetricFamily(Metric): + '''A single info and its samples. + + For use by custom collectors. 
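+
+    Example usage (values are illustrative):
+
+        i = InfoMetricFamily('my_build', 'Documentation',
+                             value={'version': '1.2.3'})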
+ ''' + + def __init__(self, name, documentation, value=None, labels=None): + Metric.__init__(self, name, documentation, 'info') + if labels is not None and value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if value is not None: + self.add_metric([], value) + + def add_metric(self, labels, value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + value: A dict of labels + ''' + self.samples.append(Sample( + self.name + '_info', + dict(dict(zip(self._labelnames, labels)), **value), + 1, + timestamp, + )) + + +class StateSetMetricFamily(Metric): + '''A single stateset and its samples. + + For use by custom collectors. + ''' + + def __init__(self, name, documentation, value=None, labels=None): + Metric.__init__(self, name, documentation, 'stateset') + if labels is not None and value is not None: + raise ValueError('Can only specify at most one of value and labels.') + if labels is None: + labels = [] + self._labelnames = tuple(labels) + if value is not None: + self.add_metric([], value) + + def add_metric(self, labels, value, timestamp=None): + '''Add a metric to the metric family. + + Args: + labels: A list of label values + value: A dict of string state names to booleans + ''' + labels = tuple(labels) + for state, enabled in sorted(value.items()): + v = (1 if enabled else 0) + self.samples.append(Sample( + self.name, + dict(zip(self._labelnames + (self.name,), labels + (state,))), + v, + timestamp, + )) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/mmap_dict.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/mmap_dict.py new file mode 100644 index 00000000..962d7593 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/mmap_dict.py @@ -0,0 +1,129 @@ +import json +import mmap +import os +import struct + +_INITIAL_MMAP_SIZE = 1 << 20 +_pack_integer_func = struct.Struct(b'i').pack +_pack_double_func = struct.Struct(b'd').pack +_unpack_integer = struct.Struct(b'i').unpack_from +_unpack_double = struct.Struct(b'd').unpack_from + + +# struct.pack_into has atomicity issues because it will temporarily write 0 into +# the mmap, resulting in false reads to 0 when experiencing a lot of writes. +# Using direct assignment solves this issue. + +def _pack_double(data, pos, value): + data[pos:pos + 8] = _pack_double_func(value) + + +def _pack_integer(data, pos, value): + data[pos:pos + 4] = _pack_integer_func(value) + + + +class MmapedDict(object): + """A dict of doubles, backed by an mmapped file. + + The file starts with a 4 byte int, indicating how much of it is used. + Then 4 bytes of padding. + There's then a number of entries, consisting of a 4 byte int which is the + size of the next field, a utf-8 encoded string key, padding to a 8 byte + alignment, and then a 8 byte float which is the value. + + Not thread safe. 
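+
+    A usage sketch (the file name is illustrative; keys are the JSON
+    strings produced by mmap_key() at the bottom of this module):
+
+        d = MmapedDict('/tmp/counter_1234.db')
+        d.write_value(mmap_key('requests', 'requests_total', (), ()), 1.0)
+        d.close()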
+ """ + + def __init__(self, filename, read_mode=False): + self._f = open(filename, 'rb' if read_mode else 'a+b') + self._fname = filename + if os.fstat(self._f.fileno()).st_size == 0: + self._f.truncate(_INITIAL_MMAP_SIZE) + self._capacity = os.fstat(self._f.fileno()).st_size + self._m = mmap.mmap(self._f.fileno(), self._capacity, access=mmap.ACCESS_READ if read_mode else mmap.ACCESS_WRITE) + + self._positions = {} + self._used = _unpack_integer(self._m, 0)[0] + if self._used == 0: + self._used = 8 + _pack_integer(self._m, 0, self._used) + else: + if not read_mode: + for key, _, pos in self._read_all_values(): + self._positions[key] = pos + + def _init_value(self, key): + """Initialize a value. Lock must be held by caller.""" + encoded = key.encode('utf-8') + # Pad to be 8-byte aligned. + padded = encoded + (b' ' * (8 - (len(encoded) + 4) % 8)) + value = struct.pack('i{0}sd'.format(len(padded)).encode(), len(encoded), padded, 0.0) + while self._used + len(value) > self._capacity: + self._capacity *= 2 + self._f.truncate(self._capacity) + self._m = mmap.mmap(self._f.fileno(), self._capacity) + self._m[self._used:self._used + len(value)] = value + + # Update how much space we've used. + self._used += len(value) + _pack_integer(self._m, 0, self._used) + self._positions[key] = self._used - 8 + + def _read_all_values(self): + """Yield (key, value, pos). No locking is performed.""" + + pos = 8 + + # cache variables to local ones and prevent attributes lookup + # on every loop iteration + used = self._used + data = self._m + unpack_from = struct.unpack_from + + while pos < used: + encoded_len = _unpack_integer(data, pos)[0] + # check we are not reading beyond bounds + if encoded_len + pos > used: + msg = 'Read beyond file size detected, %s is corrupted.' + raise RuntimeError(msg % self._fname) + pos += 4 + encoded = unpack_from(('%ss' % encoded_len).encode(), data, pos)[0] + padded_len = encoded_len + (8 - (encoded_len + 4) % 8) + pos += padded_len + value = _unpack_double(data, pos)[0] + yield encoded.decode('utf-8'), value, pos + pos += 8 + + def read_all_values(self): + """Yield (key, value, pos). 
No locking is performed.""" + for k, v, _ in self._read_all_values(): + yield k, v + + def read_value(self, key): + if key not in self._positions: + self._init_value(key) + pos = self._positions[key] + # We assume that reading from an 8 byte aligned value is atomic + return _unpack_double(self._m, pos)[0] + + def write_value(self, key, value): + if key not in self._positions: + self._init_value(key) + pos = self._positions[key] + # We assume that writing to an 8 byte aligned value is atomic + _pack_double(self._m, pos, value) + + def close(self): + if self._f: + self._m.close() + self._m = None + self._f.close() + self._f = None + + +def mmap_key(metric_name, name, labelnames, labelvalues): + """Format a key for use in the mmap file.""" + # ensure labels are in consistent order for identity + labels = dict(zip(labelnames, labelvalues)) + return json.dumps([metric_name, name, labels], sort_keys=True) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/multiprocess.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/multiprocess.py new file mode 100644 index 00000000..30ed312c --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/multiprocess.py @@ -0,0 +1,127 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +from collections import defaultdict +import glob +import json +import os + +from .metrics_core import Metric +from .mmap_dict import MmapedDict +from .samples import Sample +from .utils import floatToGoString + + +class MultiProcessCollector(object): + """Collector for files for multi-process mode.""" + + def __init__(self, registry, path=None): + if path is None: + path = os.environ.get('prometheus_multiproc_dir') + if not path or not os.path.isdir(path): + raise ValueError('env prometheus_multiproc_dir is not set or not a directory') + self._path = path + if registry: + registry.register(self) + + def collect(self): + files = glob.glob(os.path.join(self._path, '*.db')) + return self.merge(files, accumulate=True) + + def merge(self, files, accumulate=True): + """Merge metrics from given mmap files. + + By default, histograms are accumulated, as per prometheus wire format. + But if writing the merged data back to mmap files, use + accumulate=False to avoid compound accumulation. + """ + metrics = {} + for f in files: + parts = os.path.basename(f).split('_') + typ = parts[0] + d = MmapedDict(f, read_mode=True) + for key, value in d.read_all_values(): + metric_name, name, labels = json.loads(key) + labels_key = tuple(sorted(labels.items())) + + metric = metrics.get(metric_name) + if metric is None: + metric = Metric(metric_name, 'Multiprocess metric', typ) + metrics[metric_name] = metric + + if typ == 'gauge': + pid = parts[2][:-3] + metric._multiprocess_mode = parts[1] + metric.add_sample(name, labels_key + (('pid', pid), ), value) + else: + # The duplicates and labels are fixed in the next for. 
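+                    # (Non-gauge samples from different processes share
+                    # a key here and are summed in the aggregation pass
+                    # below.)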
+ metric.add_sample(name, labels_key, value) + d.close() + + for metric in metrics.values(): + samples = defaultdict(float) + buckets = {} + for s in metric.samples: + name, labels, value = s.name, s.labels, s.value + if metric.type == 'gauge': + without_pid = tuple(l for l in labels if l[0] != 'pid') + if metric._multiprocess_mode == 'min': + current = samples.setdefault((name, without_pid), value) + if value < current: + samples[(s.name, without_pid)] = value + elif metric._multiprocess_mode == 'max': + current = samples.setdefault((name, without_pid), value) + if value > current: + samples[(s.name, without_pid)] = value + elif metric._multiprocess_mode == 'livesum': + samples[(name, without_pid)] += value + else: # all/liveall + samples[(name, labels)] = value + + elif metric.type == 'histogram': + bucket = tuple(float(l[1]) for l in labels if l[0] == 'le') + if bucket: + # _bucket + without_le = tuple(l for l in labels if l[0] != 'le') + buckets.setdefault(without_le, {}) + buckets[without_le].setdefault(bucket[0], 0.0) + buckets[without_le][bucket[0]] += value + else: + # _sum/_count + samples[(s.name, labels)] += value + + else: + # Counter and Summary. + samples[(s.name, labels)] += value + + # Accumulate bucket values. + if metric.type == 'histogram': + for labels, values in buckets.items(): + acc = 0.0 + for bucket, value in sorted(values.items()): + sample_key = ( + metric.name + '_bucket', + labels + (('le', floatToGoString(bucket)), ), + ) + if accumulate: + acc += value + samples[sample_key] = acc + else: + samples[sample_key] = value + if accumulate: + samples[(metric.name + '_count', labels)] = acc + + # Convert to correct sample format. + metric.samples = [Sample(name, dict(labels), value) for (name, labels), value in samples.items()] + return metrics.values() + + +def mark_process_dead(pid, path=None): + """Do bookkeeping for when one process dies in a multi-process setup.""" + if path is None: + path = os.environ.get('prometheus_multiproc_dir') + for f in glob.glob(os.path.join(path, 'gauge_livesum_{0}.db'.format(pid))): + os.remove(f) + for f in glob.glob(os.path.join(path, 'gauge_liveall_{0}.db'.format(pid))): + os.remove(f) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/__init__.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/exposition.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/exposition.py new file mode 100644 index 00000000..2f39c140 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/exposition.py @@ -0,0 +1,65 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +from ..utils import floatToGoString + +CONTENT_TYPE_LATEST = str('application/openmetrics-text; version=0.0.1; charset=utf-8') +'''Content type of the latest OpenMetrics text format''' + + +def generate_latest(registry): + '''Returns the metrics from the registry in latest text format as a string.''' + output = [] + for metric in registry.collect(): + try: + mname = metric.name + output.append('# HELP {0} {1}\n'.format( + mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))) + output.append('# TYPE {0} {1}\n'.format(mname, metric.type)) + if 
metric.unit: + output.append('# UNIT {0} {1}\n'.format(mname, metric.unit)) + for s in metric.samples: + if s.labels: + labelstr = '{{{0}}}'.format(','.join( + ['{0}="{1}"'.format( + k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) + for k, v in sorted(s.labels.items())])) + else: + labelstr = '' + if s.exemplar: + if metric.type not in ('histogram', 'gaugehistogram') or not s.name.endswith('_bucket'): + raise ValueError("Metric {0} has exemplars, but is not a histogram bucket".format(metric.name)) + labels = '{{{0}}}'.format(','.join( + ['{0}="{1}"'.format( + k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) + for k, v in sorted(s.exemplar.labels.items())])) + if s.exemplar.timestamp is not None: + exemplarstr = ' # {0} {1} {2}'.format( + labels, + floatToGoString(s.exemplar.value), + s.exemplar.timestamp, + ) + else: + exemplarstr = ' # {0} {1}'.format( + labels, + floatToGoString(s.exemplar.value), + ) + else: + exemplarstr = '' + timestamp = '' + if s.timestamp is not None: + timestamp = ' {0}'.format(s.timestamp) + output.append('{0}{1} {2}{3}{4}\n'.format( + s.name, + labelstr, + floatToGoString(s.value), + timestamp, + exemplarstr, + )) + except Exception as exception: + exception.args = (exception.args or ('',)) + (metric,) + raise + + output.append('# EOF\n') + return ''.join(output).encode('utf-8') diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/parser.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/parser.py new file mode 100644 index 00000000..fd35f9fe --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/openmetrics/parser.py @@ -0,0 +1,454 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +import math + +from ..metrics_core import Metric, METRIC_LABEL_NAME_RE +from ..samples import Exemplar, Sample, Timestamp +from ..utils import floatToGoString + +try: + import StringIO +except ImportError: + # Python 3 + import io as StringIO + + + +def text_string_to_metric_families(text): + """Parse Openmetrics text format from a unicode string. + + See text_fd_to_metric_families. + """ + for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)): + yield metric_family + + +def _unescape_help(text): + result = [] + slash = False + + for char in text: + if slash: + if char == '\\': + result.append('\\') + elif char == '"': + result.append('"') + elif char == 'n': + result.append('\n') + else: + result.append('\\' + char) + slash = False + else: + if char == '\\': + slash = True + else: + result.append(char) + + if slash: + result.append('\\') + + return ''.join(result) + + +def _parse_value(value): + value = ''.join(value) + if value != value.strip(): + raise ValueError("Invalid value: {0!r}".format(value)) + try: + return int(value) + except ValueError: + return float(value) + + +def _parse_timestamp(timestamp): + timestamp = ''.join(timestamp) + if not timestamp: + return None + if timestamp != timestamp.strip(): + raise ValueError("Invalid timestamp: {0!r}".format(timestamp)) + try: + # Simple int. + return Timestamp(int(timestamp), 0) + except ValueError: + try: + # aaaa.bbbb. Nanosecond resolution supported. + parts = timestamp.split('.', 1) + return Timestamp(int(parts[0]), int(parts[1][:9].ljust(9, "0"))) + except ValueError: + # Float. 
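+            # (e.g. '1.5e3'; NaN and infinities are rejected just below.)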
+ ts = float(timestamp) + if math.isnan(ts) or math.isinf(ts): + raise ValueError("Invalid timestamp: {0!r}".format(timestamp)) + return ts + + +def _parse_labels(it, text): + # The { has already been parsed. + state = 'startoflabelname' + labelname = [] + labelvalue = [] + labels = {} + + for char in it: + if state == 'startoflabelname': + if char == '}': + state = 'endoflabels' + else: + state = 'labelname' + labelname.append(char) + elif state == 'labelname': + if char == '=': + state = 'labelvaluequote' + else: + labelname.append(char) + elif state == 'labelvaluequote': + if char == '"': + state = 'labelvalue' + else: + raise ValueError("Invalid line: " + text) + elif state == 'labelvalue': + if char == '\\': + state = 'labelvalueslash' + elif char == '"': + if not METRIC_LABEL_NAME_RE.match(''.join(labelname)): + raise ValueError("Invalid line: " + text) + labels[''.join(labelname)] = ''.join(labelvalue) + labelname = [] + labelvalue = [] + state = 'endoflabelvalue' + else: + labelvalue.append(char) + elif state == 'endoflabelvalue': + if char == ',': + state = 'labelname' + elif char == '}': + state = 'endoflabels' + else: + raise ValueError("Invalid line: " + text) + elif state == 'labelvalueslash': + state = 'labelvalue' + if char == '\\': + labelvalue.append('\\') + elif char == 'n': + labelvalue.append('\n') + elif char == '"': + labelvalue.append('"') + else: + labelvalue.append('\\' + char) + elif state == 'endoflabels': + if char == ' ': + break + else: + raise ValueError("Invalid line: " + text) + return labels + + +def _parse_sample(text): + name = [] + value = [] + timestamp = [] + labels = {} + exemplar_value = [] + exemplar_timestamp = [] + exemplar_labels = None + + state = 'name' + + it = iter(text) + for char in it: + if state == 'name': + if char == '{': + labels = _parse_labels(it, text) + # Space has already been parsed. + state = 'value' + elif char == ' ': + state = 'value' + else: + name.append(char) + elif state == 'value': + if char == ' ': + state = 'timestamp' + else: + value.append(char) + elif state == 'timestamp': + if char == '#' and not timestamp: + state = 'exemplarspace' + elif char == ' ': + state = 'exemplarhash' + else: + timestamp.append(char) + elif state == 'exemplarhash': + if char == '#': + state = 'exemplarspace' + else: + raise ValueError("Invalid line: " + text) + elif state == 'exemplarspace': + if char == ' ': + state = 'exemplarstartoflabels' + else: + raise ValueError("Invalid line: " + text) + elif state == 'exemplarstartoflabels': + if char == '{': + exemplar_labels = _parse_labels(it, text) + # Space has already been parsed. + state = 'exemplarvalue' + else: + raise ValueError("Invalid line: " + text) + elif state == 'exemplarvalue': + if char == ' ': + state = 'exemplartimestamp' + else: + exemplar_value.append(char) + elif state == 'exemplartimestamp': + exemplar_timestamp.append(char) + + # Trailing space after value. + if state == 'timestamp' and not timestamp: + raise ValueError("Invalid line: " + text) + + # Trailing space after value. + if state == 'exemplartimestamp' and not exemplar_timestamp: + raise ValueError("Invalid line: " + text) + + # Incomplete exemplar. 
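+    # (A '#' was seen after the value, but no exemplar labels/value followed.)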
+ if state in ['exemplarhash', 'exemplarspace', 'exemplarstartoflabels']: + raise ValueError("Invalid line: " + text) + + if not value: + raise ValueError("Invalid line: " + text) + value = ''.join(value) + val = _parse_value(value) + ts = _parse_timestamp(timestamp) + exemplar = None + if exemplar_labels is not None: + exemplar_length = sum([len(k) + len(v) + 3 for k, v in exemplar_labels.items()]) + 2 + if exemplar_length > 64: + raise ValueError("Exmplar labels are too long: " + text) + exemplar = Exemplar( + exemplar_labels, + _parse_value(exemplar_value), + _parse_timestamp(exemplar_timestamp), + ) + + return Sample(''.join(name), labels, val, ts, exemplar) + + +def _group_for_sample(sample, name, typ): + if typ == 'info': + # We can't distinguish between groups for info metrics. + return {} + if typ == 'summary' and sample.name == name: + d = sample.labels.copy() + del d['quantile'] + return d + if typ == 'stateset': + d = sample.labels.copy() + del d[name] + return d + if typ in ['histogram', 'gaugehistogram'] and sample.name == name + '_bucket': + d = sample.labels.copy() + del d['le'] + return d + return sample.labels + + +def _check_histogram(samples, name): + group = None + timestamp = None + + def do_checks(): + if bucket != float('+Inf'): + raise ValueError("+Inf bucket missing: " + name) + if count is not None and value != count: + raise ValueError("Count does not match +Inf value: " + name) + + for s in samples: + suffix = s.name[len(name):] + g = _group_for_sample(s, name, 'histogram') + if g != group or s.timestamp != timestamp: + if group is not None: + do_checks() + count = None + bucket = -1 + value = 0 + group = g + timestamp = s.timestamp + + if suffix == '_bucket': + b = float(s.labels['le']) + if b <= bucket: + raise ValueError("Buckets out of order: " + name) + if s.value < value: + raise ValueError("Bucket values out of order: " + name) + bucket = b + value = s.value + elif suffix in ['_count', '_gcount']: + count = s.value + if group is not None: + do_checks() + + +def text_fd_to_metric_families(fd): + """Parse Prometheus text format from a file descriptor. + + This is a laxer parser than the main Go parser, + so successful parsing does not imply that the parsed + text meets the specification. + + Yields Metric's. 
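+
+    Example (a sketch; OpenMetrics input must end with '# EOF'):
+
+        import io
+        text = '# TYPE requests counter\nrequests_total 7.0\n# EOF\n'
+        for family in text_fd_to_metric_families(io.StringIO(text)):
+            print(family.name, family.samples)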
+ """ + name = None + allowed_names = [] + eof = False + + seen_metrics = set() + + def build_metric(name, documentation, typ, unit, samples): + if name in seen_metrics: + raise ValueError("Duplicate metric: " + name) + seen_metrics.add(name) + if typ is None: + typ = 'unknown' + if documentation is None: + documentation = '' + if unit is None: + unit = '' + if unit and not name.endswith("_" + unit): + raise ValueError("Unit does not match metric name: " + name) + if unit and typ in ['info', 'stateset']: + raise ValueError("Units not allowed for this metric type: " + name) + if typ in ['histogram', 'gaugehistogram']: + _check_histogram(samples, name) + metric = Metric(name, documentation, typ, unit) + # TODO: check labelvalues are valid utf8 + metric.samples = samples + return metric + + for line in fd: + if line[-1] == '\n': + line = line[:-1] + + if eof: + raise ValueError("Received line after # EOF: " + line) + + if line == '# EOF': + eof = True + elif line.startswith('#'): + parts = line.split(' ', 3) + if len(parts) < 4: + raise ValueError("Invalid line: " + line) + if parts[2] == name and samples: + raise ValueError("Received metadata after samples: " + line) + if parts[2] != name: + if name is not None: + yield build_metric(name, documentation, typ, unit, samples) + # New metric + name = parts[2] + unit = None + typ = None + documentation = None + group = None + seen_groups = set() + group_timestamp = None + group_timestamp_samples = set() + samples = [] + allowed_names = [parts[2]] + + if parts[1] == 'HELP': + if documentation is not None: + raise ValueError("More than one HELP for metric: " + line) + if len(parts) == 4: + documentation = _unescape_help(parts[3]) + elif len(parts) == 3: + raise ValueError("Invalid line: " + line) + elif parts[1] == 'TYPE': + if typ is not None: + raise ValueError("More than one TYPE for metric: " + line) + typ = parts[3] + if typ == 'untyped': + raise ValueError("Invalid TYPE for metric: " + line) + allowed_names = { + 'counter': ['_total', '_created'], + 'summary': ['_count', '_sum', '', '_created'], + 'histogram': ['_count', '_sum', '_bucket', '_created'], + 'gaugehistogram': ['_gcount', '_gsum', '_bucket'], + 'info': ['_info'], + }.get(typ, ['']) + allowed_names = [name + n for n in allowed_names] + elif parts[1] == 'UNIT': + if unit is not None: + raise ValueError("More than one UNIT for metric: " + line) + unit = parts[3] + else: + raise ValueError("Invalid line: " + line) + else: + sample = _parse_sample(line) + if sample.name not in allowed_names: + if name is not None: + yield build_metric(name, documentation, typ, unit, samples) + # Start an unknown metric. 
+ name = sample.name + documentation = None + unit = None + typ = 'unknown' + samples = [] + group = None + group_timestamp = None + group_timestamp_samples = set() + seen_groups = set() + allowed_names = [sample.name] + + if typ == 'stateset' and name not in sample.labels: + raise ValueError("Stateset missing label: " + line) + if (typ in ['histogram', 'gaugehistogram'] and name + '_bucket' == sample.name + and (float(sample.labels.get('le', -1)) < 0 + or sample.labels['le'] != floatToGoString(sample.labels['le']))): + raise ValueError("Invalid le label: " + line) + if (typ == 'summary' and name == sample.name + and (not (0 <= float(sample.labels.get('quantile', -1)) <= 1) + or sample.labels['quantile'] != floatToGoString(sample.labels['quantile']))): + raise ValueError("Invalid quantile label: " + line) + + g = tuple(sorted(_group_for_sample(sample, name, typ).items())) + if group is not None and g != group and g in seen_groups: + raise ValueError("Invalid metric grouping: " + line) + if group is not None and g == group: + if (sample.timestamp is None) != (group_timestamp is None): + raise ValueError("Mix of timestamp presence within a group: " + line) + if group_timestamp is not None and group_timestamp > sample.timestamp and typ != 'info': + raise ValueError("Timestamps went backwards within a group: " + line) + else: + group_timestamp_samples = set() + + series_id = (sample.name, tuple(sorted(sample.labels.items()))) + if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples: + # Not a duplicate due to timestamp truncation. + samples.append(sample) + group_timestamp_samples.add(series_id) + + group = g + group_timestamp = sample.timestamp + seen_groups.add(g) + + if typ == 'stateset' and sample.value not in [0, 1]: + raise ValueError("Stateset samples can only have values zero and one: " + line) + if typ == 'info' and sample.value != 1: + raise ValueError("Info samples can only have value one: " + line) + if typ == 'summary' and name == sample.name and sample.value < 0: + raise ValueError("Quantile values cannot be negative: " + line) + if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and math.isnan(sample.value): + raise ValueError("Counter-like samples cannot be NaN: " + line) + if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and sample.value < 0: + raise ValueError("Counter-like samples cannot be negative: " + line) + if sample.exemplar and not ( + typ in ['histogram', 'gaugehistogram'] + and sample.name.endswith('_bucket')): + raise ValueError("Invalid line only histogram/gaugehistogram buckets can have exemplars: " + line) + + if name is not None: + yield build_metric(name, documentation, typ, unit, samples) + + if not eof: + raise ValueError("Missing # EOF at end") diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/parser.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/parser.py new file mode 100644 index 00000000..cdf0f2ce --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/parser.py @@ -0,0 +1,233 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +import re + +from .metrics_core import Metric +from .samples import Sample + +try: + import StringIO +except ImportError: + # Python 3 + import io as StringIO + + + +def text_string_to_metric_families(text): + """Parse Prometheus text format from a unicode string. 
+ + See text_fd_to_metric_families. + """ + for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)): + yield metric_family + + +ESCAPE_SEQUENCES = { + '\\\\': '\\', + '\\n': '\n', + '\\"': '"', +} + + +def replace_escape_sequence(match): + return ESCAPE_SEQUENCES[match.group(0)] + + +HELP_ESCAPING_RE = re.compile(r'\\[\\n]') +ESCAPING_RE = re.compile(r'\\[\\n"]') + + +def _replace_help_escaping(s): + return HELP_ESCAPING_RE.sub(replace_escape_sequence, s) + + +def _replace_escaping(s): + return ESCAPING_RE.sub(replace_escape_sequence, s) + + +def _is_character_escaped(s, charpos): + num_bslashes = 0 + while (charpos > num_bslashes and + s[charpos - 1 - num_bslashes] == '\\'): + num_bslashes += 1 + return num_bslashes % 2 == 1 + + +def _parse_labels(labels_string): + labels = {} + # Return if we don't have valid labels + if "=" not in labels_string: + return labels + + escaping = False + if "\\" in labels_string: + escaping = True + + # Copy original labels + sub_labels = labels_string + try: + # Process one label at a time + while sub_labels: + # The label name is before the equal + value_start = sub_labels.index("=") + label_name = sub_labels[:value_start] + sub_labels = sub_labels[value_start + 1:].lstrip() + # Find the first quote after the equal + quote_start = sub_labels.index('"') + 1 + value_substr = sub_labels[quote_start:] + + # Find the last unescaped quote + i = 0 + while i < len(value_substr): + i = value_substr.index('"', i) + if not _is_character_escaped(value_substr, i): + break + i += 1 + + # The label value is inbetween the first and last quote + quote_end = i + 1 + label_value = sub_labels[quote_start:quote_end] + # Replace escaping if needed + if escaping: + label_value = _replace_escaping(label_value) + labels[label_name.strip()] = label_value + + # Remove the processed label from the sub-slice for next iteration + sub_labels = sub_labels[quote_end + 1:] + next_comma = sub_labels.find(",") + 1 + sub_labels = sub_labels[next_comma:].lstrip() + + return labels + + except ValueError: + raise ValueError("Invalid labels: %s" % labels_string) + + +# If we have multiple values only consider the first +def _parse_value(s): + s = s.lstrip() + separator = " " + if separator not in s: + separator = "\t" + i = s.find(separator) + if i == -1: + return s + return s[:i] + + +def _parse_sample(text): + # Detect the labels in the text + try: + label_start, label_end = text.index("{"), text.rindex("}") + # The name is before the labels + name = text[:label_start].strip() + # We ignore the starting curly brace + label = text[label_start + 1:label_end] + # The value is after the label end (ignoring curly brace and space) + value = float(_parse_value(text[label_end + 2:])) + return Sample(name, _parse_labels(label), value) + + # We don't have labels + except ValueError: + # Detect what separator is used + separator = " " + if separator not in text: + separator = "\t" + name_end = text.index(separator) + name = text[:name_end] + # The value is after the name + value = float(_parse_value(text[name_end:])) + return Sample(name, {}, value) + + +def text_fd_to_metric_families(fd): + """Parse Prometheus text format from a file descriptor. + + This is a laxer parser than the main Go parser, + so successful parsing does not imply that the parsed + text meets the specification. + + Yields Metric's. 
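+
+    Example (a sketch; plain Prometheus text format):
+
+        import io
+        text = '# TYPE requests_total counter\nrequests_total 7.0\n'
+        for family in text_fd_to_metric_families(io.StringIO(text)):
+            print(family.name, family.samples)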
+ """ + name = '' + documentation = '' + typ = 'untyped' + samples = [] + allowed_names = [] + + def build_metric(name, documentation, typ, samples): + # Munge counters into OpenMetrics representation + # used internally. + if typ == 'counter': + if name.endswith('_total'): + name = name[:-6] + else: + new_samples = [] + for s in samples: + new_samples.append(Sample(s[0] + '_total', *s[1:])) + samples = new_samples + metric = Metric(name, documentation, typ) + metric.samples = samples + return metric + + for line in fd: + line = line.strip() + + if line.startswith('#'): + parts = line.split(None, 3) + if len(parts) < 2: + continue + if parts[1] == 'HELP': + if parts[2] != name: + if name != '': + yield build_metric(name, documentation, typ, samples) + # New metric + name = parts[2] + typ = 'untyped' + samples = [] + allowed_names = [parts[2]] + if len(parts) == 4: + documentation = _replace_help_escaping(parts[3]) + else: + documentation = '' + elif parts[1] == 'TYPE': + if parts[2] != name: + if name != '': + yield build_metric(name, documentation, typ, samples) + # New metric + name = parts[2] + documentation = '' + samples = [] + typ = parts[3] + allowed_names = { + 'counter': [''], + 'gauge': [''], + 'summary': ['_count', '_sum', ''], + 'histogram': ['_count', '_sum', '_bucket'], + }.get(typ, ['']) + allowed_names = [name + n for n in allowed_names] + else: + # Ignore other comment tokens + pass + elif line == '': + # Ignore blank lines + pass + else: + sample = _parse_sample(line) + if sample.name not in allowed_names: + if name != '': + yield build_metric(name, documentation, typ, samples) + # New metric, yield immediately as untyped singleton + name = '' + documentation = '' + typ = 'untyped' + samples = [] + allowed_names = [] + yield build_metric(sample[0], documentation, typ, [sample]) + else: + samples.append(sample) + + if name != '': + yield build_metric(name, documentation, typ, samples) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/platform_collector.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/platform_collector.py new file mode 100644 index 00000000..5097fcdb --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/platform_collector.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# -*- coding: utf-8 +from __future__ import unicode_literals + +import platform as pf + +from .metrics_core import GaugeMetricFamily +from .registry import REGISTRY + + +class PlatformCollector(object): + """Collector for python platform information""" + + def __init__(self, registry=REGISTRY, platform=None): + self._platform = pf if platform is None else platform + info = self._info() + system = self._platform.system() + if system == "Java": + info.update(self._java()) + self._metrics = [ + self._add_metric("python_info", "Python platform information", info) + ] + if registry: + registry.register(self) + + def collect(self): + return self._metrics + + @staticmethod + def _add_metric(name, documentation, data): + labels = data.keys() + values = [data[k] for k in labels] + g = GaugeMetricFamily(name, documentation, labels=labels) + g.add_metric(values, 1) + return g + + def _info(self): + major, minor, patchlevel = self._platform.python_version_tuple() + return { + "version": self._platform.python_version(), + "implementation": self._platform.python_implementation(), + "major": major, + "minor": minor, + "patchlevel": patchlevel + } + + def _java(self): + 
java_version, _, vminfo, osinfo = self._platform.java_ver() + vm_name, vm_release, vm_vendor = vminfo + return { + "jvm_version": java_version, + "jvm_release": vm_release, + "jvm_vendor": vm_vendor, + "jvm_name": vm_name + } + + +PLATFORM_COLLECTOR = PlatformCollector() +"""PlatformCollector in default Registry REGISTRY""" diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/process_collector.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/process_collector.py new file mode 100644 index 00000000..414226a2 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/process_collector.py @@ -0,0 +1,97 @@ +#!/usr/bin/python + +from __future__ import unicode_literals + +import os + +from .metrics_core import CounterMetricFamily, GaugeMetricFamily +from .registry import REGISTRY + +try: + import resource + _PAGESIZE = resource.getpagesize() +except ImportError: + # Not Unix + _PAGESIZE = 4096 + + +class ProcessCollector(object): + """Collector for Standard Exports such as cpu and memory.""" + + def __init__(self, namespace='', pid=lambda: 'self', proc='/proc', registry=REGISTRY): + self._namespace = namespace + self._pid = pid + self._proc = proc + if namespace: + self._prefix = namespace + '_process_' + else: + self._prefix = 'process_' + self._ticks = 100.0 + try: + self._ticks = os.sysconf('SC_CLK_TCK') + except (ValueError, TypeError, AttributeError): + pass + + # This is used to test if we can access /proc. + self._btime = 0 + try: + self._btime = self._boot_time() + except IOError: + pass + if registry: + registry.register(self) + + def _boot_time(self): + with open(os.path.join(self._proc, 'stat'), 'rb') as stat: + for line in stat: + if line.startswith(b'btime '): + return float(line.split()[1]) + + def collect(self): + if not self._btime: + return [] + + pid = os.path.join(self._proc, str(self._pid()).strip()) + + result = [] + try: + with open(os.path.join(pid, 'stat'), 'rb') as stat: + parts = (stat.read().split(b')')[-1].split()) + + vmem = GaugeMetricFamily(self._prefix + 'virtual_memory_bytes', + 'Virtual memory size in bytes.', value=float(parts[20])) + rss = GaugeMetricFamily(self._prefix + 'resident_memory_bytes', 'Resident memory size in bytes.', + value=float(parts[21]) * _PAGESIZE) + start_time_secs = float(parts[19]) / self._ticks + start_time = GaugeMetricFamily(self._prefix + 'start_time_seconds', + 'Start time of the process since unix epoch in seconds.', + value=start_time_secs + self._btime) + utime = float(parts[11]) / self._ticks + stime = float(parts[12]) / self._ticks + cpu = CounterMetricFamily(self._prefix + 'cpu_seconds_total', + 'Total user and system CPU time spent in seconds.', + value=utime + stime) + result.extend([vmem, rss, start_time, cpu]) + except IOError: + pass + + try: + with open(os.path.join(pid, 'limits'), 'rb') as limits: + for line in limits: + if line.startswith(b'Max open file'): + max_fds = GaugeMetricFamily(self._prefix + 'max_fds', + 'Maximum number of open file descriptors.', + value=float(line.split()[3])) + break + open_fds = GaugeMetricFamily(self._prefix + 'open_fds', + 'Number of open file descriptors.', + len(os.listdir(os.path.join(pid, 'fd')))) + result.extend([open_fds, max_fds]) + except (IOError, OSError): + pass + + return result + + +PROCESS_COLLECTOR = ProcessCollector() +"""Default ProcessCollector in default Registry REGISTRY.""" diff --git 
a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/registry.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/registry.py new file mode 100644 index 00000000..873853ed --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/registry.py @@ -0,0 +1,123 @@ +import copy +from threading import Lock + +from .metrics_core import Metric + + +class CollectorRegistry(object): + '''Metric collector registry. + + Collectors must have a no-argument method 'collect' that returns a list of + Metric objects. The returned metrics should be consistent with the Prometheus + exposition formats. + ''' + + def __init__(self, auto_describe=False): + self._collector_to_names = {} + self._names_to_collectors = {} + self._auto_describe = auto_describe + self._lock = Lock() + + def register(self, collector): + '''Add a collector to the registry.''' + with self._lock: + names = self._get_names(collector) + duplicates = set(self._names_to_collectors).intersection(names) + if duplicates: + raise ValueError( + 'Duplicated timeseries in CollectorRegistry: {0}'.format( + duplicates)) + for name in names: + self._names_to_collectors[name] = collector + self._collector_to_names[collector] = names + + def unregister(self, collector): + '''Remove a collector from the registry.''' + with self._lock: + for name in self._collector_to_names[collector]: + del self._names_to_collectors[name] + del self._collector_to_names[collector] + + def _get_names(self, collector): + '''Get names of timeseries the collector produces.''' + desc_func = None + # If there's a describe function, use it. + try: + desc_func = collector.describe + except AttributeError: + pass + # Otherwise, if auto describe is enabled use the collect function. + if not desc_func and self._auto_describe: + desc_func = collector.collect + + if not desc_func: + return [] + + result = [] + type_suffixes = { + 'counter': ['_total', '_created'], + 'summary': ['', '_sum', '_count', '_created'], + 'histogram': ['_bucket', '_sum', '_count', '_created'], + 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], + 'info': ['_info'], + } + for metric in desc_func(): + for suffix in type_suffixes.get(metric.type, ['']): + result.append(metric.name + suffix) + return result + + def collect(self): + '''Yields metrics from the collectors in the registry.''' + collectors = None + with self._lock: + collectors = copy.copy(self._collector_to_names) + for collector in collectors: + for metric in collector.collect(): + yield metric + + def restricted_registry(self, names): + '''Returns object that only collects some metrics. + + Returns an object which upon collect() will return + only samples with the given names. + + Intended usage is: + generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) + + Experimental.''' + names = set(names) + collectors = set() + with self._lock: + for name in names: + if name in self._names_to_collectors: + collectors.add(self._names_to_collectors[name]) + metrics = [] + for collector in collectors: + for metric in collector.collect(): + samples = [s for s in metric.samples if s[0] in names] + if samples: + m = Metric(metric.name, metric.documentation, metric.type) + m.samples = samples + metrics.append(m) + + class RestrictedRegistry(object): + def collect(self): + return metrics + + return RestrictedRegistry() + + def get_sample_value(self, name, labels=None): + '''Returns the sample value, or None if not found. 
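+
+        Example (names are illustrative):
+
+            REGISTRY.get_sample_value('my_counter_total')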
+ + This is inefficient, and intended only for use in unittests. + ''' + if labels is None: + labels = {} + for metric in self.collect(): + for s in metric.samples: + if s.name == name and s.labels == labels: + return s.value + return None + + +REGISTRY = CollectorRegistry(auto_describe=True) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/samples.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/samples.py new file mode 100644 index 00000000..86ac2270 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/samples.py @@ -0,0 +1,43 @@ +from collections import namedtuple + + +class Timestamp(object): + '''A nanosecond-resolution timestamp.''' + + def __init__(self, sec, nsec): + if nsec < 0 or nsec >= 1e9: + raise ValueError("Invalid value for nanoseconds in Timestamp: {0}".format(nsec)) + if sec < 0: + nsec = -nsec + self.sec = int(sec) + self.nsec = int(nsec) + + def __str__(self): + return "{0}.{1:09d}".format(self.sec, self.nsec) + + def __repr__(self): + return "Timestamp({0}, {1})".format(self.sec, self.nsec) + + def __float__(self): + return float(self.sec) + float(self.nsec) / 1e9 + + def __eq__(self, other): + return type(self) == type(other) and self.sec == other.sec and self.nsec == other.nsec + + def __ne__(self, other): + return not self == other + + def __gt__(self, other): + return self.sec > other.sec or self.nsec > other.nsec + + +# Timestamp and exemplar are optional. +# Value can be an int or a float. +# Timestamp can be a float containing a unixtime in seconds, +# a Timestamp object, or None. +# Exemplar can be an Exemplar object, or None. +Sample = namedtuple('Sample', ['name', 'labels', 'value', 'timestamp', 'exemplar']) +Sample.__new__.__defaults__ = (None, None) + +Exemplar = namedtuple('Exemplar', ['labels', 'value', 'timestamp']) +Exemplar.__new__.__defaults__ = (None,) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/__init__.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/__init__.py new file mode 100644 index 00000000..87e0b8a6 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/__init__.py @@ -0,0 +1,3 @@ +from ._exposition import MetricsResource + +__all__ = ['MetricsResource'] diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/_exposition.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/_exposition.py new file mode 100644 index 00000000..af3d0a6c --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/twisted/_exposition.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, unicode_literals + +from twisted.web.resource import Resource + +from .. import exposition, REGISTRY + + +class MetricsResource(Resource): + """ + Twisted ``Resource`` that serves prometheus metrics. 
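+
+    A wiring sketch (standard Twisted usage; names are illustrative):
+
+        from twisted.web.server import Site
+        from twisted.web.resource import Resource
+
+        root = Resource()
+        root.putChild(b'metrics', MetricsResource())
+        site = Site(root)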
+ """ + isLeaf = True + + def __init__(self, registry=REGISTRY): + self.registry = registry + + def render_GET(self, request): + encoder, content_type = exposition.choose_encoder(request.getHeader('Accept')) + request.setHeader(b'Content-Type', content_type.encode('ascii')) + return encoder(self.registry) diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/utils.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/utils.py new file mode 100644 index 00000000..bd894a1b --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/utils.py @@ -0,0 +1,23 @@ +import math + +INF = float("inf") +MINUS_INF = float("-inf") + + +def floatToGoString(d): + d = float(d) + if d == INF: + return '+Inf' + elif d == MINUS_INF: + return '-Inf' + elif math.isnan(d): + return 'NaN' + else: + s = repr(d) + dot = s.find('.') + # Go switches to exponents sooner than Python. + # We only need to care about positive values for le/quantile. + if d > 0 and dot > 6: + mantissa = '{0}.{1}{2}'.format(s[0], s[1:dot], s[dot+1:]).rstrip('0.') + return '{0}e+0{1}'.format(mantissa, dot-1) + return s diff --git a/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/values.py b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/values.py new file mode 100644 index 00000000..c9eed177 --- /dev/null +++ b/test/workspaces/pip-app/packages/prometheus_client-0.6.0/build/lib/prometheus_client/values.py @@ -0,0 +1,110 @@ +from __future__ import unicode_literals + +import os +from threading import Lock + +from .mmap_dict import mmap_key, MmapedDict + + +class MutexValue(object): + '''A float protected by a mutex.''' + + _multiprocess = False + + def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs): + self._value = 0.0 + self._lock = Lock() + + def inc(self, amount): + with self._lock: + self._value += amount + + def set(self, value): + with self._lock: + self._value = value + + def get(self): + with self._lock: + return self._value + + +def MultiProcessValue(_pidFunc=os.getpid): + files = {} + values = [] + pid = {'value': _pidFunc()} + # Use a single global lock when in multi-processing mode + # as we presume this means there is no threading going on. + # This avoids the need to also have mutexes in __MmapDict. 
+ lock = Lock() + + class MmapedValue(object): + '''A float protected by a mutex backed by a per-process mmaped file.''' + + _multiprocess = True + + def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs): + self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode + with lock: + self.__check_for_pid_change() + self.__reset() + values.append(self) + + def __reset(self): + typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params + if typ == 'gauge': + file_prefix = typ + '_' + multiprocess_mode + else: + file_prefix = typ + if file_prefix not in files: + filename = os.path.join( + os.environ['prometheus_multiproc_dir'], + '{0}_{1}.db'.format(file_prefix, pid['value'])) + + files[file_prefix] = MmapedDict(filename) + self._file = files[file_prefix] + self._key = mmap_key(metric_name, name, labelnames, labelvalues) + self._value = self._file.read_value(self._key) + + def __check_for_pid_change(self): + actual_pid = _pidFunc() + if pid['value'] != actual_pid: + pid['value'] = actual_pid + # There has been a fork(), reset all the values. + for f in files.values(): + f.close() + files.clear() + for value in values: + value.__reset() + + def inc(self, amount): + with lock: + self.__check_for_pid_change() + self._value += amount + self._file.write_value(self._key, self._value) + + def set(self, value): + with lock: + self.__check_for_pid_change() + self._value = value + self._file.write_value(self._key, self._value) + + def get(self): + with lock: + self.__check_for_pid_change() + return self._value + + return MmapedValue + + +def get_value_class(): + # Should we enable multi-process mode? + # This needs to be chosen before the first metric is constructed, + # and as that may be in some arbitrary library the user/admin has + # no control over we use an environment variable. + if 'prometheus_multiproc_dir' in os.environ: + return MultiProcessValue() + else: + return MutexValue + + +ValueClass = get_value_class() diff --git a/test/workspaces/poetry-app-optional-dependencies/poetry.lock b/test/workspaces/poetry-app-optional-dependencies/poetry.lock new file mode 100644 index 00000000..4dece78e --- /dev/null +++ b/test/workspaces/poetry-app-optional-dependencies/poetry.lock @@ -0,0 +1,598 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "certifi" +version = "2024.2.2" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "googleapis-common-protos" +version = "1.62.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.62.0.tar.gz", hash = "sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277"}, + {file = "googleapis_common_protos-1.62.0-py2.py3-none-any.whl", hash = "sha256:4750113612205514f9f6aa4cb00d523a94f3e8c06c5ad2fee466387dc4875f07"}, +] + +[package.dependencies] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "grpcio" +version = "1.62.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:136ffd79791b1eddda8d827b607a6285474ff8a1a5735c4947b58c481e5e4271"}, + {file = "grpcio-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d6a56ba703be6b6267bf19423d888600c3f574ac7c2cc5e6220af90662a4d6b0"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4cd356211579043fce9f52acc861e519316fff93980a212c8109cca8f47366b6"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e803e9b58d8f9b4ff0ea991611a8d51b31c68d2e24572cd1fe85e99e8cc1b4f8"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4c04fe33039b35b97c02d2901a164bbbb2f21fb9c4e2a45a959f0b044c3512c"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:95370c71b8c9062f9ea033a0867c4c73d6f0ff35113ebd2618171ec1f1e903e0"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c912688acc05e4ff012c8891803659d6a8a8b5106f0f66e0aed3fb7e77898fa6"}, + {file = "grpcio-1.62.0-cp310-cp310-win32.whl", hash = "sha256:821a44bd63d0f04e33cf4ddf33c14cae176346486b0df08b41a6132b976de5fc"}, + {file = 
"grpcio-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:81531632f93fece32b2762247c4c169021177e58e725494f9a746ca62c83acaa"}, + {file = "grpcio-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3fa15850a6aba230eed06b236287c50d65a98f05054a0f01ccedf8e1cc89d57f"}, + {file = "grpcio-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:36df33080cd7897623feff57831eb83c98b84640b016ce443305977fac7566fb"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7a195531828b46ea9c4623c47e1dc45650fc7206f8a71825898dd4c9004b0928"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab140a3542bbcea37162bdfc12ce0d47a3cda3f2d91b752a124cc9fe6776a9e2"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f9d6c3223914abb51ac564dc9c3782d23ca445d2864321b9059d62d47144021"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fbe0c20ce9a1cff75cfb828b21f08d0a1ca527b67f2443174af6626798a754a4"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38f69de9c28c1e7a8fd24e4af4264726637b72f27c2099eaea6e513e7142b47e"}, + {file = "grpcio-1.62.0-cp311-cp311-win32.whl", hash = "sha256:ce1aafdf8d3f58cb67664f42a617af0e34555fe955450d42c19e4a6ad41c84bd"}, + {file = "grpcio-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:eef1d16ac26c5325e7d39f5452ea98d6988c700c427c52cbc7ce3201e6d93334"}, + {file = "grpcio-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8aab8f90b2a41208c0a071ec39a6e5dbba16fd827455aaa070fec241624ccef8"}, + {file = "grpcio-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:62aa1659d8b6aad7329ede5d5b077e3d71bf488d85795db517118c390358d5f6"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0d7ae7fc7dbbf2d78d6323641ded767d9ec6d121aaf931ec4a5c50797b886532"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f359d635ee9428f0294bea062bb60c478a8ddc44b0b6f8e1f42997e5dc12e2ee"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d48e5b1f8f4204889f1acf30bb57c30378e17c8d20df5acbe8029e985f735c"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:662d3df5314ecde3184cf87ddd2c3a66095b3acbb2d57a8cada571747af03873"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92cdb616be44c8ac23a57cce0243af0137a10aa82234f23cd46e69e115071388"}, + {file = "grpcio-1.62.0-cp312-cp312-win32.whl", hash = "sha256:0b9179478b09ee22f4a36b40ca87ad43376acdccc816ce7c2193a9061bf35701"}, + {file = "grpcio-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:614c3ed234208e76991992342bab725f379cc81c7dd5035ee1de2f7e3f7a9842"}, + {file = "grpcio-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:7e1f51e2a460b7394670fdb615e26d31d3260015154ea4f1501a45047abe06c9"}, + {file = "grpcio-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:bcff647e7fe25495e7719f779cc219bbb90b9e79fbd1ce5bda6aae2567f469f2"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:56ca7ba0b51ed0de1646f1735154143dcbdf9ec2dbe8cc6645def299bb527ca1"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e84bfb2a734e4a234b116be208d6f0214e68dcf7804306f97962f93c22a1839"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2c1488b31a521fbba50ae86423f5306668d6f3a46d124f7819c603979fc538c4"}, + {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98d8f4eb91f1ce0735bf0b67c3b2a4fea68b52b2fd13dc4318583181f9219b4b"}, + {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b3d3d755cfa331d6090e13aac276d4a3fb828bf935449dc16c3d554bf366136b"}, + {file = "grpcio-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a33f2bfd8a58a02aab93f94f6c61279be0f48f99fcca20ebaee67576cd57307b"}, + {file = "grpcio-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:5e709f7c8028ce0443bddc290fb9c967c1e0e9159ef7a030e8c21cac1feabd35"}, + {file = "grpcio-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2f3d9a4d0abb57e5f49ed5039d3ed375826c2635751ab89dcc25932ff683bbb6"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:62ccb92f594d3d9fcd00064b149a0187c246b11e46ff1b7935191f169227f04c"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:921148f57c2e4b076af59a815467d399b7447f6e0ee10ef6d2601eb1e9c7f402"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f897b16190b46bc4d4aaf0a32a4b819d559a37a756d7c6b571e9562c360eed72"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bc8449084fe395575ed24809752e1dc4592bb70900a03ca42bf236ed5bf008f"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81d444e5e182be4c7856cd33a610154fe9ea1726bd071d07e7ba13fafd202e38"}, + {file = "grpcio-1.62.0-cp38-cp38-win32.whl", hash = "sha256:88f41f33da3840b4a9bbec68079096d4caf629e2c6ed3a72112159d570d98ebe"}, + {file = "grpcio-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc2836cb829895ee190813446dce63df67e6ed7b9bf76060262c55fcd097d270"}, + {file = "grpcio-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fcc98cff4084467839d0a20d16abc2a76005f3d1b38062464d088c07f500d170"}, + {file = "grpcio-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:0d3dee701e48ee76b7d6fbbba18ba8bc142e5b231ef7d3d97065204702224e0e"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b7a6be562dd18e5d5bec146ae9537f20ae1253beb971c0164f1e8a2f5a27e829"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29cb592c4ce64a023712875368bcae13938c7f03e99f080407e20ffe0a9aa33b"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eda79574aec8ec4d00768dcb07daba60ed08ef32583b62b90bbf274b3c279f7"}, + {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7eea57444a354ee217fda23f4b479a4cdfea35fb918ca0d8a0e73c271e52c09c"}, + {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0e97f37a3b7c89f9125b92d22e9c8323f4e76e7993ba7049b9f4ccbe8bae958a"}, + {file = "grpcio-1.62.0-cp39-cp39-win32.whl", hash = "sha256:39cd45bd82a2e510e591ca2ddbe22352e8413378852ae814549c162cf3992a93"}, + {file = "grpcio-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:b71c65427bf0ec6a8b48c68c17356cb9fbfc96b1130d20a07cb462f4e4dcdcd5"}, + {file = "grpcio-1.62.0.tar.gz", hash = "sha256:748496af9238ac78dcd98cce65421f1adce28c3979393e3609683fcd7f3880d7"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.62.0)"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = 
"sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.11.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, + {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "opentelemetry-api" +version = "1.22.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_api-1.22.0-py3-none-any.whl", hash = "sha256:43621514301a7e9f5d06dd8013a1b450f30c2e9372b8e30aaeb4562abf2ce034"}, + {file = "opentelemetry_api-1.22.0.tar.gz", hash = "sha256:15ae4ca925ecf9cfdfb7a709250846fbb08072260fca08ade78056c502b86bed"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<7.0" + +[[package]] +name = "opentelemetry-distro" +version = "0.35b0" +description = "OpenTelemetry Python Distro" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_distro-0.35b0-py3-none-any.whl", hash = "sha256:838bc8271851b8f6d4e8af206c67a95ad3bfe12d53608f5de1d115e6b973710b"}, + {file = "opentelemetry_distro-0.35b0.tar.gz", hash = "sha256:367311bc48f1031fbeacb86acf4ff44c02bf7f3950d2f62f482ac24a9ba80865"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-exporter-otlp = {version = "1.14.0", optional = true, markers = "extra == \"otlp\""} +opentelemetry-instrumentation = "0.35b0" +opentelemetry-sdk = ">=1.13,<2.0" + +[package.extras] +otlp = ["opentelemetry-exporter-otlp (==1.14.0)"] + +[[package]] +name = "opentelemetry-exporter-otlp" +version = "1.14.0" +description = "OpenTelemetry Collector Exporters" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp-1.14.0-py3-none-any.whl", hash = "sha256:f33c68dee618b3d3e2cbe493e8a1a13a993a693faa8cf73ce387d275626802a7"}, + {file = "opentelemetry_exporter_otlp-1.14.0.tar.gz", hash = "sha256:54d215175fec6d93a4d77c9e4604f91d638863d3cddcf2cf5022bfef352a1b64"}, +] + +[package.dependencies] +opentelemetry-exporter-otlp-proto-grpc = "1.14.0" +opentelemetry-exporter-otlp-proto-http = "1.14.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.14.0" +description = "OpenTelemetry Collector Protobuf over gRPC Exporter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_grpc-1.14.0-py3-none-any.whl", hash = "sha256:cf8f59c3d243f6937c9d5101ed538bdac619d76fc7d4f4919896c3816b30fc88"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.14.0.tar.gz", hash = "sha256:00b8317d872d02b4b1ff45f22888cdbe91d532786d086e191baf185afc4c1ae0"}, +] + +[package.dependencies] +backoff = {version = 
">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} +googleapis-common-protos = ">=1.52,<2.0" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-proto = "1.14.0" +opentelemetry-sdk = ">=1.12,<2.0" + +[package.extras] +test = ["pytest-grpc"] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.14.0" +description = "OpenTelemetry Collector Protobuf over HTTP Exporter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_http-1.14.0-py3-none-any.whl", hash = "sha256:b65e0fcae9daef7b3e9233fecb70ef871ab645f6d80fba1dad1d02f911746347"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.14.0.tar.gz", hash = "sha256:46d3c55586f7c2983b2b3f1b1809d229ea68569e5b78ca89de7cfee4854374fe"}, +] + +[package.dependencies] +backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} +googleapis-common-protos = ">=1.52,<2.0" +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-proto = "1.14.0" +opentelemetry-sdk = ">=1.12,<2.0" +requests = ">=2.7,<3.0" + +[package.extras] +test = ["responses (==0.22.0)"] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.35b0" +description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_instrumentation-0.35b0-py3-none-any.whl", hash = "sha256:0ca91ef51c6748e91892cd1322b1fceede77d1300dc718a5ec4c3deee5649beb"}, + {file = "opentelemetry_instrumentation-0.35b0.tar.gz", hash = "sha256:a7cb996b37920911db7534dc739ab3fe18e4f769431481bec01d6538a11cadd9"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.4,<2.0" +setuptools = ">=16.0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-proto" +version = "1.14.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_proto-1.14.0-py3-none-any.whl", hash = "sha256:8f2c4f39ce625fd349af572bbf7a8f801b4ea0a55a8ba3a32b76d912489b9f27"}, + {file = "opentelemetry_proto-1.14.0.tar.gz", hash = "sha256:1e3b379fef66e3ed46d5d67f0b61cca9db789e69d3a434fdf62d65fed4e091d5"}, +] + +[package.dependencies] +protobuf = ">=3.13,<4.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.22.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_sdk-1.22.0-py3-none-any.whl", hash = "sha256:a730555713d7c8931657612a88a141e3a4fe6eb5523d9e2d5a8b1e673d76efa6"}, + {file = "opentelemetry_sdk-1.22.0.tar.gz", hash = "sha256:45267ac1f38a431fc2eb5d6e0c0d83afc0b78de57ac345488aa58c28c17991d0"}, +] + +[package.dependencies] +opentelemetry-api = "1.22.0" +opentelemetry-semantic-conventions = "0.43b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.43b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_semantic_conventions-0.43b0-py3-none-any.whl", hash = "sha256:291284d7c1bf15fdaddf309b3bd6d3b7ce12a253cec6d27144439819a15d8445"}, + {file = "opentelemetry_semantic_conventions-0.43b0.tar.gz", hash = "sha256:b9576fb890df479626fa624e88dde42d3d60b8b6c8ae1152ad157a8b97358635"}, +] + +[[package]] +name = "protobuf" +version = "3.20.3" +description = "Protocol Buffers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = 
"sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, + {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, + {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, + {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, + {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, + {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, + {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, + {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, + {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, + {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, + {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, + {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, + {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, + {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, + {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "setuptools" +version = "69.1.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.1.0-py3-none-any.whl", hash = "sha256:c054629b81b946d63a9c6e732bc8b2513a7c3ea645f11d0139a2191d735c60c6"}, + {file = "setuptools-69.1.0.tar.gz", hash = "sha256:850894c4195f09c4ed30dba56213bf7c3f21d86ed6bdaafb5df5972593bfc401"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "typing-extensions" +version = "4.9.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, +] + +[[package]] +name = "urllib3" +version = "2.2.1" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "zipp" +version = "3.17.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, + {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[metadata] +lock-version = "2.0" +python-versions = "3.12.1" +content-hash = "401ae2d9f83aa09d5d4fa03463847330343b66a3d4f673271fd29f8851fb0454" diff --git a/test/workspaces/poetry-app-optional-dependencies/pyproject.toml b/test/workspaces/poetry-app-optional-dependencies/pyproject.toml new file mode 100644 index 00000000..ee49895d --- /dev/null +++ b/test/workspaces/poetry-app-optional-dependencies/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "extras-poetry" +version = "0.1.0" +description = "" +authors = ["MarcusArdelean "] +readme = "README.md" + +[tool.poetry.dependencies] +python = "3.12.1" +opentelemetry-distro = {version = "0.35b0", extras = ["otlp"]} + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api"
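Note on what this fixture exercises: `pyproject.toml` declares `opentelemetry-distro` with `extras = ["otlp"]`, and the matching `poetry.lock` entry lists `opentelemetry-exporter-otlp` as `optional = true` with the marker `extra == "otlp"`, so the exporter (and its transitive gRPC/HTTP deps) must be resolved only because the extra was requested. Below is a minimal sketch, not the plugin's actual implementation, of the activation rule a resolver could apply to such a lock entry; the `LockDependency` shape and the marker-parsing regex are assumptions for illustration only.

```typescript
// Sketch: an optional lockfile dependency is active only when the extra
// named in its marker was requested in pyproject.toml.
interface LockDependency {
  name: string;
  optional: boolean;
  markers?: string; // e.g. 'extra == "otlp"' (assumed simplified marker form)
}

function isActive(dep: LockDependency, requestedExtras: string[]): boolean {
  if (!dep.optional) {
    return true; // non-optional dependencies are always resolved
  }
  // Extract the extra name from the marker; real markers can be richer
  // boolean expressions, so this regex is a deliberate simplification.
  const match = dep.markers?.match(/extra == "([^"]+)"/);
  return !!match && requestedExtras.includes(match[1]);
}

// opentelemetry-distro is declared with extras = ["otlp"], so the optional
// opentelemetry-exporter-otlp dependency from its lock entry is kept.
console.log(
  isActive(
    { name: 'opentelemetry-exporter-otlp', optional: true, markers: 'extra == "otlp"' },
    ['otlp'],
  ),
); // true

// Without the extra requested, the same dependency would be pruned.
console.log(
  isActive(
    { name: 'opentelemetry-exporter-otlp', optional: true, markers: 'extra == "otlp"' },
    [],
  ),
); // false
```

Under that rule, the new `poetry-app-optional-dependencies` test expects `opentelemetry-distro` to appear as a direct dependency with its otlp-gated subtree resolved, which is what the lock file above encodes.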